Index: head/sys/alpha/alpha/busdma_machdep.c =================================================================== --- head/sys/alpha/alpha/busdma_machdep.c (revision 110231) +++ head/sys/alpha/alpha/busdma_machdep.c (revision 110232) @@ -1,945 +1,943 @@ /* * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#define MAX(a,b) (((a) > (b)) ? (a) : (b)) -#define MIN(a,b) (((a) < (b)) ? 
(a) : (b)) #define MAX_BPAGES 128 struct bus_dma_tag { bus_dma_tag_t parent; bus_size_t alignment; bus_size_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; int ref_count; int map_count; }; struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; int busdma_swi_pending; static struct mtx bounce_lock; static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; static int free_bpages; static int reserved_bpages; static int active_bpages; static int total_bpages; static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; struct bus_dmamap { struct bp_list bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; void *buf; /* unmapped buffer pointer */ bus_size_t buflen; /* unmapped buffer length */ vm_offset_t busaddress; /* address in bus space */ bus_dmamap_callback_t *callback; void *callback_arg; void *sgmaphandle; /* handle into sgmap */ STAILQ_ENTRY(bus_dmamap) links; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; static struct bus_dmamap nobounce_dmamap; static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map); static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) { int retval; retval = 0; do { if (paddr > dmat->lowaddr && paddr <= dmat->highaddr && (dmat->filter == NULL || (*dmat->filter)(dmat->filterarg, paddr) != 0)) retval = 1; dmat = dmat->parent; } while (retval == 0 && dmat != NULL); return (retval); } #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 /* * Allocate a device specific dma_tag. */ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error = 0; /* Return a NULL tag on failure */ *dmat = NULL; newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); if (newtag == NULL) return (ENOMEM); newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1); newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1); newtag->filter = filter; newtag->filterarg = filterarg; newtag->maxsize = maxsize; newtag->nsegments = nsegments; newtag->maxsegsz = maxsegsz; newtag->flags = flags; newtag->ref_count = 1; /* Count ourself */ newtag->map_count = 0; /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); /* * XXX Not really correct??? Probably need to honor boundary * all the way up the inheritance chain.
*/ newtag->boundary = MAX(parent->boundary, newtag->boundary); if (newtag->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ newtag->filter = parent->filter; newtag->filterarg = parent->filterarg; newtag->parent = parent->parent; } if (newtag->parent != NULL) { parent->ref_count++; } } if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) { /* Must bounce */ if (lowaddr > bounce_lowaddr) { /* * Go through the pool and kill any pages * that don't reside below lowaddr. */ panic("bus_dma_tag_create: page reallocation " "not implemented"); } if (ptoa(total_bpages) < maxsize) { int pages; pages = atop(maxsize) - total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(newtag, pages) < pages) error = ENOMEM; } /* Performed initial allocation */ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; } if (error != 0) { free(newtag, M_DEVBUF); } else { *dmat = newtag; } return (error); } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { if (dmat != NULL) { if (dmat->map_count != 0) return (EBUSY); while (dmat != NULL) { bus_dma_tag_t parent; parent = dmat->parent; dmat->ref_count--; if (dmat->ref_count == 0) { free(dmat, M_DEVBUF); } dmat = parent; } } return (0); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { int error; error = 0; if ((dmat->flags & BUS_DMA_ISA) && chipset.sgmap != NULL) { bus_dmamap_t map; map = (bus_dmamap_t)malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO); if (map == NULL) return (ENOMEM); map->busaddress = sgmap_alloc_region(chipset.sgmap, dmat->maxsize, dmat->boundary, &map->sgmaphandle); dmat->map_count++; *mapp = map; return (0); } if (dmat->lowaddr < ptoa(Maxmem)) { /* Must bounce */ int maxpages; *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO); if (*mapp == NULL) return (ENOMEM); /* Initialize the new map */ STAILQ_INIT(&((*mapp)->bpages)); /* * Attempt to add pages to our pool on a per-instance * basis up to a sane limit. */ maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr)); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || (dmat->map_count > 0 && total_bpages < maxpages)) { int pages; if (dmat->lowaddr > bounce_lowaddr) { /* * Go through the pool and kill any pages * that don't reside below lowaddr. */ panic("bus_dmamap_create: page reallocation " "not implemented"); } pages = atop(dmat->maxsize) + 1; pages = MIN(maxpages - total_bpages, pages); if (alloc_bounce_pages(dmat, pages) < pages) error = ENOMEM; if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { if (error == 0) dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; } else { error = 0; } } } else { *mapp = &nobounce_dmamap; } if (error == 0) dmat->map_count++; return (error); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { if ((dmat->flags & BUS_DMA_ISA) && chipset.sgmap != NULL) { sgmap_free_region(chipset.sgmap, map->sgmaphandle); } if (map != NULL) { if (STAILQ_FIRST(&map->bpages) != NULL) return (EBUSY); /* * The nobounce_dmamap map is not dynamically * allocated, thus we should on no account try to * free it. */ if (map != &nobounce_dmamap) free(map, M_DEVBUF); } dmat->map_count--; return (0); } /* * Allocate a piece of memory that can be efficiently mapped into * bus device space based on the constraints listed in the dma tag.
* A dmamap for use with dmamap_load is also allocated. */ int bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp, bus_size_t size) { if (size > dmat->maxsize) return (ENOMEM); /* If we succeed, no mapping/bouncing will be required */ *mapp = &nobounce_dmamap; if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) { *vaddr = malloc(size, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0); } else { /* * XXX Use Contigmalloc until it is merged into this facility * and handles multi-seg allocations. Nobody is doing * multi-seg allocations yet though. */ *vaddr = contigmalloc(size, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0, 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, dmat->boundary); } if (*vaddr == NULL) return (ENOMEM); return (0); } int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize)); } /* * Free a piece of memory and its associated dmamap, that was allocated * via bus_dmamem_alloc. */ void bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map, bus_size_t size) { /* * dmamem does not need to be bounced, so the map should be * NULL */ if (map != &nobounce_dmamap) panic("bus_dmamem_free: Invalid map freed\n"); if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) free(vaddr, M_DEVBUF); else contigfree(vaddr, size, M_DEVBUF); } void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize); } #define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1) /* * Map the buffer buf into bus space using the dmamap map. */ vm_offset_t alpha_XXX_dmamap_or = 1024UL*1024UL*1024UL; /*XXX */ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { vm_offset_t vaddr; vm_offset_t paddr; #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif bus_dma_segment_t *sg; int seg; int error; vm_offset_t nextpaddr; error = 0; if ((dmat->flags & BUS_DMA_ISA) && chipset.sgmap != NULL) { /* * For ISA dma, we use the chipset's scatter-gather * map to map the transfer into the ISA reachable range * of the bus address space. */ vaddr = trunc_page((vm_offset_t) buf); dm_segments[0].ds_addr = map->busaddress + (vm_offset_t) buf - vaddr; dm_segments[0].ds_len = buflen; buflen = round_page((vm_offset_t) buf + buflen) - vaddr; sgmap_load_region(chipset.sgmap, map->busaddress, vaddr, buflen); map->buflen = buflen; (*callback)(callback_arg, dm_segments, 1, error); return (0); } /* * If we are being called during a callback, pagesneeded will * be non-zero, so we can avoid doing the work twice.
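[Illustrative sketch, editor's addition, not part of this revision: bus_dmamap_load() returns EINPROGRESS when bounce pages cannot be reserved; the map is parked on bounce_map_waitinglist and the callback fires later from busdma_swi(). A minimal driver-side pattern, assuming a hypothetical softc and function names (mydev_softc, mydev_load_cb, mydev_start):

    struct mydev_softc {
            bus_dma_tag_t   sc_dmat;
            bus_dmamap_t    sc_map;
            bus_addr_t      sc_busaddr;     /* segment address handed to the device */
    };

    static void
    mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    {
            struct mydev_softc *sc = arg;

            if (error != 0)
                    return;                 /* load failed; segs[] is not valid */
            sc->sc_busaddr = segs[0].ds_addr;       /* single-segment tag assumed */
    }

    static int
    mydev_start(struct mydev_softc *sc, void *buf, bus_size_t buflen)
    {
            int error;

            error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, buflen,
                mydev_load_cb, sc, /*flags*/0);
            if (error == EINPROGRESS)
                    error = 0;      /* not a failure: the callback will run
                                       from busdma_swi() once pages free up,
                                       so leave the buffer untouched until then */
            return (error);
    }
]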
*/ if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) { vm_offset_t vendaddr; /* * Count the number of bounce pages * needed in order to complete this transfer */ vaddr = trunc_page(buf); vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { paddr = pmap_kextract(vaddr); if (run_filter(dmat, paddr) != 0) { map->pagesneeded++; } vaddr += PAGE_SIZE; } } /* Reserve Necessary Bounce Pages */ if (map->pagesneeded != 0) { int s; s = splhigh(); if (reserve_bounce_pages(dmat, map) != 0) { /* Queue us for resources */ map->dmat = dmat; map->buf = buf; map->buflen = buflen; map->callback = callback; map->callback_arg = callback_arg; STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); splx(s); return (EINPROGRESS); } splx(s); } vaddr = (vm_offset_t)buf; sg = &dm_segments[0]; seg = 1; sg->ds_len = 0; nextpaddr = 0; do { bus_size_t size; paddr = pmap_kextract(vaddr); size = PAGE_SIZE - (paddr & PAGE_MASK); if (size > buflen) size = buflen; if (map->pagesneeded != 0 && run_filter(dmat, paddr)) { paddr = add_bounce_page(dmat, map, vaddr, size); } if (sg->ds_len == 0) { sg->ds_addr = paddr + alpha_XXX_dmamap_or; sg->ds_len = size; } else if (paddr == nextpaddr) { sg->ds_len += size; } else { /* Go to the next segment */ sg++; seg++; if (seg > dmat->nsegments) break; sg->ds_addr = paddr + alpha_XXX_dmamap_or; sg->ds_len = size; } vaddr += size; nextpaddr = paddr + size; buflen -= size; } while (buflen > 0); if (buflen != 0) { printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n", buflen); error = EFBIG; } (*callback)(callback_arg, dm_segments, seg, error); return (0); } /* * Utility function to load a linear buffer. lastaddrp holds state * between invocations (for multiple-buffer loads). segp contains * the starting segment on entrance, and the ending segment on exit. * first indicates if this is the first invocation of this function. */ static int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[], void *buf, bus_size_t buflen, struct thread *td, int flags, vm_offset_t *lastaddrp, int *segp, int first) { bus_size_t sgsize; bus_addr_t curaddr, lastaddr, baddr, bmask; vm_offset_t vaddr = (vm_offset_t)buf; int seg; pmap_t pmap; if (td != NULL) pmap = vmspace_pmap(td->td_proc->p_vmspace); else pmap = NULL; lastaddr = *lastaddrp; bmask = ~(dmat->boundary - 1); for (seg = *segp; buflen > 0 ; ) { /* * Get the physical address for this segment. */ if (pmap) curaddr = pmap_extract(pmap, vaddr); else curaddr = pmap_kextract(vaddr); /* * Compute the segment size, and adjust counts. */ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); if (buflen < sgsize) sgsize = buflen; /* * Make sure we don't cross any boundaries. */ if (dmat->boundary > 0) { baddr = (curaddr + dmat->boundary) & bmask; if (sgsize > (baddr - curaddr)) sgsize = (baddr - curaddr); } /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ if (first) { segs[seg].ds_addr = curaddr + alpha_XXX_dmamap_or; segs[seg].ds_len = sgsize; first = 0; } else { if (curaddr == lastaddr && (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && (dmat->boundary == 0 || (segs[seg].ds_addr & bmask) == (curaddr & bmask))) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->nsegments) break; segs[seg].ds_addr = curaddr + alpha_XXX_dmamap_or; segs[seg].ds_len = sgsize; } } lastaddr = curaddr + sgsize; vaddr += sgsize; buflen -= sgsize; } *segp = seg; *lastaddrp = lastaddr; /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here?
*/ } /* * Like _bus_dmamap_load(), but for mbufs. */ int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif int nsegs, error; KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, ("bus_dmamap_load_mbuf: No support for bounce pages!")); KASSERT(m0->m_flags & M_PKTHDR, ("bus_dmamap_load_mbuf: no packet header")); nsegs = 0; error = 0; if (m0->m_pkthdr.len <= dmat->maxsize) { int first = 1; vm_offset_t lastaddr = 0; struct mbuf *m; for (m = m0; m != NULL && error == 0; m = m->m_next) { error = _bus_dmamap_load_buffer(dmat, dm_segments, m->m_data, m->m_len, NULL, flags, &lastaddr, &nsegs, first); first = 0; } } else { error = EINVAL; } if (error) { /* force "no valid mappings" in callback */ (*callback)(callback_arg, dm_segments, 0, 0, error); } else { (*callback)(callback_arg, dm_segments, nsegs+1, m0->m_pkthdr.len, error); } return (error); } /* * Like _bus_dmamap_load(), but for uios. */ int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { vm_offset_t lastaddr; #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif int nsegs, error, first, i; bus_size_t resid; struct iovec *iov; struct thread *td = NULL; KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, ("bus_dmamap_load_uio: No support for bounce pages!")); resid = uio->uio_resid; iov = uio->uio_iov; if (uio->uio_segflg == UIO_USERSPACE) { td = uio->uio_td; KASSERT(td != NULL, ("bus_dmamap_load_uio: USERSPACE but no proc")); } nsegs = 0; error = 0; first = 1; for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { /* * Now at the first iovec to load. Load each iovec * until we have exhausted the residual count. */ bus_size_t minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; caddr_t addr = (caddr_t) iov[i].iov_base; error = _bus_dmamap_load_buffer(dmat, dm_segments, addr, minlen, td, flags, &lastaddr, &nsegs, first); first = 0; resid -= minlen; } if (error) { /* force "no valid mappings" in callback */ (*callback)(callback_arg, dm_segments, 0, 0, error); } else { (*callback)(callback_arg, dm_segments, nsegs+1, uio->uio_resid, error); } return (error); } /* * Release the mapping held by map. */ void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { struct bounce_page *bpage; if ((dmat->flags & BUS_DMA_ISA) && chipset.sgmap != NULL) { sgmap_unload_region(chipset.sgmap, map->busaddress, map->buflen); return; } while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } } void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { /* * Handle data bouncing. 
We might also * want to add support for invalidating * the caches on broken hardware */ switch (op) { case BUS_DMASYNC_PREWRITE: while (bpage != NULL) { bcopy((void *)bpage->datavaddr, (void *)bpage->vaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } break; case BUS_DMASYNC_POSTREAD: while (bpage != NULL) { bcopy((void *)bpage->vaddr, (void *)bpage->datavaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } break; case BUS_DMASYNC_PREREAD: case BUS_DMASYNC_POSTWRITE: /* No-ops */ break; } } } static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) { int count; count = 0; if (total_bpages == 0) { mtx_init(&bounce_lock, "BouncePage", NULL, MTX_DEF); STAILQ_INIT(&bounce_page_list); STAILQ_INIT(&bounce_map_waitinglist); STAILQ_INIT(&bounce_map_callbacklist); } while (numpages > 0) { struct bounce_page *bpage; bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, M_NOWAIT | M_ZERO); if (bpage == NULL) break; bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT, 0ul, dmat->lowaddr, PAGE_SIZE, dmat->boundary); if (bpage->vaddr == NULL) { free(bpage, M_DEVBUF); break; } bpage->busaddr = pmap_kextract(bpage->vaddr); mtx_lock(&bounce_lock); STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); total_bpages++; free_bpages++; mtx_unlock(&bounce_lock); count++; numpages--; } return (count); } static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map) { int pages; pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved); free_bpages -= pages; reserved_bpages += pages; map->pagesreserved += pages; pages = map->pagesneeded - map->pagesreserved; return (pages); } static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_size_t size) { struct bounce_page *bpage; if (map->pagesneeded == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesneeded--; if (map->pagesreserved == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesreserved--; mtx_lock(&bounce_lock); bpage = STAILQ_FIRST(&bounce_page_list); if (bpage == NULL) panic("add_bounce_page: free page list is empty"); STAILQ_REMOVE_HEAD(&bounce_page_list, links); reserved_bpages--; active_bpages++; mtx_unlock(&bounce_lock); bpage->datavaddr = vaddr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); } static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) { struct bus_dmamap *map; bpage->datavaddr = 0; bpage->datacount = 0; mtx_lock(&bounce_lock); STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); free_bpages++; active_bpages--; if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { if (reserve_bounce_pages(map->dmat, map) == 0) { STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links); busdma_swi_pending = 1; swi_sched(vm_ih, 0); } } mtx_unlock(&bounce_lock); } void busdma_swi(void) { struct bus_dmamap *map; mtx_lock(&bounce_lock); while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); mtx_unlock(&bounce_lock); bus_dmamap_load(map->dmat, map, map->buf, map->buflen, map->callback, map->callback_arg, /*flags*/0); mtx_lock(&bounce_lock); } mtx_unlock(&bounce_lock); } Index: head/sys/amd64/amd64/busdma_machdep.c =================================================================== --- head/sys/amd64/amd64/busdma_machdep.c (revision 110231) +++ head/sys/amd64/amd64/busdma_machdep.c (revision 110232) @@ -1,899 +1,897 @@ /* * Copyright 
(c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#define MAX(a,b) (((a) > (b)) ? (a) : (b)) -#define MIN(a,b) (((a) < (b)) ? (a) : (b)) #define MAX_BPAGES 128 struct bus_dma_tag { bus_dma_tag_t parent; bus_size_t alignment; bus_size_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; int ref_count; int map_count; }; struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; int busdma_swi_pending; static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; static int free_bpages; static int reserved_bpages; static int active_bpages; static int total_bpages; static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; struct bus_dmamap { struct bp_list bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; void *buf; /* unmapped buffer pointer */ bus_size_t buflen; /* unmapped buffer length */ bus_dmamap_callback_t *callback; void *callback_arg; STAILQ_ENTRY(bus_dmamap) links; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; static struct bus_dmamap nobounce_dmamap; static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map); static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); /* * Return true if a match is made. * * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. * * If paddr is within the bounds of the dma tag then call the filter callback * to check for a match, if there is no filter callback then assume a match. 
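[Illustrative sketch, editor's addition, not part of this revision: run_filter() treats any page inside (lowaddr, highaddr] as bounce-required unless the tag's filter accepts it. A hypothetical bus_dma_filter_t (the name and the exempt-window logic are invented) that would be handed to bus_dma_tag_create() as the filter/filterarg pair:

    static int
    mydev_dma_filter(void *arg, bus_addr_t paddr)
    {
            bus_addr_t exempt_base = *(bus_addr_t *)arg;

            /*
             * Nonzero: page is unacceptable to the device, bounce it.
             * Zero: accept the page even though it lies inside the
             * tag's restricted address window.
             */
            return (paddr < exempt_base ||
                paddr >= exempt_base + (1UL << 20));
    }
]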
*/ static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) { int retval; retval = 0; do { if (paddr > dmat->lowaddr && paddr <= dmat->highaddr && (dmat->filter == NULL || (*dmat->filter)(dmat->filterarg, paddr) != 0)) retval = 1; dmat = dmat->parent; } while (retval == 0 && dmat != NULL); return (retval); } #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 /* * Allocate a device specific dma_tag. */ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error = 0; /* Return a NULL tag on failure */ *dmat = NULL; newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); if (newtag == NULL) return (ENOMEM); newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); newtag->filter = filter; newtag->filterarg = filterarg; newtag->maxsize = maxsize; newtag->nsegments = nsegments; newtag->maxsegsz = maxsegsz; newtag->flags = flags; newtag->ref_count = 1; /* Count ourself */ newtag->map_count = 0; /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); /* * XXX Not really correct??? Probably need to honor boundary * all the way up the inheritance chain. */ newtag->boundary = MAX(parent->boundary, newtag->boundary); if (newtag->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ newtag->filter = parent->filter; newtag->filterarg = parent->filterarg; newtag->parent = parent->parent; } if (newtag->parent != NULL) { parent->ref_count++; } } if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) { /* Must bounce */ if (lowaddr > bounce_lowaddr) { /* * Go through the pool and kill any pages * that don't reside below lowaddr. */ panic("bus_dma_tag_create: page reallocation " "not implemented"); } if (ptoa(total_bpages) < maxsize) { int pages; pages = atop(maxsize) - total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(newtag, pages) < pages) error = ENOMEM; } /* Performed initial allocation */ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; } if (error != 0) { free(newtag, M_DEVBUF); } else { *dmat = newtag; } return (error); } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { if (dmat != NULL) { if (dmat->map_count != 0) return (EBUSY); while (dmat != NULL) { bus_dma_tag_t parent; parent = dmat->parent; dmat->ref_count--; if (dmat->ref_count == 0) { free(dmat, M_DEVBUF); /* * Last reference count, so * release our reference * count on our parent. */ dmat = parent; } else dmat = NULL; } } return (0); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { int error; error = 0; if (dmat->lowaddr < ptoa(Maxmem)) { /* Must bounce */ int maxpages; *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO); if (*mapp == NULL) return (ENOMEM); /* Initialize the new map */ STAILQ_INIT(&((*mapp)->bpages)); /* * Attempt to add pages to our pool on a per-instance * basis up to a sane limit.
*/ maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr)); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || (dmat->map_count > 0 && total_bpages < maxpages)) { int pages; if (dmat->lowaddr > bounce_lowaddr) { /* * Go through the pool and kill any pages * that don't reside below lowaddr. */ panic("bus_dmamap_create: page reallocation " "not implemented"); } pages = atop(dmat->maxsize); pages = MIN(maxpages - total_bpages, pages); error = alloc_bounce_pages(dmat, pages); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { if (error == 0) dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; } else { error = 0; } } } else { *mapp = NULL; } if (error == 0) dmat->map_count++; return (error); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { if (map != NULL) { if (STAILQ_FIRST(&map->bpages) != NULL) return (EBUSY); free(map, M_DEVBUF); } dmat->map_count--; return (0); } /* * Allocate a piece of memory that can be efficiently mapped into * bus device space based on the constraints listed in the dma tag. * A dmamap for use with dmamap_load is also allocated. */ int bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp, bus_size_t size) { if (size > dmat->maxsize) return (ENOMEM); /* If we succeed, no mapping/bouncing will be required */ *mapp = NULL; if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) { *vaddr = malloc(size, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0); } else { /* * XXX Use Contigmalloc until it is merged into this facility * and handles multi-seg allocations. Nobody is doing * multi-seg allocations yet though. */ *vaddr = contigmalloc(size, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0, 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, dmat->boundary); } if (*vaddr == NULL) return (ENOMEM); return (0); } int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize)); } /* * Free a piece of memory and its associated dmamap, that was allocated * via bus_dmamem_alloc. Make the same choice for free/contigfree. */ void bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map, bus_size_t size) { /* * dmamem does not need to be bounced, so the map should be * NULL */ if (map != NULL) panic("bus_dmamem_free: Invalid map freed\n"); if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) free(vaddr, M_DEVBUF); else contigfree(vaddr, size, M_DEVBUF); } void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize); } #define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1) /* * Map the buffer buf into bus space using the dmamap map. */ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { vm_offset_t vaddr; vm_offset_t paddr; #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif bus_dma_segment_t *sg; int seg; int error; vm_offset_t nextpaddr; if (map == NULL) map = &nobounce_dmamap; error = 0; /* * If we are being called during a callback, pagesneeded will * be non-zero, so we can avoid doing the work twice.
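[Illustrative sketch, editor's addition: creating a constrained tag and carving DMA-safe memory with the functions above, using the twelve-argument signature this revision's bus_dma_tag_create() actually has (no lock arguments in this era of the tree). The ISA-style numbers and the helper name are only examples:

    static int
    mydev_dma_setup(bus_dma_tag_t *dmatp, bus_dmamap_t *mapp, void **vaddrp)
    {
            int error;

            error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1,
                /*boundary*/0x10000, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
                /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL,
                /*filterarg*/NULL, /*maxsize*/0x10000, /*nsegments*/1,
                /*maxsegsz*/0x10000, /*flags*/0, dmatp);
            if (error != 0)
                    return (error);
            /* Picks malloc() or contigmalloc() exactly as shown above. */
            return (bus_dmamem_alloc(*dmatp, vaddrp, BUS_DMA_NOWAIT, mapp));
    }
]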
*/ if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) { vm_offset_t vendaddr; /* * Count the number of bounce pages * needed in order to complete this transfer */ vaddr = trunc_page((vm_offset_t)buf); vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { paddr = pmap_kextract(vaddr); if (run_filter(dmat, paddr) != 0) { map->pagesneeded++; } vaddr += PAGE_SIZE; } } /* Reserve Necessary Bounce Pages */ if (map->pagesneeded != 0) { int s; s = splhigh(); if (reserve_bounce_pages(dmat, map) != 0) { /* Queue us for resources */ map->dmat = dmat; map->buf = buf; map->buflen = buflen; map->callback = callback; map->callback_arg = callback_arg; STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); splx(s); return (EINPROGRESS); } splx(s); } vaddr = (vm_offset_t)buf; sg = &dm_segments[0]; seg = 1; sg->ds_len = 0; nextpaddr = 0; do { bus_size_t size; paddr = pmap_kextract(vaddr); size = PAGE_SIZE - (paddr & PAGE_MASK); if (size > buflen) size = buflen; if (map->pagesneeded != 0 && run_filter(dmat, paddr)) { paddr = add_bounce_page(dmat, map, vaddr, size); } if (sg->ds_len == 0) { sg->ds_addr = paddr; sg->ds_len = size; } else if (paddr == nextpaddr) { sg->ds_len += size; } else { /* Go to the next segment */ sg++; seg++; if (seg > dmat->nsegments) break; sg->ds_addr = paddr; sg->ds_len = size; } vaddr += size; nextpaddr = paddr + size; buflen -= size; } while (buflen > 0); if (buflen != 0) { printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n", (u_long)buflen); error = EFBIG; } (*callback)(callback_arg, dm_segments, seg, error); return (0); } /* * Utility function to load a linear buffer. lastaddrp holds state * between invocations (for multiple-buffer loads). segp contains * the starting segment on entrance, and the ending segment on exit. * first indicates if this is the first invocation of this function. */ static int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[], void *buf, bus_size_t buflen, struct thread *td, int flags, vm_offset_t *lastaddrp, int *segp, int first) { bus_size_t sgsize; bus_addr_t curaddr, lastaddr, baddr, bmask; vm_offset_t vaddr = (vm_offset_t)buf; int seg; pmap_t pmap; if (td != NULL) pmap = vmspace_pmap(td->td_proc->p_vmspace); else pmap = NULL; lastaddr = *lastaddrp; bmask = ~(dmat->boundary - 1); for (seg = *segp; buflen > 0 ; ) { /* * Get the physical address for this segment. */ if (pmap) curaddr = pmap_extract(pmap, vaddr); else curaddr = pmap_kextract(vaddr); /* * Compute the segment size, and adjust counts. */ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); if (buflen < sgsize) sgsize = buflen; /* * Make sure we don't cross any boundaries. */ if (dmat->boundary > 0) { baddr = (curaddr + dmat->boundary) & bmask; if (sgsize > (baddr - curaddr)) sgsize = (baddr - curaddr); } /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ if (first) { segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; first = 0; } else { if (curaddr == lastaddr && (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && (dmat->boundary == 0 || (segs[seg].ds_addr & bmask) == (curaddr & bmask))) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->nsegments) break; segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } } lastaddr = curaddr + sgsize; vaddr += sgsize; buflen -= sgsize; } *segp = seg; *lastaddrp = lastaddr; /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } /* * Like _bus_dmamap_load(), but for mbufs.
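[Illustrative sketch, editor's addition: the richer bus_dmamap_callback2_t used by the mbuf and uio loaders below. Per those call sites, success delivers nsegs+1 segments plus the total mapped length (m_pkthdr.len or uio_resid), while failure delivers zero segments and a zero length. mydev_softc and mydev_put_desc() are invented stand-ins:

    static void
    mydev_tx_cb(void *arg, bus_dma_segment_t *segs, int nseg,
        bus_size_t mapsize, int error)
    {
            struct mydev_softc *sc = arg;
            int i;

            if (error != 0)
                    return;         /* called with nseg == 0, mapsize == 0 */
            /* mapsize is the whole packet; program one descriptor per seg. */
            for (i = 0; i < nseg; i++)
                    mydev_put_desc(sc, segs[i].ds_addr, segs[i].ds_len);
    }

    error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_map, m0,
        mydev_tx_cb, sc, /*flags*/0);
]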
*/ int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif int nsegs, error; KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, ("bus_dmamap_load_mbuf: No support for bounce pages!")); KASSERT(m0->m_flags & M_PKTHDR, ("bus_dmamap_load_mbuf: no packet header")); nsegs = 0; error = 0; if (m0->m_pkthdr.len <= dmat->maxsize) { int first = 1; vm_offset_t lastaddr = 0; struct mbuf *m; for (m = m0; m != NULL && error == 0; m = m->m_next) { error = _bus_dmamap_load_buffer(dmat, dm_segments, m->m_data, m->m_len, NULL, flags, &lastaddr, &nsegs, first); first = 0; } } else { error = EINVAL; } if (error) { /* force "no valid mappings" in callback */ (*callback)(callback_arg, dm_segments, 0, 0, error); } else { (*callback)(callback_arg, dm_segments, nsegs+1, m0->m_pkthdr.len, error); } return (error); } /* * Like _bus_dmamap_load(), but for uios. */ int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { vm_offset_t lastaddr; #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif int nsegs, error, first, i; bus_size_t resid; struct iovec *iov; struct thread *td = NULL; KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, ("bus_dmamap_load_uio: No support for bounce pages!")); resid = uio->uio_resid; iov = uio->uio_iov; if (uio->uio_segflg == UIO_USERSPACE) { td = uio->uio_td; KASSERT(td != NULL, ("bus_dmamap_load_uio: USERSPACE but no proc")); } nsegs = 0; error = 0; first = 1; for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { /* * Now at the first iovec to load. Load each iovec * until we have exhausted the residual count. */ bus_size_t minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; caddr_t addr = (caddr_t) iov[i].iov_base; error = _bus_dmamap_load_buffer(dmat, dm_segments, addr, minlen, td, flags, &lastaddr, &nsegs, first); first = 0; resid -= minlen; } if (error) { /* force "no valid mappings" in callback */ (*callback)(callback_arg, dm_segments, 0, 0, error); } else { (*callback)(callback_arg, dm_segments, nsegs+1, uio->uio_resid, error); } return (error); } /* * Release the mapping held by map. */ void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { struct bounce_page *bpage; while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } } void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { /* * Handle data bouncing. 
We might also * want to add support for invalidating * the caches on broken hardware */ switch (op) { case BUS_DMASYNC_PREWRITE: while (bpage != NULL) { bcopy((void *)bpage->datavaddr, (void *)bpage->vaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } break; case BUS_DMASYNC_POSTREAD: while (bpage != NULL) { bcopy((void *)bpage->vaddr, (void *)bpage->datavaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } break; case BUS_DMASYNC_PREREAD: case BUS_DMASYNC_POSTWRITE: /* No-ops */ break; } } } static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) { int count; count = 0; if (total_bpages == 0) { STAILQ_INIT(&bounce_page_list); STAILQ_INIT(&bounce_map_waitinglist); STAILQ_INIT(&bounce_map_callbacklist); } while (numpages > 0) { struct bounce_page *bpage; int s; bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, M_NOWAIT | M_ZERO); if (bpage == NULL) break; bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT, 0ul, dmat->lowaddr, PAGE_SIZE, 0); if (bpage->vaddr == 0) { free(bpage, M_DEVBUF); break; } bpage->busaddr = pmap_kextract(bpage->vaddr); s = splhigh(); STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); total_bpages++; free_bpages++; splx(s); count++; numpages--; } return (count); } static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map) { int pages; pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved); free_bpages -= pages; reserved_bpages += pages; map->pagesreserved += pages; pages = map->pagesneeded - map->pagesreserved; return (pages); } static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_size_t size) { int s; struct bounce_page *bpage; if (map->pagesneeded == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesneeded--; if (map->pagesreserved == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesreserved--; s = splhigh(); bpage = STAILQ_FIRST(&bounce_page_list); if (bpage == NULL) panic("add_bounce_page: free page list is empty"); STAILQ_REMOVE_HEAD(&bounce_page_list, links); reserved_bpages--; active_bpages++; splx(s); bpage->datavaddr = vaddr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); } static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) { int s; struct bus_dmamap *map; bpage->datavaddr = 0; bpage->datacount = 0; s = splhigh(); STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); free_bpages++; active_bpages--; if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { if (reserve_bounce_pages(map->dmat, map) == 0) { STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links); busdma_swi_pending = 1; swi_sched(vm_ih, 0); } } splx(s); } void busdma_swi(void) { int s; struct bus_dmamap *map; s = splhigh(); while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); splx(s); bus_dmamap_load(map->dmat, map, map->buf, map->buflen, map->callback, map->callback_arg, /*flags*/0); s = splhigh(); } splx(s); } Index: head/sys/cam/scsi/scsi_cd.c =================================================================== --- head/sys/cam/scsi/scsi_cd.c (revision 110231) +++ head/sys/cam/scsi/scsi_cd.c (revision 110232) @@ -1,3557 +1,3553 @@ /* * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999, 2000, 2001 Kenneth D. Merry. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Portions of this driver taken from the original FreeBSD cd driver. * Written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems for use under the MACH(2.5) operating system. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. 
* * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 * * from: cd.c,v 1.83 1997/05/04 15:24:22 joerg Exp $ */ #include "opt_cd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LEADOUT 0xaa /* leadout toc entry */ struct cd_params { u_int32_t blksize; u_long disksize; }; typedef enum { CD_Q_NONE = 0x00, CD_Q_NO_TOUCH = 0x01, CD_Q_BCD_TRACKS = 0x02, CD_Q_NO_CHANGER = 0x04, CD_Q_CHANGER = 0x08 } cd_quirks; typedef enum { CD_FLAG_INVALID = 0x001, CD_FLAG_NEW_DISC = 0x002, CD_FLAG_DISC_LOCKED = 0x004, CD_FLAG_DISC_REMOVABLE = 0x008, CD_FLAG_TAGGED_QUEUING = 0x010, CD_FLAG_CHANGER = 0x040, CD_FLAG_ACTIVE = 0x080, CD_FLAG_SCHED_ON_COMP = 0x100, CD_FLAG_RETRY_UA = 0x200 } cd_flags; typedef enum { CD_CCB_PROBE = 0x01, CD_CCB_BUFFER_IO = 0x02, CD_CCB_WAITING = 0x03, CD_CCB_TYPE_MASK = 0x0F, CD_CCB_RETRY_UA = 0x10 } cd_ccb_state; typedef enum { CHANGER_TIMEOUT_SCHED = 0x01, CHANGER_SHORT_TMOUT_SCHED = 0x02, CHANGER_MANUAL_CALL = 0x04, CHANGER_NEED_TIMEOUT = 0x08 } cd_changer_flags; #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 typedef enum { CD_STATE_PROBE, CD_STATE_NORMAL } cd_state; struct cd_softc { cam_pinfo pinfo; cd_state state; volatile cd_flags flags; struct bio_queue_head bio_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; struct cd_params params; union ccb saved_ccb; cd_quirks quirks; struct devstat device_stats; STAILQ_ENTRY(cd_softc) changer_links; struct cdchanger *changer; int bufs_left; struct cam_periph *periph; dev_t dev; eventhandler_tag clonetag; }; struct cd_quirk_entry { struct scsi_inquiry_pattern inq_pat; cd_quirks quirks; }; /* * These quirk entries aren't strictly necessary. Basically, what they do * is tell cdregister() up front that a device is a changer. Otherwise, it * will figure that fact out once it sees a LUN on the device that is * greater than 0. If it is known up front that a device is a changer, all * I/O to the device will go through the changer scheduling routines, as * opposed to the "normal" CD code. */ static struct cd_quirk_entry cd_quirk_table[] = { { { T_CDROM, SIP_MEDIA_REMOVABLE, "NRC", "MBR-7", "*"}, /*quirks*/ CD_Q_CHANGER }, { { T_CDROM, SIP_MEDIA_REMOVABLE, "PIONEER", "CD-ROM DRM*", "*"}, /* quirks */ CD_Q_CHANGER }, { { T_CDROM, SIP_MEDIA_REMOVABLE, "NAKAMICH", "MJ-*", "*"}, /* quirks */ CD_Q_CHANGER }, { { T_CDROM, SIP_MEDIA_REMOVABLE, "CHINON", "CD-ROM CDS-535","*"}, /* quirks */ CD_Q_BCD_TRACKS } }; -#ifndef MIN -#define MIN(x,y) ((xdev); l = strlen(p); if (bcmp(name, p, l)) return; if (name[l] != 'a' && name[l] != 'c') return; if (name[l + 1] != '\0') return; *dev = softc->dev; return; } static void cdinit(void) { cam_status status; struct cam_path *path; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". 
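[For reference, editor's addition: the private MIN/MAX copies deleted by this revision in all three files, including the #ifndef MIN block above, presumably defer to the central definitions in <sys/param.h>, which take the usual form:

    #define MIN(a,b) (((a)<(b))?(a):(b))
    #define MAX(a,b) (((a)>(b))?(a):(b))
]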
*/ status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status == CAM_REQ_CMP) { struct ccb_setasync csa; xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = cdasync; csa.callback_arg = NULL; xpt_action((union ccb *)&csa); status = csa.ccb_h.status; xpt_free_path(path); } if (status != CAM_REQ_CMP) { printf("cd: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void cdoninvalidate(struct cam_periph *periph) { int s; struct cd_softc *softc; struct bio *q_bp; struct ccb_setasync csa; softc = (struct cd_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = cdasync; csa.callback_arg = periph; xpt_action((union ccb *)&csa); softc->flags |= CD_FLAG_INVALID; /* * Although the oninvalidate() routines are always called at * splsoftcam, we need to be at splbio() here to keep the buffer * queue from being modified while we traverse it. */ s = splbio(); /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ while ((q_bp = bioq_first(&softc->bio_queue)) != NULL){ bioq_remove(&softc->bio_queue, q_bp); q_bp->bio_resid = q_bp->bio_bcount; biofinish(q_bp, NULL, ENXIO); } splx(s); /* * If this device is part of a changer, and it was scheduled * to run, remove it from the run queue since we just nuked * all of its scheduled I/O. */ if ((softc->flags & CD_FLAG_CHANGER) && (softc->pinfo.index != CAM_UNQUEUED_INDEX)) camq_remove(&softc->changer->devq, softc->pinfo.index); xpt_print_path(periph->path); printf("lost device\n"); } static void cdcleanup(struct cam_periph *periph) { struct cd_softc *softc; int s; softc = (struct cd_softc *)periph->softc; xpt_print_path(periph->path); printf("removing device entry\n"); s = splsoftcam(); /* * In the queued, non-active case, the device in question * has already been removed from the changer run queue. Since this * device is active, we need to de-activate it, and schedule * another device to run. (if there is another one to run) */ if ((softc->flags & CD_FLAG_CHANGER) && (softc->flags & CD_FLAG_ACTIVE)) { /* * The purpose of the short timeout is solely to determine * whether the current device has finished or not. Well, * since we're removing the active device, we know that it * is finished. So, get rid of the short timeout. * Otherwise, if we're in the time period before the short * timeout fires, and there are no other devices in the * queue to run, there won't be any other device put in the * active slot. i.e., when we call cdrunchangerqueue() * below, it won't do anything. Then, when the short * timeout fires, it'll look at the "current device", which * we free below, and possibly panic the kernel on a * bogus pointer reference. * * The long timeout doesn't really matter, since we * decrement the qfrozen_cnt to indicate that there is * nothing in the active slot now. Therefore, there won't * be any bogus pointer references there.
*/ if (softc->changer->flags & CHANGER_SHORT_TMOUT_SCHED) { untimeout(cdshorttimeout, softc->changer, softc->changer->short_handle); softc->changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED; } softc->changer->devq.qfrozen_cnt--; softc->changer->flags |= CHANGER_MANUAL_CALL; cdrunchangerqueue(softc->changer); } /* * If we're removing the last device on the changer, go ahead and * remove the changer device structure. */ if ((softc->flags & CD_FLAG_CHANGER) && (--softc->changer->num_devices == 0)) { /* * Theoretically, there shouldn't be any timeouts left, but * I'm not completely sure that that will be the case. So, * it won't hurt to check and see if there are any left. */ if (softc->changer->flags & CHANGER_TIMEOUT_SCHED) { untimeout(cdrunchangerqueue, softc->changer, softc->changer->long_handle); softc->changer->flags &= ~CHANGER_TIMEOUT_SCHED; } if (softc->changer->flags & CHANGER_SHORT_TMOUT_SCHED) { untimeout(cdshorttimeout, softc->changer, softc->changer->short_handle); softc->changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED; } STAILQ_REMOVE(&changerq, softc->changer, cdchanger, changer_links); xpt_print_path(periph->path); printf("removing changer entry\n"); free(softc->changer, M_DEVBUF); num_changers--; } devstat_remove_entry(&softc->device_stats); destroy_dev(softc->dev); EVENTHANDLER_DEREGISTER(dev_clone, softc->clonetag); free(softc, M_DEVBUF); splx(s); } static void cdasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (SID_TYPE(&cgd->inq_data) != T_CDROM && SID_TYPE(&cgd->inq_data) != T_WORM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(cdregister, cdoninvalidate, cdcleanup, cdstart, "cd", CAM_PERIPH_BIO, cgd->ccb_h.path, cdasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("cdasync: Unable to attach new device " "due to status 0x%x\n", status); break; } case AC_SENT_BDR: case AC_BUS_RESET: { struct cd_softc *softc; struct ccb_hdr *ccbh; int s; softc = (struct cd_softc *)periph->softc; s = splsoftcam(); /* * Don't fail on the expected unit attention * that will occur. */ softc->flags |= CD_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= CD_CCB_RETRY_UA; splx(s); /* FALLTHROUGH */ } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status cdregister(struct cam_periph *periph, void *arg) { struct cd_softc *softc; struct ccb_setasync csa; struct ccb_getdev *cgd; caddr_t match; cgd = (struct ccb_getdev *)arg; if (periph == NULL) { printf("cdregister: periph was NULL!!\n"); return(CAM_REQ_CMP_ERR); } if (cgd == NULL) { printf("cdregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct cd_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); if (softc == NULL) { printf("cdregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); LIST_INIT(&softc->pending_ccbs); softc->state = CD_STATE_PROBE; bioq_init(&softc->bio_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= CD_FLAG_DISC_REMOVABLE; if ((cgd->inq_data.flags & SID_CmdQue) != 0) softc->flags |= CD_FLAG_TAGGED_QUEUING; periph->softc = softc; softc->periph = periph; /* * See if this device has any quirks. 
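[Illustrative sketch, editor's addition: cd_quirk_table entries, as defined near the top of this file, pair a scsi_inquiry_pattern with cd_quirks flags so that a device can be treated as a changer from lun 0 onward. A hypothetical additional entry (vendor and product strings invented):

    {
            { T_CDROM, SIP_MEDIA_REMOVABLE, "ACME", "CD-TOWER*", "*" },
            /* quirks */ CD_Q_CHANGER
    },

cam_quirkmatch() below selects the first entry whose inquiry pattern matches.]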
*/ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)cd_quirk_table, sizeof(cd_quirk_table)/sizeof(*cd_quirk_table), sizeof(*cd_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct cd_quirk_entry *)match)->quirks; else softc->quirks = CD_Q_NONE; /* * We need to register the statistics structure for this device, * but we don't have the blocksize yet for it. So, we register * the structure and indicate that we don't have the blocksize * yet. Unlike other SCSI peripheral drivers, we explicitly set * the device type here to be CDROM, rather than just ORing in * the device type. This is because this driver can attach to either * CDROM or WORM devices, and we want this peripheral driver to * show up in the devstat list as a CD peripheral driver, not a * WORM peripheral driver. WORM drives will also have the WORM * driver attached to them. */ devstat_add_entry(&softc->device_stats, "cd", periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, DEVSTAT_TYPE_CDROM | DEVSTAT_TYPE_IF_SCSI, DEVSTAT_PRIORITY_CD); softc->dev = make_dev(&cd_cdevsw, periph->unit_number, UID_ROOT, GID_OPERATOR, 0640, "cd%d", periph->unit_number); softc->dev->si_drv1 = periph; softc->clonetag = EVENTHANDLER_REGISTER(dev_clone, cdclone, softc, 1000); /* * Add an async callback so that we get * notified if this device goes away. */ xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE; csa.callback = cdasync; csa.callback_arg = periph; xpt_action((union ccb *)&csa); /* * If the target lun is greater than 0, we most likely have a CD * changer device. Check the quirk entries as well, though, just * in case someone has a CD tower with one lun per drive or * something like that. Also, if we know up front that a * particular device is a changer, we can mark it as such starting * with lun 0, instead of lun 1. It shouldn't be necessary to have * a quirk entry to define something as a changer, however. */ if (((cgd->ccb_h.target_lun > 0) && ((softc->quirks & CD_Q_NO_CHANGER) == 0)) || ((softc->quirks & CD_Q_CHANGER) != 0)) { struct cdchanger *nchanger; struct cam_periph *nperiph; struct cam_path *path; cam_status status; int found; /* Set the changer flag in the current device's softc */ softc->flags |= CD_FLAG_CHANGER; if (num_changers == 0) STAILQ_INIT(&changerq); /* * Now, look around for an existing changer device with the * same path and target ID as the current device. */ for (found = 0, nchanger = (struct cdchanger *)STAILQ_FIRST(&changerq); nchanger != NULL; nchanger = STAILQ_NEXT(nchanger, changer_links)){ if ((nchanger->path_id == cgd->ccb_h.path_id) && (nchanger->target_id == cgd->ccb_h.target_id)) { found = 1; break; } } /* * If we found a matching entry, just add this device to * the list of devices on this changer. */ if (found == 1) { struct chdevlist *chlunhead; chlunhead = &nchanger->chluns; /* * XXX KDM look at consolidating this code with the * code below in a separate function. 
*/ /* * Create a path with lun id 0, and see if we can * find a matching device */ status = xpt_create_path(&path, /*periph*/ periph, cgd->ccb_h.path_id, cgd->ccb_h.target_id, 0); if ((status == CAM_REQ_CMP) && ((nperiph = cam_periph_find(path, "cd")) != NULL)){ struct cd_softc *nsoftc; nsoftc = (struct cd_softc *)nperiph->softc; if ((nsoftc->flags & CD_FLAG_CHANGER) == 0){ nsoftc->flags |= CD_FLAG_CHANGER; nchanger->num_devices++; if (camq_resize(&nchanger->devq, nchanger->num_devices)!=CAM_REQ_CMP){ printf("cdregister: " "camq_resize " "failed, changer " "support may " "be messed up\n"); } nsoftc->changer = nchanger; nsoftc->pinfo.index =CAM_UNQUEUED_INDEX; STAILQ_INSERT_TAIL(&nchanger->chluns, nsoftc,changer_links); } xpt_free_path(path); } else if (status == CAM_REQ_CMP) xpt_free_path(path); else { printf("cdregister: unable to allocate path\n" "cdregister: changer support may be " "broken\n"); } nchanger->num_devices++; softc->changer = nchanger; softc->pinfo.index = CAM_UNQUEUED_INDEX; if (camq_resize(&nchanger->devq, nchanger->num_devices) != CAM_REQ_CMP) { printf("cdregister: camq_resize " "failed, changer support may " "be messed up\n"); } STAILQ_INSERT_TAIL(chlunhead, softc, changer_links); } /* * In this case, we don't already have an entry for this * particular changer, so we need to create one, add it to * the queue, and queue this device on the list for this * changer. Before we queue this device, however, we need * to search for lun id 0 on this target, and add it to the * queue first, if it exists. (and if it hasn't already * been marked as part of the changer.) */ else { nchanger = malloc(sizeof(struct cdchanger), M_DEVBUF, M_NOWAIT); if (nchanger == NULL) { softc->flags &= ~CD_FLAG_CHANGER; printf("cdregister: unable to malloc " "changer structure\ncdregister: " "changer support disabled\n"); /* * Yes, gotos can be gross but in this case * I think it's justified.. */ goto cdregisterexit; } /* zero the structure */ bzero(nchanger, sizeof(struct cdchanger)); if (camq_init(&nchanger->devq, 1) != 0) { softc->flags &= ~CD_FLAG_CHANGER; printf("cdregister: changer support " "disabled\n"); goto cdregisterexit; } num_changers++; nchanger->path_id = cgd->ccb_h.path_id; nchanger->target_id = cgd->ccb_h.target_id; /* this is superfluous, but it makes things clearer */ nchanger->num_devices = 0; STAILQ_INIT(&nchanger->chluns); STAILQ_INSERT_TAIL(&changerq, nchanger, changer_links); /* * Create a path with lun id 0, and see if we can * find a matching device */ status = xpt_create_path(&path, /*periph*/ periph, cgd->ccb_h.path_id, cgd->ccb_h.target_id, 0); /* * If we were able to allocate the path, and if we * find a matching device and it isn't already * marked as part of a changer, then we add it to * the current changer. 
*/ if ((status == CAM_REQ_CMP) && ((nperiph = cam_periph_find(path, "cd")) != NULL) && ((((struct cd_softc *)periph->softc)->flags & CD_FLAG_CHANGER) == 0)) { struct cd_softc *nsoftc; nsoftc = (struct cd_softc *)nperiph->softc; nsoftc->flags |= CD_FLAG_CHANGER; nchanger->num_devices++; if (camq_resize(&nchanger->devq, nchanger->num_devices) != CAM_REQ_CMP) { printf("cdregister: camq_resize " "failed, changer support may " "be messed up\n"); } nsoftc->changer = nchanger; nsoftc->pinfo.index = CAM_UNQUEUED_INDEX; STAILQ_INSERT_TAIL(&nchanger->chluns, nsoftc, changer_links); xpt_free_path(path); } else if (status == CAM_REQ_CMP) xpt_free_path(path); else { printf("cdregister: unable to allocate path\n" "cdregister: changer support may be " "broken\n"); } softc->changer = nchanger; softc->pinfo.index = CAM_UNQUEUED_INDEX; nchanger->num_devices++; if (camq_resize(&nchanger->devq, nchanger->num_devices) != CAM_REQ_CMP) { printf("cdregister: camq_resize " "failed, changer support may " "be messed up\n"); } STAILQ_INSERT_TAIL(&nchanger->chluns, softc, changer_links); } } cdregisterexit: /* Lock this peripheral until we are set up */ /* Can't block */ cam_periph_lock(periph, PRIBIO); if ((softc->flags & CD_FLAG_CHANGER) == 0) xpt_schedule(periph, /*priority*/5); else cdschedule(periph, /*priority*/ 5); return(CAM_REQ_CMP); } static int cdopen(dev_t dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct cd_softc *softc; u_int32_t size; int error; int s; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return (ENXIO); softc = (struct cd_softc *)periph->softc; /* * Grab splsoftcam and hold it until we lock the peripheral. */ s = splsoftcam(); if (softc->flags & CD_FLAG_INVALID) { splx(s); return(ENXIO); } if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) { splx(s); return (error); } splx(s); if (cam_periph_acquire(periph) != CAM_REQ_CMP) return(ENXIO); cdprevent(periph, PR_PREVENT); /* find out the size */ if ((error = cdsize(dev, &size)) != 0) { cdprevent(periph, PR_ALLOW); cam_periph_unlock(periph); cam_periph_release(periph); return(error); } /* * We unconditionally (re)set the blocksize each time the * CD device is opened. This is because the CD can change, * and therefore the blocksize might change. * XXX problems here if some slice or partition is still * open with the old size? */ if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0) softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE; softc->device_stats.block_size = softc->params.blksize; cam_periph_unlock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdopen\n")); return (error); } static int cdclose(dev_t dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct cd_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return (ENXIO); softc = (struct cd_softc *)periph->softc; if ((error = cam_periph_lock(periph, PRIBIO)) != 0) return (error); if ((softc->flags & CD_FLAG_DISC_REMOVABLE) != 0) cdprevent(periph, PR_ALLOW); /* * Since we're closing this CD, mark the blocksize as unavailable. * It will be marked as available when the CD is opened again.
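 *
 * Recap (not new code): taken together, open and close keep devstat
 * honest about the media blocksize:
 *
 *	cdopen:		flags &= ~DEVSTAT_BS_UNAVAILABLE;
 *			block_size = softc->params.blksize;
 *	cdclose:	flags |= DEVSTAT_BS_UNAVAILABLE;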
*/ softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE; cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void cdshorttimeout(void *arg) { struct cdchanger *changer; int s; s = splsoftcam(); changer = (struct cdchanger *)arg; /* Always clear the short timeout flag, since that's what we're in */ changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED; /* * Check to see if there is any more pending or outstanding I/O for * this device. If not, move it out of the active slot. */ if ((bioq_first(&changer->cur_device->bio_queue) == NULL) && (changer->cur_device->device_stats.busy_count == 0)) { changer->flags |= CHANGER_MANUAL_CALL; cdrunchangerqueue(changer); } splx(s); } /* * This is a wrapper for xpt_schedule. It only applies to changers. */ static void cdschedule(struct cam_periph *periph, int priority) { struct cd_softc *softc; int s; s = splsoftcam(); softc = (struct cd_softc *)periph->softc; /* * If this device isn't currently queued, and if it isn't * the active device, then we queue this device and run the * changer queue if there is no timeout scheduled to do it. * If this device is the active device, just schedule it * to run again. If this device is queued, there should be * a timeout in place already that will make sure it runs. */ if ((softc->pinfo.index == CAM_UNQUEUED_INDEX) && ((softc->flags & CD_FLAG_ACTIVE) == 0)) { /* * We don't do anything with the priority here. * This is strictly a fifo queue. */ softc->pinfo.priority = 1; softc->pinfo.generation = ++softc->changer->devq.generation; camq_insert(&softc->changer->devq, (cam_pinfo *)softc); /* * Since we just put a device in the changer queue, * check and see if there is a timeout scheduled for * this changer. If so, let the timeout handle * switching this device into the active slot. If * not, manually call the timeout routine to * bootstrap things. */ if (((softc->changer->flags & CHANGER_TIMEOUT_SCHED)==0) && ((softc->changer->flags & CHANGER_NEED_TIMEOUT)==0) && ((softc->changer->flags & CHANGER_SHORT_TMOUT_SCHED)==0)){ softc->changer->flags |= CHANGER_MANUAL_CALL; cdrunchangerqueue(softc->changer); } } else if ((softc->flags & CD_FLAG_ACTIVE) && ((softc->flags & CD_FLAG_SCHED_ON_COMP) == 0)) xpt_schedule(periph, priority); splx(s); } static void cdrunchangerqueue(void *arg) { struct cd_softc *softc; struct cdchanger *changer; int called_from_timeout; int s; s = splsoftcam(); changer = (struct cdchanger *)arg; /* * If we have NOT been called from cdstrategy() or cddone(), and * instead from a timeout routine, go ahead and clear the * timeout flag. */ if ((changer->flags & CHANGER_MANUAL_CALL) == 0) { changer->flags &= ~CHANGER_TIMEOUT_SCHED; called_from_timeout = 1; } else called_from_timeout = 0; /* Always clear the manual call flag */ changer->flags &= ~CHANGER_MANUAL_CALL; /* nothing to do if the queue is empty */ if (changer->devq.entries <= 0) { splx(s); return; } /* * If the changer queue is frozen, that means we have an active * device. */ if (changer->devq.qfrozen_cnt > 0) { if (changer->cur_device->device_stats.busy_count > 0) { changer->cur_device->flags |= CD_FLAG_SCHED_ON_COMP; changer->cur_device->bufs_left = changer->cur_device->device_stats.busy_count; if (called_from_timeout) { changer->long_handle = timeout(cdrunchangerqueue, changer, changer_max_busy_seconds * hz); changer->flags |= CHANGER_TIMEOUT_SCHED; } splx(s); return; } /* * We always need to reset the frozen count and clear the * active flag. 
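 *
 * Note: the changer queue is "strictly a fifo queue" (see the comment
 * in cdschedule() above) even though camq is a priority queue, because
 * every insertion uses the same priority and a monotonically
 * increasing generation number, leaving insertion order as the only
 * tie-breaker:
 *
 *	softc->pinfo.priority = 1;
 *	softc->pinfo.generation = ++changer->devq.generation;
 *	camq_insert(&changer->devq, (cam_pinfo *)softc);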
*/ changer->devq.qfrozen_cnt--; changer->cur_device->flags &= ~CD_FLAG_ACTIVE; changer->cur_device->flags &= ~CD_FLAG_SCHED_ON_COMP; /* * Check to see whether the current device has any I/O left * to do. If so, requeue it at the end of the queue. If * not, there is no need to requeue it. */ if (bioq_first(&changer->cur_device->bio_queue) != NULL) { changer->cur_device->pinfo.generation = ++changer->devq.generation; camq_insert(&changer->devq, (cam_pinfo *)changer->cur_device); } } softc = (struct cd_softc *)camq_remove(&changer->devq, CAMQ_HEAD); changer->cur_device = softc; changer->devq.qfrozen_cnt++; softc->flags |= CD_FLAG_ACTIVE; /* Just in case this device is waiting */ wakeup(&softc->changer); xpt_schedule(softc->periph, /*priority*/ 1); /* * Get rid of any pending timeouts, and set a flag to schedule new * ones so this device gets its full time quantum. */ if (changer->flags & CHANGER_TIMEOUT_SCHED) { untimeout(cdrunchangerqueue, changer, changer->long_handle); changer->flags &= ~CHANGER_TIMEOUT_SCHED; } if (changer->flags & CHANGER_SHORT_TMOUT_SCHED) { untimeout(cdshorttimeout, changer, changer->short_handle); changer->flags &= ~CHANGER_SHORT_TMOUT_SCHED; } /* * We need to schedule timeouts, but we only do this after the * first transaction has completed. This eliminates the changer * switch time. */ changer->flags |= CHANGER_NEED_TIMEOUT; splx(s); } static void cdchangerschedule(struct cd_softc *softc) { struct cdchanger *changer; int s; s = splsoftcam(); changer = softc->changer; /* * If this is a changer, and this is the current device, * and this device has at least the minimum time quantum to * run, see if we can switch it out. */ if ((softc->flags & CD_FLAG_ACTIVE) && ((changer->flags & CHANGER_SHORT_TMOUT_SCHED) == 0) && ((changer->flags & CHANGER_NEED_TIMEOUT) == 0)) { /* * We try three things here. The first is that we * check to see whether the schedule on completion * flag is set. If it is, we decrement the number * of buffers left, and if it's zero, we reschedule. * Next, we check to see whether the pending buffer * queue is empty and whether there are no * outstanding transactions. If so, we reschedule. * Next, we see if the pending buffer queue is empty. * If it is, we set the number of buffers left to * the current active buffer count and set the * schedule on complete flag. */ if (softc->flags & CD_FLAG_SCHED_ON_COMP) { if (--softc->bufs_left == 0) { softc->changer->flags |= CHANGER_MANUAL_CALL; softc->flags &= ~CD_FLAG_SCHED_ON_COMP; cdrunchangerqueue(softc->changer); } } else if ((bioq_first(&softc->bio_queue) == NULL) && (softc->device_stats.busy_count == 0)) { softc->changer->flags |= CHANGER_MANUAL_CALL; cdrunchangerqueue(softc->changer); } } else if ((softc->changer->flags & CHANGER_NEED_TIMEOUT) && (softc->flags & CD_FLAG_ACTIVE)) { /* * Now that the first transaction to this * particular device has completed, we can go ahead * and schedule our timeouts. */ if ((changer->flags & CHANGER_TIMEOUT_SCHED) == 0) { changer->long_handle = timeout(cdrunchangerqueue, changer, changer_max_busy_seconds * hz); changer->flags |= CHANGER_TIMEOUT_SCHED; } else printf("cdchangerschedule: already have a long" " timeout!\n"); if ((changer->flags & CHANGER_SHORT_TMOUT_SCHED) == 0) { changer->short_handle = timeout(cdshorttimeout, changer, changer_min_busy_seconds * hz); changer->flags |= CHANGER_SHORT_TMOUT_SCHED; } else printf("cdchangerschedule: already have a short " "timeout!\n"); /* * We just scheduled timeouts, no need to schedule * more. 
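 *
 * Recap of the timeout(9) idiom this changer code uses throughout:
 * each timer is guarded by a *_SCHED flag, and its handle is kept so
 * the callout can be cancelled exactly once:
 *
 *	handle = timeout(func, arg, ticks);	flags |= SCHED;
 *	...
 *	untimeout(func, arg, handle);		flags &= ~SCHED;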
*/ changer->flags &= ~CHANGER_NEED_TIMEOUT; } splx(s); } static int cdrunccb(union ccb *ccb, int (*error_routine)(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags), u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; int error; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; error = cam_periph_runccb(ccb, error_routine, cam_flags, sense_flags, &softc->device_stats); if (softc->flags & CD_FLAG_CHANGER) cdchangerschedule(softc); return(error); } static union ccb * cdgetccb(struct cam_periph *periph, u_int32_t priority) { struct cd_softc *softc; int s; softc = (struct cd_softc *)periph->softc; if (softc->flags & CD_FLAG_CHANGER) { s = splsoftcam(); /* * This should work the first time this device is woken up, * but just in case it doesn't, we use a while loop. */ while ((softc->flags & CD_FLAG_ACTIVE) == 0) { /* * If this changer isn't already queued, queue it up. */ if (softc->pinfo.index == CAM_UNQUEUED_INDEX) { softc->pinfo.priority = 1; softc->pinfo.generation = ++softc->changer->devq.generation; camq_insert(&softc->changer->devq, (cam_pinfo *)softc); } if (((softc->changer->flags & CHANGER_TIMEOUT_SCHED)==0) && ((softc->changer->flags & CHANGER_NEED_TIMEOUT)==0) && ((softc->changer->flags & CHANGER_SHORT_TMOUT_SCHED)==0)) { softc->changer->flags |= CHANGER_MANUAL_CALL; cdrunchangerqueue(softc->changer); } else tsleep(&softc->changer, PRIBIO, "cgticb", 0); } splx(s); } return(cam_periph_getccb(periph, priority)); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void cdstrategy(struct bio *bp) { struct cam_periph *periph; struct cd_softc *softc; int s; periph = (struct cam_periph *)bp->bio_dev->si_drv1; if (periph == NULL) { biofinish(bp, NULL, ENXIO); return; } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdstrategy\n")); softc = (struct cd_softc *)periph->softc; /* * Mask interrupts so that the pack cannot be invalidated until * after we are in the queue. Otherwise, we might not properly * clean up one of the buffers. */ s = splbio(); /* * If the device has been made invalid, error out */ if ((softc->flags & CD_FLAG_INVALID)) { splx(s); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ bioqdisksort(&softc->bio_queue, bp); splx(s); /* * Schedule ourselves for performing the work. We do things * differently for changers. 
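 *
 * Note: for changer members the interesting half lives in cdgetccb()
 * above, which is one side of a classic sleep/wakeup handshake;
 * cdrunchangerqueue() does wakeup(&softc->changer) once the device
 * owns the active slot.  In outline (a sketch, not the literal code):
 *
 *	while ((softc->flags & CD_FLAG_ACTIVE) == 0) {
 *		(queue the device, or kick the changer queue)
 *		tsleep(&softc->changer, PRIBIO, "cgticb", 0);
 *	}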
*/ if ((softc->flags & CD_FLAG_CHANGER) == 0) xpt_schedule(periph, /* XXX priority */1); else cdschedule(periph, /* priority */ 1); return; } static void cdstart(struct cam_periph *periph, union ccb *start_ccb) { struct cd_softc *softc; struct bio *bp; struct ccb_scsiio *csio; struct scsi_read_capacity_data *rcap; int s; softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdstart\n")); switch (softc->state) { case CD_STATE_NORMAL: { int oldspl; s = splbio(); bp = bioq_first(&softc->bio_queue); if (periph->immediate_priority <= periph->pinfo.priority) { start_ccb->ccb_h.ccb_state = CD_CCB_WAITING; SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, periph_links.sle); periph->immediate_priority = CAM_PRIORITY_NONE; splx(s); wakeup(&periph->ccb_list); } else if (bp == NULL) { splx(s); xpt_release_ccb(start_ccb); } else { bioq_remove(&softc->bio_queue, bp); devstat_start_transaction(&softc->device_stats); scsi_read_write(&start_ccb->csio, /*retries*/4, /* cbfcnp */ cddone, MSG_SIMPLE_Q_TAG, /* read */bp->bio_cmd == BIO_READ, /* byte2 */ 0, /* minimum_cmd_size */ 10, /* lba */ bp->bio_blkno / (softc->params.blksize / DEV_BSIZE), bp->bio_bcount / softc->params.blksize, /* data_ptr */ bp->bio_data, /* dxfer_len */ bp->bio_bcount, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 30000); start_ccb->ccb_h.ccb_state = CD_CCB_BUFFER_IO; /* * Block out any asynchronous callbacks * while we touch the pending ccb list. */ oldspl = splcam(); LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); splx(oldspl); /* We expect a unit attention from this device */ if ((softc->flags & CD_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= CD_CCB_RETRY_UA; softc->flags &= ~CD_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); splx(s); xpt_action(start_ccb); } if (bp != NULL) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, /* XXX priority */1); } break; } case CD_STATE_PROBE: { rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), M_TEMP, M_NOWAIT); if (rcap == NULL) { xpt_print_path(periph->path); printf("cdstart: Couldn't malloc read_capacity data\n"); /* cd_free_periph??? */ break; } csio = &start_ccb->csio; scsi_read_capacity(csio, /*retries*/1, cddone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/20000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CD_CCB_PROBE; xpt_action(start_ccb); break; } } } static void cddone(struct cam_periph *periph, union ccb *done_ccb) { struct cd_softc *softc; struct ccb_scsiio *csio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cddone\n")); softc = (struct cd_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_state & CD_CCB_TYPE_MASK) { case CD_CCB_BUFFER_IO: { struct bio *bp; int error; int oldspl; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int sf; if ((done_ccb->ccb_h.ccb_state & CD_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = cderror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheduled, so * just return.
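 *
 * Worked example for cdstart() above: bio_blkno is in DEV_BSIZE
 * (512-byte) units, so the lba handed to scsi_read_write() is
 * bio_blkno / (blksize / DEV_BSIZE); with the usual 2048-byte CD
 * sector that is bio_blkno / 4, and the transfer length is
 * bio_bcount / 2048 device blocks.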
*/ return; } } if (error != 0) { int s; struct bio *q_bp; xpt_print_path(periph->path); printf("cddone: got error %#x back\n", error); s = splbio(); while ((q_bp = bioq_first(&softc->bio_queue)) != NULL) { bioq_remove(&softc->bio_queue, q_bp); q_bp->bio_resid = q_bp->bio_bcount; biofinish(q_bp, NULL, EIO); } splx(s); bp->bio_resid = bp->bio_bcount; bp->bio_error = error; bp->bio_flags |= BIO_ERROR; cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) { /* * Short transfer ??? * XXX: not sure this is correct for partial * transfers at EOM */ bp->bio_flags |= BIO_ERROR; } } /* * Block out any asynchronous callbacks * while we touch the pending ccb list. */ oldspl = splcam(); LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); splx(oldspl); if (softc->flags & CD_FLAG_CHANGER) cdchangerschedule(softc); biofinish(bp, &softc->device_stats, 0); break; } case CD_CCB_PROBE: { struct scsi_read_capacity_data *rdcap; char announce_buf[120]; /* * Currently (9/30/97) the * longest possible announce * buffer is 108 bytes, for the * first error case below. * That is 39 bytes for the * basic string, 16 bytes for the * biggest sense key (hardware * error), 52 bytes for the * text of the largest sense * qualifier valid for a CDROM, * (0x72, 0x03 or 0x04, * 0x03), and one byte for the * null terminating character. * To allow for longer strings, * the announce buffer is 120 * bytes. */ struct cd_params *cdp; cdp = &softc->params; rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; cdp->disksize = scsi_4btoul (rdcap->addr) + 1; cdp->blksize = scsi_4btoul (rdcap->length); if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { snprintf(announce_buf, sizeof(announce_buf), "cd present [%lu x %lu byte records]", cdp->disksize, (u_long)cdp->blksize); } else { int error; /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ error = cderror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else if (error != 0) { struct scsi_sense_data *sense; int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); status = done_ccb->ccb_h.status; xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, /* priority */ 1); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0) || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0) || ((status & CAM_AUTOSNS_VALID) == 0)) have_sense = FALSE; else have_sense = TRUE; if (have_sense) { sense = &csio->sense_data; scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq); } /* * Attach to anything that claims to be a * CDROM or WORM device, as long as it * doesn't return a "Logical unit not * supported" (0x25) error.
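 *
 * Note on the have_sense test above: autosense data is only trusted
 * when it was returned by value (neither CAM_SENSE_PHYS nor
 * CAM_SENSE_PTR is set) and the SIM marked it valid
 * (CAM_AUTOSNS_VALID); in every other case the code below takes the
 * "no sense data" paths.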
*/ if ((have_sense) && (asc != 0x25) && (error_code == SSD_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, sizeof(announce_buf), "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else if ((have_sense == 0) && ((status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) && (csio->scsi_status == SCSI_STATUS_BUSY)) { snprintf(announce_buf, sizeof(announce_buf), "Attempt to query device " "size failed: SCSI Status: %s", scsi_status_string(csio)); } else if (SID_TYPE(&cgd.inq_data) == T_CDROM) { /* * We only print out an error for * CDROM type devices. For WORM * devices, we don't print out an * error since a few WORM devices * don't support CDROM commands. * If we have sense information, go * ahead and print it out. * Otherwise, just say that we * couldn't attach. */ /* * Just print out the error, not * the full probe message, when we * don't attach. */ if (have_sense) scsi_sense_print( &done_ccb->csio); else { xpt_print_path(periph->path); printf("got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print_path(periph->path); printf("fatal error, failed" " to attach to device\n"); /* * Invalidate this peripheral. */ cam_periph_invalidate(periph); announce_buf[0] = '\0'; } else { /* * Invalidate this peripheral. */ cam_periph_invalidate(periph); announce_buf[0] = '\0'; } } } free(rdcap, M_TEMP); if (announce_buf[0] != '\0') { xpt_announce_periph(periph, announce_buf); if (softc->flags & CD_FLAG_CHANGER) cdchangerschedule(softc); } softc->state = CD_STATE_NORMAL; /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. 
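 *
 * In other words, the ordering below matters:
 *
 *	xpt_release_ccb(done_ccb);	(still needs the peripheral)
 *	cam_periph_unlock(periph);	(may drop the last lock)
 *
 * reversing the two could let an invalidated peripheral be torn down
 * while the CCB release still references it.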
*/ xpt_release_ccb(done_ccb); cam_periph_unlock(periph); return; } case CD_CCB_WAITING: { /* Caller will release the CCB */ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("trying to wakeup ccbwait\n")); wakeup(&done_ccb->ccb_h.cbfcnp); return; } default: break; } xpt_release_ccb(done_ccb); } static int cdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct cd_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return(ENXIO); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdioctl\n")); softc = (struct cd_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("trying to do ioctl %#lx\n", cmd)); error = cam_periph_lock(periph, PRIBIO | PCATCH); if (error != 0) return(error); switch (cmd) { case DIOCGMEDIASIZE: *(off_t *)addr = (off_t)softc->params.blksize * softc->params.disksize; break; case DIOCGSECTORSIZE: *(u_int *)addr = softc->params.blksize; break; case CDIOCPLAYTRACKS: { struct ioc_play_track *args = (struct ioc_play_track *) addr; struct cd_mode_data *data; data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYTRACKS\n")); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.flags &= ~CD_PA_SOTC; data->page.audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, data); free(data, M_TEMP); if (error) break; if (softc->quirks & CD_Q_BCD_TRACKS) { args->start_track = bin2bcd(args->start_track); args->end_track = bin2bcd(args->end_track); } error = cdplaytracks(periph, args->start_track, args->start_index, args->end_track, args->end_index); } break; case CDIOCPLAYMSF: { struct ioc_play_msf *args = (struct ioc_play_msf *) addr; struct cd_mode_data *data; data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYMSF\n")); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.flags &= ~CD_PA_SOTC; data->page.audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, data); free(data, M_TEMP); if (error) break; error = cdplaymsf(periph, args->start_m, args->start_s, args->start_f, args->end_m, args->end_s, args->end_f); } break; case CDIOCPLAYBLOCKS: { struct ioc_play_blocks *args = (struct ioc_play_blocks *) addr; struct cd_mode_data *data; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCPLAYBLOCKS\n")); data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.flags &= ~CD_PA_SOTC; data->page.audio.flags |= CD_PA_IMMED; error = cdsetmode(periph, data); free(data, M_TEMP); if (error) break; error = cdplay(periph, args->blk, args->len); } break; case CDIOCREADSUBCHANNEL: { struct ioc_read_subchannel *args = (struct ioc_read_subchannel *) addr; struct cd_sub_channel_info *data; u_int32_t len = args->data_len; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCREADSUBCHANNEL\n")); data = malloc(sizeof(struct cd_sub_channel_info), M_TEMP, 0); if ((len > sizeof(struct cd_sub_channel_info)) || (len < sizeof(struct cd_sub_channel_header))) { printf( "scsi_cd: cdioctl: " "cdioreadsubchannel: error, len=%d\n", len); error = EINVAL; free(data, M_TEMP); break; } if (softc->quirks & CD_Q_BCD_TRACKS) args->track = bin2bcd(args->track); error = cdreadsubchannel(periph, args->address_format, args->data_format, args->track, data, len); if (error) { 
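/*
 * Aside (illustration, not driver code): on CD_Q_BCD_TRACKS drives the
 * track numbers above and below travel in binary-coded decimal, so
 * track 12 is sent as bin2bcd(12) == 0x12 and converted back with
 * bcd2bin(0x12) == 12; the conversions amount to
 *
 *	bin2bcd(n): ((n / 10) << 4) | (n % 10)
 *	bcd2bin(b): ((b >> 4) * 10) + (b & 0x0f)
 */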
free(data, M_TEMP); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->what.track_info.track_number = bcd2bin(data->what.track_info.track_number); len = min(len, ((data->header.data_len[0] << 8) + data->header.data_len[1] + sizeof(struct cd_sub_channel_header))); if (copyout(data, args->data, len) != 0) { error = EFAULT; } free(data, M_TEMP); } break; case CDIOREADTOCHEADER: { struct ioc_toc_header *th; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCHEADER\n")); th = malloc(sizeof(struct ioc_toc_header), M_TEMP, 0); error = cdreadtoc(periph, 0, 0, (struct cd_toc_entry *)th, sizeof (*th)); if (error) { free(th, M_TEMP); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } th->len = ntohs(th->len); bcopy(th, addr, sizeof(*th)); free(th, M_TEMP); } break; case CDIOREADTOCENTRYS: { typedef struct { struct ioc_toc_header header; struct cd_toc_entry entries[100]; } data_t; typedef struct { struct ioc_toc_header header; struct cd_toc_entry entry; } lead_t; data_t *data; lead_t *lead; struct ioc_read_toc_entry *te = (struct ioc_read_toc_entry *) addr; struct ioc_toc_header *th; u_int32_t len, readlen, idx, num; u_int32_t starting_track = te->starting_track; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRYS\n")); data = malloc(sizeof(data_t), M_TEMP, 0); lead = malloc(sizeof(lead_t), M_TEMP, 0); if (te->data_len < sizeof(struct cd_toc_entry) || (te->data_len % sizeof(struct cd_toc_entry)) != 0 || (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT)) { error = EINVAL; printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_TEMP); free(lead, M_TEMP); break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (struct cd_toc_entry *)th, sizeof (*th)); if (error) { free(data, M_TEMP); free(lead, M_TEMP); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } if (starting_track == 0) starting_track = th->starting_track; else if (starting_track == LEADOUT) starting_track = th->ending_track + 1; else if (starting_track < th->starting_track || starting_track > th->ending_track + 1) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); free(data, M_TEMP); free(lead, M_TEMP); error = EINVAL; break; } /* calculate reading length without leadout entry */ readlen = (th->ending_track - starting_track + 1) * sizeof(struct cd_toc_entry); /* and with leadout entry */ len = readlen + sizeof(struct cd_toc_entry); if (te->data_len < len) { len = te->data_len; if (readlen > len) readlen = len; } if (len > sizeof(data->entries)) { printf("scsi_cd: error in readtocentries, " "returning EINVAL\n"); error = EINVAL; free(data, M_TEMP); free(lead, M_TEMP); break; } num = len / sizeof(struct cd_toc_entry); if (readlen > 0) { error = cdreadtoc(periph, te->address_format, starting_track, (struct cd_toc_entry *)data, readlen + sizeof (*th)); if (error) { free(data, M_TEMP); free(lead, M_TEMP); break; } } /* make leadout entry if needed */ idx = starting_track + num - 1; if (softc->quirks & CD_Q_BCD_TRACKS) th->ending_track = bcd2bin(th->ending_track); if (idx == th->ending_track + 1) { error = cdreadtoc(periph, te->address_format, LEADOUT, (struct cd_toc_entry 
*)lead, sizeof(*lead)); if (error) { free(data, M_TEMP); free(lead, M_TEMP); break; } data->entries[idx - starting_track] = lead->entry; } if (softc->quirks & CD_Q_BCD_TRACKS) { for (idx = 0; idx < num - 1; idx++) { data->entries[idx].track = bcd2bin(data->entries[idx].track); } } error = copyout(data->entries, te->data, len); free(data, M_TEMP); free(lead, M_TEMP); } break; case CDIOREADTOCENTRY: { /* yeah yeah, this is ugly */ typedef struct { struct ioc_toc_header header; struct cd_toc_entry entry; } data_t; data_t *data; struct ioc_read_toc_single_entry *te = (struct ioc_read_toc_single_entry *) addr; struct ioc_toc_header *th; u_int32_t track; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOREADTOCENTRY\n")); data = malloc(sizeof(data_t), M_TEMP, 0); if (te->address_format != CD_MSF_FORMAT && te->address_format != CD_LBA_FORMAT) { printf("error in readtocentry, " " returning EINVAL\n"); free(data, M_TEMP); error = EINVAL; break; } th = &data->header; error = cdreadtoc(periph, 0, 0, (struct cd_toc_entry *)th, sizeof (*th)); if (error) { free(data, M_TEMP); break; } if (softc->quirks & CD_Q_BCD_TRACKS) { /* we are going to have to convert the BCD * encoding on the cd to what is expected */ th->starting_track = bcd2bin(th->starting_track); th->ending_track = bcd2bin(th->ending_track); } track = te->track; if (track == 0) track = th->starting_track; else if (track == LEADOUT) /* OK */; else if (track < th->starting_track || track > th->ending_track + 1) { printf("error in readtocentry, " " returning EINVAL\n"); free(data, M_TEMP); error = EINVAL; break; } error = cdreadtoc(periph, te->address_format, track, (struct cd_toc_entry *)data, sizeof(data_t)); if (error) { free(data, M_TEMP); break; } if (softc->quirks & CD_Q_BCD_TRACKS) data->entry.track = bcd2bin(data->entry.track); bcopy(&data->entry, &te->entry, sizeof(struct cd_toc_entry)); free(data, M_TEMP); } break; case CDIOCSETPATCH: { struct ioc_patch *arg = (struct ioc_patch *) addr; struct cd_mode_data *data; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETPATCH\n")); data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.port[LEFT_PORT].channels = arg->patch[0]; data->page.audio.port[RIGHT_PORT].channels = arg->patch[1]; data->page.audio.port[2].channels = arg->patch[2]; data->page.audio.port[3].channels = arg->patch[3]; error = cdsetmode(periph, data); free(data, M_TEMP); } break; case CDIOCGETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct cd_mode_data *data; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCGETVOL\n")); data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } arg->vol[LEFT_PORT] = data->page.audio.port[LEFT_PORT].volume; arg->vol[RIGHT_PORT] = data->page.audio.port[RIGHT_PORT].volume; arg->vol[2] = data->page.audio.port[2].volume; arg->vol[3] = data->page.audio.port[3].volume; free(data, M_TEMP); } break; case CDIOCSETVOL: { struct ioc_vol *arg = (struct ioc_vol *) addr; struct cd_mode_data *data; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETVOL\n")); data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.port[LEFT_PORT].channels = CHANNEL_0; data->page.audio.port[LEFT_PORT].volume = arg->vol[LEFT_PORT]; 
data->page.audio.port[RIGHT_PORT].channels = CHANNEL_1; data->page.audio.port[RIGHT_PORT].volume = arg->vol[RIGHT_PORT]; data->page.audio.port[2].volume = arg->vol[2]; data->page.audio.port[3].volume = arg->vol[3]; error = cdsetmode(periph, data); free(data, M_TEMP); } break; case CDIOCSETMONO: { struct cd_mode_data *data; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMONO\n")); data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.port[LEFT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; data->page.audio.port[RIGHT_PORT].channels = LEFT_CHANNEL | RIGHT_CHANNEL; data->page.audio.port[2].channels = 0; data->page.audio.port[3].channels = 0; error = cdsetmode(periph, data); free(data, M_TEMP); } break; case CDIOCSETSTEREO: { struct cd_mode_data *data; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETSTEREO\n")); data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.port[LEFT_PORT].channels = LEFT_CHANNEL; data->page.audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; data->page.audio.port[2].channels = 0; data->page.audio.port[3].channels = 0; error = cdsetmode(periph, data); free(data, M_TEMP); } break; case CDIOCSETMUTE: { struct cd_mode_data *data; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETMUTE\n")); data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.port[LEFT_PORT].channels = 0; data->page.audio.port[RIGHT_PORT].channels = 0; data->page.audio.port[2].channels = 0; data->page.audio.port[3].channels = 0; error = cdsetmode(periph, data); free(data, M_TEMP); } break; case CDIOCSETLEFT: { struct cd_mode_data *data; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETLEFT\n")); data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.port[LEFT_PORT].channels = LEFT_CHANNEL; data->page.audio.port[RIGHT_PORT].channels = LEFT_CHANNEL; data->page.audio.port[2].channels = 0; data->page.audio.port[3].channels = 0; error = cdsetmode(periph, data); free(data, M_TEMP); } break; case CDIOCSETRIGHT: { struct cd_mode_data *data; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("trying to do CDIOCSETRIGHT\n")); data = malloc(sizeof(struct cd_mode_data), M_TEMP, 0); error = cdgetmode(periph, data, AUDIO_PAGE); if (error) { free(data, M_TEMP); break; } data->page.audio.port[LEFT_PORT].channels = RIGHT_CHANNEL; data->page.audio.port[RIGHT_PORT].channels = RIGHT_CHANNEL; data->page.audio.port[2].channels = 0; data->page.audio.port[3].channels = 0; error = cdsetmode(periph, data); free(data, M_TEMP); } break; case CDIOCRESUME: error = cdpause(periph, 1); break; case CDIOCPAUSE: error = cdpause(periph, 0); break; case CDIOCSTART: error = cdstartunit(periph); break; case CDIOCSTOP: error = cdstopunit(periph, 0); break; case CDIOCEJECT: error = cdstopunit(periph, 1); break; case CDIOCALLOW: cdprevent(periph, PR_ALLOW); break; case CDIOCPREVENT: cdprevent(periph, PR_PREVENT); break; case CDIOCSETDEBUG: /* sc_link->flags |= (SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCCLRDEBUG: /* sc_link->flags &= ~(SDEV_DB1 | SDEV_DB2); */ error = ENOTTY; break; case CDIOCRESET: /* return (cd_reset(periph)); */ error = ENOTTY; break; 
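/*
 * Summary of the audio routing ioctls above: they differ only in the
 * channel masks written to the two output ports (ports 2 and 3 are
 * always cleared).
 *
 *	ioctl		port[LEFT_PORT]		port[RIGHT_PORT]
 *	CDIOCSETSTEREO	LEFT_CHANNEL		RIGHT_CHANNEL
 *	CDIOCSETMONO	LEFT|RIGHT		LEFT|RIGHT
 *	CDIOCSETLEFT	LEFT_CHANNEL		LEFT_CHANNEL
 *	CDIOCSETRIGHT	RIGHT_CHANNEL		RIGHT_CHANNEL
 *	CDIOCSETMUTE	0			0
 */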
case CDRIOCREADSPEED: error = cdsetspeed(periph, *(u_int32_t *)addr, CDR_MAX_SPEED); break; case CDRIOCWRITESPEED: error = cdsetspeed(periph, CDR_MAX_SPEED, *(u_int32_t *)addr); break; case DVDIOCSENDKEY: case DVDIOCREPORTKEY: { struct dvd_authinfo *authinfo; authinfo = (struct dvd_authinfo *)addr; if (cmd == DVDIOCREPORTKEY) error = cdreportkey(periph, authinfo); else error = cdsendkey(periph, authinfo); break; } case DVDIOCREADSTRUCTURE: { struct dvd_struct *dvdstruct; dvdstruct = (struct dvd_struct *)addr; error = cdreaddvdstructure(periph, dvdstruct); break; } default: error = cam_periph_ioctl(periph, cmd, addr, cderror); break; } cam_periph_unlock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving cdioctl\n")); if (error && bootverbose) { printf("scsi_cd.c::ioctl cmd=%08lx error=%d\n", cmd, error); } return (error); } static void cdprevent(struct cam_periph *periph, int action) { union ccb *ccb; struct cd_softc *softc; int error; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdprevent\n")); softc = (struct cd_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & CD_FLAG_DISC_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & CD_FLAG_DISC_LOCKED) != 0)) { return; } ccb = cdgetccb(periph, /* priority */ 1); scsi_prevent(&ccb->csio, /*retries*/ 1, cddone, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, /* timeout */60000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~CD_FLAG_DISC_LOCKED; else softc->flags |= CD_FLAG_DISC_LOCKED; } } static int cdsize(dev_t dev, u_int32_t *size) { struct cam_periph *periph; struct cd_softc *softc; union ccb *ccb; struct scsi_read_capacity_data *rcap_buf; int error; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return (ENXIO); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdsize\n")); softc = (struct cd_softc *)periph->softc; ccb = cdgetccb(periph, /* priority */ 1); rcap_buf = malloc(sizeof(struct scsi_read_capacity_data), M_TEMP, 0); scsi_read_capacity(&ccb->csio, /*retries*/ 1, cddone, MSG_SIMPLE_Q_TAG, rcap_buf, SSD_FULL_SIZE, /* timeout */20000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA|SF_NO_PRINT); xpt_release_ccb(ccb); softc->params.disksize = scsi_4btoul(rcap_buf->addr) + 1; softc->params.blksize = scsi_4btoul(rcap_buf->length); /* * SCSI-3 mandates that the reported blocksize shall be 2048. * Older drives sometimes report funny values, trim it down to * 2048, or other parts of the kernel will get confused. * * XXX we leave drives alone that might report 512 bytes, as * well as drives reporting more weird sizes like perhaps 4K. */ if (softc->params.blksize > 2048 && softc->params.blksize <= 2352) softc->params.blksize = 2048; free(rcap_buf, M_TEMP); *size = softc->params.disksize; return (error); } static int cderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cd_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct cd_softc *)periph->softc; /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. 
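 *
 * (A unit attention usually just means the medium may have changed;
 * until the driver can revalidate the pack here, retrying is safer
 * than failing the request outright.)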
*/ sense_flags |= SF_RETRY_UA; return (cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } /* * Read table of contents */ static int cdreadtoc(struct cam_periph *periph, u_int32_t mode, u_int32_t start, struct cd_toc_entry *data, u_int32_t len) { struct scsi_read_toc *scsi_cmd; u_int32_t ntoc; struct ccb_scsiio *csio; union ccb *ccb; int error; ntoc = len; error = 0; ccb = cdgetccb(periph, /* priority */ 1); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ 1, /* cbfcnp */ cddone, /* flags */ CAM_DIR_IN, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ (u_int8_t *)data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_read_toc), /* timeout */ 50000); scsi_cmd = (struct scsi_read_toc *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); if (mode == CD_MSF_FORMAT) scsi_cmd->byte2 |= CD_MSF; scsi_cmd->from_track = start; /* scsi_ulto2b(ntoc, (u_int8_t *)scsi_cmd->data_len); */ scsi_cmd->data_len[0] = (ntoc) >> 8; scsi_cmd->data_len[1] = (ntoc) & 0xff; scsi_cmd->op_code = READ_TOC; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdreadsubchannel(struct cam_periph *periph, u_int32_t mode, u_int32_t format, int track, struct cd_sub_channel_info *data, u_int32_t len) { struct scsi_read_subchannel *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cdgetccb(periph, /* priority */ 1); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ 1, /* cbfcnp */ cddone, /* flags */ CAM_DIR_IN, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ (u_int8_t *)data, /* dxfer_len */ len, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_read_subchannel), /* timeout */ 50000); scsi_cmd = (struct scsi_read_subchannel *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = READ_SUBCHANNEL; if (mode == CD_MSF_FORMAT) scsi_cmd->byte1 |= CD_MSF; scsi_cmd->byte2 = SRS_SUBQ; scsi_cmd->subchan_format = format; scsi_cmd->track = track; scsi_ulto2b(len, (u_int8_t *)scsi_cmd->data_len); scsi_cmd->control = 0; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdgetmode(struct cam_periph *periph, struct cd_mode_data *data, u_int32_t page) { struct scsi_mode_sense_6 *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; ccb = cdgetccb(periph, /* priority */ 1); csio = &ccb->csio; bzero(data, sizeof(*data)); cam_fill_csio(csio, /* retries */ 1, /* cbfcnp */ cddone, /* flags */ CAM_DIR_IN, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ (u_int8_t *)data, /* dxfer_len */ sizeof(*data), /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_mode_sense_6), /* timeout */ 50000); scsi_cmd = (struct scsi_mode_sense_6 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->page = page; scsi_cmd->length = sizeof(*data) & 0xff; scsi_cmd->opcode = MODE_SENSE; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdsetmode(struct cam_periph *periph, struct cd_mode_data *data) { struct scsi_mode_select_6 *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; ccb = cdgetccb(periph, /* priority */ 1); csio = &ccb->csio; error = 0; cam_fill_csio(csio, /* retries */ 1, /* cbfcnp */ cddone, /* flags */ CAM_DIR_OUT, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ (u_int8_t *)data, /* dxfer_len */ sizeof(*data), /* sense_len */ SSD_FULL_SIZE, 
sizeof(struct scsi_mode_select_6), /* timeout */ 50000); scsi_cmd = (struct scsi_mode_select_6 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SELECT; scsi_cmd->byte2 |= SMS_PF; scsi_cmd->length = sizeof(*data) & 0xff; data->header.data_length = 0; /* * SONY drives do not allow a mode select with a medium_type * value that has just been returned by a mode sense; use a * medium_type of 0 (Default) instead. */ data->header.medium_type = 0; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplay(struct cam_periph *periph, u_int32_t blk, u_int32_t len) { struct ccb_scsiio *csio; union ccb *ccb; int error; u_int8_t cdb_len; error = 0; ccb = cdgetccb(periph, /* priority */ 1); csio = &ccb->csio; /* * Use the smallest possible command to perform the operation. */ if ((len & 0xffff0000) == 0) { /* * We can fit in a 10 byte cdb. */ struct scsi_play_10 *scsi_cmd; scsi_cmd = (struct scsi_play_10 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_10; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto2b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } else { struct scsi_play_12 *scsi_cmd; scsi_cmd = (struct scsi_play_12 *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_12; scsi_ulto4b(blk, (u_int8_t *)scsi_cmd->blk_addr); scsi_ulto4b(len, (u_int8_t *)scsi_cmd->xfer_len); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, /*retries*/2, cddone, /*flags*/CAM_DIR_NONE, MSG_SIMPLE_Q_TAG, /*dataptr*/NULL, /*datalen*/0, /*sense_len*/SSD_FULL_SIZE, cdb_len, /*timeout*/50 * 1000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaymsf(struct cam_periph *periph, u_int32_t startm, u_int32_t starts, u_int32_t startf, u_int32_t endm, u_int32_t ends, u_int32_t endf) { struct scsi_play_msf *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cdgetccb(periph, /* priority */ 1); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ 1, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_msf), /* timeout */ 50000); scsi_cmd = (struct scsi_play_msf *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_MSF; scsi_cmd->start_m = startm; scsi_cmd->start_s = starts; scsi_cmd->start_f = startf; scsi_cmd->end_m = endm; scsi_cmd->end_s = ends; scsi_cmd->end_f = endf; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdplaytracks(struct cam_periph *periph, u_int32_t strack, u_int32_t sindex, u_int32_t etrack, u_int32_t eindex) { struct scsi_play_track *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cdgetccb(periph, /* priority */ 1); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ 1, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_play_track), /* timeout */ 50000); scsi_cmd = (struct scsi_play_track *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PLAY_TRACK; scsi_cmd->start_track = strack; scsi_cmd->start_index = sindex; scsi_cmd->end_track = 
etrack; scsi_cmd->end_index = eindex; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdpause(struct cam_periph *periph, u_int32_t go) { struct scsi_pause *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cdgetccb(periph, /* priority */ 1); csio = &ccb->csio; cam_fill_csio(csio, /* retries */ 1, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_pause), /* timeout */ 50000); scsi_cmd = (struct scsi_pause *)&csio->cdb_io.cdb_bytes; bzero (scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->op_code = PAUSE; scsi_cmd->resume = go; error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdstartunit(struct cam_periph *periph) { union ccb *ccb; int error; error = 0; ccb = cdgetccb(periph, /* priority */ 1); scsi_start_stop(&ccb->csio, /* retries */ 1, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ TRUE, /* load_eject */ FALSE, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdstopunit(struct cam_periph *periph, u_int32_t eject) { union ccb *ccb; int error; error = 0; ccb = cdgetccb(periph, /* priority */ 1); scsi_start_stop(&ccb->csio, /* retries */ 1, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* start */ FALSE, /* load_eject */ eject, /* immediate */ FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdsetspeed(struct cam_periph *periph, u_int32_t rdspeed, u_int32_t wrspeed) { struct scsi_set_speed *scsi_cmd; struct ccb_scsiio *csio; union ccb *ccb; int error; error = 0; ccb = cdgetccb(periph, /* priority */ 1); csio = &ccb->csio; /* Preserve old behavior: units in multiples of CDROM speed */ if (rdspeed < 177) rdspeed *= 177; if (wrspeed < 177) wrspeed *= 177; cam_fill_csio(csio, /* retries */ 1, /* cbfcnp */ cddone, /* flags */ CAM_DIR_NONE, /* tag_action */ MSG_SIMPLE_Q_TAG, /* data_ptr */ NULL, /* dxfer_len */ 0, /* sense_len */ SSD_FULL_SIZE, sizeof(struct scsi_set_speed), /* timeout */ 50000); scsi_cmd = (struct scsi_set_speed *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SET_CD_SPEED; scsi_ulto2b(rdspeed, scsi_cmd->readspeed); scsi_ulto2b(wrspeed, scsi_cmd->writespeed); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); xpt_release_ccb(ccb); return(error); } static int cdreportkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; u_int32_t lba; int error; int length; error = 0; databuf = NULL; lba = 0; ccb = cdgetccb(periph, /* priority */ 1); switch (authinfo->format) { case DVD_REPORT_AGID: length = sizeof(struct scsi_report_key_data_agid); break; case DVD_REPORT_CHALLENGE: length = sizeof(struct scsi_report_key_data_challenge); break; case DVD_REPORT_KEY1: length = sizeof(struct scsi_report_key_data_key1_key2); break; case DVD_REPORT_TITLE_KEY: length = sizeof(struct scsi_report_key_data_title); /* The lba field is only set for the title key */ lba = authinfo->lba; break; case DVD_REPORT_ASF: length = 
sizeof(struct scsi_report_key_data_asf); break; case DVD_REPORT_RPC: length = sizeof(struct scsi_report_key_data_rpc); break; case DVD_INVALIDATE_AGID: length = 0; break; default: error = EINVAL; goto bailout; break; /* NOTREACHED */ } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_ZERO); } else databuf = NULL; scsi_report_key(&ccb->csio, /* retries */ 1, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ lba, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) goto bailout; if (ccb->csio.resid != 0) { xpt_print_path(periph->path); printf("warning, residual for report key command is %d\n", ccb->csio.resid); } switch(authinfo->format) { case DVD_REPORT_AGID: { struct scsi_report_key_data_agid *agid_data; agid_data = (struct scsi_report_key_data_agid *)databuf; authinfo->agid = (agid_data->agid & RKD_AGID_MASK) >> RKD_AGID_SHIFT; break; } case DVD_REPORT_CHALLENGE: { struct scsi_report_key_data_challenge *chal_data; chal_data = (struct scsi_report_key_data_challenge *)databuf; bcopy(chal_data->challenge_key, authinfo->keychal, min(sizeof(chal_data->challenge_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_KEY1: { struct scsi_report_key_data_key1_key2 *key1_data; key1_data = (struct scsi_report_key_data_key1_key2 *)databuf; bcopy(key1_data->key1, authinfo->keychal, min(sizeof(key1_data->key1), sizeof(authinfo->keychal))); break; } case DVD_REPORT_TITLE_KEY: { struct scsi_report_key_data_title *title_data; title_data = (struct scsi_report_key_data_title *)databuf; authinfo->cpm = (title_data->byte0 & RKD_TITLE_CPM) >> RKD_TITLE_CPM_SHIFT; authinfo->cp_sec = (title_data->byte0 & RKD_TITLE_CP_SEC) >> RKD_TITLE_CP_SEC_SHIFT; authinfo->cgms = (title_data->byte0 & RKD_TITLE_CMGS_MASK) >> RKD_TITLE_CMGS_SHIFT; bcopy(title_data->title_key, authinfo->keychal, min(sizeof(title_data->title_key), sizeof(authinfo->keychal))); break; } case DVD_REPORT_ASF: { struct scsi_report_key_data_asf *asf_data; asf_data = (struct scsi_report_key_data_asf *)databuf; authinfo->asf = asf_data->success & RKD_ASF_SUCCESS; break; } case DVD_REPORT_RPC: { struct scsi_report_key_data_rpc *rpc_data; rpc_data = (struct scsi_report_key_data_rpc *)databuf; authinfo->reg_type = (rpc_data->byte4 & RKD_RPC_TYPE_MASK) >> RKD_RPC_TYPE_SHIFT; authinfo->vend_rsts = (rpc_data->byte4 & RKD_RPC_VENDOR_RESET_MASK) >> RKD_RPC_VENDOR_RESET_SHIFT; authinfo->user_rsts = rpc_data->byte4 & RKD_RPC_USER_RESET_MASK; authinfo->region = rpc_data->region_mask; authinfo->rpc_scheme = rpc_data->rpc_scheme1; break; } case DVD_INVALIDATE_AGID: break; default: /* This should be impossible, since we checked above */ error = EINVAL; goto bailout; break; /* NOTREACHED */ } bailout: if (databuf != NULL) free(databuf, M_DEVBUF); xpt_release_ccb(ccb); return(error); } static int cdsendkey(struct cam_periph *periph, struct dvd_authinfo *authinfo) { union ccb *ccb; u_int8_t *databuf; int length; int error; error = 0; databuf = NULL; ccb = cdgetccb(periph, /* priority */ 1); switch(authinfo->format) { case DVD_SEND_CHALLENGE: { struct scsi_report_key_data_challenge *challenge_data; length = sizeof(*challenge_data); challenge_data = malloc(length, M_DEVBUF, M_ZERO); databuf = (u_int8_t *)challenge_data; scsi_ulto2b(length - sizeof(challenge_data->data_len), challenge_data->data_len); bcopy(authinfo->keychal, 
challenge_data->challenge_key, min(sizeof(authinfo->keychal), sizeof(challenge_data->challenge_key))); break; } case DVD_SEND_KEY2: { struct scsi_report_key_data_key1_key2 *key2_data; length = sizeof(*key2_data); key2_data = malloc(length, M_DEVBUF, M_ZERO); databuf = (u_int8_t *)key2_data; scsi_ulto2b(length - sizeof(key2_data->data_len), key2_data->data_len); bcopy(authinfo->keychal, key2_data->key1, min(sizeof(authinfo->keychal), sizeof(key2_data->key1))); break; } case DVD_SEND_RPC: { struct scsi_send_key_data_rpc *rpc_data; length = sizeof(*rpc_data); rpc_data = malloc(length, M_DEVBUF, M_ZERO); databuf = (u_int8_t *)rpc_data; scsi_ulto2b(length - sizeof(rpc_data->data_len), rpc_data->data_len); rpc_data->region_code = authinfo->region; break; } default: error = EINVAL; goto bailout; break; /* NOTREACHED */ } scsi_send_key(&ccb->csio, /* retries */ 1, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* agid */ authinfo->agid, /* key_format */ authinfo->format, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); bailout: if (databuf != NULL) free(databuf, M_DEVBUF); xpt_release_ccb(ccb); return(error); } static int cdreaddvdstructure(struct cam_periph *periph, struct dvd_struct *dvdstruct) { union ccb *ccb; u_int8_t *databuf; u_int32_t address; int error; int length; error = 0; databuf = NULL; /* The address is reserved for many of the formats */ address = 0; ccb = cdgetccb(periph, /* priority */ 1); switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: length = sizeof(struct scsi_read_dvd_struct_data_physical); break; case DVD_STRUCT_COPYRIGHT: length = sizeof(struct scsi_read_dvd_struct_data_copyright); break; case DVD_STRUCT_DISCKEY: length = sizeof(struct scsi_read_dvd_struct_data_disc_key); break; case DVD_STRUCT_BCA: length = sizeof(struct scsi_read_dvd_struct_data_bca); break; case DVD_STRUCT_MANUFACT: length = sizeof(struct scsi_read_dvd_struct_data_manufacturer); break; case DVD_STRUCT_CMI: error = ENODEV; goto bailout; #ifdef notyet length = sizeof(struct scsi_read_dvd_struct_data_copy_manage); address = dvdstruct->address; #endif break; /* NOTREACHED */ case DVD_STRUCT_PROTDISCID: length = sizeof(struct scsi_read_dvd_struct_data_prot_discid); break; case DVD_STRUCT_DISCKEYBLOCK: length = sizeof(struct scsi_read_dvd_struct_data_disc_key_blk); break; case DVD_STRUCT_DDS: length = sizeof(struct scsi_read_dvd_struct_data_dds); break; case DVD_STRUCT_MEDIUM_STAT: length = sizeof(struct scsi_read_dvd_struct_data_medium_status); break; case DVD_STRUCT_SPARE_AREA: length = sizeof(struct scsi_read_dvd_struct_data_spare_area); break; case DVD_STRUCT_RMD_LAST: error = ENODEV; goto bailout; #ifdef notyet length = sizeof(struct scsi_read_dvd_struct_data_rmd_borderout); address = dvdstruct->address; #endif break; /* NOTREACHED */ case DVD_STRUCT_RMD_RMA: error = ENODEV; goto bailout; #ifdef notyet length = sizeof(struct scsi_read_dvd_struct_data_rmd); address = dvdstruct->address; #endif break; /* NOTREACHED */ case DVD_STRUCT_PRERECORDED: length = sizeof(struct scsi_read_dvd_struct_data_leadin); break; case DVD_STRUCT_UNIQUEID: length = sizeof(struct scsi_read_dvd_struct_data_disc_id); break; case DVD_STRUCT_DCB: error = ENODEV; goto bailout; #ifdef notyet length = sizeof(struct scsi_read_dvd_struct_data_dcb); address = dvdstruct->address; #endif break; /* NOTREACHED */ case DVD_STRUCT_LIST: /* * This is the maximum allocation length for the 
READ DVD * STRUCTURE command. There's nothing in the MMC3 spec * that indicates a limit in the amount of data that can * be returned from this call, other than the limits * imposed by the 2-byte length variables. */ length = 65535; break; default: error = EINVAL; goto bailout; break; /* NOTREACHED */ } if (length != 0) { databuf = malloc(length, M_DEVBUF, M_ZERO); } else databuf = NULL; scsi_read_dvd_structure(&ccb->csio, /* retries */ 1, /* cbfcnp */ cddone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* lba */ address, /* layer_number */ dvdstruct->layer_num, /* key_format */ dvdstruct->format, /* agid */ dvdstruct->agid, /* data_ptr */ databuf, /* dxfer_len */ length, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 50000); error = cdrunccb(ccb, cderror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA); if (error != 0) goto bailout; switch(dvdstruct->format) { case DVD_STRUCT_PHYSICAL: { struct scsi_read_dvd_struct_data_layer_desc *inlayer; struct dvd_layer *outlayer; struct scsi_read_dvd_struct_data_physical *phys_data; phys_data = (struct scsi_read_dvd_struct_data_physical *)databuf; inlayer = &phys_data->layer_desc; outlayer = (struct dvd_layer *)&dvdstruct->data; dvdstruct->length = sizeof(*inlayer); outlayer->book_type = (inlayer->book_type_version & RDSD_BOOK_TYPE_MASK) >> RDSD_BOOK_TYPE_SHIFT; outlayer->book_version = (inlayer->book_type_version & RDSD_BOOK_VERSION_MASK); outlayer->disc_size = (inlayer->disc_size_max_rate & RDSD_DISC_SIZE_MASK) >> RDSD_DISC_SIZE_SHIFT; outlayer->max_rate = (inlayer->disc_size_max_rate & RDSD_MAX_RATE_MASK); outlayer->nlayers = (inlayer->layer_info & RDSD_NUM_LAYERS_MASK) >> RDSD_NUM_LAYERS_SHIFT; outlayer->track_path = (inlayer->layer_info & RDSD_TRACK_PATH_MASK) >> RDSD_TRACK_PATH_SHIFT; outlayer->layer_type = (inlayer->layer_info & RDSD_LAYER_TYPE_MASK); outlayer->linear_density = (inlayer->density & RDSD_LIN_DENSITY_MASK) >> RDSD_LIN_DENSITY_SHIFT; outlayer->track_density = (inlayer->density & RDSD_TRACK_DENSITY_MASK); outlayer->bca = (inlayer->bca & RDSD_BCA_MASK) >> RDSD_BCA_SHIFT; outlayer->start_sector = scsi_3btoul(inlayer->main_data_start); outlayer->end_sector = scsi_3btoul(inlayer->main_data_end); outlayer->end_sector_l0 = scsi_3btoul(inlayer->end_sector_layer0); break; } case DVD_STRUCT_COPYRIGHT: { struct scsi_read_dvd_struct_data_copyright *copy_data; copy_data = (struct scsi_read_dvd_struct_data_copyright *) databuf; dvdstruct->cpst = copy_data->cps_type; dvdstruct->rmi = copy_data->region_info; dvdstruct->length = 0; break; } default: /* * Tell the user what the overall length is, no matter * what we can actually fit in the data buffer. */ dvdstruct->length = length - ccb->csio.resid - sizeof(struct scsi_read_dvd_struct_data_header); /* * But only actually copy out the smaller of what we read * in or what the structure can take. 
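 *
 * Worked example (sizes assumed): if a DVD_STRUCT_LIST request for
 * 65535 bytes comes back with resid == 65435, dvdstruct->length is
 * 65535 - 65435 - sizeof(struct scsi_read_dvd_struct_data_header)
 * payload bytes, and the bcopy below moves
 * min(sizeof(dvdstruct->data), dvdstruct->length) of them.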
*/ bcopy(databuf + sizeof(struct scsi_read_dvd_struct_data_header), dvdstruct->data, min(sizeof(dvdstruct->data), dvdstruct->length)); break; } bailout: if (databuf != NULL) free(databuf, M_DEVBUF); xpt_release_ccb(ccb); return(error); } void scsi_report_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t lba, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_report_key *scsi_cmd; scsi_cmd = (struct scsi_report_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REPORT_KEY; scsi_ulto4b(lba, scsi_cmd->lba); scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len == 0) ? CAM_DIR_NONE : CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_key(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t agid, u_int8_t key_format, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_key *scsi_cmd; scsi_cmd = (struct scsi_send_key *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_KEY; scsi_ulto2b(dxfer_len, scsi_cmd->param_len); scsi_cmd->agid_keyformat = (agid << RK_KF_AGID_SHIFT) | (key_format & RK_KF_KEYFORMAT_MASK); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_OUT, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_dvd_structure(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t address, u_int8_t layer_number, u_int8_t format, u_int8_t agid, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_dvd_structure *scsi_cmd; scsi_cmd = (struct scsi_read_dvd_structure *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_DVD_STRUCTURE; scsi_ulto4b(address, scsi_cmd->address); scsi_cmd->layer_number = layer_number; scsi_cmd->format = format; scsi_ulto2b(dxfer_len, scsi_cmd->alloc_len); /* The AGID is the top two bits of this byte */ scsi_cmd->agid = agid << 6; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_pass.c =================================================================== --- head/sys/cam/scsi/scsi_pass.c (revision 110231) +++ head/sys/cam/scsi/scsi_pass.c (revision 110232) @@ -1,634 +1,630 @@ /* * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { PASS_FLAG_OPEN = 0x01, PASS_FLAG_LOCKED = 0x02, PASS_FLAG_INVALID = 0x04 } pass_flags; typedef enum { PASS_STATE_NORMAL } pass_state; typedef enum { PASS_CCB_BUFFER_IO, PASS_CCB_WAITING } pass_ccb_types; #define ccb_type ppriv_field0 #define ccb_bp ppriv_ptr1 struct pass_softc { pass_state state; pass_flags flags; u_int8_t pd_type; union ccb saved_ccb; struct devstat device_stats; dev_t dev; }; -#ifndef MIN -#define MIN(x,y) ((x < y) ? x : y) -#endif - [...] static void passoninvalidate(struct cam_periph *periph) { struct pass_softc *softc; struct ccb_setasync csa; softc = (struct pass_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = passasync; csa.callback_arg = periph; xpt_action((union ccb *)&csa); softc->flags |= PASS_FLAG_INVALID; /* * XXX Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ if (bootverbose) { xpt_print_path(periph->path); printf("lost device\n"); } } static void passcleanup(struct cam_periph *periph) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; devstat_remove_entry(&softc->device_stats); destroy_dev(softc->dev); if (bootverbose) { xpt_print_path(periph->path); printf("removing device entry\n"); } free(softc, M_DEVBUF); } static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(passregister, passoninvalidate, passcleanup, passstart, "pass", CAM_PERIPH_BIO, cgd->ccb_h.path, passasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("passasync: Unable to attach new device " "due to status %#x: %s\n", status, entry ?
entry->status_text : "Unknown"); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status passregister(struct cam_periph *periph, void *arg) { struct pass_softc *softc; struct ccb_setasync csa; struct ccb_getdev *cgd; int no_tags; cgd = (struct ccb_getdev *)arg; if (periph == NULL) { printf("passregister: periph was NULL!!\n"); return(CAM_REQ_CMP_ERR); } if (cgd == NULL) { printf("passregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct pass_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT); if (softc == NULL) { printf("passregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); softc->state = PASS_STATE_NORMAL; softc->pd_type = SID_TYPE(&cgd->inq_data); periph->softc = softc; /* * We pass in 0 for a blocksize, since we don't * know what the blocksize of this device is, if * it even has a blocksize. */ no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0; devstat_add_entry(&softc->device_stats, "pass", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0), softc->pd_type | DEVSTAT_TYPE_IF_SCSI | DEVSTAT_TYPE_PASS, DEVSTAT_PRIORITY_PASS); /* Register the device */ softc->dev = make_dev(&pass_cdevsw, periph->unit_number, UID_ROOT, GID_OPERATOR, 0600, "%s%d", periph->periph_name, periph->unit_number); softc->dev->si_drv1 = periph; /* * Add an async callback so that we get * notified if this device goes away. */ xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = passasync; csa.callback_arg = periph; xpt_action((union ccb *)&csa); if (bootverbose) xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); } static int passopen(dev_t dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; int s; error = 0; /* default to no error */ periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return (ENXIO); softc = (struct pass_softc *)periph->softc; s = splsoftcam(); if (softc->flags & PASS_FLAG_INVALID) { splx(s); return(ENXIO); } /* * Don't allow access when we're running at a high securelevel. */ error = securelevel_gt(td->td_ucred, 1); if (error) { splx(s); return(error); } /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) { splx(s); return(EPERM); } /* * We don't allow nonblocking access. 
*/ if ((flags & O_NONBLOCK) != 0) { xpt_print_path(periph->path); printf("can't do nonblocking access\n"); splx(s); return(EINVAL); } if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0) { splx(s); return (error); } splx(s); if ((softc->flags & PASS_FLAG_OPEN) == 0) { if (cam_periph_acquire(periph) != CAM_REQ_CMP) return(ENXIO); softc->flags |= PASS_FLAG_OPEN; } cam_periph_unlock(periph); return (error); } static int passclose(dev_t dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return (ENXIO); softc = (struct pass_softc *)periph->softc; if ((error = cam_periph_lock(periph, PRIBIO)) != 0) return (error); softc->flags &= ~PASS_FLAG_OPEN; cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void passstart(struct cam_periph *periph, union ccb *start_ccb) { struct pass_softc *softc; int s; softc = (struct pass_softc *)periph->softc; switch (softc->state) { case PASS_STATE_NORMAL: s = splbio(); start_ccb->ccb_h.ccb_type = PASS_CCB_WAITING; SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, periph_links.sle); periph->immediate_priority = CAM_PRIORITY_NONE; splx(s); wakeup(&periph->ccb_list); break; } } static void passdone(struct cam_periph *periph, union ccb *done_ccb) { struct pass_softc *softc; struct ccb_scsiio *csio; softc = (struct pass_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { case PASS_CCB_WAITING: /* Caller will release the CCB */ wakeup(&done_ccb->ccb_h.cbfcnp); return; } xpt_release_ccb(done_ccb); } static int passioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (periph == NULL) return(ENXIO); softc = (struct pass_softc *)periph->softc; error = 0; switch (cmd) { case CAMIOCOMMAND: { union ccb *inccb; union ccb *ccb; int ccb_malloced; inccb = (union ccb *)addr; /* * Some CCB types, like scan bus and scan lun can only go * through the transport layer device. */ if (inccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print_path(periph->path); printf("CCB function code %#x is restricted to the " "XPT device\n", inccb->ccb_h.func_code); error = ENODEV; break; } /* * Non-immediate CCBs need a CCB from the per-device pool * of CCBs, which is scheduled by the transport layer. * Immediate CCBs and user-supplied CCBs should just be * malloced. */ if ((inccb->ccb_h.func_code & XPT_FC_QUEUED) && ((inccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0)) { ccb = cam_periph_getccb(periph, inccb->ccb_h.pinfo.priority); ccb_malloced = 0; } else { ccb = xpt_alloc_ccb(); if (ccb != NULL) xpt_setup_ccb(&ccb->ccb_h, periph->path, inccb->ccb_h.pinfo.priority); ccb_malloced = 1; } if (ccb == NULL) { xpt_print_path(periph->path); printf("unable to allocate CCB\n"); error = ENOMEM; break; } error = passsendccb(periph, ccb, inccb); if (ccb_malloced) xpt_free_ccb(ccb); else xpt_release_ccb(ccb); break; } default: error = cam_periph_ioctl(periph, cmd, addr, passerror); break; } return(error); } /* * Generally, "ccb" should be the CCB supplied by the kernel. "inccb" * should be the CCB that is copied in from the user.
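* * In outline, passsendccb() below merges the user CCB into the * kernel CCB with xpt_merge_ccb(), substitutes passdone as the * completion callback, maps any user data via cam_periph_mapmem(), * runs the request through cam_periph_runccb(), unmaps, and copies * the completed CCB back over "inccb" for the caller.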
*/ static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb) { struct pass_softc *softc; struct cam_periph_map_info mapinfo; int error, need_unmap; softc = (struct pass_softc *)periph->softc; need_unmap = 0; /* * There are some fields in the CCB header that need to be * preserved, the rest we get from the user. */ xpt_merge_ccb(ccb, inccb); /* * There's no way for the user to have a completion * function, so we put our own completion function in here. */ ccb->ccb_h.cbfcnp = passdone; /* * We only attempt to map the user memory into kernel space * if they haven't passed in a physical memory pointer, * and if there is actually an I/O operation to perform. * Right now cam_periph_mapmem() only supports SCSI and device * match CCBs. For the SCSI CCBs, we only pass the CCB in if * there's actually data to map. cam_periph_mapmem() will do the * right thing, even if there isn't data to map, but since CCBs * without data are a reasonably common occurrence (e.g. test unit * ready), it will save a few cycles if we check for it here. */ if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) && (((ccb->ccb_h.func_code == XPT_SCSI_IO) && ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)) || (ccb->ccb_h.func_code == XPT_DEV_MATCH))) { bzero(&mapinfo, sizeof(mapinfo)); error = cam_periph_mapmem(ccb, &mapinfo); /* * cam_periph_mapmem returned an error, we can't continue. * Return the error to the user. */ if (error) return(error); /* * We successfully mapped the memory in, so we need to * unmap it when the transaction is done. */ need_unmap = 1; } /* * If the user wants us to perform any error recovery, then honor * that request. Otherwise, it's up to the user to perform any * error recovery. */ error = cam_periph_runccb(ccb, (ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ? passerror : NULL, /* cam_flags */ CAM_RETRY_SELTO, /* sense_flags */SF_RETRY_UA, &softc->device_stats); if (need_unmap != 0) cam_periph_unmapmem(ccb, &mapinfo); ccb->ccb_h.cbfcnp = NULL; ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv; bcopy(ccb, inccb, sizeof(union ccb)); return(error); } static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cam_periph *periph; struct pass_softc *softc; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct pass_softc *)periph->softc; return(cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } Index: head/sys/cam/scsi/scsi_targ_bh.c =================================================================== --- head/sys/cam/scsi/scsi_targ_bh.c (revision 110231) +++ head/sys/cam/scsi/scsi_targ_bh.c (revision 110232) @@ -1,792 +1,790 @@ /* * Implementation of the Target Mode 'Black Hole device' for CAM. * * Copyright (c) 1999 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { TARGBH_STATE_NORMAL, TARGBH_STATE_EXCEPTION, TARGBH_STATE_TEARDOWN } targbh_state; typedef enum { TARGBH_FLAG_NONE = 0x00, TARGBH_FLAG_LUN_ENABLED = 0x01 } targbh_flags; typedef enum { TARGBH_CCB_WORKQ, TARGBH_CCB_WAITING } targbh_ccb_types; #define MAX_ACCEPT 8 #define MAX_IMMEDIATE 16 #define MAX_BUF_SIZE 256 /* Max inquiry/sense/mode page transfer */ -#define MIN(a, b) ((a > b) ? b : a) - /* Offsets into our private CCB area for storing accept information */ #define ccb_type ppriv_field0 #define ccb_descr ppriv_ptr1 /* We stick a pointer to the originating accept TIO in each continue I/O CCB */ #define ccb_atio ppriv_ptr1 TAILQ_HEAD(ccb_queue, ccb_hdr); struct targbh_softc { struct ccb_queue pending_queue; struct ccb_queue work_queue; struct ccb_queue unknown_atio_queue; struct devstat device_stats; targbh_state state; targbh_flags flags; u_int init_level; u_int inq_data_len; struct ccb_accept_tio *accept_tio_list; struct ccb_hdr_slist immed_notify_slist; }; struct targbh_cmd_desc { struct ccb_accept_tio* atio_link; u_int data_resid; /* How much left to transfer */ u_int data_increment;/* Amount to send before next disconnect */ void* data; /* The data. Can be from backing_store or not */ void* backing_store;/* Backing store allocated for this descriptor*/ u_int max_size; /* Size of backing_store */ u_int32_t timeout; u_int8_t status; /* Status to return to initiator */ }; static struct scsi_inquiry_data no_lun_inq_data = { T_NODEVICE | (SID_QUAL_BAD_LU << 5), 0, /* version */2, /* format version */2 }; static struct scsi_sense_data no_lun_sense_data = { SSD_CURRENT_ERROR|SSD_ERRCODE_VALID, 0, SSD_KEY_NOT_READY, { 0, 0, 0, 0 }, /*extra_len*/offsetof(struct scsi_sense_data, fru) - offsetof(struct scsi_sense_data, extra_len), { 0, 0, 0, 0 }, /* Logical Unit Not Supported */ /*ASC*/0x25, /*ASCQ*/0 }; static const int request_sense_size = offsetof(struct scsi_sense_data, fru); static periph_init_t targbhinit; static void targbhasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static cam_status targbhenlun(struct cam_periph *periph); static cam_status targbhdislun(struct cam_periph *periph); static periph_ctor_t targbhctor; static periph_dtor_t targbhdtor; static periph_start_t targbhstart; static void targbhdone(struct cam_periph *periph, union ccb *done_ccb); #ifdef NOTYET static int targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); #endif static struct targbh_cmd_desc* targbhallocdescr(void); static void targbhfreedescr(struct targbh_cmd_desc *buf); static struct periph_driver targbhdriver = { targbhinit, "targbh", TAILQ_HEAD_INITIALIZER(targbhdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(targbh, targbhdriver); static void targbhinit(void) { cam_status status; struct cam_path *path; /* * Install a global async callback. 
This callback will * receive async callbacks like "new path registered". */ status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status == CAM_REQ_CMP) { struct ccb_setasync csa; xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_PATH_REGISTERED; csa.callback = targbhasync; csa.callback_arg = NULL; xpt_action((union ccb *)&csa); status = csa.ccb_h.status; xpt_free_path(path); } if (status != CAM_REQ_CMP) { printf("targbh: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void targbhasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_PATH_REGISTERED: { struct ccb_pathinq *cpi; struct cam_path *new_path; cam_status status; cpi = (struct ccb_pathinq *)arg; /* Only attach to controllers that support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) break; /* * Allocate a peripheral instance for * this target instance. */ status = xpt_create_path(&new_path, NULL, xpt_path_path_id(path), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("targbhasync: Unable to create path " "due to status 0x%x\n", status); break; } status = cam_periph_alloc(targbhctor, NULL, targbhdtor, targbhstart, "targbh", CAM_PERIPH_BIO, new_path, targbhasync, AC_PATH_REGISTERED, cpi); xpt_free_path(new_path); break; } case AC_PATH_DEREGISTERED: { targbhdislun(periph); break; } default: break; } } /* Attempt to enable our lun */ static cam_status targbhenlun(struct cam_periph *periph) { union ccb immed_ccb; struct targbh_softc *softc; cam_status status; int i; softc = (struct targbh_softc *)periph->softc; if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) != 0) return (CAM_REQ_CMP); xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1); immed_ccb.ccb_h.func_code = XPT_EN_LUN; /* Don't need support for any vendor specific commands */ immed_ccb.cel.grp6_len = 0; immed_ccb.cel.grp7_len = 0; immed_ccb.cel.enable = 1; xpt_action(&immed_ccb); status = immed_ccb.ccb_h.status; if (status != CAM_REQ_CMP) { xpt_print_path(periph->path); printf("targbhenlun - Enable Lun Rejected with status 0x%x\n", status); return (status); } softc->flags |= TARGBH_FLAG_LUN_ENABLED; /* * Build up a buffer of accept target I/O * operations for incoming selections. */ for (i = 0; i < MAX_ACCEPT; i++) { struct ccb_accept_tio *atio; atio = (struct ccb_accept_tio*)malloc(sizeof(*atio), M_DEVBUF, M_NOWAIT); if (atio == NULL) { status = CAM_RESRC_UNAVAIL; break; } atio->ccb_h.ccb_descr = targbhallocdescr(); if (atio->ccb_h.ccb_descr == NULL) { free(atio, M_DEVBUF); status = CAM_RESRC_UNAVAIL; break; } xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1); atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; atio->ccb_h.cbfcnp = targbhdone; xpt_action((union ccb *)atio); status = atio->ccb_h.status; if (status != CAM_REQ_INPROG) { targbhfreedescr(atio->ccb_h.ccb_descr); free(atio, M_DEVBUF); break; } ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link = softc->accept_tio_list; softc->accept_tio_list = atio; } if (i == 0) { xpt_print_path(periph->path); printf("targbhenlun - Could not allocate accept tio CCBs: " "status = 0x%x\n", status); targbhdislun(periph); return (CAM_REQ_CMP_ERR); } /* * Build up a buffer of immediate notify CCBs * so the SIM can tell us of asynchronous target mode events. 
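* The pattern mirrors the accept TIO loop above: allocate each CCB * with malloc(M_NOWAIT), set func_code to XPT_IMMED_NOTIFY, queue it * with xpt_action(), and treat any status other than CAM_REQ_INPROG * as a failure to queue.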
*/ for (i = 0; i < MAX_ACCEPT; i++) { struct ccb_immed_notify *inot; inot = (struct ccb_immed_notify*)malloc(sizeof(*inot), M_DEVBUF, M_NOWAIT); if (inot == NULL) { status = CAM_RESRC_UNAVAIL; break; } xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1); inot->ccb_h.func_code = XPT_IMMED_NOTIFY; inot->ccb_h.cbfcnp = targbhdone; xpt_action((union ccb *)inot); status = inot->ccb_h.status; if (status != CAM_REQ_INPROG) { free(inot, M_DEVBUF); break; } SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h, periph_links.sle); } if (i == 0) { xpt_print_path(periph->path); printf("targbhenlun - Could not allocate immediate notify " "CCBs: status = 0x%x\n", status); targbhdislun(periph); return (CAM_REQ_CMP_ERR); } return (CAM_REQ_CMP); } static cam_status targbhdislun(struct cam_periph *periph) { union ccb ccb; struct targbh_softc *softc; struct ccb_accept_tio* atio; struct ccb_hdr *ccb_h; softc = (struct targbh_softc *)periph->softc; if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) == 0) return CAM_REQ_CMP; /* XXX Block for Continue I/O completion */ /* Kill off all ACCEPT and IMMEDIATE CCBs */ while ((atio = softc->accept_tio_list) != NULL) { softc->accept_tio_list = ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link; xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1); ccb.cab.ccb_h.func_code = XPT_ABORT; ccb.cab.abort_ccb = (union ccb *)atio; xpt_action(&ccb); } while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) { SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle); xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1); ccb.cab.ccb_h.func_code = XPT_ABORT; ccb.cab.abort_ccb = (union ccb *)ccb_h; xpt_action(&ccb); } /* * Disable this lun. */ xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1); ccb.cel.ccb_h.func_code = XPT_EN_LUN; ccb.cel.enable = 0; xpt_action(&ccb); if (ccb.cel.ccb_h.status != CAM_REQ_CMP) printf("targbhdislun - Disabling lun on controller failed " "with status 0x%x\n", ccb.cel.ccb_h.status); else softc->flags &= ~TARGBH_FLAG_LUN_ENABLED; return (ccb.cel.ccb_h.status); } static cam_status targbhctor(struct cam_periph *periph, void *arg) { struct ccb_pathinq *cpi; struct targbh_softc *softc; cpi = (struct ccb_pathinq *)arg; /* Allocate our per-instance private storage */ softc = (struct targbh_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT); if (softc == NULL) { printf("targbhctor: unable to malloc softc\n"); return (CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); TAILQ_INIT(&softc->pending_queue); TAILQ_INIT(&softc->work_queue); softc->accept_tio_list = NULL; SLIST_INIT(&softc->immed_notify_slist); softc->state = TARGBH_STATE_NORMAL; periph->softc = softc; softc->init_level++; return (targbhenlun(periph)); } static void targbhdtor(struct cam_periph *periph) { struct targbh_softc *softc; softc = (struct targbh_softc *)periph->softc; softc->state = TARGBH_STATE_TEARDOWN; targbhdislun(periph); switch (softc->init_level) { default: /* FALLTHROUGH */ case 1: free(softc, M_DEVBUF); break; case 0: panic("targbhdtor - impossible init level"); } } static void targbhstart(struct cam_periph *periph, union ccb *start_ccb) { struct targbh_softc *softc; struct ccb_hdr *ccbh; struct ccb_accept_tio *atio; struct targbh_cmd_desc *desc; struct ccb_scsiio *csio; ccb_flags flags; int s; softc = (struct targbh_softc *)periph->softc; s = splbio(); ccbh = TAILQ_FIRST(&softc->work_queue); if (periph->immediate_priority <= periph->pinfo.priority) { start_ccb->ccb_h.ccb_type = TARGBH_CCB_WAITING; SLIST_INSERT_HEAD(&periph->ccb_list,
&start_ccb->ccb_h, periph_links.sle); periph->immediate_priority = CAM_PRIORITY_NONE; splx(s); wakeup(&periph->ccb_list); } else if (ccbh == NULL) { splx(s); xpt_release_ccb(start_ccb); } else { TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe); TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh, periph_links.tqe); splx(s); atio = (struct ccb_accept_tio*)ccbh; desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr; /* Is this a tagged request? */ flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); csio = &start_ccb->csio; /* * If we are done with the transaction, tell the * controller to send status and perform a CMD_CMPLT. * If we have associated sense data, see if we can * send that too. */ if (desc->data_resid == desc->data_increment) { flags |= CAM_SEND_STATUS; if (atio->sense_len) { csio->sense_len = atio->sense_len; csio->sense_data = atio->sense_data; flags |= CAM_SEND_SENSE; } } cam_fill_ctio(csio, /*retries*/2, targbhdone, flags, (flags & CAM_TAG_ACTION_VALID)? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, desc->status, /*data_ptr*/desc->data_increment == 0 ? NULL : desc->data, /*dxfer_len*/desc->data_increment, /*timeout*/desc->timeout); /* Override our wildcard attachment */ start_ccb->ccb_h.target_id = atio->ccb_h.target_id; start_ccb->ccb_h.target_lun = atio->ccb_h.target_lun; start_ccb->ccb_h.ccb_type = TARGBH_CCB_WORKQ; start_ccb->ccb_h.ccb_atio = atio; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Sending a CTIO\n")); xpt_action(start_ccb); /* * If the queue was frozen waiting for the response * to this ATIO (for instance disconnection was disallowed), * then release it now that our response has been queued. */ if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(periph->path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); atio->ccb_h.status &= ~CAM_DEV_QFRZN; } s = splbio(); ccbh = TAILQ_FIRST(&softc->work_queue); splx(s); } if (ccbh != NULL) xpt_schedule(periph, /*priority*/1); } static void targbhdone(struct cam_periph *periph, union ccb *done_ccb) { struct targbh_softc *softc; softc = (struct targbh_softc *)periph->softc; if (done_ccb->ccb_h.ccb_type == TARGBH_CCB_WAITING) { /* Caller will release the CCB */ wakeup(&done_ccb->ccb_h.cbfcnp); return; } switch (done_ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: { struct ccb_accept_tio *atio; struct targbh_cmd_desc *descr; u_int8_t *cdb; int priority; atio = &done_ccb->atio; descr = (struct targbh_cmd_desc*)atio->ccb_h.ccb_descr; cdb = atio->cdb_io.cdb_bytes; if (softc->state == TARGBH_STATE_TEARDOWN || atio->ccb_h.status == CAM_REQ_ABORTED) { targbhfreedescr(descr); free(done_ccb, M_DEVBUF); return; } /* * Determine the type of incoming command and * set up our buffer for a response. */ switch (cdb[0]) { case INQUIRY: { struct scsi_inquiry *inq; inq = (struct scsi_inquiry *)cdb; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Saw an inquiry!\n")); /* * Validate the command. We don't * support any VPD pages, so complain * if EVPD is set. */ if ((inq->byte2 & SI_EVPD) != 0 || inq->page_code != 0) { atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_NONE; /* * This needs to have other than a * no_lun_sense_data response. */ atio->sense_data = no_lun_sense_data; atio->sense_len = sizeof(no_lun_sense_data); descr->data_resid = 0; descr->data_increment = 0; descr->status = SCSI_STATUS_CHECK_COND; break; } /* * Direction is always relative * to the initiator.
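* CAM_DIR_IN therefore means data flowing in to the initiator, * i.e. this target emulation sending the inquiry response.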
*/ atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_IN; descr->data = &no_lun_inq_data; descr->data_resid = MIN(sizeof(no_lun_inq_data), SCSI_CDB6_LEN(inq->length)); descr->data_increment = descr->data_resid; descr->timeout = 5 * 1000; descr->status = SCSI_STATUS_OK; break; } case REQUEST_SENSE: { struct scsi_request_sense *rsense; rsense = (struct scsi_request_sense *)cdb; /* Refer to static sense data */ atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_IN; descr->data = &no_lun_sense_data; descr->data_resid = request_sense_size; descr->data_resid = MIN(descr->data_resid, SCSI_CDB6_LEN(rsense->length)); descr->data_increment = descr->data_resid; descr->timeout = 5 * 1000; descr->status = SCSI_STATUS_OK; break; } default: /* Constant CA, tell initiator */ /* Direction is always relative to the initiator */ atio->ccb_h.flags &= ~CAM_DIR_MASK; atio->ccb_h.flags |= CAM_DIR_NONE; atio->sense_data = no_lun_sense_data; atio->sense_len = sizeof (no_lun_sense_data); descr->data_resid = 0; descr->data_increment = 0; descr->timeout = 5 * 1000; descr->status = SCSI_STATUS_CHECK_COND; break; } /* Queue us up to receive a Continue Target I/O ccb. */ if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) { TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.tqe); priority = 0; } else { TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h, periph_links.tqe); priority = 1; } xpt_schedule(periph, priority); break; } case XPT_CONT_TARGET_IO: { struct ccb_accept_tio *atio; struct targbh_cmd_desc *desc; CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Received completed CTIO\n")); atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio; desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr; TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h, periph_links.tqe); /* * We could check for CAM_SENT_SENSE being set here, * but since we're not maintaining any CA/UA state, * there's no point. */ atio->sense_len = 0; done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE; done_ccb->ccb_h.status &= ~CAM_SENT_SENSE; /* * Any errors will not change the data we return, * so make sure the queue is not left frozen. * XXX - At some point there may be errors that * leave us in a connected state with the * initiator... */ if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { printf("Releasing Queue\n"); cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; } desc->data_resid -= desc->data_increment; xpt_release_ccb(done_ccb); if (softc->state != TARGBH_STATE_TEARDOWN) { /* * Send the original accept TIO back to the * controller to handle more work.
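* Restoring the wildcard target and lun IDs below re-arms the * same ATIO allocation for the next incoming command; it is only * freed on teardown.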
*/ CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE, ("Returning ATIO to target\n")); /* Restore wildcards */ atio->ccb_h.target_id = CAM_TARGET_WILDCARD; atio->ccb_h.target_lun = CAM_LUN_WILDCARD; xpt_action((union ccb *)atio); break; } else { targbhfreedescr(desc); free(atio, M_DEVBUF); } break; } case XPT_IMMED_NOTIFY: { int frozen; frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; if (softc->state == TARGBH_STATE_TEARDOWN || done_ccb->ccb_h.status == CAM_REQ_ABORTED) { printf("Freed an immediate notify\n"); free(done_ccb, M_DEVBUF); } else { /* Requeue for another immediate event */ xpt_action(done_ccb); } if (frozen != 0) cam_release_devq(periph->path, /*relsim_flags*/0, /*opening reduction*/0, /*timeout*/0, /*getcount_only*/0); break; } default: panic("targbhdone: Unexpected ccb opcode"); break; } } #ifdef NOTYET static int targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { return 0; } #endif static struct targbh_cmd_desc* targbhallocdescr() { struct targbh_cmd_desc* descr; /* Allocate the targbh_descr structure */ descr = (struct targbh_cmd_desc *)malloc(sizeof(*descr), M_DEVBUF, M_NOWAIT); if (descr == NULL) return (NULL); bzero(descr, sizeof(*descr)); /* Allocate buffer backing store */ descr->backing_store = malloc(MAX_BUF_SIZE, M_DEVBUF, M_NOWAIT); if (descr->backing_store == NULL) { free(descr, M_DEVBUF); return (NULL); } descr->max_size = MAX_BUF_SIZE; return (descr); } static void targbhfreedescr(struct targbh_cmd_desc *descr) { free(descr->backing_store, M_DEVBUF); free(descr, M_DEVBUF); } Index: head/sys/compat/svr4/svr4_stream.c =================================================================== --- head/sys/compat/svr4/svr4_stream.c (revision 110231) +++ head/sys/compat/svr4/svr4_stream.c (revision 110232) @@ -1,2313 +1,2310 @@ /* * Copyright (c) 1998 Mark Newton. All rights reserved. * Copyright (c) 1994, 1996 Christos Zoulas. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Christos Zoulas. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Pretend that we have streams... * Yes, this is gross. 
* * ToDo: The state machine for getmsg needs re-thinking */ #define COMPAT_43 1 #include "opt_mac.h" #include #include #include #include #include #include #include #include /* Must come after sys/malloc.h */ #include #include #include #include #include #include #include #include #include #include #include #include #include /* Must come after sys/uio.h */ #include #include #include #include #include #include #include #include #include #include #include #include /* Utils */ static int clean_pipe(struct thread *, const char *); static void getparm(struct file *, struct svr4_si_sockparms *); static int svr4_do_putmsg(struct thread *, struct svr4_sys_putmsg_args *, struct file *); static int svr4_do_getmsg(struct thread *, struct svr4_sys_getmsg_args *, struct file *); /* Address Conversions */ static void sockaddr_to_netaddr_in(struct svr4_strmcmd *, const struct sockaddr_in *); static void sockaddr_to_netaddr_un(struct svr4_strmcmd *, const struct sockaddr_un *); static void netaddr_to_sockaddr_in(struct sockaddr_in *, const struct svr4_strmcmd *); static void netaddr_to_sockaddr_un(struct sockaddr_un *, const struct svr4_strmcmd *); /* stream ioctls */ static int i_nread(struct file *, struct thread *, register_t *, int, u_long, caddr_t); static int i_fdinsert(struct file *, struct thread *, register_t *, int, u_long, caddr_t); static int i_str(struct file *, struct thread *, register_t *, int, u_long, caddr_t); static int i_setsig(struct file *, struct thread *, register_t *, int, u_long, caddr_t); static int i_getsig(struct file *, struct thread *, register_t *, int, u_long, caddr_t); static int _i_bind_rsvd(struct file *, struct thread *, register_t *, int, u_long, caddr_t); static int _i_rele_rsvd(struct file *, struct thread *, register_t *, int, u_long, caddr_t); /* i_str sockmod calls */ static int sockmod(struct file *, int, struct svr4_strioctl *, struct thread *); static int si_listen(struct file *, int, struct svr4_strioctl *, struct thread *); static int si_ogetudata(struct file *, int, struct svr4_strioctl *, struct thread *); static int si_sockparams(struct file *, int, struct svr4_strioctl *, struct thread *); static int si_shutdown (struct file *, int, struct svr4_strioctl *, struct thread *); static int si_getudata(struct file *, int, struct svr4_strioctl *, struct thread *); /* i_str timod calls */ static int timod(struct file *, int, struct svr4_strioctl *, struct thread *); static int ti_getinfo(struct file *, int, struct svr4_strioctl *, struct thread *); static int ti_bind(struct file *, int, struct svr4_strioctl *, struct thread *); /* infrastructure */ static int svr4_sendit(struct thread *td, int s, struct msghdr *mp, int flags); static int svr4_recvit(struct thread *td, int s, struct msghdr *mp, caddr_t namelenp); /* Ok, so we shouldn't use sendit() in uipc_syscalls.c because * it isn't part of a "public" interface; We're supposed to use * pru_sosend instead. Same goes for recvit()/pru_soreceive() for * that matter. Solution: Suck sendit()/recvit() into here where we * can do what we like. * * I hate code duplication. * * I will take out all the #ifdef COMPAT_OLDSOCK gumph, though. 
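* * Concretely, svr4_sendit() below funnels the uio straight into * so->so_proto->pr_usrreqs->pru_sosend(), and svr4_recvit() into * pru_soreceive(), as proposed above.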
*/ static int svr4_sendit(td, s, mp, flags) register struct thread *td; int s; register struct msghdr *mp; int flags; { struct uio auio; register struct iovec *iov; register int i; struct mbuf *control; struct sockaddr *to; int len, error; struct socket *so; #ifdef KTRACE struct iovec *ktriov = NULL; struct uio ktruio; #endif if ((error = fgetsock(td, s, &so, NULL)) != 0) return (error); #ifdef MAC error = mac_check_socket_send(td->td_ucred, so); if (error) goto done1; #endif auio.uio_iov = mp->msg_iov; auio.uio_iovcnt = mp->msg_iovlen; auio.uio_segflg = UIO_USERSPACE; auio.uio_rw = UIO_WRITE; auio.uio_td = td; auio.uio_offset = 0; /* XXX */ auio.uio_resid = 0; iov = mp->msg_iov; for (i = 0; i < mp->msg_iovlen; i++, iov++) { if ((auio.uio_resid += iov->iov_len) < 0) { error = EINVAL; goto done1; } } if (mp->msg_name) { error = getsockaddr(&to, mp->msg_name, mp->msg_namelen); if (error) goto done1; } else { to = 0; } if (mp->msg_control) { if (mp->msg_controllen < sizeof(struct cmsghdr)) { error = EINVAL; goto bad; } error = sockargs(&control, mp->msg_control, mp->msg_controllen, MT_CONTROL); if (error) goto bad; } else { control = 0; } #ifdef KTRACE if (KTRPOINT(td, KTR_GENIO)) { int iovlen = auio.uio_iovcnt * sizeof (struct iovec); MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, 0); bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen); ktruio = auio; } #endif len = auio.uio_resid; error = so->so_proto->pr_usrreqs->pru_sosend(so, to, &auio, 0, control, flags, td); if (error) { if (auio.uio_resid != len && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) error = 0; if (error == EPIPE) { PROC_LOCK(td->td_proc); psignal(td->td_proc, SIGPIPE); PROC_UNLOCK(td->td_proc); } } if (error == 0) td->td_retval[0] = len - auio.uio_resid; #ifdef KTRACE if (ktriov != NULL) { if (error == 0) { ktruio.uio_iov = ktriov; ktruio.uio_resid = td->td_retval[0]; ktrgenio(s, UIO_WRITE, &ktruio, error); } FREE(ktriov, M_TEMP); } #endif bad: if (to) FREE(to, M_SONAME); done1: fputsock(so); return (error); } static int svr4_recvit(td, s, mp, namelenp) register struct thread *td; int s; register struct msghdr *mp; caddr_t namelenp; { struct uio auio; register struct iovec *iov; register int i; int len, error; struct mbuf *m, *control = 0; caddr_t ctlbuf; struct socket *so; struct sockaddr *fromsa = 0; #ifdef KTRACE struct iovec *ktriov = NULL; struct uio ktruio; #endif if ((error = fgetsock(td, s, &so, NULL)) != 0) return (error); #ifdef MAC error = mac_check_socket_receive(td->td_ucred, so); if (error) goto done1; #endif auio.uio_iov = mp->msg_iov; auio.uio_iovcnt = mp->msg_iovlen; auio.uio_segflg = UIO_USERSPACE; auio.uio_rw = UIO_READ; auio.uio_td = td; auio.uio_offset = 0; /* XXX */ auio.uio_resid = 0; iov = mp->msg_iov; for (i = 0; i < mp->msg_iovlen; i++, iov++) { if ((auio.uio_resid += iov->iov_len) < 0) { error = EINVAL; goto done1; } } #ifdef KTRACE if (KTRPOINT(td, KTR_GENIO)) { int iovlen = auio.uio_iovcnt * sizeof (struct iovec); MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, 0); bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen); ktruio = auio; } #endif len = auio.uio_resid; error = so->so_proto->pr_usrreqs->pru_soreceive(so, &fromsa, &auio, (struct mbuf **)0, mp->msg_control ? 
&control : (struct mbuf **)0, &mp->msg_flags); if (error) { if (auio.uio_resid != len && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) error = 0; } #ifdef KTRACE if (ktriov != NULL) { if (error == 0) { ktruio.uio_iov = ktriov; ktruio.uio_resid = len - auio.uio_resid; ktrgenio(s, UIO_READ, &ktruio, error); } FREE(ktriov, M_TEMP); } #endif if (error) goto out; td->td_retval[0] = len - auio.uio_resid; if (mp->msg_name) { len = mp->msg_namelen; if (len <= 0 || fromsa == 0) len = 0; else { -#ifndef MIN -#define MIN(a,b) ((a)>(b)?(b):(a)) -#endif /* save sa_len before it is destroyed by MSG_COMPAT */ len = MIN(len, fromsa->sa_len); error = copyout(fromsa, (caddr_t)mp->msg_name, (unsigned)len); if (error) goto out; } mp->msg_namelen = len; if (namelenp && (error = copyout((caddr_t)&len, namelenp, sizeof (int)))) { goto out; } } if (mp->msg_control) { len = mp->msg_controllen; m = control; mp->msg_controllen = 0; ctlbuf = (caddr_t) mp->msg_control; while (m && len > 0) { unsigned int tocopy; if (len >= m->m_len) tocopy = m->m_len; else { mp->msg_flags |= MSG_CTRUNC; tocopy = len; } if ((error = copyout((caddr_t)mtod(m, caddr_t), ctlbuf, tocopy)) != 0) goto out; ctlbuf += tocopy; len -= tocopy; m = m->m_next; } mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control; } out: if (fromsa) FREE(fromsa, M_SONAME); if (control) m_freem(control); done1: fputsock(so); return (error); } #ifdef DEBUG_SVR4 static void bufprint(u_char *, size_t); static int show_ioc(const char *, struct svr4_strioctl *); static int show_strbuf(struct svr4_strbuf *); static void show_msg(const char *, int, struct svr4_strbuf *, struct svr4_strbuf *, int); static void bufprint(buf, len) u_char *buf; size_t len; { size_t i; uprintf("\n\t"); for (i = 0; i < len; i++) { uprintf("%x ", buf[i]); if (i && (i % 16) == 0) uprintf("\n\t"); } } static int show_ioc(str, ioc) const char *str; struct svr4_strioctl *ioc; { u_char *ptr = (u_char *) malloc(ioc->len, M_TEMP, 0); int error; uprintf("%s cmd = %ld, timeout = %d, len = %d, buf = %p { ", str, ioc->cmd, ioc->timeout, ioc->len, ioc->buf); if ((error = copyin(ioc->buf, ptr, ioc->len)) != 0) { free((char *) ptr, M_TEMP); return error; } bufprint(ptr, ioc->len); uprintf("}\n"); free((char *) ptr, M_TEMP); return 0; } static int show_strbuf(str) struct svr4_strbuf *str; { int error; u_char *ptr = NULL; int maxlen = str->maxlen; int len = str->len; if (maxlen < 0) maxlen = 0; if (len >= maxlen) len = maxlen; if (len > 0) { ptr = (u_char *) malloc(len, M_TEMP, 0); if ((error = copyin(str->buf, ptr, len)) != 0) { free((char *) ptr, M_TEMP); return error; } } uprintf(", { %d, %d, %p=[ ", str->maxlen, str->len, str->buf); if (ptr) bufprint(ptr, len); uprintf("]}"); if (ptr) free((char *) ptr, M_TEMP); return 0; } static void show_msg(str, fd, ctl, dat, flags) const char *str; int fd; struct svr4_strbuf *ctl; struct svr4_strbuf *dat; int flags; { struct svr4_strbuf buf; int error; uprintf("%s(%d", str, fd); if (ctl != NULL) { if ((error = copyin(ctl, &buf, sizeof(buf))) != 0) return; show_strbuf(&buf); } else uprintf(", NULL"); if (dat != NULL) { if ((error = copyin(dat, &buf, sizeof(buf))) != 0) return; show_strbuf(&buf); } else uprintf(", NULL"); uprintf(", %x);\n", flags); } #endif /* DEBUG_SVR4 */ /* * We are faced with an interesting situation. On svr4, unix sockets * are really pipes. But we really have sockets, and we might as * well use them. At the point where svr4 calls TI_BIND, it has * already created a named pipe for the socket using mknod(2).
* We need to create a socket with the same name when we bind, * so we need to remove the pipe before, otherwise we'll get address * already in use. So we *carefully* remove the pipe, to avoid * using this as a random file removal tool. We use system calls * to avoid code duplication. */ static int clean_pipe(td, path) struct thread *td; const char *path; { struct lstat_args la; struct unlink_args ua; struct stat st; int error; caddr_t sg = stackgap_init(); size_t l = strlen(path) + 1; void *tpath; tpath = stackgap_alloc(&sg, l); la.ub = stackgap_alloc(&sg, sizeof(struct stat)); if ((error = copyout(path, tpath, l)) != 0) return error; la.path = tpath; if ((error = lstat(td, &la)) != 0) return 0; if ((error = copyin(la.ub, &st, sizeof(st))) != 0) return 0; /* * Make sure we are dealing with a mode 0 named pipe. */ if ((st.st_mode & S_IFMT) != S_IFIFO) return 0; if ((st.st_mode & ALLPERMS) != 0) return 0; ua.path = la.path; if ((error = unlink(td, &ua)) != 0) { DPRINTF(("clean_pipe: unlink failed %d\n", error)); return error; } return 0; } static void sockaddr_to_netaddr_in(sc, sain) struct svr4_strmcmd *sc; const struct sockaddr_in *sain; { struct svr4_netaddr_in *na; na = SVR4_ADDROF(sc); na->family = sain->sin_family; na->port = sain->sin_port; na->addr = sain->sin_addr.s_addr; DPRINTF(("sockaddr_in -> netaddr %d %d %lx\n", na->family, na->port, na->addr)); } static void sockaddr_to_netaddr_un(sc, saun) struct svr4_strmcmd *sc; const struct sockaddr_un *saun; { struct svr4_netaddr_un *na; char *dst, *edst = ((char *) sc) + sc->offs + sizeof(na->family) + 1 - sizeof(*sc); const char *src; na = SVR4_ADDROF(sc); na->family = saun->sun_family; for (src = saun->sun_path, dst = na->path; (*dst++ = *src++) != '\0'; ) if (dst == edst) break; DPRINTF(("sockaddr_un -> netaddr %d %s\n", na->family, na->path)); } static void netaddr_to_sockaddr_in(sain, sc) struct sockaddr_in *sain; const struct svr4_strmcmd *sc; { const struct svr4_netaddr_in *na; na = SVR4_C_ADDROF(sc); memset(sain, 0, sizeof(*sain)); sain->sin_len = sizeof(*sain); sain->sin_family = na->family; sain->sin_port = na->port; sain->sin_addr.s_addr = na->addr; DPRINTF(("netaddr -> sockaddr_in %d %d %x\n", sain->sin_family, sain->sin_port, sain->sin_addr.s_addr)); } static void netaddr_to_sockaddr_un(saun, sc) struct sockaddr_un *saun; const struct svr4_strmcmd *sc; { const struct svr4_netaddr_un *na; char *dst, *edst = &saun->sun_path[sizeof(saun->sun_path) - 1]; const char *src; na = SVR4_C_ADDROF(sc); memset(saun, 0, sizeof(*saun)); saun->sun_family = na->family; for (src = na->path, dst = saun->sun_path; (*dst++ = *src++) != '\0'; ) if (dst == edst) break; saun->sun_len = dst - saun->sun_path; DPRINTF(("netaddr -> sockaddr_un %d %s\n", saun->sun_family, saun->sun_path)); } static void getparm(fp, pa) struct file *fp; struct svr4_si_sockparms *pa; { struct svr4_strm *st; struct socket *so; st = svr4_stream_get(fp); if (st == NULL) return; so = fp->f_data; pa->family = st->s_family; switch (so->so_type) { case SOCK_DGRAM: pa->type = SVR4_T_CLTS; pa->protocol = IPPROTO_UDP; DPRINTF(("getparm(dgram)\n")); return; case SOCK_STREAM: pa->type = SVR4_T_COTS; /* What about T_COTS_ORD? 
XXX */ pa->protocol = IPPROTO_IP; DPRINTF(("getparm(stream)\n")); return; case SOCK_RAW: pa->type = SVR4_T_CLTS; pa->protocol = IPPROTO_RAW; DPRINTF(("getparm(raw)\n")); return; default: pa->type = 0; pa->protocol = 0; DPRINTF(("getparm(type %d?)\n", so->so_type)); return; } } static int si_ogetudata(fp, fd, ioc, td) struct file *fp; int fd; struct svr4_strioctl *ioc; struct thread *td; { int error; struct svr4_si_oudata ud; struct svr4_si_sockparms pa; if (ioc->len != sizeof(ud) && ioc->len != sizeof(ud) - sizeof(int)) { DPRINTF(("SI_OGETUDATA: Wrong size %d != %d\n", sizeof(ud), ioc->len)); return EINVAL; } if ((error = copyin(ioc->buf, &ud, sizeof(ud))) != 0) return error; getparm(fp, &pa); switch (pa.family) { case AF_INET: ud.tidusize = 16384; ud.addrsize = sizeof(struct svr4_sockaddr_in); if (pa.type == SVR4_SOCK_STREAM) ud.etsdusize = 1; else ud.etsdusize = 0; break; case AF_LOCAL: ud.tidusize = 65536; ud.addrsize = 128; ud.etsdusize = 128; break; default: DPRINTF(("SI_OGETUDATA: Unsupported address family %d\n", pa.family)); return ENOSYS; } /* I have no idea what these should be! */ ud.optsize = 128; ud.tsdusize = 128; ud.servtype = pa.type; /* XXX: Fixme */ ud.so_state = 0; ud.so_options = 0; return copyout(&ud, ioc->buf, ioc->len); } static int si_sockparams(fp, fd, ioc, td) struct file *fp; int fd; struct svr4_strioctl *ioc; struct thread *td; { struct svr4_si_sockparms pa; getparm(fp, &pa); return copyout(&pa, ioc->buf, sizeof(pa)); } static int si_listen(fp, fd, ioc, td) struct file *fp; int fd; struct svr4_strioctl *ioc; struct thread *td; { int error; struct svr4_strm *st = svr4_stream_get(fp); struct svr4_strmcmd lst; struct listen_args la; if (st == NULL) return EINVAL; if ((error = copyin(ioc->buf, &lst, ioc->len)) != 0) return error; if (lst.cmd != SVR4_TI_OLD_BIND_REQUEST) { DPRINTF(("si_listen: bad request %ld\n", lst.cmd)); return EINVAL; } /* * We are making assumptions again... 
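* (chief among them a fixed listen() backlog of 5, and the magic * pad[] values filled into the AF_LOCAL reply below).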
*/ la.s = fd; DPRINTF(("SI_LISTEN: fileno %d backlog = %d\n", fd, 5)); la.backlog = 5; if ((error = listen(td, &la)) != 0) { DPRINTF(("SI_LISTEN: listen failed %d\n", error)); return error; } st->s_cmd = SVR4_TI__ACCEPT_WAIT; lst.cmd = SVR4_TI_BIND_REPLY; switch (st->s_family) { case AF_INET: /* XXX: Fill the length here */ break; case AF_LOCAL: lst.len = 140; lst.pad[28] = 0x00000000; /* magic again */ lst.pad[29] = 0x00000800; /* magic again */ lst.pad[30] = 0x80001400; /* magic again */ break; default: DPRINTF(("SI_LISTEN: Unsupported address family %d\n", st->s_family)); return ENOSYS; } if ((error = copyout(&lst, ioc->buf, ioc->len)) != 0) return error; return 0; } static int si_getudata(fp, fd, ioc, td) struct file *fp; int fd; struct svr4_strioctl *ioc; struct thread *td; { int error; struct svr4_si_udata ud; if (sizeof(ud) != ioc->len) { DPRINTF(("SI_GETUDATA: Wrong size %d != %d\n", sizeof(ud), ioc->len)); return EINVAL; } if ((error = copyin(ioc->buf, &ud, sizeof(ud))) != 0) return error; getparm(fp, &ud.sockparms); switch (ud.sockparms.family) { case AF_INET: DPRINTF(("getudata_inet\n")); ud.tidusize = 16384; ud.tsdusize = 16384; ud.addrsize = sizeof(struct svr4_sockaddr_in); if (ud.sockparms.type == SVR4_SOCK_STREAM) ud.etsdusize = 1; else ud.etsdusize = 0; ud.optsize = 0; break; case AF_LOCAL: DPRINTF(("getudata_local\n")); ud.tidusize = 65536; ud.tsdusize = 128; ud.addrsize = 128; ud.etsdusize = 128; ud.optsize = 128; break; default: DPRINTF(("SI_GETUDATA: Unsupported address family %d\n", ud.sockparms.family)); return ENOSYS; } ud.servtype = ud.sockparms.type; DPRINTF(("ud.servtype = %d\n", ud.servtype)); /* XXX: Fixme */ ud.so_state = 0; ud.so_options = 0; return copyout(&ud, ioc->buf, sizeof(ud)); } static int si_shutdown(fp, fd, ioc, td) struct file *fp; int fd; struct svr4_strioctl *ioc; struct thread *td; { int error; struct shutdown_args ap; if (ioc->len != sizeof(ap.how)) { DPRINTF(("SI_SHUTDOWN: Wrong size %d != %d\n", sizeof(ap.how), ioc->len)); return EINVAL; } if ((error = copyin(ioc->buf, &ap.how, ioc->len)) != 0) return error; ap.s = fd; return shutdown(td, &ap); } static int sockmod(fp, fd, ioc, td) struct file *fp; int fd; struct svr4_strioctl *ioc; struct thread *td; { switch (ioc->cmd) { case SVR4_SI_OGETUDATA: DPRINTF(("SI_OGETUDATA\n")); return si_ogetudata(fp, fd, ioc, td); case SVR4_SI_SHUTDOWN: DPRINTF(("SI_SHUTDOWN\n")); return si_shutdown(fp, fd, ioc, td); case SVR4_SI_LISTEN: DPRINTF(("SI_LISTEN\n")); return si_listen(fp, fd, ioc, td); case SVR4_SI_SETMYNAME: DPRINTF(("SI_SETMYNAME\n")); return 0; case SVR4_SI_SETPEERNAME: DPRINTF(("SI_SETPEERNAME\n")); return 0; case SVR4_SI_GETINTRANSIT: DPRINTF(("SI_GETINTRANSIT\n")); return 0; case SVR4_SI_TCL_LINK: DPRINTF(("SI_TCL_LINK\n")); return 0; case SVR4_SI_TCL_UNLINK: DPRINTF(("SI_TCL_UNLINK\n")); return 0; case SVR4_SI_SOCKPARAMS: DPRINTF(("SI_SOCKPARAMS\n")); return si_sockparams(fp, fd, ioc, td); case SVR4_SI_GETUDATA: DPRINTF(("SI_GETUDATA\n")); return si_getudata(fp, fd, ioc, td); default: DPRINTF(("Unknown sockmod ioctl %lx\n", ioc->cmd)); return 0; } } static int ti_getinfo(fp, fd, ioc, td) struct file *fp; int fd; struct svr4_strioctl *ioc; struct thread *td; { int error; struct svr4_infocmd info; memset(&info, 0, sizeof(info)); if ((error = copyin(ioc->buf, &info, ioc->len)) != 0) return error; if (info.cmd != SVR4_TI_INFO_REQUEST) return EINVAL; info.cmd = SVR4_TI_INFO_REPLY; info.tsdu = 0; info.etsdu = 1; info.cdata = -2; info.ddata = -2; info.addr = 16; info.opt = -1; info.tidu = 16384; 
info.serv = 2; info.current = 0; info.provider = 2; ioc->len = sizeof(info); if ((error = copyout(&info, ioc->buf, ioc->len)) != 0) return error; return 0; } static int ti_bind(fp, fd, ioc, td) struct file *fp; int fd; struct svr4_strioctl *ioc; struct thread *td; { int error; struct svr4_strm *st = svr4_stream_get(fp); struct sockaddr_in sain; struct sockaddr_un saun; caddr_t sg; void *skp, *sup = NULL; int sasize; struct svr4_strmcmd bnd; struct bind_args ba; if (st == NULL) { DPRINTF(("ti_bind: bad file descriptor\n")); return EINVAL; } if ((error = copyin(ioc->buf, &bnd, ioc->len)) != 0) return error; if (bnd.cmd != SVR4_TI_OLD_BIND_REQUEST) { DPRINTF(("ti_bind: bad request %ld\n", bnd.cmd)); return EINVAL; } switch (st->s_family) { case AF_INET: skp = &sain; sasize = sizeof(sain); if (bnd.offs == 0) goto reply; netaddr_to_sockaddr_in(&sain, &bnd); DPRINTF(("TI_BIND: fam %d, port %d, addr %x\n", sain.sin_family, sain.sin_port, sain.sin_addr.s_addr)); break; case AF_LOCAL: skp = &saun; sasize = sizeof(saun); if (bnd.offs == 0) goto reply; netaddr_to_sockaddr_un(&saun, &bnd); if (saun.sun_path[0] == '\0') goto reply; DPRINTF(("TI_BIND: fam %d, path %s\n", saun.sun_family, saun.sun_path)); if ((error = clean_pipe(td, saun.sun_path)) != 0) return error; bnd.pad[28] = 0x00001000; /* magic again */ break; default: DPRINTF(("TI_BIND: Unsupported address family %d\n", st->s_family)); return ENOSYS; } sg = stackgap_init(); sup = stackgap_alloc(&sg, sasize); if ((error = copyout(skp, sup, sasize)) != 0) return error; ba.s = fd; DPRINTF(("TI_BIND: fileno %d\n", fd)); ba.name = (void *) sup; ba.namelen = sasize; if ((error = bind(td, &ba)) != 0) { DPRINTF(("TI_BIND: bind failed %d\n", error)); return error; } reply: if (sup == NULL) { memset(&bnd, 0, sizeof(bnd)); bnd.len = sasize + 4; bnd.offs = 0x10; /* XXX */ } bnd.cmd = SVR4_TI_BIND_REPLY; if ((error = copyout(&bnd, ioc->buf, ioc->len)) != 0) return error; return 0; } static int timod(fp, fd, ioc, td) struct file *fp; int fd; struct svr4_strioctl *ioc; struct thread *td; { switch (ioc->cmd) { case SVR4_TI_GETINFO: DPRINTF(("TI_GETINFO\n")); return ti_getinfo(fp, fd, ioc, td); case SVR4_TI_OPTMGMT: DPRINTF(("TI_OPTMGMT\n")); return 0; case SVR4_TI_BIND: DPRINTF(("TI_BIND\n")); return ti_bind(fp, fd, ioc, td); case SVR4_TI_UNBIND: DPRINTF(("TI_UNBIND\n")); return 0; default: DPRINTF(("Unknown timod ioctl %lx\n", ioc->cmd)); return 0; } } int svr4_stream_ti_ioctl(fp, td, retval, fd, cmd, dat) struct file *fp; struct thread *td; register_t *retval; int fd; u_long cmd; caddr_t dat; { struct svr4_strbuf skb, *sub = (struct svr4_strbuf *) dat; struct svr4_strm *st = svr4_stream_get(fp); int error; void *skp, *sup; struct sockaddr_in sain; struct sockaddr_un saun; struct svr4_strmcmd sc; int sasize; caddr_t sg; int *lenp; DPRINTF(("svr4_stream_ti_ioctl\n")); if (st == NULL) return EINVAL; sc.offs = 0x10; if ((error = copyin(sub, &skb, sizeof(skb))) != 0) { DPRINTF(("ti_ioctl: error copying in strbuf\n")); return error; } switch (st->s_family) { case AF_INET: skp = &sain; sasize = sizeof(sain); break; case AF_LOCAL: skp = &saun; sasize = sizeof(saun); break; default: DPRINTF(("ti_ioctl: Unsupported address family %d\n", st->s_family)); return ENOSYS; } sg = stackgap_init(); sup = stackgap_alloc(&sg, sasize); lenp = stackgap_alloc(&sg, sizeof(*lenp)); if ((error = copyout(&sasize, lenp, sizeof(*lenp))) != 0) { DPRINTF(("ti_ioctl: error copying out lenp\n")); return error; } switch (cmd) { case SVR4_TI_GETMYNAME: DPRINTF(("TI_GETMYNAME\n")); { struct 
getsockname_args ap; ap.fdes = fd; ap.asa = sup; ap.alen = lenp; if ((error = getsockname(td, &ap)) != 0) { DPRINTF(("ti_ioctl: getsockname error\n")); return error; } } break; case SVR4_TI_GETPEERNAME: DPRINTF(("TI_GETPEERNAME\n")); { struct getpeername_args ap; ap.fdes = fd; ap.asa = sup; ap.alen = lenp; if ((error = getpeername(td, &ap)) != 0) { DPRINTF(("ti_ioctl: getpeername error\n")); return error; } } break; case SVR4_TI_SETMYNAME: DPRINTF(("TI_SETMYNAME\n")); return 0; case SVR4_TI_SETPEERNAME: DPRINTF(("TI_SETPEERNAME\n")); return 0; default: DPRINTF(("ti_ioctl: Unknown ioctl %lx\n", cmd)); return ENOSYS; } if ((error = copyin(sup, skp, sasize)) != 0) { DPRINTF(("ti_ioctl: error copying in socket data\n")); return error; } if ((error = copyin(lenp, &sasize, sizeof(*lenp))) != 0) { DPRINTF(("ti_ioctl: error copying in socket size\n")); return error; } switch (st->s_family) { case AF_INET: sockaddr_to_netaddr_in(&sc, &sain); skb.len = sasize; break; case AF_LOCAL: sockaddr_to_netaddr_un(&sc, &saun); skb.len = sasize + 4; break; default: return ENOSYS; } if ((error = copyout(SVR4_ADDROF(&sc), skb.buf, sasize)) != 0) { DPRINTF(("ti_ioctl: error copying out socket data\n")); return error; } if ((error = copyout(&skb, sub, sizeof(skb))) != 0) { DPRINTF(("ti_ioctl: error copying out strbuf\n")); return error; } return error; } static int i_nread(fp, td, retval, fd, cmd, dat) struct file *fp; struct thread *td; register_t *retval; int fd; u_long cmd; caddr_t dat; { int error; int nread = 0; /* * We are supposed to return the message length in nread, and the * number of messages in retval. We don't have the notion of number * of stream messages, so we just find out if we have any bytes waiting * for us, and if we do, then we assume that we have at least one * message waiting for us. */ if ((error = fo_ioctl(fp, FIONREAD, (caddr_t) &nread, td->td_ucred, td)) != 0) return error; if (nread != 0) *retval = 1; else *retval = 0; return copyout(&nread, dat, sizeof(nread)); } static int i_fdinsert(fp, td, retval, fd, cmd, dat) struct file *fp; struct thread *td; register_t *retval; int fd; u_long cmd; caddr_t dat; { /* * Major hack again here. We assume that we are using this to * implement accept(2). If that is the case, we have already * called accept, and we have stored the file descriptor in * afd. We find the file descriptor that the code wants to use * in the fd insert request, and then we dup2() our accepted file * descriptor to it. */ int error; struct svr4_strm *st = svr4_stream_get(fp); struct svr4_strfdinsert fdi; struct dup2_args d2p; struct close_args clp; if (st == NULL) { DPRINTF(("fdinsert: bad file type\n")); return EINVAL; } if (st->s_afd == -1) { DPRINTF(("fdinsert: accept fd not found\n")); return ENOENT; } if ((error = copyin(dat, &fdi, sizeof(fdi))) != 0) { DPRINTF(("fdinsert: copyin failed %d\n", error)); return error; } d2p.from = st->s_afd; d2p.to = fdi.fd; if ((error = dup2(td, &d2p)) != 0) { DPRINTF(("fdinsert: dup2(%d, %d) failed %d\n", st->s_afd, fdi.fd, error)); return error; } clp.fd = st->s_afd; if ((error = close(td, &clp)) != 0) { DPRINTF(("fdinsert: close(%d) failed %d\n", st->s_afd, error)); return error; } st->s_afd = -1; *retval = 0; return 0; } static int _i_bind_rsvd(fp, td, retval, fd, cmd, dat) struct file *fp; struct thread *td; register_t *retval; int fd; u_long cmd; caddr_t dat; { struct mkfifo_args ap; /* * This is supposed to be a kernel and library only ioctl.
* It gets called before ti_bind, when we have a unix * socket, to physically create the socket transport and * ``reserve'' it. I don't know how this gets reserved inside * the kernel, but we are going to create it nevertheless. */ ap.path = dat; ap.mode = S_IFIFO; return mkfifo(td, &ap); } static int _i_rele_rsvd(fp, td, retval, fd, cmd, dat) struct file *fp; struct thread *td; register_t *retval; int fd; u_long cmd; caddr_t dat; { struct unlink_args ap; /* * This is supposed to be a kernel and library only ioctl. * I guess it is supposed to release the socket. */ ap.path = dat; return unlink(td, &ap); } static int i_str(fp, td, retval, fd, cmd, dat) struct file *fp; struct thread *td; register_t *retval; int fd; u_long cmd; caddr_t dat; { int error; struct svr4_strioctl ioc; if ((error = copyin(dat, &ioc, sizeof(ioc))) != 0) return error; #ifdef DEBUG_SVR4 if ((error = show_ioc(">", &ioc)) != 0) return error; #endif /* DEBUG_SVR4 */ switch (ioc.cmd & 0xff00) { case SVR4_SIMOD: if ((error = sockmod(fp, fd, &ioc, td)) != 0) return error; break; case SVR4_TIMOD: if ((error = timod(fp, fd, &ioc, td)) != 0) return error; break; default: DPRINTF(("Unimplemented module %c %ld\n", (char) (cmd >> 8), cmd & 0xff)); return 0; } #ifdef DEBUG_SVR4 if ((error = show_ioc("<", &ioc)) != 0) return error; #endif /* DEBUG_SVR4 */ return copyout(&ioc, dat, sizeof(ioc)); } static int i_setsig(fp, td, retval, fd, cmd, dat) struct file *fp; struct thread *td; register_t *retval; int fd; u_long cmd; caddr_t dat; { /* * This is the best we can do for now; we cannot generate * signals only for specific events so the signal mask gets * ignored; we save it just to pass it to a possible I_GETSIG... * * We also have to fix the O_ASYNC fcntl bit, so the * process will get SIGPOLLs. */ struct fcntl_args fa; int error; register_t oflags, flags; struct svr4_strm *st = svr4_stream_get(fp); if (st == NULL) { DPRINTF(("i_setsig: bad file descriptor\n")); return EINVAL; } /* get old status flags */ fa.fd = fd; fa.cmd = F_GETFL; if ((error = fcntl(td, &fa)) != 0) return error; oflags = td->td_retval[0]; /* update the flags */ if (dat != NULL) { int mask; flags = oflags | O_ASYNC; if ((error = copyin(dat, &mask, sizeof(mask))) != 0) { DPRINTF(("i_setsig: bad eventmask pointer\n")); return error; } if (mask & SVR4_S_ALLMASK) { DPRINTF(("i_setsig: bad eventmask data %x\n", mask)); return EINVAL; } st->s_eventmask = mask; } else { flags = oflags & ~O_ASYNC; st->s_eventmask = 0; } /* set the new flags, if changed */ if (flags != oflags) { fa.cmd = F_SETFL; fa.arg = (long) flags; if ((error = fcntl(td, &fa)) != 0) return error; flags = td->td_retval[0]; } /* set up SIGIO receiver if needed */ if (dat != NULL) { fa.cmd = F_SETOWN; fa.arg = (long) td->td_proc->p_pid; return fcntl(td, &fa); } return 0; } static int i_getsig(fp, td, retval, fd, cmd, dat) struct file *fp; struct thread *td; register_t *retval; int fd; u_long cmd; caddr_t dat; { int error; if (dat != NULL) { struct svr4_strm *st = svr4_stream_get(fp); if (st == NULL) { DPRINTF(("i_getsig: bad file descriptor\n")); return EINVAL; } if ((error = copyout(&st->s_eventmask, dat, sizeof(st->s_eventmask))) != 0) { DPRINTF(("i_getsig: bad eventmask pointer\n")); return error; } } return 0; } int svr4_stream_ioctl(fp, td, retval, fd, cmd, dat) struct file *fp; struct thread *td; register_t *retval; int fd; u_long cmd; caddr_t dat; { *retval = 0; /* * All the following stuff assumes "sockmod" is pushed...
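* Each STREAMS ioctl below is either emulated with the * corresponding BSD socket operation on the same descriptor or * accepted as a harmless no-op; the individual cases are the * authoritative reference.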
*/ switch (cmd) { case SVR4_I_NREAD: DPRINTF(("I_NREAD\n")); return i_nread(fp, td, retval, fd, cmd, dat); case SVR4_I_PUSH: DPRINTF(("I_PUSH %p\n", dat)); #if defined(DEBUG_SVR4) show_strbuf((struct svr4_strbuf *)dat); #endif return 0; case SVR4_I_POP: DPRINTF(("I_POP\n")); return 0; case SVR4_I_LOOK: DPRINTF(("I_LOOK\n")); return 0; case SVR4_I_FLUSH: DPRINTF(("I_FLUSH\n")); return 0; case SVR4_I_SRDOPT: DPRINTF(("I_SRDOPT\n")); return 0; case SVR4_I_GRDOPT: DPRINTF(("I_GRDOPT\n")); return 0; case SVR4_I_STR: DPRINTF(("I_STR\n")); return i_str(fp, td, retval, fd, cmd, dat); case SVR4_I_SETSIG: DPRINTF(("I_SETSIG\n")); return i_setsig(fp, td, retval, fd, cmd, dat); case SVR4_I_GETSIG: DPRINTF(("I_GETSIG\n")); return i_getsig(fp, td, retval, fd, cmd, dat); case SVR4_I_FIND: DPRINTF(("I_FIND\n")); /* * Here we are not really pushing modules, we just * pretend they are all present */ *retval = 0; return 0; case SVR4_I_LINK: DPRINTF(("I_LINK\n")); return 0; case SVR4_I_UNLINK: DPRINTF(("I_UNLINK\n")); return 0; case SVR4_I_ERECVFD: DPRINTF(("I_ERECVFD\n")); return 0; case SVR4_I_PEEK: DPRINTF(("I_PEEK\n")); return 0; case SVR4_I_FDINSERT: DPRINTF(("I_FDINSERT\n")); return i_fdinsert(fp, td, retval, fd, cmd, dat); case SVR4_I_SENDFD: DPRINTF(("I_SENDFD\n")); return 0; case SVR4_I_RECVFD: DPRINTF(("I_RECVFD\n")); return 0; case SVR4_I_SWROPT: DPRINTF(("I_SWROPT\n")); return 0; case SVR4_I_GWROPT: DPRINTF(("I_GWROPT\n")); return 0; case SVR4_I_LIST: DPRINTF(("I_LIST\n")); return 0; case SVR4_I_PLINK: DPRINTF(("I_PLINK\n")); return 0; case SVR4_I_PUNLINK: DPRINTF(("I_PUNLINK\n")); return 0; case SVR4_I_SETEV: DPRINTF(("I_SETEV\n")); return 0; case SVR4_I_GETEV: DPRINTF(("I_GETEV\n")); return 0; case SVR4_I_STREV: DPRINTF(("I_STREV\n")); return 0; case SVR4_I_UNSTREV: DPRINTF(("I_UNSTREV\n")); return 0; case SVR4_I_FLUSHBAND: DPRINTF(("I_FLUSHBAND\n")); return 0; case SVR4_I_CKBAND: DPRINTF(("I_CKBAND\n")); return 0; case SVR4_I_GETBAND: DPRINTF(("I_GETBAND\n")); return 0; case SVR4_I_ATMARK: DPRINTF(("I_ATMARK\n")); return 0; case SVR4_I_SETCLTIME: DPRINTF(("I_SETCLTIME\n")); return 0; case SVR4_I_GETCLTIME: DPRINTF(("I_GETCLTIME\n")); return 0; case SVR4_I_CANPUT: DPRINTF(("I_CANPUT\n")); return 0; case SVR4__I_BIND_RSVD: DPRINTF(("_I_BIND_RSVD\n")); return _i_bind_rsvd(fp, td, retval, fd, cmd, dat); case SVR4__I_RELE_RSVD: DPRINTF(("_I_RELE_RSVD\n")); return _i_rele_rsvd(fp, td, retval, fd, cmd, dat); default: DPRINTF(("unimpl cmd = %lx\n", cmd)); break; } return 0; } int svr4_sys_putmsg(td, uap) register struct thread *td; struct svr4_sys_putmsg_args *uap; { struct file *fp; int error; if ((error = fget(td, uap->fd, &fp)) != 0) { #ifdef DEBUG_SVR4 uprintf("putmsg: bad fp\n"); #endif return EBADF; } error = svr4_do_putmsg(td, uap, fp); fdrop(fp, td); return (error); } static int svr4_do_putmsg(td, uap, fp) struct thread *td; struct svr4_sys_putmsg_args *uap; struct file *fp; { struct svr4_strbuf dat, ctl; struct svr4_strmcmd sc; struct sockaddr_in sain; struct sockaddr_un saun; void *skp, *sup; int sasize, *retval; struct svr4_strm *st; int error; caddr_t sg; retval = td->td_retval; #ifdef DEBUG_SVR4 show_msg(">putmsg", uap->fd, uap->ctl, uap->dat, uap->flags); #endif /* DEBUG_SVR4 */ FILE_LOCK_ASSERT(fp, MA_NOTOWNED); if (uap->ctl != NULL) { if ((error = copyin(uap->ctl, &ctl, sizeof(ctl))) != 0) { #ifdef DEBUG_SVR4 uprintf("putmsg: copyin(): %d\n", error); #endif return error; } } else ctl.len = -1; if (uap->dat != NULL) { if ((error = copyin(uap->dat, &dat, sizeof(dat))) != 0) { #ifdef DEBUG_SVR4
uprintf("putmsg: copyin(): %d (2)\n", error); #endif return error; } } else dat.len = -1; /* * Only for sockets for now. */ if ((st = svr4_stream_get(fp)) == NULL) { DPRINTF(("putmsg: bad file type\n")); return EINVAL; } if (ctl.len > sizeof(sc)) { DPRINTF(("putmsg: Bad control size %d != %d\n", ctl.len, sizeof(struct svr4_strmcmd))); return EINVAL; } if ((error = copyin(ctl.buf, &sc, ctl.len)) != 0) return error; switch (st->s_family) { case AF_INET: if (sc.len != sizeof(sain)) { if (sc.cmd == SVR4_TI_DATA_REQUEST) { struct write_args wa; /* Solaris seems to use sc.cmd = 3 to * send "expedited" data. telnet uses * this for options processing, sending EOF, * etc. I'm sure other things use it too. * I don't have any documentation * on it, so I'm making a guess that this * is how it works. newton@atdot.dotat.org XXX */ DPRINTF(("sending expedited data ??\n")); wa.fd = uap->fd; wa.buf = dat.buf; wa.nbyte = dat.len; return write(td, &wa); } DPRINTF(("putmsg: Invalid inet length %ld\n", sc.len)); return EINVAL; } netaddr_to_sockaddr_in(&sain, &sc); skp = &sain; sasize = sizeof(sain); error = sain.sin_family != st->s_family; break; case AF_LOCAL: if (ctl.len == 8) { /* We are doing an accept; succeed */ DPRINTF(("putmsg: Do nothing\n")); *retval = 0; return 0; } else { /* Maybe we've been given a device/inode pair */ udev_t *dev = SVR4_ADDROF(&sc); ino_t *ino = (ino_t *) &dev[1]; skp = svr4_find_socket(td, fp, *dev, *ino); if (skp == NULL) { skp = &saun; /* I guess we have it by name */ netaddr_to_sockaddr_un(skp, &sc); } sasize = sizeof(saun); } break; default: DPRINTF(("putmsg: Unsupported address family %d\n", st->s_family)); return ENOSYS; } sg = stackgap_init(); sup = stackgap_alloc(&sg, sasize); if ((error = copyout(skp, sup, sasize)) != 0) return error; switch (st->s_cmd = sc.cmd) { case SVR4_TI_CONNECT_REQUEST: /* connect */ { struct connect_args co; co.s = uap->fd; co.name = (void *) sup; co.namelen = (int) sasize; return connect(td, &co); } case SVR4_TI_SENDTO_REQUEST: /* sendto */ { struct msghdr msg; struct iovec aiov; msg.msg_name = (caddr_t) sup; msg.msg_namelen = sasize; msg.msg_iov = &aiov; msg.msg_iovlen = 1; msg.msg_control = 0; msg.msg_flags = 0; aiov.iov_base = dat.buf; aiov.iov_len = dat.len; #if 0 error = so->so_proto->pr_usrreqs->pru_sosend(so, 0, uio, 0, 0, 0, uio->uio_td); #endif error = svr4_sendit(td, uap->fd, &msg, uap->flags); DPRINTF(("sendto_request error: %d\n", error)); *retval = 0; return error; } default: DPRINTF(("putmsg: Unimplemented command %lx\n", sc.cmd)); return ENOSYS; } } int svr4_sys_getmsg(td, uap) struct thread *td; struct svr4_sys_getmsg_args *uap; { struct file *fp; int error; if ((error = fget(td, uap->fd, &fp)) != 0) { #ifdef DEBUG_SVR4 uprintf("getmsg: bad fp\n"); #endif return EBADF; } error = svr4_do_getmsg(td, uap, fp); fdrop(fp, td); return (error); } int svr4_do_getmsg(td, uap, fp) register struct thread *td; struct svr4_sys_getmsg_args *uap; struct file *fp; { struct getpeername_args ga; struct accept_args aa; struct svr4_strbuf dat, ctl; struct svr4_strmcmd sc; int error, *retval; struct msghdr msg; struct iovec aiov; struct sockaddr_in sain; struct sockaddr_un saun; void *skp, *sup; int sasize; struct svr4_strm *st; int *flen; int fl; caddr_t sg; retval = td->td_retval; FILE_LOCK_ASSERT(fp, MA_NOTOWNED); memset(&sc, 0, sizeof(sc)); #ifdef DEBUG_SVR4 show_msg(">getmsg", uap->fd, uap->ctl, uap->dat, 0); #endif /* DEBUG_SVR4 */ if (uap->ctl != NULL) { if ((error = copyin(uap->ctl, &ctl, sizeof(ctl))) != 0) return error; } else { ctl.len = 
-1; ctl.maxlen = 0; } if (uap->dat != NULL) { if ((error = copyin(uap->dat, &dat, sizeof(dat))) != 0) return error; } else { dat.len = -1; dat.maxlen = 0; } /* * Only for sockets for now. */ if ((st = svr4_stream_get(fp)) == NULL) { DPRINTF(("getmsg: bad file type\n")); return EINVAL; } if (ctl.maxlen == -1 || dat.maxlen == -1) { DPRINTF(("getmsg: Cannot handle -1 maxlen (yet)\n")); return ENOSYS; } switch (st->s_family) { case AF_INET: skp = &sain; sasize = sizeof(sain); break; case AF_LOCAL: skp = &saun; sasize = sizeof(saun); break; default: DPRINTF(("getmsg: Unsupported address family %d\n", st->s_family)); return ENOSYS; } sg = stackgap_init(); sup = stackgap_alloc(&sg, sasize); flen = (int *) stackgap_alloc(&sg, sizeof(*flen)); fl = sasize; if ((error = copyout(&fl, flen, sizeof(fl))) != 0) return error; switch (st->s_cmd) { case SVR4_TI_CONNECT_REQUEST: DPRINTF(("getmsg: TI_CONNECT_REQUEST\n")); /* * We do the connect in one step, so the putmsg should * have gotten the error. */ sc.cmd = SVR4_TI_OK_REPLY; sc.len = 0; ctl.len = 8; dat.len = -1; fl = 1; st->s_cmd = sc.cmd; break; case SVR4_TI_OK_REPLY: DPRINTF(("getmsg: TI_OK_REPLY\n")); /* * We are immediately after a connect reply, so we send * a connect verification. */ ga.fdes = uap->fd; ga.asa = (void *) sup; ga.alen = flen; if ((error = getpeername(td, &ga)) != 0) { DPRINTF(("getmsg: getpeername failed %d\n", error)); return error; } if ((error = copyin(sup, skp, sasize)) != 0) return error; sc.cmd = SVR4_TI_CONNECT_REPLY; sc.pad[0] = 0x4; sc.offs = 0x18; sc.pad[1] = 0x14; sc.pad[2] = 0x04000402; switch (st->s_family) { case AF_INET: sc.len = sasize; sockaddr_to_netaddr_in(&sc, &sain); break; case AF_LOCAL: sc.len = sasize + 4; sockaddr_to_netaddr_un(&sc, &saun); break; default: return ENOSYS; } ctl.len = 40; dat.len = -1; fl = 0; st->s_cmd = sc.cmd; break; case SVR4_TI__ACCEPT_OK: DPRINTF(("getmsg: TI__ACCEPT_OK\n")); /* * We do the connect in one step, so the putmsg should * have gotten the error. */ sc.cmd = SVR4_TI_OK_REPLY; sc.len = 1; ctl.len = 8; dat.len = -1; fl = 1; st->s_cmd = SVR4_TI__ACCEPT_WAIT; break; case SVR4_TI__ACCEPT_WAIT: DPRINTF(("getmsg: TI__ACCEPT_WAIT\n")); /* * We are after a listen, so we try to accept... 
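* The emulated sequence appears to be: SI_LISTEN leaves s_cmd at * TI__ACCEPT_WAIT; this getmsg() performs the real accept(2) and * replies with TI_ACCEPT_REPLY, moving to TI__ACCEPT_OK; the next * getmsg() then hands back a TI_OK_REPLY and rearms * TI__ACCEPT_WAIT (inferred from the surrounding cases, not from * SVR4 documentation).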
*/ aa.s = uap->fd; aa.name = (void *) sup; aa.anamelen = flen; if ((error = accept(td, &aa)) != 0) { DPRINTF(("getmsg: accept failed %d\n", error)); return error; } st->s_afd = *retval; DPRINTF(("getmsg: Accept fd = %d\n", st->s_afd)); if ((error = copyin(sup, skp, sasize)) != 0) return error; sc.cmd = SVR4_TI_ACCEPT_REPLY; sc.offs = 0x18; sc.pad[0] = 0x0; switch (st->s_family) { case AF_INET: sc.pad[1] = 0x28; sockaddr_to_netaddr_in(&sc, &sain); ctl.len = 40; sc.len = sasize; break; case AF_LOCAL: sc.pad[1] = 0x00010000; sc.pad[2] = 0xf6bcdaa0; /* I don't know what that is */ sc.pad[3] = 0x00010000; ctl.len = 134; sc.len = sasize + 4; break; default: return ENOSYS; } dat.len = -1; fl = 0; st->s_cmd = SVR4_TI__ACCEPT_OK; break; case SVR4_TI_SENDTO_REQUEST: DPRINTF(("getmsg: TI_SENDTO_REQUEST\n")); if (ctl.maxlen > 36 && ctl.len < 36) ctl.len = 36; if ((error = copyin(ctl.buf, &sc, ctl.len)) != 0) return error; switch (st->s_family) { case AF_INET: sockaddr_to_netaddr_in(&sc, &sain); break; case AF_LOCAL: sockaddr_to_netaddr_un(&sc, &saun); break; default: return ENOSYS; } msg.msg_name = (caddr_t) sup; msg.msg_namelen = sasize; msg.msg_iov = &aiov; msg.msg_iovlen = 1; msg.msg_control = 0; aiov.iov_base = dat.buf; aiov.iov_len = dat.maxlen; msg.msg_flags = 0; error = svr4_recvit(td, uap->fd, &msg, (caddr_t) flen); if (error) { DPRINTF(("getmsg: recvit failed %d\n", error)); return error; } if ((error = copyin(msg.msg_name, skp, sasize)) != 0) return error; sc.cmd = SVR4_TI_RECVFROM_IND; switch (st->s_family) { case AF_INET: sc.len = sasize; sockaddr_to_netaddr_in(&sc, &sain); break; case AF_LOCAL: sc.len = sasize + 4; sockaddr_to_netaddr_un(&sc, &saun); break; default: return ENOSYS; } dat.len = *retval; fl = 0; st->s_cmd = sc.cmd; break; default: st->s_cmd = sc.cmd; if (st->s_cmd == SVR4_TI_CONNECT_REQUEST) { struct read_args ra; /* More weirdness: Again, I can't find documentation * to back this up, but when a process does a generic * "getmsg()" call it seems that the command field is * zero and the length of the data area is zero. I * think processes expect getmsg() to fill in dat.len * after reading at most dat.maxlen octets from the * stream. 
Since we're using sockets I can let * read() look after it and frob return values * appropriately (or inappropriately :-) * -- newton@atdot.dotat.org XXX */ ra.fd = uap->fd; ra.buf = dat.buf; ra.nbyte = dat.maxlen; if ((error = read(td, &ra)) != 0) { return error; } dat.len = *retval; *retval = 0; st->s_cmd = SVR4_TI_SENDTO_REQUEST; break; } DPRINTF(("getmsg: Unknown state %x\n", st->s_cmd)); return EINVAL; } if (uap->ctl) { if (ctl.len != -1) if ((error = copyout(&sc, ctl.buf, ctl.len)) != 0) return error; if ((error = copyout(&ctl, uap->ctl, sizeof(ctl))) != 0) return error; } if (uap->dat) { if ((error = copyout(&dat, uap->dat, sizeof(dat))) != 0) return error; } if (uap->flags) { /* XXX: Need translation */ if ((error = copyout(&fl, uap->flags, sizeof(fl))) != 0) return error; } *retval = 0; #ifdef DEBUG_SVR4 show_msg("<getmsg", uap->fd, uap->ctl, uap->dat, fl); #endif /* DEBUG_SVR4 */ return error; } int svr4_sys_send(td, uap) struct thread *td; struct svr4_sys_send_args *uap; { struct osend_args osa; osa.s = uap->s; osa.buf = uap->buf; osa.len = uap->len; osa.flags = uap->flags; return osend(td, &osa); } int svr4_sys_recv(td, uap) struct thread *td; struct svr4_sys_recv_args *uap; { struct orecv_args ora; ora.s = uap->s; ora.buf = uap->buf; ora.len = uap->len; ora.flags = uap->flags; return orecv(td, &ora); } /* * XXX This isn't necessary, but it's handy for inserting debug code into * sendto(). Let's leave it here for now... */ int svr4_sys_sendto(td, uap) struct thread *td; struct svr4_sys_sendto_args *uap; { struct sendto_args sa; sa.s = uap->s; sa.buf = uap->buf; sa.len = uap->len; sa.flags = uap->flags; sa.to = (caddr_t)uap->to; sa.tolen = uap->tolen; DPRINTF(("calling sendto()\n")); return sendto(td, &sa); } Index: head/sys/contrib/dev/oltr/if_oltr.c =================================================================== --- head/sys/contrib/dev/oltr/if_oltr.c (revision 110231) +++ head/sys/contrib/dev/oltr/if_oltr.c (revision 110232) @@ -1,1602 +1,1601 @@ /* * Copyright (c) 1998, Larry Lile * All rights reserved. * * For latest sources and information on this driver, please * go to http://anarchy.stdio.com. * * Questions, comments or suggestions should be directed to * Larry Lile . * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if (__FreeBSD_version < 400000) #include #endif #if (NBPFILTER > 0) || (__FreeBSD_version > 400000) #include #ifndef BPF_MTAP #define BPF_MTAP(_ifp, _m) do { \ if ((_ifp)->if_bpf) \ bpf_mtap((_ifp), (_m)); \ } while (0) #endif #endif #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include "contrib/dev/oltr/trlld.h" /*#define DEBUG_MASK DEBUG_POLL*/ #ifndef DEBUG_MASK #define DEBUG_MASK 0x0000 #endif #define DEBUG_POLL 0x0001 #define DEBUG_INT 0x0002 #define DEBUG_INIT 0x0004 #define DEBUG_FN_ENT 0x8000 #define PCI_VENDOR_OLICOM 0x108D -#define MIN(A,B) (((A) < (B)) ? (A) : (B)) #define MIN3(A,B,C) (MIN(A, (MIN(B, C)))) char *AdapterName[] = { /* 0 */ "Olicom XT Adapter [unsupported]", /* 1 */ "Olicom OC-3115", /* 2 */ "Olicom ISA 16/4 Adapter (OC-3117)", /* 3 */ "Olicom ISA 16/4 Adapter (OC-3118)", /* 4 */ "Olicom MCA 16/4 Adapter (OC-3129) [unsupported]", /* 5 */ "Olicom MCA 16/4 Adapter (OC-3129) [unsupported]", /* 6 */ "Olicom MCA 16/4 Adapter (OC-3129) [unsupported]", /* 7 */ "Olicom EISA 16/4 Adapter (OC-3133)", /* 8 */ "Olicom EISA 16/4 Adapter (OC-3133)", /* 9 */ "Olicom EISA 16/4 Server Adapter (OC-3135)", /* 10 */ "Olicom PCI 16/4 Adapter (OC-3136)", /* 11 */ "Olicom PCI 16/4 Adapter (OC-3136)", /* 12 */ "Olicom PCI/II 16/4 Adapter (OC-3137)", /* 13 */ "Olicom PCI 16/4 Adapter (OC-3139)", /* 14 */ "Olicom RapidFire 3140 16/4 PCI Adapter (OC-3140)", /* 15 */ "Olicom RapidFire 3141 Fiber Adapter (OC-3141)", /* 16 */ "Olicom PCMCIA 16/4 Adapter (OC-3220) [unsupported]", /* 17 */ "Olicom PCMCIA 16/4 Adapter (OC-3121, OC-3230, OC-3232) [unsupported]", /* 18 */ "Olicom PCMCIA 16/4 Adapter (OC-3250)", /* 19 */ "Olicom RapidFire 3540 100/16/4 Adapter (OC-3540)" }; /* * Glue function prototypes for PMW kit IO */ #ifndef TRlldInlineIO static void DriverOutByte __P((unsigned short, unsigned char)); static void DriverOutWord __P((unsigned short, unsigned short)); static void DriverOutDword __P((unsigned short, unsigned long)); static void DriverRepOutByte __P((unsigned short, unsigned char *, int)); static void DriverRepOutWord __P((unsigned short, unsigned short *, int)); static void DriverRepOutDword __P((unsigned short, unsigned long *, int)); static unsigned char DriverInByte __P((unsigned short)); static unsigned short DriverInWord __P((unsigned short)); static unsigned long DriverInDword __P((unsigned short)); static void DriverRepInByte __P((unsigned short, unsigned char *, int)); static void DriverRepInWord __P((unsigned short, unsigned short *, int)); static void DriverRepInDword __P((unsigned short, unsigned long *, int)); #endif /*TRlldInlineIO*/ static void DriverSuspend __P((unsigned short)); static void DriverStatus __P((void *, TRlldStatus_t *)); static void DriverCloseCompleted __P((void *)); static void DriverStatistics __P((void *, TRlldStatistics_t *)); static void DriverTransmitFrameCompleted __P((void *, void *, int)); static void DriverReceiveFrameCompleted __P((void *, int, int, void *, int)); static TRlldDriver_t LldDriver = { TRLLD_VERSION, #ifndef TRlldInlineIO DriverOutByte, DriverOutWord, DriverOutDword, DriverRepOutByte, DriverRepOutWord, DriverRepOutDword, DriverInByte, DriverInWord, DriverInDword, DriverRepInByte, DriverRepInWord, DriverRepInDword, #endif /*TRlldInlineIO*/ DriverSuspend, DriverStatus, DriverCloseCompleted, 
DriverStatistics, DriverTransmitFrameCompleted, DriverReceiveFrameCompleted, }; struct oltr_rx_buf { int index; char *data; u_long address; }; struct oltr_tx_buf { int index; char *data; u_long address; }; #define RING_BUFFER_LEN 16 #define RING_BUFFER(x) ((RING_BUFFER_LEN - 1) & x) #define RX_BUFFER_LEN 2048 #define TX_BUFFER_LEN 2048 struct oltr_softc { struct arpcom arpcom; struct ifmedia ifmedia; bus_space_handle_t oltr_bhandle; bus_space_tag_t oltr_btag; void *oltr_intrhand; struct resource *oltr_irq; struct resource *oltr_res; int unit; int state; #define OL_UNKNOWN 0 #define OL_INIT 1 #define OL_READY 2 #define OL_CLOSING 3 #define OL_CLOSED 4 #define OL_OPENING 5 #define OL_OPEN 6 #define OL_PROMISC 7 #define OL_DEAD 8 struct oltr_rx_buf rx_ring[RING_BUFFER_LEN]; int tx_head, tx_avail, tx_frame; struct oltr_tx_buf tx_ring[RING_BUFFER_LEN]; TRlldTransmit_t frame_ring[RING_BUFFER_LEN]; struct mbuf *restart; TRlldAdapter_t TRlldAdapter; TRlldStatistics_t statistics; TRlldStatistics_t current; TRlldAdapterConfig_t config; u_short AdapterMode; u_long GroupAddress; u_long FunctionalAddress; struct callout_handle oltr_poll_ch; /*struct callout_handle oltr_stat_ch;*/ void *work_memory; }; #define SELF_TEST_POLLS 32 void oltr_poll __P((void *)); /*void oltr_stat __P((void *));*/ static void oltr_start __P((struct ifnet *)); static void oltr_stop __P((struct oltr_softc *)); static void oltr_close __P((struct oltr_softc *)); static void oltr_init __P((void *)); static int oltr_ioctl __P((struct ifnet *, u_long, caddr_t)); static void oltr_intr __P((void *)); static int oltr_ifmedia_upd __P((struct ifnet *)); static void oltr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); #if __FreeBSD_version > 400000 static int oltr_pci_probe __P((device_t)); static int oltr_pci_attach __P((device_t)); static int oltr_pci_detach __P((device_t)); static void oltr_pci_shutdown __P((device_t)); static device_method_t oltr_methods[] = { DEVMETHOD(device_probe, oltr_pci_probe), DEVMETHOD(device_attach, oltr_pci_attach), DEVMETHOD(device_detach, oltr_pci_detach), DEVMETHOD(device_shutdown, oltr_pci_shutdown), { 0, 0 } }; static driver_t oltr_driver = { "oltr", oltr_methods, sizeof(struct oltr_softc) }; static devclass_t oltr_devclass; DRIVER_MODULE(oltr, pci, oltr_driver, oltr_devclass, 0, 0); static int oltr_pci_probe(device_t dev) { int i, rc; char PCIConfigHeader[64]; TRlldAdapterConfig_t config; if ((pci_get_vendor(dev) == PCI_VENDOR_OLICOM) && ((pci_get_device(dev) == 0x0001) || (pci_get_device(dev) == 0x0004) || (pci_get_device(dev) == 0x0005) || (pci_get_device(dev) == 0x0007) || (pci_get_device(dev) == 0x0008))) { for (i = 0; i < sizeof(PCIConfigHeader); i++) PCIConfigHeader[i] = pci_read_config(dev, i, 1); rc = TRlldPCIConfig(&LldDriver, &config, PCIConfigHeader); if (rc == TRLLD_PCICONFIG_FAIL) { device_printf(dev, "TRlldPciConfig failed!\n"); return(ENXIO); } if (rc == TRLLD_PCICONFIG_VERSION) { device_printf(dev, "wrong LLD version\n"); return(ENXIO); } device_set_desc(dev, AdapterName[config.type]); return(0); } return(ENXIO); } static int oltr_pci_attach(device_t dev) { int i, s, rc = 0, rid, scratch_size; int media = IFM_TOKEN|IFM_TOK_UTP16; u_long command; char PCIConfigHeader[64]; struct oltr_softc *sc = device_get_softc(dev); struct ifnet *ifp = &sc->arpcom.ac_if; s = splimp(); bzero(sc, sizeof(struct oltr_softc)); sc->unit = device_get_unit(dev); sc->state = OL_UNKNOWN; for (i = 0; i < sizeof(PCIConfigHeader); i++) PCIConfigHeader[i] = pci_read_config(dev, i, 1); 
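/* * Hand the raw 64-byte PCI configuration header to the Olicom LLD, * which validates the adapter and fills in sc->config (adapter * type, MAC address, mode) for the checks below. */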
switch(TRlldPCIConfig(&LldDriver, &sc->config, PCIConfigHeader)) { case TRLLD_PCICONFIG_OK: break; case TRLLD_PCICONFIG_SET_COMMAND: device_printf(dev, "enabling bus master mode\n"); command = pci_read_config(dev, PCIR_COMMAND, 4); pci_write_config(dev, PCIR_COMMAND, (command | PCIM_CMD_BUSMASTEREN), 4); command = pci_read_config(dev, PCIR_COMMAND, 4); if (!(command & PCIM_CMD_BUSMASTEREN)) { device_printf(dev, "failed to enable bus master mode\n"); goto config_failed; } break; case TRLLD_PCICONFIG_FAIL: device_printf(dev, "TRlldPciConfig failed!\n"); goto config_failed; break; case TRLLD_PCICONFIG_VERSION: device_printf(dev, "wrong LLD version\n"); goto config_failed; break; } device_printf(dev, "MAC address %6D\n", sc->config.macaddress, ":"); scratch_size = TRlldAdapterSize(); if (bootverbose) device_printf(dev, "adapter memory block size %d bytes\n", scratch_size); sc->TRlldAdapter = (TRlldAdapter_t)malloc(scratch_size, M_DEVBUF, M_NOWAIT); if (sc->TRlldAdapter == NULL) { device_printf(dev, "couldn't allocate scratch buffer (%d bytes)\n", scratch_size); goto config_failed; } /* * Allocate RX/TX Pools */ for (i = 0; i < RING_BUFFER_LEN; i++) { sc->rx_ring[i].index = i; sc->rx_ring[i].data = (char *)malloc(RX_BUFFER_LEN, M_DEVBUF, M_NOWAIT); sc->rx_ring[i].address = vtophys(sc->rx_ring[i].data); sc->tx_ring[i].index = i; sc->tx_ring[i].data = (char *)malloc(TX_BUFFER_LEN, M_DEVBUF, M_NOWAIT); sc->tx_ring[i].address = vtophys(sc->tx_ring[i].data); if ((!sc->rx_ring[i].data) || (!sc->tx_ring[i].data)) { device_printf(dev, "unable to allocate ring buffers\n"); while (i > 0) { if (sc->rx_ring[i].data) free(sc->rx_ring[i].data, M_DEVBUF); if (sc->tx_ring[i].data) free(sc->tx_ring[i].data, M_DEVBUF); i--; } goto config_failed; } } /* * Allocate interrupt and DMA channel */ rid = 0; sc->oltr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, (sc->config.mode & TRLLD_MODE_SHARE_INTERRUPT ? RF_ACTIVE | RF_SHAREABLE : RF_ACTIVE)); if (sc->oltr_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); goto config_failed; } if (bus_setup_intr(dev, sc->oltr_irq, INTR_TYPE_NET, oltr_intr, sc, &sc->oltr_intrhand)) { device_printf(dev, "couldn't setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->oltr_irq); goto config_failed; } /* * Do the ifnet initialization */ ifp->if_softc = sc; ifp->if_unit = device_get_unit(dev); ifp->if_name = "oltr"; ifp->if_output = iso88025_output; ifp->if_init = oltr_init; ifp->if_start = oltr_start; ifp->if_ioctl = oltr_ioctl; ifp->if_flags = IFF_BROADCAST; bcopy(sc->config.macaddress, sc->arpcom.ac_enaddr, sizeof(sc->config.macaddress)); /* * Do ifmedia setup. 
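* The default medium is 16Mbps UTP; the OC-3139/3140/3141 also get * an autosense option, and the OC-3540 additionally offers the * 100Mbps medium (see the switch below).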
*/ ifmedia_init(&sc->ifmedia, 0, oltr_ifmedia_upd, oltr_ifmedia_sts); rc = TRlldSetSpeed(sc->TRlldAdapter, TRLLD_SPEED_16MBPS); switch(sc->config.type) { case TRLLD_ADAPTER_PCI7: /* OC-3540 */ ifmedia_add(&sc->ifmedia, IFM_TOKEN|IFM_TOK_UTP100, 0, NULL); /* FALL THROUGH */ case TRLLD_ADAPTER_PCI4: /* OC-3139 */ case TRLLD_ADAPTER_PCI5: /* OC-3140 */ case TRLLD_ADAPTER_PCI6: /* OC-3141 */ ifmedia_add(&sc->ifmedia, IFM_TOKEN|IFM_AUTO, 0, NULL); media = IFM_TOKEN|IFM_AUTO; rc = TRlldSetSpeed(sc->TRlldAdapter, 0); /* FALL THROUGH */ default: ifmedia_add(&sc->ifmedia, IFM_TOKEN|IFM_TOK_UTP4, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_TOKEN|IFM_TOK_UTP16, 0, NULL); break; } sc->ifmedia.ifm_media = media; ifmedia_set(&sc->ifmedia, media); /* * Attach the interface */ if_attach(ifp); ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; iso88025_ifattach(ifp); #if (NBPFILTER > 0) || (__FreeBSD_version > 400000) bpfattach(ifp, DLT_IEEE802, sizeof(struct iso88025_header)); #endif splx(s); return(0); config_failed: splx(s); return(ENXIO); } static int oltr_pci_detach(device_t dev) { struct oltr_softc *sc = device_get_softc(dev); struct ifnet *ifp = &sc->arpcom.ac_if; int s, i; device_printf(dev, "driver unloading\n"); s = splimp(); if_detach(ifp); if (sc->state > OL_CLOSED) oltr_stop(sc); untimeout(oltr_poll, (void *)sc, sc->oltr_poll_ch); /*untimeout(oltr_stat, (void *)sc, sc->oltr_stat_ch);*/ bus_teardown_intr(dev, sc->oltr_irq, sc->oltr_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->oltr_irq); /* Deallocate all dynamic memory regions */ for (i = 0; i < RING_BUFFER_LEN; i++) { free(sc->rx_ring[i].data, M_DEVBUF); free(sc->tx_ring[i].data, M_DEVBUF); } if (sc->work_memory) free(sc->work_memory, M_DEVBUF); free(sc->TRlldAdapter, M_DEVBUF); (void)splx(s); return(0); } static void oltr_pci_shutdown(device_t dev) { struct oltr_softc *sc = device_get_softc(dev); device_printf(dev, "oltr_pci_shutdown called\n"); if (sc->state > OL_CLOSED) oltr_stop(sc); return; } #else static const char *oltr_pci_probe __P((pcici_t, pcidi_t)); static void oltr_pci_attach __P((pcici_t, int)); static unsigned long oltr_count = 0; static struct pci_device oltr_device = { "oltr", oltr_pci_probe, oltr_pci_attach, &oltr_count, NULL }; DATA_SET(pcidevice_set, oltr_device); static const char * oltr_pci_probe(pcici_t config_id, pcidi_t device_id) { int i, rc; char PCIConfigHeader[64]; TRlldAdapterConfig_t config; if (((device_id & 0xffff) == PCI_VENDOR_OLICOM) && ( (((device_id >> 16) & 0xffff) == 0x0001) || (((device_id >> 16) & 0xffff) == 0x0004) || (((device_id >> 16) & 0xffff) == 0x0005) || (((device_id >> 16) & 0xffff) == 0x0007) || (((device_id >> 16) & 0xffff) == 0x0008))) { for (i = 0; i < 64; i++) PCIConfigHeader[i] = pci_cfgread(config_id, i, /* bytes */ 1); rc = TRlldPCIConfig(&LldDriver, &config, PCIConfigHeader); if (rc == TRLLD_PCICONFIG_FAIL) { printf("oltr: TRlldPciConfig failed!\n"); return(NULL); } if (rc == TRLLD_PCICONFIG_VERSION) { printf("oltr: wrong LLD version.\n"); return(NULL); } return(AdapterName[config.type]); } return(NULL); } static void oltr_pci_attach(pcici_t config_id, int unit) { int i, s, rc = 0, scratch_size; int media = IFM_TOKEN|IFM_TOK_UTP16; u_long command; char PCIConfigHeader[64]; struct oltr_softc *sc; struct ifnet *ifp; /* = &sc->arpcom.ac_if; */ s = splimp(); sc = malloc(sizeof(struct oltr_softc), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc == NULL) { printf("oltr%d: no memory for softc struct!\n", unit); goto config_failed; } sc->unit = unit; sc->state = OL_UNKNOWN; ifp = &sc->arpcom.ac_if; for (i = 
0; i < sizeof(PCIConfigHeader); i++) PCIConfigHeader[i] = pci_cfgread(config_id, i, 1); switch(TRlldPCIConfig(&LldDriver, &sc->config, PCIConfigHeader)) { case TRLLD_PCICONFIG_OK: break; case TRLLD_PCICONFIG_SET_COMMAND: printf("oltr%d: enabling bus master mode\n", unit); command = pci_conf_read(config_id, PCIR_COMMAND); pci_conf_write(config_id, PCIR_COMMAND, (command | PCIM_CMD_BUSMASTEREN)); command = pci_conf_read(config_id, PCIR_COMMAND); if (!(command & PCIM_CMD_BUSMASTEREN)) { printf("oltr%d: failed to enable bus master mode\n", unit); goto config_failed; } break; case TRLLD_PCICONFIG_FAIL: printf("oltr%d: TRlldPciConfig failed!\n", unit); goto config_failed; break; case TRLLD_PCICONFIG_VERSION: printf("oltr%d: wrong LLD version\n", unit); goto config_failed; break; } printf("oltr%d: MAC address %6D\n", unit, sc->config.macaddress, ":"); scratch_size = TRlldAdapterSize(); if (bootverbose) printf("oltr%d: adapter memory block size %d bytes\n", unit, scratch_size); sc->TRlldAdapter = (TRlldAdapter_t)malloc(scratch_size, M_DEVBUF, M_NOWAIT); if (sc->TRlldAdapter == NULL) { printf("oltr%d: couldn't allocate scratch buffer (%d bytes)\n",unit, scratch_size); goto config_failed; } /* * Allocate RX/TX Pools */ for (i = 0; i < RING_BUFFER_LEN; i++) { sc->rx_ring[i].index = i; sc->rx_ring[i].data = (char *)malloc(RX_BUFFER_LEN, M_DEVBUF, M_NOWAIT); sc->rx_ring[i].address = vtophys(sc->rx_ring[i].data); sc->tx_ring[i].index = i; sc->tx_ring[i].data = (char *)malloc(TX_BUFFER_LEN, M_DEVBUF, M_NOWAIT); sc->tx_ring[i].address = vtophys(sc->tx_ring[i].data); if ((!sc->rx_ring[i].data) || (!sc->tx_ring[i].data)) { printf("oltr%d: unable to allocate ring buffers\n", unit); while (i > 0) { if (sc->rx_ring[i].data) free(sc->rx_ring[i].data, M_DEVBUF); if (sc->tx_ring[i].data) free(sc->tx_ring[i].data, M_DEVBUF); i--; } goto config_failed; } } /* * Allocate interrupt and DMA channel */ if (!pci_map_int(config_id, oltr_intr, sc, &net_imask)) { printf("oltr%d: couldn't setup interrupt\n", unit); goto config_failed; } /* * Do the ifnet initialization */ ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "oltr"; ifp->if_output = iso88025_output; ifp->if_init = oltr_init; ifp->if_start = oltr_start; ifp->if_ioctl = oltr_ioctl; ifp->if_flags = IFF_BROADCAST; bcopy(sc->config.macaddress, sc->arpcom.ac_enaddr, sizeof(sc->config.macaddress)); /* * Do ifmedia setup. 
*/ ifmedia_init(&sc->ifmedia, 0, oltr_ifmedia_upd, oltr_ifmedia_sts); rc = TRlldSetSpeed(sc->TRlldAdapter, TRLLD_SPEED_16MBPS); switch(sc->config.type) { case TRLLD_ADAPTER_PCI7: /* OC-3540 */ ifmedia_add(&sc->ifmedia, IFM_TOKEN|IFM_TOK_UTP100, 0, NULL); /* FALL THROUGH */ case TRLLD_ADAPTER_PCI4: /* OC-3139 */ case TRLLD_ADAPTER_PCI5: /* OC-3140 */ case TRLLD_ADAPTER_PCI6: /* OC-3141 */ ifmedia_add(&sc->ifmedia, IFM_TOKEN|IFM_AUTO, 0, NULL); media = IFM_TOKEN|IFM_AUTO; rc = TRlldSetSpeed(sc->TRlldAdapter, 0); /* FALL THROUGH */ default: ifmedia_add(&sc->ifmedia, IFM_TOKEN|IFM_TOK_UTP4, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_TOKEN|IFM_TOK_UTP16, 0, NULL); break; } sc->ifmedia.ifm_media = media; ifmedia_set(&sc->ifmedia, media); /* * Attach the interface */ if_attach(ifp); ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; iso88025_ifattach(ifp); #if (NBPFILTER > 0) || (__FreeBSD_version > 400000) bpfattach(ifp, DLT_IEEE802, sizeof(struct iso88025_header)); #endif splx(s); return; config_failed: (void)splx(s); return; } #endif static void oltr_intr(void *xsc) { struct oltr_softc *sc = (struct oltr_softc *)xsc; if (DEBUG_MASK & DEBUG_INT) printf("I"); TRlldInterruptService(sc->TRlldAdapter); return; } static void oltr_start(struct ifnet *ifp) { struct oltr_softc *sc = ifp->if_softc; struct mbuf *m0, *m; int copy_len, buffer, frame, fragment, rc, s; /* * Check to see if output is already active */ if (ifp->if_flags & IFF_OACTIVE) return; outloop: /* * Make sure we have buffers to transmit with */ if (sc->tx_avail <= 0) { printf("oltr%d: tx queue full\n", sc->unit); ifp->if_flags |= IFF_OACTIVE; return; } if (sc->restart == NULL) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) return; } else { m = sc->restart; sc->restart = NULL; } m0 = m; frame = RING_BUFFER(sc->tx_frame); buffer = RING_BUFFER(sc->tx_head); fragment = 0; copy_len = 0; sc->frame_ring[frame].FragmentCount = 0; while (copy_len < m0->m_pkthdr.len) { sc->frame_ring[frame].FragmentCount++; if (sc->frame_ring[frame].FragmentCount > sc->tx_avail) goto nobuffers; sc->frame_ring[frame].TransmitFragment[fragment].VirtualAddress = sc->tx_ring[buffer].data; sc->frame_ring[frame].TransmitFragment[fragment].PhysicalAddress = sc->tx_ring[buffer].address; sc->frame_ring[frame].TransmitFragment[fragment].count = MIN(m0->m_pkthdr.len - copy_len, TX_BUFFER_LEN); m_copydata(m0, copy_len, MIN(m0->m_pkthdr.len - copy_len, TX_BUFFER_LEN), sc->tx_ring[buffer].data); copy_len += MIN(m0->m_pkthdr.len - copy_len, TX_BUFFER_LEN); fragment++; buffer = RING_BUFFER((buffer + 1)); } s = splimp(); rc = TRlldTransmitFrame(sc->TRlldAdapter, &sc->frame_ring[frame], (void *)&sc->frame_ring[frame]); (void)splx(s); if (rc != TRLLD_TRANSMIT_OK) { printf("oltr%d: TRlldTransmitFrame returned %d\n", sc->unit, rc); ifp->if_oerrors++; goto bad; } sc->tx_avail -= sc->frame_ring[frame].FragmentCount; sc->tx_head = RING_BUFFER((sc->tx_head + sc->frame_ring[frame].FragmentCount)); sc->tx_frame++; #if (NBPFILTER > 0) || (__FreeBSD_version > 400000) BPF_MTAP(ifp, m0); #endif /*ifp->if_opackets++;*/ bad: m_freem(m0); goto outloop; nobuffers: printf("oltr%d: queue full\n", sc->unit); ifp->if_flags |= IFF_OACTIVE; ifp->if_oerrors++; /*m_freem(m0);*/ sc->restart = m0; return; } static void oltr_close(struct oltr_softc *sc) { /*printf("oltr%d: oltr_close\n", sc->unit);*/ oltr_stop(sc); tsleep(sc, PWAIT, "oltrclose", 30*hz); } static void oltr_stop(struct oltr_softc *sc) { struct ifnet *ifp = &sc->arpcom.ac_if; /*printf("oltr%d: oltr_stop\n", sc->unit);*/ ifp->if_flags &= ~(IFF_UP | IFF_RUNNING | 
IFF_OACTIVE); TRlldClose(sc->TRlldAdapter, 0); sc->state = OL_CLOSING; } static void oltr_init(void * xsc) { struct oltr_softc *sc = (struct oltr_softc *)xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct ifmedia *ifm = &sc->ifmedia; int poll = 0, i, rc = 0, s; int work_size; /* * Check adapter state, don't allow multiple inits */ if (sc->state > OL_CLOSED) { printf("oltr%d: adapter not ready\n", sc->unit); return; } s = splimp(); /* * Initialize Adapter */ if ((rc = TRlldAdapterInit(&LldDriver, sc->TRlldAdapter, vtophys(sc->TRlldAdapter), (void *)sc, &sc->config)) != TRLLD_INIT_OK) { switch(rc) { case TRLLD_INIT_NOT_FOUND: printf("oltr%d: adapter not found\n", sc->unit); break; case TRLLD_INIT_UNSUPPORTED: printf("oltr%d: adapter not supported by low level driver\n", sc->unit); break; case TRLLD_INIT_PHYS16: printf("oltr%d: adapter memory block above 16M cannot DMA\n", sc->unit); break; case TRLLD_INIT_VERSION: printf("oltr%d: low level driver version mismatch\n", sc->unit); break; default: printf("oltr%d: unknown init error %d\n", sc->unit, rc); break; } goto init_failed; } sc->state = OL_INIT; switch(sc->config.type) { case TRLLD_ADAPTER_PCI4: /* OC-3139 */ work_size = 32 * 1024; break; case TRLLD_ADAPTER_PCI7: /* OC-3540 */ work_size = 256; break; default: work_size = 0; } if (work_size) { if ((sc->work_memory = malloc(work_size, M_DEVBUF, M_NOWAIT)) == NULL) { printf("oltr%d: failed to allocate work memory (%d octets).\n", sc->unit, work_size); } else { TRlldAddMemory(sc->TRlldAdapter, sc->work_memory, vtophys(sc->work_memory), work_size); } } switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: rc = TRlldSetSpeed(sc->TRlldAdapter, 0); /* TRLLD_SPEED_AUTO */ break; case IFM_TOK_UTP4: rc = TRlldSetSpeed(sc->TRlldAdapter, TRLLD_SPEED_4MBPS); break; case IFM_TOK_UTP16: rc = TRlldSetSpeed(sc->TRlldAdapter, TRLLD_SPEED_16MBPS); break; case IFM_TOK_UTP100: rc = TRlldSetSpeed(sc->TRlldAdapter, TRLLD_SPEED_100MBPS); break; } /* * Download adapter micro-code */ if (bootverbose) printf("oltr%d: Downloading adapter microcode: ", sc->unit); switch(sc->config.mactype) { case TRLLD_MAC_TMS: rc = TRlldDownload(sc->TRlldAdapter, TRlldMacCode); if (bootverbose) printf("TMS-380"); break; case TRLLD_MAC_HAWKEYE: rc = TRlldDownload(sc->TRlldAdapter, TRlldHawkeyeMac); if (bootverbose) printf("Hawkeye"); break; case TRLLD_MAC_BULLSEYE: rc = TRlldDownload(sc->TRlldAdapter, TRlldBullseyeMac); if (bootverbose) printf("Bullseye"); break; default: if (bootverbose) printf("unknown - failed!\n"); goto init_failed; break; } /* * Check download status */ switch(rc) { case TRLLD_DOWNLOAD_OK: if (bootverbose) printf(" - ok\n"); break; case TRLLD_DOWNLOAD_ERROR: if (bootverbose) printf(" - failed\n"); else printf("oltr%d: adapter microcode download failed\n", sc->unit); goto init_failed; break; case TRLLD_STATE: if (bootverbose) printf(" - not ready\n"); goto init_failed; break; } /* * Wait for self-test to complete */ i = 0; while ((poll++ < SELF_TEST_POLLS) && (sc->state < OL_READY)) { if (DEBUG_MASK & DEBUG_INIT) printf("p"); DELAY(TRlldPoll(sc->TRlldAdapter) * 1000); if (TRlldInterruptService(sc->TRlldAdapter) != 0) if (DEBUG_MASK & DEBUG_INIT) printf("i"); } if (sc->state != OL_CLOSED) { printf("oltr%d: self-test failed\n", sc->unit); goto init_failed; } /* * Set up adapter poll */ callout_handle_init(&sc->oltr_poll_ch); sc->oltr_poll_ch = timeout(oltr_poll, (void *)sc, 1); sc->state = OL_OPENING; /* * Open the adapter */ rc = TRlldOpen(sc->TRlldAdapter, sc->arpcom.ac_enaddr, sc->GroupAddress, 
sc->FunctionalAddress, 1552, sc->AdapterMode); switch(rc) { case TRLLD_OPEN_OK: break; case TRLLD_OPEN_STATE: printf("oltr%d: adapter not ready for open\n", sc->unit); (void)splx(s); return; case TRLLD_OPEN_ADDRESS_ERROR: printf("oltr%d: illegal MAC address\n", sc->unit); (void)splx(s); return; case TRLLD_OPEN_MODE_ERROR: printf("oltr%d: illegal open mode\n", sc->unit); (void)splx(s); return; default: printf("oltr%d: unknown open error (%d)\n", sc->unit, rc); (void)splx(s); return; } /* * Set promiscuous mode for now... */ TRlldSetPromiscuousMode(sc->TRlldAdapter, TRLLD_PROM_LLC); ifp->if_flags |= IFF_PROMISC; /* * Block on the ring insert and set a timeout */ tsleep(sc, PWAIT, "oltropen", 30*hz); /* * Set up receive buffer ring */ for (i = 0; i < RING_BUFFER_LEN; i++) { rc = TRlldReceiveFragment(sc->TRlldAdapter, (void *)sc->rx_ring[i].data, sc->rx_ring[i].address, RX_BUFFER_LEN, (void *)sc->rx_ring[i].index); if (rc != TRLLD_RECEIVE_OK) { printf("oltr%d: adapter refused receive fragment %d (rc = %d)\n", sc->unit, i, rc); break; } } sc->tx_avail = RING_BUFFER_LEN; sc->tx_head = 0; sc->tx_frame = 0; sc->restart = NULL; ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* * Set up adapter statistics poll */ /*callout_handle_init(&sc->oltr_stat_ch);*/ /*sc->oltr_stat_ch = timeout(oltr_stat, (void *)sc, 1*hz);*/ (void)splx(s); return; init_failed: sc->state = OL_DEAD; (void)splx(s); return; } static int oltr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct oltr_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0, s; s = splimp(); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = iso88025_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { oltr_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) { oltr_close(sc); } } break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; default: error = EINVAL; break; } (void)splx(s); return(error); } void oltr_poll(void *arg) { struct oltr_softc *sc = (struct oltr_softc *)arg; int s; s = splimp(); if (DEBUG_MASK & DEBUG_POLL) printf("P"); /* Set up next adapter poll */ sc->oltr_poll_ch = timeout(oltr_poll, (void *)sc, (TRlldPoll(sc->TRlldAdapter) * hz / 1000)); (void)splx(s); } #ifdef NOTYET void oltr_stat(void *arg) { struct oltr_softc *sc = (struct oltr_softc *)arg; int s; s = splimp(); /* Set up next adapter poll */ sc->oltr_stat_ch = timeout(oltr_stat, (void *)sc, 1*hz); if (TRlldGetStatistics(sc->TRlldAdapter, &sc->current, 0) != 0) { /*printf("oltr%d: statistics available immediately...\n", sc->unit);*/ DriverStatistics((void *)sc, &sc->current); } (void)splx(s); } #endif static int oltr_ifmedia_upd(struct ifnet *ifp) { struct oltr_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->ifmedia; int rc; if (IFM_TYPE(ifm->ifm_media) != IFM_TOKEN) return(EINVAL); switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: rc = TRlldSetSpeed(sc->TRlldAdapter, 0); /* TRLLD_SPEED_AUTO */ break; case IFM_TOK_UTP4: rc = TRlldSetSpeed(sc->TRlldAdapter, TRLLD_SPEED_4MBPS); break; case IFM_TOK_UTP16: rc = TRlldSetSpeed(sc->TRlldAdapter, TRLLD_SPEED_16MBPS); break; case IFM_TOK_UTP100: rc = TRlldSetSpeed(sc->TRlldAdapter, TRLLD_SPEED_100MBPS); break; default: return(EINVAL); break; } return(0); } static void oltr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct oltr_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->ifmedia; /*printf("oltr%d: oltr_ifmedia_sts\n", sc->unit);*/
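/* * The LLD does not appear to expose live link state at this point, * so the configured media word is simply reported back as the * active media. */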
ifmr->ifm_active = IFM_TYPE(ifm->ifm_media)|IFM_SUBTYPE(ifm->ifm_media); } /* * ---------------------- PMW Callback Functions ----------------------- */ void DriverStatistics(void *DriverHandle, TRlldStatistics_t *statistics) { #ifdef NOTYET struct oltr_softc *sc = (struct oltr_softc *)DriverHandle; if (sc->statistics.LineErrors != statistics->LineErrors) printf("oltr%d: Line Errors %lu\n", sc->unit, statistics->LineErrors); if (sc->statistics.InternalErrors != statistics->InternalErrors) printf("oltr%d: Internal Errors %lu\n", sc->unit, statistics->InternalErrors); if (sc->statistics.BurstErrors != statistics->BurstErrors) printf("oltr%d: Burst Errors %lu\n", sc->unit, statistics->BurstErrors); if (sc->statistics.AbortDelimiters != statistics->AbortDelimiters) printf("oltr%d: Abort Delimiters %lu\n", sc->unit, statistics->AbortDelimiters); if (sc->statistics.ARIFCIErrors != statistics->ARIFCIErrors) printf("oltr%d: ARIFCI Errors %lu\n", sc->unit, statistics->ARIFCIErrors); if (sc->statistics.LostFrames != statistics->LostFrames) printf("oltr%d: Lost Frames %lu\n", sc->unit, statistics->LostFrames); if (sc->statistics.CongestionErrors != statistics->CongestionErrors) printf("oltr%d: Congestion Errors %lu\n", sc->unit, statistics->CongestionErrors); if (sc->statistics.FrequencyErrors != statistics->FrequencyErrors) printf("oltr%d: Frequency Errors %lu\n", sc->unit, statistics->FrequencyErrors); if (sc->statistics.TokenErrors != statistics->TokenErrors) printf("oltr%d: Token Errors %lu\n", sc->unit, statistics->TokenErrors); if (sc->statistics.DMABusErrors != statistics->DMABusErrors) printf("oltr%d: DMA Bus Errors %lu\n", sc->unit, statistics->DMABusErrors); if (sc->statistics.DMAParityErrors != statistics->DMAParityErrors) printf("oltr%d: DMA Parity Errors %lu\n", sc->unit, statistics->DMAParityErrors); if (sc->statistics.ReceiveLongFrame != statistics->ReceiveLongFrame) printf("oltr%d: Long frames received %lu\n", sc->unit, statistics->ReceiveLongFrame); if (sc->statistics.ReceiveCRCErrors != statistics->ReceiveCRCErrors) printf("oltr%d: Receive CRC Errors %lu\n", sc->unit, statistics->ReceiveCRCErrors); if (sc->statistics.ReceiveOverflow != statistics->ReceiveOverflow) printf("oltr%d: Receive overflows %lu\n", sc->unit, statistics->ReceiveOverflow); if (sc->statistics.TransmitUnderrun != statistics->TransmitUnderrun) printf("oltr%d: Transmit underruns %lu\n", sc->unit, statistics->TransmitUnderrun); bcopy(statistics, &sc->statistics, sizeof(TRlldStatistics_t)); #endif } static void DriverSuspend(unsigned short MicroSeconds) { DELAY(MicroSeconds); } static void DriverStatus(void *DriverHandle, TRlldStatus_t *Status) { struct oltr_softc *sc = (struct oltr_softc *)DriverHandle; struct ifnet *ifp = &sc->arpcom.ac_if; char *Protocol[] = { /* 0 */ "Unknown", /* 1 */ "TKP", /* 2 */ "TXI" }; char *Timeout[] = { /* 0 */ "command", /* 1 */ "transmit", /* 2 */ "interrupt" }; switch (Status->Type) { case TRLLD_STS_ON_WIRE: printf("oltr%d: ring insert (%d Mbps - %s)\n", sc->unit, Status->Specification.OnWireInformation.Speed, Protocol[Status->Specification.OnWireInformation.AccessProtocol]); sc->state = OL_OPEN; wakeup(sc); break; case TRLLD_STS_SELFTEST_STATUS: if (Status->Specification.SelftestStatus == TRLLD_ST_OK) { sc->state = OL_CLOSED; if (bootverbose) printf("oltr%d: self test complete\n", sc->unit); } if (Status->Specification.SelftestStatus & TRLLD_ST_ERROR) { printf("oltr%d: Adapter self test error %d", sc->unit, Status->Specification.SelftestStatus & ~TRLLD_ST_ERROR); sc->state = OL_DEAD; }
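/* * A self-test timeout is handled like a self-test error below: the * adapter is marked OL_DEAD and will not be opened. */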
if (Status->Specification.SelftestStatus & TRLLD_ST_TIMEOUT) { printf("oltr%d: Adapter self test timed out.\n", sc->unit); sc->state = OL_DEAD; } break; case TRLLD_STS_INIT_STATUS: if (Status->Specification.InitStatus == 0x800) { oltr_stop(sc); ifmedia_set(&sc->ifmedia, IFM_TOKEN|IFM_TOK_UTP16); TRlldSetSpeed(sc->TRlldAdapter, TRLLD_SPEED_16MBPS); oltr_init(sc); break; } printf("oltr%d: adapter init failure 0x%03x\n", sc->unit, Status->Specification.InitStatus); oltr_stop(sc); break; case TRLLD_STS_RING_STATUS: if (Status->Specification.RingStatus) { printf("oltr%d: Ring status change: ", sc->unit); if (Status->Specification.RingStatus & TRLLD_RS_SIGNAL_LOSS) printf(" [Signal Loss]"); if (Status->Specification.RingStatus & TRLLD_RS_HARD_ERROR) printf(" [Hard Error]"); if (Status->Specification.RingStatus & TRLLD_RS_SOFT_ERROR) printf(" [Soft Error]"); if (Status->Specification.RingStatus & TRLLD_RS_TRANSMIT_BEACON) printf(" [Beacon]"); if (Status->Specification.RingStatus & TRLLD_RS_LOBE_WIRE_FAULT) printf(" [Wire Fault]"); if (Status->Specification.RingStatus & TRLLD_RS_AUTO_REMOVAL_ERROR) printf(" [Auto Removal]"); if (Status->Specification.RingStatus & TRLLD_RS_REMOVE_RECEIVED) printf(" [Remove Received]"); if (Status->Specification.RingStatus & TRLLD_RS_COUNTER_OVERFLOW) printf(" [Counter Overflow]"); if (Status->Specification.RingStatus & TRLLD_RS_SINGLE_STATION) printf(" [Single Station]"); if (Status->Specification.RingStatus & TRLLD_RS_RING_RECOVERY) printf(" [Ring Recovery]"); printf("\n"); } break; case TRLLD_STS_ADAPTER_CHECK: printf("oltr%d: adapter check (%04x %04x %04x %04x)\n", sc->unit, Status->Specification.AdapterCheck[0], Status->Specification.AdapterCheck[1], Status->Specification.AdapterCheck[2], Status->Specification.AdapterCheck[3]); sc->state = OL_DEAD; oltr_stop(sc); break; case TRLLD_STS_PROMISCUOUS_STOPPED: printf("oltr%d: promiscuous mode ", sc->unit); if (Status->Specification.PromRemovedCause == 1) printf("remove received."); if (Status->Specification.PromRemovedCause == 2) printf("poll failure."); if (Status->Specification.PromRemovedCause == 3) printf("buffer size failure."); printf("\n"); ifp->if_flags &= ~IFF_PROMISC; break; case TRLLD_STS_LLD_ERROR: printf("oltr%d: low level driver internal error ", sc->unit); printf("(%04x %04x %04x %04x).\n", Status->Specification.InternalError[0], Status->Specification.InternalError[1], Status->Specification.InternalError[2], Status->Specification.InternalError[3]); sc->state = OL_DEAD; oltr_stop(sc); break; case TRLLD_STS_ADAPTER_TIMEOUT: printf("oltr%d: adapter %s timeout.\n", sc->unit, Timeout[Status->Specification.AdapterTimeout]); break; default: printf("oltr%d: driver status Type = %d\n", sc->unit, Status->Type); break; } if (Status->Closed) { sc->state = OL_CLOSING; oltr_stop(sc); } } static void DriverCloseCompleted(void *DriverHandle) { struct oltr_softc *sc = (struct oltr_softc *)DriverHandle; printf("oltr%d: adapter closed\n", sc->unit); wakeup(sc); sc->state = OL_CLOSED; } static void DriverTransmitFrameCompleted(void *DriverHandle, void *FrameHandle, int TransmitStatus) { struct oltr_softc *sc = (struct oltr_softc *)DriverHandle; struct ifnet *ifp = &sc->arpcom.ac_if; TRlldTransmit_t *frame = (TRlldTransmit_t *)FrameHandle; /*printf("oltr%d: DriverTransmitFrameCompleted\n", sc->unit);*/ if (TransmitStatus != TRLLD_TRANSMIT_OK) { ifp->if_oerrors++; printf("oltr%d: transmit error %d\n", sc->unit, TransmitStatus); } else { ifp->if_opackets++; } sc->tx_avail += frame->FragmentCount; if (ifp->if_flags &
IFF_OACTIVE) { printf("oltr%d: queue restart\n", sc->unit); ifp->if_flags &= ~IFF_OACTIVE; oltr_start(ifp); } } static void DriverReceiveFrameCompleted(void *DriverHandle, int ByteCount, int FragmentCount, void *FragmentHandle, int ReceiveStatus) { struct oltr_softc *sc = (struct oltr_softc *)DriverHandle; struct ifnet *ifp = (struct ifnet *)&sc->arpcom.ac_if; struct mbuf *m0, *m1, *m; struct iso88025_header *th; int frame_len = ByteCount, hdr_len, i = (int)FragmentHandle, rc, s; int mbuf_offset, mbuf_size, frag_offset, copy_length; char *fragment = sc->rx_ring[RING_BUFFER(i)].data; if (sc->state > OL_CLOSED) { if (ReceiveStatus == TRLLD_RCV_OK) { MGETHDR(m0, M_NOWAIT, MT_DATA); mbuf_size = MHLEN - 2; if (!m0) { ifp->if_ierrors++; goto dropped; } if (ByteCount + 2 > MHLEN) { MCLGET(m0, M_NOWAIT); mbuf_size = MCLBYTES - 2; if (!(m0->m_flags & M_EXT)) { m_freem(m0); ifp->if_ierrors++; goto dropped; } } m0->m_pkthdr.rcvif = ifp; m0->m_pkthdr.len = ByteCount; m0->m_len = 0; m0->m_data += 2; th = mtod(m0, struct iso88025_header *); m0->m_pkthdr.header = (void *)th; m = m0; mbuf_offset = 0; frag_offset = 0; while (frame_len) { copy_length = MIN3(frame_len, (RX_BUFFER_LEN - frag_offset), (mbuf_size - mbuf_offset)); bcopy(fragment + frag_offset, mtod(m, char *) + mbuf_offset, copy_length); m->m_len += copy_length; mbuf_offset += copy_length; frag_offset += copy_length; frame_len -= copy_length; if (frag_offset == RX_BUFFER_LEN) { fragment = sc->rx_ring[RING_BUFFER(++i)].data; frag_offset = 0; } if ((mbuf_offset == mbuf_size) && (frame_len > 0)) { MGET(m1, M_NOWAIT, MT_DATA); mbuf_size = MHLEN; if (!m1) { ifp->if_ierrors++; m_freem(m0); goto dropped; } if (frame_len > MHLEN) { MCLGET(m1, M_NOWAIT); mbuf_size = MCLBYTES; if (!(m1->m_flags & M_EXT)) { m_freem(m0); m_freem(m1); ifp->if_ierrors++; goto dropped; } } m->m_next = m1; m = m1; mbuf_offset = 0; m->m_len = 0; } } #if (NBPFILTER > 0) || (__FreeBSD_version > 400000) BPF_MTAP(ifp, m0); #endif /*if (ifp->if_flags & IFF_PROMISC) {*/ if (bcmp(th->iso88025_dhost, etherbroadcastaddr , sizeof(th->iso88025_dhost))) { if ((bcmp(th->iso88025_dhost + 1, sc->arpcom.ac_enaddr + 1, ISO88025_ADDR_LEN - 1)) || ((th->iso88025_dhost[0] & 0x7f) != sc->arpcom.ac_enaddr[0])) { m_freem(m0); goto dropped; } } /*}*/ ifp->if_ipackets++; hdr_len = ISO88025_HDR_LEN; if (th->iso88025_shost[0] & 0x80) hdr_len += (ntohs(th->rcf) & 0x1f00) >> 8; m0->m_pkthdr.len -= hdr_len; m0->m_len -= hdr_len; m0->m_data += hdr_len; iso88025_input(ifp, th, m0); } else { /* Receiver error */ if (ReceiveStatus != TRLLD_RCV_NO_DATA) { printf("oltr%d: receive error %d\n", sc->unit, ReceiveStatus); ifp->if_ierrors++; } } dropped: s = splimp(); i = (int)FragmentHandle; while (FragmentCount--) { rc = TRlldReceiveFragment(sc->TRlldAdapter, (void *)sc->rx_ring[RING_BUFFER(i)].data, sc->rx_ring[RING_BUFFER(i)].address, RX_BUFFER_LEN, (void *)sc->rx_ring[RING_BUFFER(i)].index); if (rc != TRLLD_RECEIVE_OK) { printf("oltr%d: adapter refused receive fragment %d (rc = %d)\n", sc->unit, i, rc); break; } i++; } (void)splx(s); } } /* * ---------------------------- PMW Glue ------------------------------- */ #ifndef TRlldInlineIO static void DriverOutByte(unsigned short IOAddress, unsigned char value) { outb(IOAddress, value); } static void DriverOutWord(unsigned short IOAddress, unsigned short value) { outw(IOAddress, value); } static void DriverOutDword(unsigned short IOAddress, unsigned long value) { outl(IOAddress, value); } static void DriverRepOutByte(unsigned short IOAddress, unsigned char 
*DataPointer, int ByteCount) { outsb(IOAddress, (void *)DataPointer, ByteCount); } static void DriverRepOutWord(unsigned short IOAddress, unsigned short *DataPointer, int WordCount) { outsw(IOAddress, (void *)DataPointer, WordCount); } static void DriverRepOutDword(unsigned short IOAddress, unsigned long *DataPointer, int DWordCount) { outsl(IOAddress, (void *)DataPointer, DWordCount); } static unsigned char DriverInByte(unsigned short IOAddress) { return(inb(IOAddress)); } static unsigned short DriverInWord(unsigned short IOAddress) { return(inw(IOAddress)); } static unsigned long DriverInDword(unsigned short IOAddress) { return(inl(IOAddress)); } static void DriverRepInByte(unsigned short IOAddress, unsigned char *DataPointer, int ByteCount) { insb(IOAddress, (void *)DataPointer, ByteCount); } static void DriverRepInWord(unsigned short IOAddress, unsigned short *DataPointer, int WordCount) { insw(IOAddress, (void *)DataPointer, WordCount); } static void DriverRepInDword( unsigned short IOAddress, unsigned long *DataPointer, int DWordCount) { insl(IOAddress, (void *)DataPointer, DWordCount); } #endif /* TRlldInlineIO */ Index: head/sys/contrib/ipfilter/netinet/ip_proxy.c =================================================================== --- head/sys/contrib/ipfilter/netinet/ip_proxy.c (revision 110231) +++ head/sys/contrib/ipfilter/netinet/ip_proxy.c (revision 110232) @@ -1,616 +1,612 @@ /* * Copyright (C) 1997-2002 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. */ #if defined(__FreeBSD__) && defined(KERNEL) && !defined(_KERNEL) # define _KERNEL #endif #ifdef __sgi # include #endif #include #include #include #include #include #if !defined(__FreeBSD_version) # include #endif #include #if !defined(_KERNEL) && !defined(KERNEL) # include # include # include #endif #ifndef linux # include #endif #include #if defined(_KERNEL) # if !defined(linux) # include # else # include # endif #endif #if !defined(__SVR4) && !defined(__svr4__) # ifndef linux # include # endif #else # include # ifdef _KERNEL # include # endif # include # include #endif #if __FreeBSD__ > 2 # include #endif #include #ifdef sun # include #endif #include #include #include #include #ifndef linux # include #endif #include #include #include #include "netinet/ip_compat.h" #include #include "netinet/ip_fil.h" #include "netinet/ip_nat.h" #include "netinet/ip_state.h" #include "netinet/ip_proxy.h" #if (__FreeBSD_version >= 300000) # include #endif #if !defined(lint) /* static const char rcsid[] = "@(#)$Id: ip_proxy.c,v 2.9.2.6 2001/07/15 22:06:15 darrenr Exp $"; */ static const char rcsid[] = "@(#)$FreeBSD$"; #endif #if defined(_KERNEL) && (SOLARIS || defined(__sgi)) extern KRWLOCK_T ipf_nat, ipf_state; #endif -#ifndef MIN -#define MIN(a,b) (((a)<(b))?(a):(b)) -#endif - static int appr_fixseqack __P((fr_info_t *, ip_t *, ap_session_t *, int )); #define PROXY_DEBUG 0 #define AP_SESS_SIZE 53 #include "netinet/ip_ftp_pxy.c" #if defined(_KERNEL) #include "netinet/ip_rcmd_pxy.c" #include "netinet/ip_raudio_pxy.c" #include "netinet/ip_netbios_pxy.c" #include "netinet/ip_ipsec_pxy.c" #endif ap_session_t *ap_sess_tab[AP_SESS_SIZE]; ap_session_t *ap_sess_list = NULL; aproxy_t *ap_proxylist = NULL; aproxy_t ap_proxies[] = { #ifdef IPF_FTP_PROXY { NULL, "ftp", (char)IPPROTO_TCP, 0, 0, ippr_ftp_init, NULL, ippr_ftp_new, NULL, ippr_ftp_in, ippr_ftp_out, NULL }, #endif #ifdef IPF_RCMD_PROXY { NULL, "rcmd", (char)IPPROTO_TCP, 0, 0, ippr_rcmd_init, NULL, ippr_rcmd_new, NULL, NULL, ippr_rcmd_out, NULL }, #endif #ifdef 
IPF_RAUDIO_PROXY { NULL, "raudio", (char)IPPROTO_TCP, 0, 0, ippr_raudio_init, NULL, ippr_raudio_new, NULL, ippr_raudio_in, ippr_raudio_out, NULL }, #endif #ifdef IPF_IPSEC_PROXY { NULL, "ipsec", (char)IPPROTO_UDP, 0, 0, ippr_ipsec_init, NULL, ippr_ipsec_new, ippr_ipsec_del, NULL, ippr_ipsec_out, ippr_ipsec_match }, #endif #ifdef IPF_NETBIOS_PROXY { NULL, "netbios", (char)IPPROTO_UDP, 0, 0, ippr_netbios_init, NULL, NULL, NULL, NULL, ippr_netbios_out, NULL }, #endif #ifdef IPF_H323_PROXY { NULL, "h323", (char)IPPROTO_TCP, 0, 0, ippr_h323_init, NULL, ippr_h323_new, ippr_h323_del, ippr_h323_in, ippr_h323_out, NULL }, { NULL, "h245", (char)IPPROTO_TCP, 0, 0, ippr_h245_init, NULL, ippr_h245_new, NULL, NULL, ippr_h245_out, NULL }, #endif { NULL, "", '\0', 0, 0, NULL, NULL, NULL } }; /* * Dynamically add a new kernel proxy. Ensure that it is unique in the * collection compiled in and dynamically added. */ int appr_add(ap) aproxy_t *ap; { aproxy_t *a; for (a = ap_proxies; a->apr_p; a++) if ((a->apr_p == ap->apr_p) && !strncmp(a->apr_label, ap->apr_label, sizeof(ap->apr_label))) return -1; for (a = ap_proxylist; a && a->apr_p; a = a->apr_next) if ((a->apr_p == ap->apr_p) && !strncmp(a->apr_label, ap->apr_label, sizeof(ap->apr_label))) return -1; ap->apr_next = ap_proxylist; ap_proxylist = ap; return (*ap->apr_init)(); } /* * Delete a proxy that has been added dynamically from those available. * If it is in use, return 1 (do not destroy NOW), not in use 0 or -1 * if it cannot be matched. */ int appr_del(ap) aproxy_t *ap; { aproxy_t *a, **app; for (app = &ap_proxylist; (a = *app); app = &a->apr_next) if (a == ap) { a->apr_flags |= APR_DELETE; *app = a->apr_next; if (ap->apr_ref != 0) return 1; return 0; } return -1; } /* * Return 1 if the packet is a good match against a proxy, else 0. */ int appr_ok(ip, tcp, nat) ip_t *ip; tcphdr_t *tcp; ipnat_t *nat; { aproxy_t *apr = nat->in_apr; u_short dport = nat->in_dport; if ((apr == NULL) || (apr->apr_flags & APR_DELETE) || (ip->ip_p != apr->apr_p)) return 0; if (((tcp != NULL) && (tcp->th_dport != dport)) || (!tcp && dport)) return 0; return 1; } /* * If a proxy has a match function, call that to do extended packet * matching. */ int appr_match(fin, nat) fr_info_t *fin; nat_t *nat; { aproxy_t *apr; ipnat_t *ipn; ipn = nat->nat_ptr; if (ipn == NULL) return -1; apr = ipn->in_apr; if ((apr == NULL) || (apr->apr_flags & APR_DELETE) || (nat->nat_aps == NULL)) return -1; if (apr->apr_match != NULL) if ((*apr->apr_match)(fin, nat->nat_aps, nat) != 0) return -1; return 0; } /* * Allocate a new application proxy structure and fill it in with the * relevant details. call the init function once complete, prior to * returning. 
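
A minimal standalone sketch of the allocate-zero-construct-link shape
appr_new() follows; the names below are hypothetical stand-ins (the
kernel code uses KMALLOC/KFREE and also back-links the session into
its owning nat_t):

#include <stdlib.h>
#include <string.h>

struct session {
        struct session  *next;
        void            *data;
};

static struct session *session_list;    /* stand-in for ap_sess_list */

static struct session *
session_new(int (*ctor)(struct session *))
{
        struct session *s;

        s = malloc(sizeof(*s));
        if (s == NULL)
                return (NULL);
        memset(s, 0, sizeof(*s));
        /*
         * Run the per-proxy constructor before linking, so a failed
         * setup never leaves a half-built session visible on the list.
         */
        if (ctor != NULL && (*ctor)(s) == -1) {
                free(s);
                return (NULL);
        }
        s->next = session_list;
        session_list = s;
        return (s);
}
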
*/ int appr_new(fin, ip, nat) fr_info_t *fin; ip_t *ip; nat_t *nat; { register ap_session_t *aps; aproxy_t *apr; if ((nat->nat_ptr == NULL) || (nat->nat_aps != NULL)) return -1; apr = nat->nat_ptr->in_apr; if (!apr || (apr->apr_flags & APR_DELETE) || (ip->ip_p != apr->apr_p)) return -1; KMALLOC(aps, ap_session_t *); if (!aps) return -1; bzero((char *)aps, sizeof(*aps)); aps->aps_p = ip->ip_p; aps->aps_data = NULL; aps->aps_apr = apr; aps->aps_psiz = 0; if (apr->apr_new != NULL) if ((*apr->apr_new)(fin, ip, aps, nat) == -1) { if ((aps->aps_data != NULL) && (aps->aps_psiz != 0)) { KFREES(aps->aps_data, aps->aps_psiz); } KFREE(aps); return -1; } aps->aps_nat = nat; aps->aps_next = ap_sess_list; ap_sess_list = aps; nat->nat_aps = aps; return 0; } /* * check to see if a packet should be passed through an active proxy routine * if one has been setup for it. */ int appr_check(ip, fin, nat) ip_t *ip; fr_info_t *fin; nat_t *nat; { #if SOLARIS && defined(_KERNEL) && (SOLARIS2 >= 6) mb_t *m = fin->fin_qfm; int dosum = 1; #endif tcphdr_t *tcp = NULL; ap_session_t *aps; aproxy_t *apr; u_32_t sum; short rv; int err; aps = nat->nat_aps; if ((aps != NULL) && (aps->aps_p == ip->ip_p)) { if (ip->ip_p == IPPROTO_TCP) { tcp = (tcphdr_t *)fin->fin_dp; /* * verify that the checksum is correct. If not, then * don't do anything with this packet. */ #if SOLARIS && defined(_KERNEL) && (SOLARIS2 >= 6) if (dohwcksum && (m->b_ick_flag == ICK_VALID)) { sum = tcp->th_sum; dosum = 0; } if (dosum) sum = fr_tcpsum(fin->fin_qfm, ip, tcp); #else sum = fr_tcpsum(*(mb_t **)fin->fin_mp, ip, tcp); #endif if (sum != tcp->th_sum) { #if PROXY_DEBUG printf("proxy tcp checksum failure\n"); #endif frstats[fin->fin_out].fr_tcpbad++; return -1; } /* * Don't both the proxy with these...or in fact, should * we free up proxy stuff when seen? */ if ((tcp->th_flags & TH_RST) != 0) return 0; } apr = aps->aps_apr; err = 0; if (fin->fin_out != 0) { if (apr->apr_outpkt != NULL) err = (*apr->apr_outpkt)(fin, ip, aps, nat); } else { if (apr->apr_inpkt != NULL) err = (*apr->apr_inpkt)(fin, ip, aps, nat); } rv = APR_EXIT(err); if (rv == 1) { #if PROXY_DEBUG printf("proxy says bad packet received\n"); #endif return -1; } if (rv == 2) { #if PROXY_DEBUG printf("proxy says free app proxy data\n"); #endif appr_free(apr); nat->nat_aps = NULL; return -1; } if (tcp != NULL) { err = appr_fixseqack(fin, ip, aps, APR_INC(err)); #if SOLARIS && defined(_KERNEL) && (SOLARIS2 >= 6) if (dosum) tcp->th_sum = fr_tcpsum(fin->fin_qfm, ip, tcp); #else tcp->th_sum = fr_tcpsum(*(mb_t **)fin->fin_mp, ip, tcp); #endif } aps->aps_bytes += ip->ip_len; aps->aps_pkts++; return 1; } return 0; } /* * Search for an proxy by the protocol it is being used with and its name. 
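
The lookup below walks two collections in order.  A condensed
standalone sketch of the same two-pass search over hypothetical types
(the real table is terminated by a zero apr_p, and the reference count
is what lets appr_del() defer teardown while a rule still holds the
proxy):

#include <string.h>

struct proxy {
        struct proxy    *next;
        unsigned int    proto;
        char            label[8];
        int             refcnt;
};

static struct proxy *
proxy_lookup(struct proxy *table, struct proxy *dynlist,
    unsigned int proto, const char *name)
{
        struct proxy *p;

        /* Compiled-in table first, terminated by a zero protocol. */
        for (p = table; p->proto != 0; p++)
                if (p->proto == proto &&
                    strncmp(name, p->label, sizeof(p->label)) == 0) {
                        p->refcnt++;
                        return (p);
                }
        /* Then the dynamically registered list. */
        for (p = dynlist; p != NULL; p = p->next)
                if (p->proto == proto &&
                    strncmp(name, p->label, sizeof(p->label)) == 0) {
                        p->refcnt++;
                        return (p);
                }
        return (NULL);
}
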
*/ aproxy_t *appr_lookup(pr, name) u_int pr; char *name; { aproxy_t *ap; for (ap = ap_proxies; ap->apr_p; ap++) if ((ap->apr_p == pr) && !strncmp(name, ap->apr_label, sizeof(ap->apr_label))) { ap->apr_ref++; return ap; } for (ap = ap_proxylist; ap; ap = ap->apr_next) if ((ap->apr_p == pr) && !strncmp(name, ap->apr_label, sizeof(ap->apr_label))) { ap->apr_ref++; return ap; } return NULL; } void appr_free(ap) aproxy_t *ap; { ap->apr_ref--; } void aps_free(aps) ap_session_t *aps; { ap_session_t *a, **ap; aproxy_t *apr; if (!aps) return; for (ap = &ap_sess_list; (a = *ap); ap = &a->aps_next) if (a == aps) { *ap = a->aps_next; break; } apr = aps->aps_apr; if ((apr != NULL) && (apr->apr_del != NULL)) (*apr->apr_del)(aps); if ((aps->aps_data != NULL) && (aps->aps_psiz != 0)) KFREES(aps->aps_data, aps->aps_psiz); KFREE(aps); } /* * returns 2 if ack or seq number in TCP header is changed, returns 0 otherwise */ static int appr_fixseqack(fin, ip, aps, inc) fr_info_t *fin; ip_t *ip; ap_session_t *aps; int inc; { int sel, ch = 0, out, nlen; u_32_t seq1, seq2; tcphdr_t *tcp; short inc2; tcp = (tcphdr_t *)fin->fin_dp; out = fin->fin_out; /* * ip_len has already been adjusted by 'inc'. */ nlen = ip->ip_len; nlen -= (ip->ip_hl << 2) + (tcp->th_off << 2); inc2 = inc; inc = (int)inc2; if (out != 0) { seq1 = (u_32_t)ntohl(tcp->th_seq); sel = aps->aps_sel[out]; /* switch to other set ? */ if ((aps->aps_seqmin[!sel] > aps->aps_seqmin[sel]) && (seq1 > aps->aps_seqmin[!sel])) { #if PROXY_DEBUG printf("proxy out switch set seq %d -> %d %x > %x\n", sel, !sel, seq1, aps->aps_seqmin[!sel]); #endif sel = aps->aps_sel[out] = !sel; } if (aps->aps_seqoff[sel]) { seq2 = aps->aps_seqmin[sel] - aps->aps_seqoff[sel]; if (seq1 > seq2) { seq2 = aps->aps_seqoff[sel]; seq1 += seq2; tcp->th_seq = htonl(seq1); ch = 1; } } if (inc && (seq1 > aps->aps_seqmin[!sel])) { aps->aps_seqmin[sel] = seq1 + nlen - 1; aps->aps_seqoff[sel] = aps->aps_seqoff[sel] + inc; #if PROXY_DEBUG printf("proxy seq set %d at %x to %d + %d\n", sel, aps->aps_seqmin[sel], aps->aps_seqoff[sel], inc); #endif } /***/ seq1 = ntohl(tcp->th_ack); sel = aps->aps_sel[1 - out]; /* switch to other set ? */ if ((aps->aps_ackmin[!sel] > aps->aps_ackmin[sel]) && (seq1 > aps->aps_ackmin[!sel])) { #if PROXY_DEBUG printf("proxy out switch set ack %d -> %d %x > %x\n", sel, !sel, seq1, aps->aps_ackmin[!sel]); #endif sel = aps->aps_sel[1 - out] = !sel; } if (aps->aps_ackoff[sel] && (seq1 > aps->aps_ackmin[sel])) { seq2 = aps->aps_ackoff[sel]; tcp->th_ack = htonl(seq1 - seq2); ch = 1; } } else { seq1 = ntohl(tcp->th_seq); sel = aps->aps_sel[out]; /* switch to other set ? */ if ((aps->aps_ackmin[!sel] > aps->aps_ackmin[sel]) && (seq1 > aps->aps_ackmin[!sel])) { #if PROXY_DEBUG printf("proxy in switch set ack %d -> %d %x > %x\n", sel, !sel, seq1, aps->aps_ackmin[!sel]); #endif sel = aps->aps_sel[out] = !sel; } if (aps->aps_ackoff[sel]) { seq2 = aps->aps_ackmin[sel] - aps->aps_ackoff[sel]; if (seq1 > seq2) { seq2 = aps->aps_ackoff[sel]; seq1 += seq2; tcp->th_seq = htonl(seq1); ch = 1; } } if (inc && (seq1 > aps->aps_ackmin[!sel])) { aps->aps_ackmin[!sel] = seq1 + nlen - 1; aps->aps_ackoff[!sel] = aps->aps_ackoff[sel] + inc; #if PROXY_DEBUG printf("proxy ack set %d at %x to %d + %d\n", !sel, aps->aps_seqmin[!sel], aps->aps_seqoff[sel], inc); #endif } /***/ seq1 = ntohl(tcp->th_ack); sel = aps->aps_sel[1 - out]; /* switch to other set ? 
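
(The proxy keeps two banks of {seqmin, seqoff} per direction; when a
rewrite changes the stream length again, the new cut point goes into
the other bank, and this test flips between them.)  The core shift is
easier to see in isolation; a standalone sketch over a single bank,
with 32-bit wraparound ignored exactly as the code below ignores it:

#include <stdint.h>

static uint32_t
seq_adjust(uint32_t seq, uint32_t seqmin, uint32_t seqoff)
{
        /*
         * Segments at or beyond the cut point (seqmin - seqoff in
         * pre-rewrite numbering) were sent after the proxy grew or
         * shrank the stream by seqoff bytes and must be shifted;
         * older retransmissions pass through unchanged.
         */
        if (seqoff != 0 && seq > seqmin - seqoff)
                return (seq + seqoff);
        return (seq);
}
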
*/ if ((aps->aps_seqmin[!sel] > aps->aps_seqmin[sel]) && (seq1 > aps->aps_seqmin[!sel])) { #if PROXY_DEBUG printf("proxy in switch set seq %d -> %d %x > %x\n", sel, !sel, seq1, aps->aps_seqmin[!sel]); #endif sel = aps->aps_sel[1 - out] = !sel; } if (aps->aps_seqoff[sel] != 0) { #if PROXY_DEBUG printf("sel %d seqoff %d seq1 %x seqmin %x\n", sel, aps->aps_seqoff[sel], seq1, aps->aps_seqmin[sel]); #endif if (seq1 > aps->aps_seqmin[sel]) { seq2 = aps->aps_seqoff[sel]; tcp->th_ack = htonl(seq1 - seq2); ch = 1; } } } #if PROXY_DEBUG printf("appr_fixseqack: seq %x ack %x\n", ntohl(tcp->th_seq), ntohl(tcp->th_ack)); #endif return ch ? 2 : 0; } /* * Initialise hook for kernel application proxies. * Call the initialise routine for all the compiled in kernel proxies. */ int appr_init() { aproxy_t *ap; int err = 0; for (ap = ap_proxies; ap->apr_p; ap++) { err = (*ap->apr_init)(); if (err != 0) break; } return err; } /* * Unload hook for kernel application proxies. * Call the finialise routine for all the compiled in kernel proxies. */ void appr_unload() { aproxy_t *ap; for (ap = ap_proxies; ap->apr_p; ap++) if (ap->apr_fini) (*ap->apr_fini)(); for (ap = ap_proxylist; ap; ap = ap->apr_next) if (ap->apr_fini) (*ap->apr_fini)(); } Index: head/sys/dev/advansys/advlib.c =================================================================== --- head/sys/dev/advansys/advlib.c (revision 110231) +++ head/sys/dev/advansys/advlib.c (revision 110232) @@ -1,2064 +1,2062 @@ /* * Low level routines for the Advanced Systems Inc. SCSI controllers chips * * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Ported from: * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-1996 Advanced System Products, Inc. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that redistributions of source * code retain the above copyright notice and this comment without * modification. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct adv_quirk_entry { struct scsi_inquiry_pattern inq_pat; u_int8_t quirks; #define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS 0x01 #define ADV_QUIRK_FIX_ASYN_XFER 0x02 }; static struct adv_quirk_entry adv_quirk_table[] = { { { T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" }, ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER }, { { T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" }, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 36", "*" }, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" }, 0 }, { { T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "*", "*", "*" }, 0 }, { { T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "*", "*", "*" }, 0 }, { /* Default quirk entry */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, ADV_QUIRK_FIX_ASYN_XFER, } }; /* * Allowable periods in ns */ static u_int8_t adv_sdtr_period_tbl[] = { 25, 30, 35, 40, 50, 60, 70, 85 }; static u_int8_t adv_sdtr_period_tbl_ultra[] = { 12, 19, 25, 32, 38, 44, 50, 57, 63, 69, 75, 82, 88, 94, 100, 107 }; struct ext_msg { u_int8_t msg_type; u_int8_t msg_len; u_int8_t msg_req; union { struct { u_int8_t sdtr_xfer_period; u_int8_t sdtr_req_ack_offset; } sdtr; struct { u_int8_t wdtr_width; } wdtr; struct { u_int8_t mdp[4]; } mdp; } u_ext_msg; u_int8_t res; }; #define xfer_period u_ext_msg.sdtr.sdtr_xfer_period #define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset #define wdtr_width u_ext_msg.wdtr.wdtr_width #define mdp_b3 u_ext_msg.mdp_b3 #define mdp_b2 u_ext_msg.mdp_b2 #define mdp_b1 u_ext_msg.mdp_b1 #define mdp_b0 u_ext_msg.mdp_b0 /* * Some of the early PCI adapters have problems with * async transfers. Instead use an offset of 1. 
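
The workaround value decodes with the same packing the rest of the
driver uses: period-table index in the high nibble, REQ/ACK offset in
the low nibble, so 0x41 is entry 4 of the table with the offset of 1
mentioned above.  A standalone sketch of the decode (non-Ultra table
copied from above; names are illustrative):

#include <stdint.h>

static const uint8_t period_tbl[] = { 25, 30, 35, 40, 50, 60, 70, 85 };

static void
sdtr_decode(uint8_t sync_data, uint8_t *period, uint8_t *offset)
{
        *period = period_tbl[(sync_data >> 4) & 0xF];   /* high nibble */
        *offset = sync_data & 0xF;                      /* low nibble */
}
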
*/ #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 /* LRAM routines */ static void adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *buffer, int count); static void adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *buffer, int count); static void adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr, u_int16_t set_value, int count); static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count); static int adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value); static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr); static void adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value); static void adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr, u_int32_t *buffer, int count); /* EEPROM routines */ static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr); static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value); static int adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg); static int adv_set_eeprom_config_once(struct adv_softc *adv, struct adv_eeprom_config *eeconfig); /* Initialization */ static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *mcode_buf, u_int16_t mcode_size); static void adv_reinit_lram(struct adv_softc *adv); static void adv_init_lram(struct adv_softc *adv); static int adv_init_microcode_var(struct adv_softc *adv); static void adv_init_qlink_var(struct adv_softc *adv); /* Interrupts */ static void adv_disable_interrupt(struct adv_softc *adv); static void adv_enable_interrupt(struct adv_softc *adv); static void adv_toggle_irq_act(struct adv_softc *adv); /* Chip Control */ static int adv_host_req_chip_halt(struct adv_softc *adv); static void adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code); #if UNUSED static u_int8_t adv_get_chip_scsi_ctrl(struct adv_softc *adv); #endif /* Queue handling and execution */ static __inline int adv_sgcount_to_qcount(int sgcount); static __inline int adv_sgcount_to_qcount(int sgcount) { int n_sg_list_qs; n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q); if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0) n_sg_list_qs++; return (n_sg_list_qs + 1); } static void adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *inbuf, int words); static u_int adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs); static u_int8_t adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head, u_int8_t n_free_q); static u_int8_t adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head); static int adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq, u_int8_t n_q_required); static void adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq, u_int q_no); static void adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq, u_int q_no); static void adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *buffer, int words); /* Messages */ static void adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr, u_int8_t q_cntl, target_bit_vector target_id, int tid); static void adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period, u_int8_t sdtr_offset); static void adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id, u_int8_t sdtr_data); /* Exported functions first */ void advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct adv_softc *adv; adv = (struct adv_softc 
*)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; target_bit_vector target_mask; int num_entries; caddr_t match; struct adv_quirk_entry *entry; struct adv_target_transinfo* tinfo; cgd = (struct ccb_getdev *)arg; target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id); num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table); match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)adv_quirk_table, num_entries, sizeof(*adv_quirk_table), scsi_inquiry_match); if (match == NULL) panic("advasync: device didn't match wildcard entry!!"); entry = (struct adv_quirk_entry *)match; if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) { if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0) adv->fix_asyn_xfer_always |= target_mask; else adv->fix_asyn_xfer_always &= ~target_mask; /* * We start out life with all bits set and clear them * after we've determined that the fix isn't necessary. * It may well be that we've already cleared a target * before the full inquiry session completes, so don't * gratuitously set a target bit even if it has this * quirk. But, if the quirk exonerates a device, clear * the bit now. */ if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0) adv->fix_asyn_xfer &= ~target_mask; } /* * Reset our sync settings now that we've determined * what quirks are in effect for the device. */ tinfo = &adv->tinfo[cgd->ccb_h.target_id]; adv_set_syncrate(adv, cgd->ccb_h.path, cgd->ccb_h.target_id, tinfo->current.period, tinfo->current.offset, ADV_TRANS_CUR); break; } case AC_LOST_DEVICE: { u_int target_mask; if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) { target_mask = 0x01 << xpt_path_target_id(path); adv->fix_asyn_xfer |= target_mask; } /* * Revert to async transfers * for the next device. */ adv_set_syncrate(adv, /*path*/NULL, xpt_path_target_id(path), /*period*/0, /*offset*/0, ADV_TRANS_GOAL|ADV_TRANS_CUR); } default: break; } } void adv_set_bank(struct adv_softc *adv, u_int8_t bank) { u_int8_t control; /* * Start out with the bank reset to 0 */ control = ADV_INB(adv, ADV_CHIP_CTRL) & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG | ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE)); if (bank == 1) { control |= ADV_CC_BANK_ONE; } else if (bank == 2) { control |= ADV_CC_DIAG | ADV_CC_BANK_ONE; } ADV_OUTB(adv, ADV_CHIP_CTRL, control); } u_int8_t adv_read_lram_8(struct adv_softc *adv, u_int16_t addr) { u_int8_t byte_data; u_int16_t word_data; /* * LRAM is accessed on 16bit boundaries. 
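
Byte-wide accesses are therefore synthesized from word accesses.  A
standalone sketch of the select/splice that adv_read_lram_8() and
adv_write_lram_8() perform, shown for the little-endian layout (the
driver flips the halves on big-endian hosts):

#include <stdint.h>

static uint8_t
word_extract_8(uint16_t word, uint16_t addr)
{
        /* Odd addresses live in the high byte of the aligned word. */
        return (((addr & 1) ? (word >> 8) : word) & 0xFF);
}

static uint16_t
word_splice_8(uint16_t word, uint16_t addr, uint8_t value)
{
        if (addr & 1)
                return ((word & 0x00FF) | ((uint16_t)value << 8));
        return ((word & 0xFF00) | value);
}
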
*/ ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE); word_data = ADV_INW(adv, ADV_LRAM_DATA); if (addr & 1) { #if BYTE_ORDER == BIG_ENDIAN byte_data = (u_int8_t)(word_data & 0xFF); #else byte_data = (u_int8_t)((word_data >> 8) & 0xFF); #endif } else { #if BYTE_ORDER == BIG_ENDIAN byte_data = (u_int8_t)((word_data >> 8) & 0xFF); #else byte_data = (u_int8_t)(word_data & 0xFF); #endif } return (byte_data); } void adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value) { u_int16_t word_data; word_data = adv_read_lram_16(adv, addr & 0xFFFE); if (addr & 1) { word_data &= 0x00FF; word_data |= (((u_int8_t)value << 8) & 0xFF00); } else { word_data &= 0xFF00; word_data |= ((u_int8_t)value & 0x00FF); } adv_write_lram_16(adv, addr & 0xFFFE, word_data); } u_int16_t adv_read_lram_16(struct adv_softc *adv, u_int16_t addr) { ADV_OUTW(adv, ADV_LRAM_ADDR, addr); return (ADV_INW(adv, ADV_LRAM_DATA)); } void adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value) { ADV_OUTW(adv, ADV_LRAM_ADDR, addr); ADV_OUTW(adv, ADV_LRAM_DATA, value); } /* * Determine if there is a board at "iobase" by looking * for the AdvanSys signatures. Return 1 if a board is * found, 0 otherwise. */ int adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh) { u_int16_t signature; if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) { signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD); if ((signature == ADV_1000_ID0W) || (signature == ADV_1000_ID0W_FIX)) return (1); } return (0); } void adv_lib_init(struct adv_softc *adv) { if ((adv->type & ADV_ULTRA) != 0) { adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra; adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra); } else { adv->sdtr_period_tbl = adv_sdtr_period_tbl; adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl); } } u_int16_t adv_get_eeprom_config(struct adv_softc *adv, struct adv_eeprom_config *eeprom_config) { u_int16_t sum; u_int16_t *wbuf; u_int8_t cfg_beg; u_int8_t cfg_end; u_int8_t s_addr; wbuf = (u_int16_t *)eeprom_config; sum = 0; for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { *wbuf = adv_read_eeprom_16(adv, s_addr); sum += *wbuf; } if (adv->type & ADV_VL) { cfg_beg = ADV_EEPROM_CFG_BEG_VL; cfg_end = ADV_EEPROM_MAX_ADDR_VL; } else { cfg_beg = ADV_EEPROM_CFG_BEG; cfg_end = ADV_EEPROM_MAX_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { *wbuf = adv_read_eeprom_16(adv, s_addr); sum += *wbuf; #if ADV_DEBUG_EEPROM printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf); #endif } *wbuf = adv_read_eeprom_16(adv, s_addr); return (sum); } int adv_set_eeprom_config(struct adv_softc *adv, struct adv_eeprom_config *eeprom_config) { int retry; retry = 0; while (1) { if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) { break; } if (++retry > ADV_EEPROM_MAX_RETRY) { break; } } return (retry > ADV_EEPROM_MAX_RETRY); } int adv_reset_chip(struct adv_softc *adv, int reset_bus) { adv_stop_chip(adv); ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT | (reset_bus ? 
ADV_CC_SCSI_RESET : 0)); DELAY(60); adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM); adv_set_chip_ih(adv, ADV_INS_HALT); if (reset_bus) ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT); ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT); if (reset_bus) DELAY(200 * 1000); ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT); ADV_OUTW(adv, ADV_CHIP_STATUS, 0); return (adv_is_chip_halted(adv)); } int adv_test_external_lram(struct adv_softc* adv) { u_int16_t q_addr; u_int16_t saved_value; int success; success = 0; q_addr = ADV_QNO_TO_QADDR(241); saved_value = adv_read_lram_16(adv, q_addr); if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) { success = 1; adv_write_lram_16(adv, q_addr, saved_value); } return (success); } int adv_init_lram_and_mcode(struct adv_softc *adv) { u_int32_t retval; adv_disable_interrupt(adv); adv_init_lram(adv); retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode, adv_mcode_size); if (retval != adv_mcode_chksum) { printf("adv%d: Microcode download failed checksum!\n", adv->unit); return (1); } if (adv_init_microcode_var(adv) != 0) return (1); adv_enable_interrupt(adv); return (0); } u_int8_t adv_get_chip_irq(struct adv_softc *adv) { u_int16_t cfg_lsw; u_int8_t chip_irq; cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW); if ((adv->type & ADV_VL) != 0) { chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07)); if ((chip_irq == 0) || (chip_irq == 4) || (chip_irq == 7)) { return (0); } return (chip_irq + (ADV_MIN_IRQ_NO - 1)); } chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03)); if (chip_irq == 3) chip_irq += 2; return (chip_irq + ADV_MIN_IRQ_NO); } u_int8_t adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no) { u_int16_t cfg_lsw; if ((adv->type & ADV_VL) != 0) { if (irq_no != 0) { if ((irq_no < ADV_MIN_IRQ_NO) || (irq_no > ADV_MAX_IRQ_NO)) { irq_no = 0; } else { irq_no -= ADV_MIN_IRQ_NO - 1; } } cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3; cfg_lsw |= 0x0010; ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw); adv_toggle_irq_act(adv); cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0; cfg_lsw |= (irq_no & 0x07) << 2; ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw); adv_toggle_irq_act(adv); } else if ((adv->type & ADV_ISA) != 0) { if (irq_no == 15) irq_no -= 2; irq_no -= ADV_MIN_IRQ_NO; cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3; cfg_lsw |= (irq_no & 0x03) << 2; ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw); } return (adv_get_chip_irq(adv)); } void adv_set_chip_scsiid(struct adv_softc *adv, int new_id) { u_int16_t cfg_lsw; cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW); if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id) return; cfg_lsw &= ~ADV_CFG_LSW_SCSIID; cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT; ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw); } int adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq, u_int32_t datalen) { struct adv_target_transinfo* tinfo; u_int32_t *p_data_addr; u_int32_t *p_data_bcount; int disable_syn_offset_one_fix; int retval; u_int n_q_required; u_int32_t addr; u_int8_t sg_entry_cnt; u_int8_t target_ix; u_int8_t sg_entry_cnt_minus_one; u_int8_t tid_no; scsiq->q1.q_no = 0; retval = 1; /* Default to error case */ target_ix = scsiq->q2.target_ix; tid_no = ADV_TIX_TO_TID(target_ix); tinfo = &adv->tinfo[tid_no]; if (scsiq->cdbptr[0] == REQUEST_SENSE) { /* Renegotiate if appropriate. 
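
(A REQUEST SENSE usually follows an error, after which the target's
negotiated state can no longer be trusted, so the driver drops it to
async and re-offers its goal rate along with the outgoing command.)
The decision in miniature, over hypothetical per-target state:

#include <stdint.h>

struct xfer { uint8_t period, offset; };

static struct xfer      cur[16], goal[16];      /* stand-ins for adv->tinfo */
static int              sdtr_pending[16];

static void
renegotiate_for_sense(int tid)
{
        /* Force a known-safe async state first. */
        cur[tid].period = 0;
        cur[tid].offset = 0;
        /* Re-offer sync with the command if our goal still wants it. */
        if (goal[tid].period != cur[tid].period)
                sdtr_pending[tid] = 1;
}
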
*/ adv_set_syncrate(adv, /*struct cam_path */NULL, tid_no, /*period*/0, /*offset*/0, ADV_TRANS_CUR); if (tinfo->current.period != tinfo->goal.period) { adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset); scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT); } } if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) { sg_entry_cnt = scsiq->sg_head->entry_cnt; sg_entry_cnt_minus_one = sg_entry_cnt - 1; #ifdef DIAGNOSTIC if (sg_entry_cnt <= 1) panic("adv_execute_scsi_queue: Queue " "with QC_SG_HEAD set but %d segs.", sg_entry_cnt); if (sg_entry_cnt > ADV_MAX_SG_LIST) panic("adv_execute_scsi_queue: " "Queue with too many segs."); if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) { int i; for (i = 0; i < sg_entry_cnt_minus_one; i++) { addr = scsiq->sg_head->sg_list[i].addr + scsiq->sg_head->sg_list[i].bytes; if ((addr & 0x0003) != 0) panic("adv_execute_scsi_queue: SG " "with odd address or byte count"); } } #endif p_data_addr = &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr; p_data_bcount = &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes; n_q_required = adv_sgcount_to_qcount(sg_entry_cnt); scsiq->sg_head->queue_cnt = n_q_required - 1; } else { p_data_addr = &scsiq->q1.data_addr; p_data_bcount = &scsiq->q1.data_cnt; n_q_required = 1; } disable_syn_offset_one_fix = FALSE; if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) { if (datalen != 0) { if (datalen < 512) { disable_syn_offset_one_fix = TRUE; } else { if (scsiq->cdbptr[0] == INQUIRY || scsiq->cdbptr[0] == REQUEST_SENSE || scsiq->cdbptr[0] == READ_CAPACITY || scsiq->cdbptr[0] == MODE_SELECT_6 || scsiq->cdbptr[0] == MODE_SENSE_6 || scsiq->cdbptr[0] == MODE_SENSE_10 || scsiq->cdbptr[0] == MODE_SELECT_10 || scsiq->cdbptr[0] == READ_TOC) { disable_syn_offset_one_fix = TRUE; } } } } if (disable_syn_offset_one_fix) { scsiq->q2.tag_code &= ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG); scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX | ADV_TAG_FLAG_DISABLE_DISCONNECT); } if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) { u_int8_t extra_bytes; addr = *p_data_addr + *p_data_bcount; extra_bytes = addr & 0x0003; if (extra_bytes != 0 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0 || (scsiq->q1.data_cnt & 0x01FF) == 0)) { scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES; scsiq->q1.extra_bytes = extra_bytes; *p_data_bcount -= extra_bytes; } } if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required) || ((scsiq->q1.cntl & QC_URGENT) != 0)) retval = adv_send_scsi_queue(adv, scsiq, n_q_required); return (retval); } u_int8_t adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr, struct adv_q_done_info *scsiq, u_int32_t max_dma_count) { u_int16_t val; u_int8_t sg_queue_cnt; adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG, (u_int16_t *)scsiq, (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2); #if BYTE_ORDER == BIG_ENDIAN adv_adj_endian_qdone_info(scsiq); #endif val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS); scsiq->q_status = val & 0xFF; scsiq->q_no = (val >> 8) & 0XFF; val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL); scsiq->cntl = val & 0xFF; sg_queue_cnt = (val >> 8) & 0xFF; val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN); scsiq->sense_len = val & 0xFF; scsiq->extra_bytes = (val >> 8) & 0xFF; /* * Due to a bug in accessing LRAM on the 940UA, the residual * is split into separate high and low 16bit quantities. 
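
A standalone sketch of the reassembly done just below; note that the
final mask only acts as a clamp when max_dma_count is a power of two
minus one, which is what the XXX below is asking about:

#include <stdint.h>

static uint32_t
residual_join(uint16_t lo, uint16_t hi, uint32_t max_dma_count)
{
        uint32_t remain;

        remain = (uint32_t)lo | ((uint32_t)hi << 16);
        return (remain & max_dma_count);        /* strip bogus upper bits */
}
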
*/ scsiq->remain_bytes = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT); scsiq->remain_bytes |= adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16; /* * XXX Is this just a safeguard or will the counter really * have bogus upper bits? */ scsiq->remain_bytes &= max_dma_count; return (sg_queue_cnt); } int adv_start_chip(struct adv_softc *adv) { ADV_OUTB(adv, ADV_CHIP_CTRL, 0); if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) return (0); return (1); } int adv_stop_execution(struct adv_softc *adv) { int count; count = 0; if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) { adv_write_lram_8(adv, ADV_STOP_CODE_B, ADV_STOP_REQ_RISC_STOP); do { if (adv_read_lram_8(adv, ADV_STOP_CODE_B) & ADV_STOP_ACK_RISC_STOP) { return (1); } DELAY(1000); } while (count++ < 20); } return (0); } int adv_is_chip_halted(struct adv_softc *adv) { if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) { if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) { return (1); } } return (0); } /* * XXX The numeric constants and the loops in this routine * need to be documented. */ void adv_ack_interrupt(struct adv_softc *adv) { u_int8_t host_flag; u_int8_t risc_flag; int loop; loop = 0; do { risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B); if (loop++ > 0x7FFF) { break; } } while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0); host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B); adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag | ADV_HOST_FLAG_ACK_INT); ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK); loop = 0; while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) { ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK); if (loop++ > 3) { break; } } adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag); } /* * Handle all conditions that may halt the chip waiting * for us to intervene. */ void adv_isr_chip_halted(struct adv_softc *adv) { u_int16_t int_halt_code; u_int16_t halt_q_addr; target_bit_vector target_mask; target_bit_vector scsi_busy; u_int8_t halt_qp; u_int8_t target_ix; u_int8_t q_cntl; u_int8_t tid_no; int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W); halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B); halt_q_addr = ADV_QNO_TO_QADDR(halt_qp); target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX); q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL); tid_no = ADV_TIX_TO_TID(target_ix); target_mask = ADV_TID_TO_TARGET_MASK(tid_no); if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) { /* * Temporarily disable the async fix by removing * this target from the list of affected targets, * setting our async rate, and then putting us * back into the mask. */ adv->fix_asyn_xfer &= ~target_mask; adv_set_syncrate(adv, /*struct cam_path */NULL, tid_no, /*period*/0, /*offset*/0, ADV_TRANS_ACTIVE); adv->fix_asyn_xfer |= target_mask; } else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) { adv_set_syncrate(adv, /*struct cam_path */NULL, tid_no, /*period*/0, /*offset*/0, ADV_TRANS_ACTIVE); } else if (int_halt_code == ADV_HALT_EXTMSG_IN) { adv_handle_extmsg_in(adv, halt_q_addr, q_cntl, target_mask, tid_no); } else if (int_halt_code == ADV_HALT_CHK_CONDITION) { struct adv_target_transinfo* tinfo; union ccb *ccb; u_int32_t cinfo_index; u_int8_t tag_code; u_int8_t q_status; tinfo = &adv->tinfo[tid_no]; q_cntl |= QC_REQ_SENSE; /* Renegotiate if appropriate. 
*/ adv_set_syncrate(adv, /*struct cam_path */NULL, tid_no, /*period*/0, /*offset*/0, ADV_TRANS_CUR); if (tinfo->current.period != tinfo->goal.period) { adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset); q_cntl |= QC_MSG_OUT; } adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl); /* Don't tag request sense commands */ tag_code = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE); tag_code &= ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG); if ((adv->fix_asyn_xfer & target_mask) != 0 && (adv->fix_asyn_xfer_always & target_mask) == 0) { tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX); } adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE, tag_code); q_status = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS); q_status |= (QS_READY | QS_BUSY); adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS, q_status); /* * Freeze the devq until we can handle the sense condition. */ cinfo_index = adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX); ccb = adv->ccb_infos[cinfo_index].ccb; xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix), /*ccb*/NULL, CAM_REQUEUE_REQ, /*queued_only*/TRUE); scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B); scsi_busy &= ~target_mask; adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy); /* * Ensure we have enough time to actually * retrieve the sense. */ untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch); ccb->ccb_h.timeout_ch = timeout(adv_timeout, (caddr_t)ccb, 5 * hz); } else if (int_halt_code == ADV_HALT_SDTR_REJECTED) { struct ext_msg out_msg; adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG, (u_int16_t *) &out_msg, sizeof(out_msg)/2); if ((out_msg.msg_type == MSG_EXTENDED) && (out_msg.msg_len == MSG_EXT_SDTR_LEN) && (out_msg.msg_req == MSG_EXT_SDTR)) { /* Revert to Async */ adv_set_syncrate(adv, /*struct cam_path */NULL, tid_no, /*period*/0, /*offset*/0, ADV_TRANS_GOAL|ADV_TRANS_ACTIVE); } q_cntl &= ~QC_MSG_OUT; adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl); } else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) { u_int8_t scsi_status; union ccb *ccb; u_int32_t cinfo_index; scsi_status = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_SCSI_STATUS); cinfo_index = adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX); ccb = adv->ccb_infos[cinfo_index].ccb; xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL; adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix), /*ccb*/NULL, CAM_REQUEUE_REQ, /*queued_only*/TRUE); scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B); scsi_busy &= ~target_mask; adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy); } else { printf("Unhandled Halt Code %x\n", int_halt_code); } adv_write_lram_16(adv, ADVV_HALTCODE_W, 0); } void adv_sdtr_to_period_offset(struct adv_softc *adv, u_int8_t sync_data, u_int8_t *period, u_int8_t *offset, int tid) { if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid) && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) { *period = *offset = 0; } else { *period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)]; *offset = sync_data & 0xF; } } void adv_set_syncrate(struct adv_softc *adv, struct cam_path *path, u_int tid, u_int period, u_int offset, u_int type) { struct adv_target_transinfo* tinfo; u_int old_period; u_int old_offset; u_int8_t sdtr_data; tinfo = &adv->tinfo[tid]; /* Filter our input */ sdtr_data = adv_period_offset_to_sdtr(adv, 
&period, &offset, tid); old_period = tinfo->current.period; old_offset = tinfo->current.offset; if ((type & ADV_TRANS_CUR) != 0 && ((old_period != period || old_offset != offset) || period == 0 || offset == 0) /*Changes in asyn fix settings*/) { int s; int halted; s = splcam(); halted = adv_is_chip_halted(adv); if (halted == 0) /* Must halt the chip first */ adv_host_req_chip_halt(adv); /* Update current hardware settings */ adv_set_sdtr_reg_at_id(adv, tid, sdtr_data); /* * If a target can run in sync mode, we don't need * to check it for sync problems. */ if (offset != 0) adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid); if (halted == 0) /* Start the chip again */ adv_start_chip(adv); splx(s); tinfo->current.period = period; tinfo->current.offset = offset; if (path != NULL) { /* * Tell the SCSI layer about the * new transfer parameters. */ struct ccb_trans_settings neg; neg.sync_period = period; neg.sync_offset = offset; neg.valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID; xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, path, &neg); } } if ((type & ADV_TRANS_GOAL) != 0) { tinfo->goal.period = period; tinfo->goal.offset = offset; } if ((type & ADV_TRANS_USER) != 0) { tinfo->user.period = period; tinfo->user.offset = offset; } } u_int8_t adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period, u_int *offset, int tid) { u_int i; u_int dummy_offset; u_int dummy_period; if (offset == NULL) { dummy_offset = 0; offset = &dummy_offset; } if (period == NULL) { dummy_period = 0; period = &dummy_period; } -#define MIN(a,b) (((a) < (b)) ? (a) : (b)) - *offset = MIN(ADV_SYN_MAX_OFFSET, *offset); if (*period != 0 && *offset != 0) { for (i = 0; i < adv->sdtr_period_tbl_size; i++) { if (*period <= adv->sdtr_period_tbl[i]) { /* * When responding to a target that requests * sync, the requested rate may fall between * two rates that we can output, but still be * a rate that we can receive. Because of this, * we want to respond to the target with * the same rate that it sent to us even * if the period we use to send data to it * is lower. Only lower the response period * if we must. 
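
The encoding itself is compact: clamp the offset, find the first table
entry at least as slow as the requested period, and pack
(index << 4) | offset, with zero meaning async.  A condensed
standalone version (non-Ultra table; SYN_MAX_OFFSET is an assumed
stand-in for ADV_SYN_MAX_OFFSET):

#include <stdint.h>

static const uint8_t period_tbl[] = { 25, 30, 35, 40, 50, 60, 70, 85 };
#define SYN_MAX_OFFSET  0x0F

static uint8_t
period_offset_to_sdtr(unsigned int *period, unsigned int *offset)
{
        unsigned int i;

        if (*offset > SYN_MAX_OFFSET)
                *offset = SYN_MAX_OFFSET;
        if (*period != 0 && *offset != 0)
                for (i = 0; i < sizeof(period_tbl); i++)
                        if (*period <= period_tbl[i]) {
                                if (i == 0)     /* our maximum rate */
                                        *period = period_tbl[0];
                                return ((i << 4) | *offset);
                        }
        *period = 0;            /* must go async */
        *offset = 0;
        return (0);
}
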
*/ if (i == 0 /* Our maximum rate */) *period = adv->sdtr_period_tbl[0]; return ((i << 4) | *offset); } } } /* Must go async */ *period = 0; *offset = 0; if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)) return (ASYN_SDTR_DATA_FIX_PCI_REV_AB); return (0); } /* Internal Routines */ static void adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *buffer, int count) { ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr); ADV_INSW(adv, ADV_LRAM_DATA, buffer, count); } static void adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *buffer, int count) { ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr); ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count); } static void adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr, u_int16_t set_value, int count) { ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr); bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA, set_value, count); } static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count) { u_int32_t sum; int i; sum = 0; ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr); for (i = 0; i < count; i++) sum += ADV_INW(adv, ADV_LRAM_DATA); return (sum); } static int adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value) { int retval; retval = 0; ADV_OUTW(adv, ADV_LRAM_ADDR, addr); ADV_OUTW(adv, ADV_LRAM_DATA, value); DELAY(10000); ADV_OUTW(adv, ADV_LRAM_ADDR, addr); if (value != ADV_INW(adv, ADV_LRAM_DATA)) retval = 1; return (retval); } static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr) { u_int16_t val_low, val_high; ADV_OUTW(adv, ADV_LRAM_ADDR, addr); #if BYTE_ORDER == BIG_ENDIAN val_high = ADV_INW(adv, ADV_LRAM_DATA); val_low = ADV_INW(adv, ADV_LRAM_DATA); #else val_low = ADV_INW(adv, ADV_LRAM_DATA); val_high = ADV_INW(adv, ADV_LRAM_DATA); #endif return (((u_int32_t)val_high << 16) | (u_int32_t)val_low); } static void adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value) { ADV_OUTW(adv, ADV_LRAM_ADDR, addr); #if BYTE_ORDER == BIG_ENDIAN ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF)); ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF)); #else ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF)); ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF)); #endif } static void adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr, u_int32_t *buffer, int count) { ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr); ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2); } static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr) { u_int16_t read_wval; u_int8_t cmd_reg; adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE); DELAY(1000); cmd_reg = addr | ADV_EEPROM_CMD_READ; adv_write_eeprom_cmd_reg(adv, cmd_reg); DELAY(1000); read_wval = ADV_INW(adv, ADV_EEPROM_DATA); DELAY(1000); return (read_wval); } static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value) { u_int16_t read_value; read_value = adv_read_eeprom_16(adv, addr); if (read_value != value) { adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE); DELAY(1000); ADV_OUTW(adv, ADV_EEPROM_DATA, value); DELAY(1000); adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr); DELAY(20 * 1000); adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE); DELAY(1000); read_value = adv_read_eeprom_16(adv, addr); } return (read_value); } static int adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg) { u_int8_t read_back; int retry; retry = 0; while (1) { ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg); 
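
/*
 * The loop this sits in is a plain write/settle/read-back verify.
 * Extracted as a standalone sketch; the fake register file and the
 * helpers are stand-ins for ADV_OUTB/ADV_INB/DELAY, and the constants
 * are illustrative only.
 */

#include <stdint.h>

#define EEPROM_CMD_REG          0x07    /* assumed register index */
#define EEPROM_MAX_RETRY        20      /* stand-in for ADV_EEPROM_MAX_RETRY */

static uint8_t regs[256];               /* fake device for the sketch */

static void     reg_write(unsigned int r, uint8_t v) { regs[r] = v; }
static uint8_t  reg_read(unsigned int r) { return (regs[r]); }
static void     settle(void) { /* DELAY(1000) in the driver */ }

static int
eeprom_cmd_write_verified(uint8_t cmd)
{
        int retry;

        for (retry = 0; retry <= EEPROM_MAX_RETRY; retry++) {
                reg_write(EEPROM_CMD_REG, cmd);
                settle();
                if (reg_read(EEPROM_CMD_REG) == cmd)
                        return (1);     /* command latched */
        }
        return (0);
}
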
DELAY(1000); read_back = ADV_INB(adv, ADV_EEPROM_CMD); if (read_back == cmd_reg) { return (1); } if (retry++ > ADV_EEPROM_MAX_RETRY) { return (0); } } } static int adv_set_eeprom_config_once(struct adv_softc *adv, struct adv_eeprom_config *eeprom_config) { int n_error; u_int16_t *wbuf; u_int16_t sum; u_int8_t s_addr; u_int8_t cfg_beg; u_int8_t cfg_end; wbuf = (u_int16_t *)eeprom_config; n_error = 0; sum = 0; for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { sum += *wbuf; if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) { n_error++; } } if (adv->type & ADV_VL) { cfg_beg = ADV_EEPROM_CFG_BEG_VL; cfg_end = ADV_EEPROM_MAX_ADDR_VL; } else { cfg_beg = ADV_EEPROM_CFG_BEG; cfg_end = ADV_EEPROM_MAX_ADDR; } for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) { sum += *wbuf; if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) { n_error++; } } *wbuf = sum; if (sum != adv_write_eeprom_16(adv, s_addr, sum)) { n_error++; } wbuf = (u_int16_t *)eeprom_config; for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) { if (*wbuf != adv_read_eeprom_16(adv, s_addr)) { n_error++; } } for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) { if (*wbuf != adv_read_eeprom_16(adv, s_addr)) { n_error++; } } return (n_error); } static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *mcode_buf, u_int16_t mcode_size) { u_int32_t chksum; u_int16_t mcode_lram_size; u_int16_t mcode_chksum; mcode_lram_size = mcode_size >> 1; /* XXX Why zero the memory just before you write the whole thing?? */ adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size); adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size); chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size); mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG, ((mcode_size - s_addr - ADV_CODE_SEC_BEG) >> 1)); adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum); adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size); return (chksum); } static void adv_reinit_lram(struct adv_softc *adv) { adv_init_lram(adv); adv_init_qlink_var(adv); } static void adv_init_lram(struct adv_softc *adv) { u_int8_t i; u_int16_t s_addr; adv_mset_lram_16(adv, ADV_QADR_BEG, 0, (((adv->max_openings + 2 + 1) * 64) >> 1)); i = ADV_MIN_ACTIVE_QNO; s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE; adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1); adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings); adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i); i++; s_addr += ADV_QBLK_SIZE; for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) { adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1); adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1); adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i); } adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END); adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1); adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings); i++; s_addr += ADV_QBLK_SIZE; for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) { adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i); adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i); adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i); } } static int adv_init_microcode_var(struct adv_softc *adv) { int i; for (i = 0; i <= ADV_MAX_TID; i++) { /* Start out async all around */ adv_set_syncrate(adv, /*path*/NULL, i, 0, 0, ADV_TRANS_GOAL|ADV_TRANS_CUR); } adv_init_qlink_var(adv); adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable); adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id); 
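
/*
 * For reference, the checksum that validated the microcode download
 * above is a plain 32-bit sum of 16-bit words, re-read from LRAM
 * after the write.  Standalone sketch over an ordinary buffer:
 */

#include <stddef.h>
#include <stdint.h>

static uint32_t
word_sum(const uint16_t *buf, size_t nwords)
{
        uint32_t sum;
        size_t i;

        for (sum = 0, i = 0; i < nwords; i++)
                sum += buf[i];
        return (sum);
}
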
adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase); adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE); ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR); if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) { printf("adv%d: Unable to set program counter. Aborting.\n", adv->unit); return (1); } return (0); } static void adv_init_qlink_var(struct adv_softc *adv) { int i; u_int16_t lram_addr; adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1); adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings); adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1); adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings); adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B, (u_int8_t)((int) adv->max_openings + 1)); adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B, (u_int8_t)((int) adv->max_openings + 2)); adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings); adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0); adv_write_lram_16(adv, ADVV_HALTCODE_W, 0); adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0); adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0); adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0); adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0); lram_addr = ADV_QADR_BEG; for (i = 0; i < 32; i++, lram_addr += 2) adv_write_lram_16(adv, lram_addr, 0); } static void adv_disable_interrupt(struct adv_softc *adv) { u_int16_t cfg; cfg = ADV_INW(adv, ADV_CONFIG_LSW); ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON); } static void adv_enable_interrupt(struct adv_softc *adv) { u_int16_t cfg; cfg = ADV_INW(adv, ADV_CONFIG_LSW); ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON); } static void adv_toggle_irq_act(struct adv_softc *adv) { ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT); ADV_OUTW(adv, ADV_CHIP_STATUS, 0); } void adv_start_execution(struct adv_softc *adv) { if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) { adv_write_lram_8(adv, ADV_STOP_CODE_B, 0); } } int adv_stop_chip(struct adv_softc *adv) { u_int8_t cc_val; cc_val = ADV_INB(adv, ADV_CHIP_CTRL) & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG)); ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT); adv_set_chip_ih(adv, ADV_INS_HALT); adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM); if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) { return (0); } return (1); } static int adv_host_req_chip_halt(struct adv_softc *adv) { int count; u_int8_t saved_stop_code; if (adv_is_chip_halted(adv)) return (1); count = 0; saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B); adv_write_lram_8(adv, ADVV_STOP_CODE_B, ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP); while (adv_is_chip_halted(adv) == 0 && count++ < 2000) ; adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code); return (count < 2000); } static void adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code) { adv_set_bank(adv, 1); ADV_OUTW(adv, ADV_REG_IH, ins_code); adv_set_bank(adv, 0); } #if UNUSED static u_int8_t adv_get_chip_scsi_ctrl(struct adv_softc *adv) { u_int8_t scsi_ctrl; adv_set_bank(adv, 1); scsi_ctrl = ADV_INB(adv, ADV_REG_SC); adv_set_bank(adv, 0); return (scsi_ctrl); } #endif /* * XXX Looks like more padding issues in this routine as well. * There has to be a way to turn this into an insw. 
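
The skipped word is the key detail: word 5 of the destination appears
to be compiler padding in the host structure rather than device data,
so the copy advances the output pointer there without consuming a
device word.  Standalone sketch of the same copy over a plain source
array standing in for the auto-incrementing LRAM data port:

#include <stdint.h>

static void
copy_skip_pad(const uint16_t *dev_words, uint16_t *dst, int words)
{
        int i, k;

        for (i = 0, k = 0; i < words; i++, dst++) {
                if (i == 5)
                        continue;       /* padding hole in the host struct */
                *dst = dev_words[k++];
        }
}
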
*/ static void adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *inbuf, int words) { int i; ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr); for (i = 0; i < words; i++, inbuf++) { if (i == 5) { continue; } *inbuf = ADV_INW(adv, ADV_LRAM_DATA); } } static u_int adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs) { u_int cur_used_qs; u_int cur_free_qs; cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q; if ((cur_used_qs + n_qs) <= adv->max_openings) { cur_free_qs = adv->max_openings - cur_used_qs; return (cur_free_qs); } adv->openings_needed = n_qs; return (0); } static u_int8_t adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head, u_int8_t n_free_q) { int i; for (i = 0; i < n_free_q; i++) { free_q_head = adv_alloc_free_queue(adv, free_q_head); if (free_q_head == ADV_QLINK_END) break; } return (free_q_head); } static u_int8_t adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head) { u_int16_t q_addr; u_int8_t next_qp; u_int8_t q_status; next_qp = ADV_QLINK_END; q_addr = ADV_QNO_TO_QADDR(free_q_head); q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS); if ((q_status & QS_READY) == 0) next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD); return (next_qp); } static int adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq, u_int8_t n_q_required) { u_int8_t free_q_head; u_int8_t next_qp; u_int8_t tid_no; u_int8_t target_ix; int retval; retval = 1; target_ix = scsiq->q2.target_ix; tid_no = ADV_TIX_TO_TID(target_ix); free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF; if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required)) != ADV_QLINK_END) { scsiq->q1.q_no = free_q_head; /* * Now that we know our Q number, point our sense * buffer pointer to a bus dma mapped area where * we can dma the data to. 
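
The arithmetic on the next line carves that area into one slot per
queue; queue numbers start at 1, so queue N's buffer lives at
base + (N - 1) * sizeof(slot).  As a sketch (the slot size here is
assumed for illustration, not the real sizeof(struct
scsi_sense_data)):

#include <stdint.h>

#define SENSE_SLOT_SIZE 32u     /* assumed slot size for the sketch */

static uint32_t
sense_paddr(uint32_t sense_physbase, unsigned int q_no)
{
        return (sense_physbase + (q_no - 1) * SENSE_SLOT_SIZE);
}
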
*/ scsiq->q1.sense_addr = adv->sense_physbase + ((free_q_head - 1) * sizeof(struct scsi_sense_data)); adv_put_ready_sg_list_queue(adv, scsiq, free_q_head); adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp); adv->cur_active += n_q_required; retval = 0; } return (retval); } static void adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq, u_int q_no) { u_int8_t sg_list_dwords; u_int8_t sg_index, i; u_int8_t sg_entry_cnt; u_int8_t next_qp; u_int16_t q_addr; struct adv_sg_head *sg_head; struct adv_sg_list_q scsi_sg_q; sg_head = scsiq->sg_head; if (sg_head) { sg_entry_cnt = sg_head->entry_cnt - 1; #ifdef DIAGNOSTIC if (sg_entry_cnt == 0) panic("adv_put_ready_sg_list_queue: ScsiQ with " "a SG list but only one element"); if ((scsiq->q1.cntl & QC_SG_HEAD) == 0) panic("adv_put_ready_sg_list_queue: ScsiQ with " "a SG list but QC_SG_HEAD not set"); #endif q_addr = ADV_QNO_TO_QADDR(q_no); sg_index = 1; scsiq->q1.sg_queue_cnt = sg_head->queue_cnt; scsi_sg_q.sg_head_qp = q_no; scsi_sg_q.cntl = QCSG_SG_XFER_LIST; for (i = 0; i < sg_head->queue_cnt; i++) { u_int8_t segs_this_q; if (sg_entry_cnt > ADV_SG_LIST_PER_Q) segs_this_q = ADV_SG_LIST_PER_Q; else { /* This will be the last segment then */ segs_this_q = sg_entry_cnt; scsi_sg_q.cntl |= QCSG_SG_XFER_END; } scsi_sg_q.seq_no = i + 1; sg_list_dwords = segs_this_q << 1; if (i == 0) { scsi_sg_q.sg_list_cnt = segs_this_q; scsi_sg_q.sg_cur_list_cnt = segs_this_q; } else { scsi_sg_q.sg_list_cnt = segs_this_q - 1; scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1; } next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD); scsi_sg_q.q_no = next_qp; q_addr = ADV_QNO_TO_QADDR(next_qp); adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_SGHD_CPY_BEG, (u_int16_t *)&scsi_sg_q, sizeof(scsi_sg_q) >> 1); adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG, (u_int32_t *)&sg_head->sg_list[sg_index], sg_list_dwords); sg_entry_cnt -= segs_this_q; sg_index += ADV_SG_LIST_PER_Q; } } adv_put_ready_queue(adv, scsiq, q_no); } static void adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq, u_int q_no) { struct adv_target_transinfo* tinfo; u_int q_addr; u_int tid_no; tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix); tinfo = &adv->tinfo[tid_no]; if ((tinfo->current.period != tinfo->goal.period) || (tinfo->current.offset != tinfo->goal.offset)) { adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset); scsiq->q1.cntl |= QC_MSG_OUT; } q_addr = ADV_QNO_TO_QADDR(q_no); scsiq->q1.status = QS_FREE; adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG, (u_int16_t *)scsiq->cdbptr, scsiq->q2.cdb_len >> 1); #if BYTE_ORDER == BIG_ENDIAN adv_adj_scsiq_endian(scsiq); #endif adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG, (u_int16_t *) &scsiq->q1.cntl, ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1); #if CC_WRITE_IO_COUNT adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT, adv->req_count); #endif #if CC_CLEAR_DMA_REMAIN adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0); adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0); #endif adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS, (scsiq->q1.q_no << 8) | QS_READY); } static void adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr, u_int16_t *buffer, int words) { int i; /* * XXX This routine makes *gross* assumptions * about padding in the data structures. * Either the data structures should have explicit * padding members added, or they should have padding * turned off via compiler attributes depending on * which yields better overall performance. 
My hunch * would be that turning off padding would be the * faster approach as an outsw is much faster than * this crude loop and accessing un-aligned data * members isn't *that* expensive. The other choice * would be to modify the ASC script so that the * adv_scsiq_1 structure can be re-arranged so * padding isn't required. */ ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr); for (i = 0; i < words; i++, buffer++) { if (i == 2 || i == 10) { continue; } ADV_OUTW(adv, ADV_LRAM_DATA, *buffer); } } static void adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr, u_int8_t q_cntl, target_bit_vector target_mask, int tid_no) { struct ext_msg ext_msg; adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg, sizeof(ext_msg) >> 1); if ((ext_msg.msg_type == MSG_EXTENDED) && (ext_msg.msg_req == MSG_EXT_SDTR) && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) { union ccb *ccb; struct adv_target_transinfo* tinfo; u_int32_t cinfo_index; u_int period; u_int offset; int sdtr_accept; u_int8_t orig_offset; cinfo_index = adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX); ccb = adv->ccb_infos[cinfo_index].ccb; tinfo = &adv->tinfo[tid_no]; sdtr_accept = TRUE; orig_offset = ext_msg.req_ack_offset; if (ext_msg.xfer_period < tinfo->goal.period) { sdtr_accept = FALSE; ext_msg.xfer_period = tinfo->goal.period; } /* Perform range checking */ period = ext_msg.xfer_period; offset = ext_msg.req_ack_offset; adv_period_offset_to_sdtr(adv, &period, &offset, tid_no); ext_msg.xfer_period = period; ext_msg.req_ack_offset = offset; /* Record our current sync settings */ adv_set_syncrate(adv, ccb->ccb_h.path, tid_no, ext_msg.xfer_period, ext_msg.req_ack_offset, ADV_TRANS_GOAL|ADV_TRANS_ACTIVE); /* Offset too high or large period forced async */ if (orig_offset != ext_msg.req_ack_offset) sdtr_accept = FALSE; if (sdtr_accept && (q_cntl & QC_MSG_OUT)) { /* Valid response to our requested negotiation */ q_cntl &= ~QC_MSG_OUT; } else { /* Must Respond */ q_cntl |= QC_MSG_OUT; adv_msgout_sdtr(adv, ext_msg.xfer_period, ext_msg.req_ack_offset); } } else if (ext_msg.msg_type == MSG_EXTENDED && ext_msg.msg_req == MSG_EXT_WDTR && ext_msg.msg_len == MSG_EXT_WDTR_LEN) { ext_msg.wdtr_width = 0; adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG, (u_int16_t *)&ext_msg, sizeof(ext_msg) >> 1); q_cntl |= QC_MSG_OUT; } else { ext_msg.msg_type = MSG_MESSAGE_REJECT; adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG, (u_int16_t *)&ext_msg, sizeof(ext_msg) >> 1); q_cntl |= QC_MSG_OUT; } adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl); } static void adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period, u_int8_t sdtr_offset) { struct ext_msg sdtr_buf; sdtr_buf.msg_type = MSG_EXTENDED; sdtr_buf.msg_len = MSG_EXT_SDTR_LEN; sdtr_buf.msg_req = MSG_EXT_SDTR; sdtr_buf.xfer_period = sdtr_period; sdtr_offset &= ADV_SYN_MAX_OFFSET; sdtr_buf.req_ack_offset = sdtr_offset; adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG, (u_int16_t *) &sdtr_buf, sizeof(sdtr_buf) / 2); } int adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb, u_int32_t status, int queued_only) { u_int16_t q_addr; u_int8_t q_no; struct adv_q_done_info scsiq_buf; struct adv_q_done_info *scsiq; u_int8_t target_ix; int count; scsiq = &scsiq_buf; target_ix = ADV_TIDLUN_TO_IX(target, lun); count = 0; for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) { struct adv_ccb_info *ccb_info; q_addr = ADV_QNO_TO_QADDR(q_no); adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count); ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index]; if (((scsiq->q_status
& QS_READY) != 0) && ((scsiq->q_status & QS_ABORTED) == 0) && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0) && (scsiq->d2.target_ix == target_ix) && (queued_only == 0 || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE))) && (ccb == NULL || (ccb == ccb_info->ccb))) { union ccb *aborted_ccb; struct adv_ccb_info *cinfo; scsiq->q_status |= QS_ABORTED; adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS, scsiq->q_status); aborted_ccb = ccb_info->ccb; /* Don't clobber earlier error codes */ if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) aborted_ccb->ccb_h.status |= status; cinfo = (struct adv_ccb_info *) aborted_ccb->ccb_h.ccb_cinfo_ptr; cinfo->state |= ACCB_ABORT_QUEUED; count++; } } return (count); } int adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset) { int count; int i; union ccb *ccb; i = 200; while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0 && i--) DELAY(1000); adv_reset_chip(adv, initiate_bus_reset); adv_reinit_lram(adv); for (i = 0; i <= ADV_MAX_TID; i++) adv_set_syncrate(adv, NULL, i, /*period*/0, /*offset*/0, ADV_TRANS_CUR); ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR); /* Tell the XPT layer that a bus reset occurred */ if (adv->path != NULL) xpt_async(AC_BUS_RESET, adv->path, NULL); count = 0; while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) { if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) ccb->ccb_h.status |= CAM_SCSI_BUS_RESET; adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0); count++; } adv_start_chip(adv); return (count); } static void adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data) { int orig_id; adv_set_bank(adv, 1); orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1; ADV_OUTB(adv, ADV_HOST_SCSIID, tid); if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) { adv_set_bank(adv, 0); ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data); } adv_set_bank(adv, 1); ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id); adv_set_bank(adv, 0); } Index: head/sys/dev/advansys/adwcam.c =================================================================== --- head/sys/dev/advansys/adwcam.c (revision 110231) +++ head/sys/dev/advansys/adwcam.c (revision 110232) @@ -1,1540 +1,1538 @@ /* * CAM SCSI interface for the Advanced Systems Inc. * Second Generation SCSI controllers. * * Product specific probe and attach routines can be found in: * * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W * * Copyright (c) 1998, 1999, 2000 Justin Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Ported from: * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-1998 Advanced System Products, Inc. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that redistributions of source * code retain the above copyright notice and this comment without * modification. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Definitions for our use of the SIM private CCB area */ #define ccb_acb_ptr spriv_ptr0 #define ccb_adw_ptr spriv_ptr1 -#define MIN(a, b) (((a) < (b)) ? (a) : (b)) - u_long adw_unit; static __inline cam_status adwccbstatus(union ccb*); static __inline struct acb* adwgetacb(struct adw_softc *adw); static __inline void adwfreeacb(struct adw_softc *adw, struct acb *acb); static void adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error); static struct sg_map_node* adwallocsgmap(struct adw_softc *adw); static int adwallocacbs(struct adw_softc *adw); static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error); static void adw_action(struct cam_sim *sim, union ccb *ccb); static void adw_poll(struct cam_sim *sim); static void adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void adwprocesserror(struct adw_softc *adw, struct acb *acb); static void adwtimeout(void *arg); static void adw_handle_device_reset(struct adw_softc *adw, u_int target); static void adw_handle_bus_reset(struct adw_softc *adw, int initiated); static __inline cam_status adwccbstatus(union ccb* ccb) { return (ccb->ccb_h.status & CAM_STATUS_MASK); } static __inline struct acb* adwgetacb(struct adw_softc *adw) { struct acb* acb; int s; s = splcam(); if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) { SLIST_REMOVE_HEAD(&adw->free_acb_list, links); } else if (adw->num_acbs < adw->max_acbs) { adwallocacbs(adw); acb = SLIST_FIRST(&adw->free_acb_list); if (acb == NULL) printf("%s: Can't malloc ACB\n", adw_name(adw)); else { SLIST_REMOVE_HEAD(&adw->free_acb_list, links); } } splx(s); return (acb); } static __inline void adwfreeacb(struct adw_softc *adw, struct acb *acb) { int s; s = splcam(); if ((acb->state & ACB_ACTIVE) != 0) LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le); if ((acb->state & ACB_RELEASE_SIMQ) != 0) acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0 && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; adw->state &= ~ADW_RESOURCE_SHORTAGE; } acb->state = ACB_FREE; SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links); splx(s); } static void adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *busaddrp; busaddrp = (bus_addr_t *)arg; *busaddrp = segs->ds_addr; } static struct sg_map_node * adwallocsgmap(struct adw_softc *adw) { struct sg_map_node *sg_map; sg_map = 
malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) return (NULL); /* Allocate S/G space for the next batch of ACBs */ if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); return (NULL); } SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links); bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0); bzero(sg_map->sg_vaddr, PAGE_SIZE); return (sg_map); } /* * Allocate another chunk of ACBs. Return count of entries added. * Assumed to be called at splcam(). */ static int adwallocacbs(struct adw_softc *adw) { struct acb *next_acb; struct sg_map_node *sg_map; bus_addr_t busaddr; struct adw_sg_block *blocks; int newcount; int i; next_acb = &adw->acbs[adw->num_acbs]; sg_map = adwallocsgmap(adw); if (sg_map == NULL) return (0); blocks = sg_map->sg_vaddr; busaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks))); for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) { int error; error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0, &next_acb->dmamap); if (error != 0) break; next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb); next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb); next_acb->queue.sense_baddr = acbvtob(adw, next_acb) + offsetof(struct acb, sense_data); next_acb->sg_blocks = blocks; next_acb->sg_busaddr = busaddr; next_acb->state = ACB_FREE; SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links); blocks += ADW_SG_BLOCKCNT; busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks); next_acb++; adw->num_acbs++; } return (i); } static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct acb *acb; union ccb *ccb; struct adw_softc *adw; int s; acb = (struct acb *)arg; ccb = acb->ccb; adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; if (error != 0) { if (error != EFBIG) printf("%s: Unexpected error 0x%x returned from " "bus_dmamap_load\n", adw_name(adw), error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } adwfreeacb(adw, acb); xpt_done(ccb); return; } if (nseg != 0) { bus_dmasync_op_t op; acb->queue.data_addr = dm_segs[0].ds_addr; acb->queue.data_cnt = ccb->csio.dxfer_len; if (nseg > 1) { struct adw_sg_block *sg_block; struct adw_sg_elm *sg; bus_addr_t sg_busaddr; u_int sg_index; bus_dma_segment_t *end_seg; end_seg = dm_segs + nseg; sg_busaddr = acb->sg_busaddr; sg_index = 0; /* Copy the segments into our SG list */ for (sg_block = acb->sg_blocks;; sg_block++) { u_int i; sg = sg_block->sg_list; for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) { if (dm_segs >= end_seg) break; sg->sg_addr = dm_segs->ds_addr; sg->sg_count = dm_segs->ds_len; sg++; dm_segs++; } sg_block->sg_cnt = i; sg_index += i; if (dm_segs == end_seg) { sg_block->sg_busaddr_next = 0; break; } else { sg_busaddr += sizeof(struct adw_sg_block); sg_block->sg_busaddr_next = sg_busaddr; } } acb->queue.sg_real_addr = acb->sg_busaddr; } else { acb->queue.sg_real_addr = 0; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); } else { acb->queue.data_addr = 0; acb->queue.data_cnt = 0; acb->queue.sg_real_addr = 0; } s = splcam(); /* * Last time we need to check if this CCB needs to * be aborted.
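 * bus_dmamap_load() may have deferred this callback and returned
 * EINPROGRESS to adw_action(), so the CCB's state can have changed
 * (e.g. an abort) between being queued and being mapped.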
*/ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); adwfreeacb(adw, acb); xpt_done(ccb); splx(s); return; } acb->state |= ACB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le); ccb->ccb_h.timeout_ch = timeout(adwtimeout, (caddr_t)acb, (ccb->ccb_h.timeout * hz) / 1000); adw_send_acb(adw, acb, acbvtob(adw, acb)); splx(s); } static void adw_action(struct cam_sim *sim, union ccb *ccb) { struct adw_softc *adw; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n")); adw = (struct adw_softc *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; struct acb *acb; csio = &ccb->csio; ccbh = &ccb->ccb_h; /* Max supported CDB length is 12 bytes */ if (csio->cdb_len > 12) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } if ((acb = adwgetacb(adw)) == NULL) { int s; s = splcam(); adw->state |= ADW_RESOURCE_SHORTAGE; splx(s); xpt_freeze_simq(sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } /* Link acb and ccb so we can find one from the other */ acb->ccb = ccb; ccb->ccb_h.ccb_acb_ptr = acb; ccb->ccb_h.ccb_adw_ptr = adw; acb->queue.cntl = 0; acb->queue.target_cmd = 0; acb->queue.target_id = ccb->ccb_h.target_id; acb->queue.target_lun = ccb->ccb_h.target_lun; acb->queue.mflag = 0; acb->queue.sense_len = MIN(csio->sense_len, sizeof(acb->sense_data)); acb->queue.cdb_len = csio->cdb_len; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch (csio->tag_action) { case MSG_SIMPLE_Q_TAG: acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG; break; case MSG_HEAD_OF_Q_TAG: acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG; break; case MSG_ORDERED_Q_TAG: acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG; break; default: acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; break; } } else acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) acb->queue.scsi_cntl |= ADW_QSC_NO_DISC; acb->queue.done_status = 0; acb->queue.scsi_status = 0; acb->queue.host_status = 0; acb->queue.sg_wk_ix = 0; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, acb->queue.cdb, csio->cdb_len); } else { /* I guess I could map it in... */ ccb->ccb_h.status = CAM_REQ_INVALID; adwfreeacb(adw, acb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, acb->queue.cdb, csio->cdb_len); } /* * If we have any data to send with this command, * map it into bus space. */ if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { /* * We've been given a pointer * to a single buffer. */ if ((ccbh->flags & CAM_DATA_PHYS) == 0) { int s; int error; s = splsoftvm(); error = bus_dmamap_load(adw->buffer_dmat, acb->dmamap, csio->data_ptr, csio->dxfer_len, adwexecuteacb, acb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. 
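 * The freeze is dropped when this ACB completes: the
 * ACB_RELEASE_SIMQ flag set below makes adwfreeacb() mark the
 * CCB with CAM_RELEASE_SIMQ.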
*/ xpt_freeze_simq(sim, 1); acb->state |= ACB_RELEASE_SIMQ; splx(s); } else { struct bus_dma_segment seg; /* Pointer to physical buffer */ seg.ds_addr = (bus_addr_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; adwexecuteacb(acb, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((ccbh->flags & CAM_DATA_PHYS) != 0) panic("adw_action - Physical " "segment pointers " "unsupported"); if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) panic("adw_action - Virtual " "segment addresses " "unsupported"); /* Just use the segments provided */ segs = (struct bus_dma_segment *)csio->data_ptr; adwexecuteacb(acb, segs, csio->sglist_cnt, (csio->sglist_cnt < ADW_SGSIZE) ? 0 : EFBIG); } } else { adwexecuteacb(acb, NULL, 0, 0); } break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { adw_idle_cmd_status_t status; status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, ccb->ccb_h.target_id); if (status == ADW_IDLE_CMD_SUCCESS) { ccb->ccb_h.status = CAM_REQ_CMP; if (bootverbose) { xpt_print_path(ccb->ccb_h.path); printf("BDR Delivered\n"); } } else ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); break; } case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; u_int target_mask; int s; cts = &ccb->cts; target_mask = 0x01 << ccb->ccb_h.target_id; s = splcam(); if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { u_int sdtrdone; sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE); if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { u_int discenb; discenb = adw_lram_read_16(adw, ADW_MC_DISC_ENABLE); if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) discenb |= target_mask; else discenb &= ~target_mask; adw_lram_write_16(adw, ADW_MC_DISC_ENABLE, discenb); } if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) adw->tagenb |= target_mask; else adw->tagenb &= ~target_mask; } if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { u_int wdtrenb_orig; u_int wdtrenb; u_int wdtrdone; wdtrenb_orig = adw_lram_read_16(adw, ADW_MC_WDTR_ABLE); wdtrenb = wdtrenb_orig; wdtrdone = adw_lram_read_16(adw, ADW_MC_WDTR_DONE); switch (cts->bus_width) { case MSG_EXT_WDTR_BUS_32_BIT: case MSG_EXT_WDTR_BUS_16_BIT: wdtrenb |= target_mask; break; case MSG_EXT_WDTR_BUS_8_BIT: default: wdtrenb &= ~target_mask; break; } if (wdtrenb != wdtrenb_orig) { adw_lram_write_16(adw, ADW_MC_WDTR_ABLE, wdtrenb); wdtrdone &= ~target_mask; adw_lram_write_16(adw, ADW_MC_WDTR_DONE, wdtrdone); /* Wide negotiation forces async */ sdtrdone &= ~target_mask; adw_lram_write_16(adw, ADW_MC_SDTR_DONE, sdtrdone); } } if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { u_int sdtr_orig; u_int sdtr; u_int sdtrable_orig; u_int sdtrable; sdtr = adw_get_chip_sdtr(adw, ccb->ccb_h.target_id); sdtr_orig = sdtr; sdtrable = adw_lram_read_16(adw, ADW_MC_SDTR_ABLE); sdtrable_orig = sdtrable; if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) { sdtr = adw_find_sdtr(adw, cts->sync_period); } if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0) { if (cts->sync_offset == 0) sdtr = ADW_MC_SDTR_ASYNC; } if (sdtr == ADW_MC_SDTR_ASYNC) sdtrable &= ~target_mask; else sdtrable |= target_mask; if (sdtr != sdtr_orig || sdtrable != sdtrable_orig) { adw_set_chip_sdtr(adw, ccb->ccb_h.target_id, sdtr); sdtrdone &= ~target_mask; adw_lram_write_16(adw, ADW_MC_SDTR_ABLE, sdtrable); adw_lram_write_16(adw, ADW_MC_SDTR_DONE, sdtrdone); } } } splx(s); ccb->ccb_h.status
= CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts; u_int target_mask; cts = &ccb->cts; target_mask = 0x01 << ccb->ccb_h.target_id; if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { u_int mc_sdtr; cts->flags = 0; if ((adw->user_discenb & target_mask) != 0) cts->flags |= CCB_TRANS_DISC_ENB; if ((adw->user_tagenb & target_mask) != 0) cts->flags |= CCB_TRANS_TAG_ENB; if ((adw->user_wdtr & target_mask) != 0) cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; else cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id); cts->sync_period = adw_find_period(adw, mc_sdtr); if (cts->sync_period != 0) cts->sync_offset = 15; /* XXX ??? */ else cts->sync_offset = 0; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; ccb->ccb_h.status = CAM_REQ_CMP; } else { u_int targ_tinfo; cts->flags = 0; if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE) & target_mask) != 0) cts->flags |= CCB_TRANS_DISC_ENB; if ((adw->tagenb & target_mask) != 0) cts->flags |= CCB_TRANS_TAG_ENB; targ_tinfo = adw_lram_read_16(adw, ADW_MC_DEVICE_HSHK_CFG_TABLE + (2 * ccb->ccb_h.target_id)); if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0) cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; else cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; cts->sync_period = adw_hshk_cfg_period_factor(targ_tinfo); cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET; if (cts->sync_period == 0) cts->sync_offset = 0; if (cts->sync_offset == 0) cts->sync_period = 0; } cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; int extended; /* * XXX Use Adaptec translation until I find out how to * get this information from the card. 
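 * For example, with the usual 512 byte blocks,
 * size_mb = volume_size / 2048, so a 4096 MB volume exceeds the
 * 1024 MB threshold and is given the extended 255 head, 63 sector
 * geometry (16065 blocks per cylinder), while smaller volumes get
 * 64 heads and 32 sectors (2048 blocks, 1 MB, per cylinder).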
*/ ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); extended = 1; if (size_mb > 1024 && extended) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { int failure; failure = adw_reset_bus(adw); if (failure != 0) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { if (bootverbose) { xpt_print_path(adw->path); printf("Bus Reset Delivered\n"); } ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = ADW_MAX_TID; cpi->max_lun = ADW_MAX_LUN; cpi->initiator_id = adw->initiator_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void adw_poll(struct cam_sim *sim) { adw_intr(cam_sim_softc(sim)); } static void adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { } struct adw_softc * adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id) { struct adw_softc *adw; int i; /* * Allocate a storage area for us */ adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT | M_ZERO); if (adw == NULL) { printf("adw%d: cannot malloc!\n", device_get_unit(dev)); return NULL; } LIST_INIT(&adw->pending_ccbs); SLIST_INIT(&adw->sg_maps); adw->device = dev; adw->unit = device_get_unit(dev); adw->regs_res_type = regs_type; adw->regs_res_id = regs_id; adw->regs = regs; adw->tag = rman_get_bustag(regs); adw->bsh = rman_get_bushandle(regs); i = adw->unit / 10; adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT); if (adw->name == NULL) { printf("adw%d: cannot malloc name!\n", adw->unit); free(adw, M_DEVBUF); return NULL; } sprintf(adw->name, "adw%d", adw->unit); return(adw); } void adw_free(struct adw_softc *adw) { switch (adw->init_level) { case 9: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) { SLIST_REMOVE_HEAD(&adw->sg_maps, links); bus_dmamap_unload(adw->sg_dmat, sg_map->sg_dmamap); bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } bus_dma_tag_destroy(adw->sg_dmat); } case 8: bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap); case 7: bus_dmamem_free(adw->acb_dmat, adw->acbs, adw->acb_dmamap); bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap); case 6: bus_dma_tag_destroy(adw->acb_dmat); case 5: bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap); case 4: bus_dmamem_free(adw->carrier_dmat, adw->carriers, adw->carrier_dmamap); bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap); case 3: bus_dma_tag_destroy(adw->carrier_dmat); case 2: bus_dma_tag_destroy(adw->buffer_dmat); case 1: bus_dma_tag_destroy(adw->parent_dmat); case 0: break; } free(adw->name, M_DEVBUF); free(adw, 
M_DEVBUF); } int adw_init(struct adw_softc *adw) { struct adw_eeprom eep_config; u_int tid; u_int i; u_int16_t checksum; u_int16_t scsicfg1; checksum = adw_eeprom_read(adw, &eep_config); bcopy(eep_config.serial_number, adw->serial_number, sizeof(adw->serial_number)); if (checksum != eep_config.checksum) { u_int16_t serial_number[3]; adw->flags |= ADW_EEPROM_FAILED; printf("%s: EEPROM checksum failed. Restoring Defaults\n", adw_name(adw)); /* * Restore the default EEPROM settings. * Assume the 6 byte board serial number that was read * from EEPROM is correct even if the EEPROM checksum * failed. */ bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config)); bcopy(adw->serial_number, eep_config.serial_number, sizeof(serial_number)); adw_eeprom_write(adw, &eep_config); } /* Pull eeprom information into our softc. */ adw->bios_ctrl = eep_config.bios_ctrl; adw->user_wdtr = eep_config.wdtr_able; for (tid = 0; tid < ADW_MAX_TID; tid++) { u_int mc_sdtr; u_int16_t tid_mask; tid_mask = 0x1 << tid; if ((adw->features & ADW_ULTRA) != 0) { /* * Ultra chips store sdtr and ultraenb * bits in their seeprom, so we must * construct valid mc_sdtr entries for * them indirectly. */ if (eep_config.sync1.sync_enable & tid_mask) { if (eep_config.sync2.ultra_enable & tid_mask) mc_sdtr = ADW_MC_SDTR_20; else mc_sdtr = ADW_MC_SDTR_10; } else mc_sdtr = ADW_MC_SDTR_ASYNC; } else { switch (ADW_TARGET_GROUP(tid)) { case 3: mc_sdtr = eep_config.sync4.sdtr4; break; case 2: mc_sdtr = eep_config.sync3.sdtr3; break; case 1: mc_sdtr = eep_config.sync2.sdtr2; break; default: /* Shut up compiler */ case 0: mc_sdtr = eep_config.sync1.sdtr1; break; } mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid); mc_sdtr &= 0xFF; } adw_set_user_sdtr(adw, tid, mc_sdtr); } adw->user_tagenb = eep_config.tagqng_able; adw->user_discenb = eep_config.disc_enable; adw->max_acbs = eep_config.max_host_qng; adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID); /* * Sanity check the number of host openings. */ if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG) adw->max_acbs = ADW_DEF_MAX_HOST_QNG; else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) { /* If the value is zero, assume it is uninitialized. */ if (adw->max_acbs == 0) adw->max_acbs = ADW_DEF_MAX_HOST_QNG; else adw->max_acbs = ADW_DEF_MIN_HOST_QNG; } scsicfg1 = 0; if ((adw->features & ADW_ULTRA2) != 0) { switch (eep_config.termination_lvd) { default: printf("%s: Invalid EEPROM LVD Termination Settings.\n", adw_name(adw)); printf("%s: Reverting to Automatic LVD Termination\n", adw_name(adw)); /* FALLTHROUGH */ case ADW_EEPROM_TERM_AUTO: break; case ADW_EEPROM_TERM_BOTH_ON: scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO; /* FALLTHROUGH */ case ADW_EEPROM_TERM_HIGH_ON: scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI; /* FALLTHROUGH */ case ADW_EEPROM_TERM_OFF: scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV; break; } } switch (eep_config.termination_se) { default: printf("%s: Invalid SE EEPROM Termination Settings.\n", adw_name(adw)); printf("%s: Reverting to Automatic SE Termination\n", adw_name(adw)); /* FALLTHROUGH */ case ADW_EEPROM_TERM_AUTO: break; case ADW_EEPROM_TERM_BOTH_ON: scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L; /* FALLTHROUGH */ case ADW_EEPROM_TERM_HIGH_ON: scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H; /* FALLTHROUGH */ case ADW_EEPROM_TERM_OFF: scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL; break; } printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id); /* DMA tag for mapping buffers into device visible space.
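 * The tag below limits buffer mappings to 32 bit bus addresses
 * and to ADW_SGSIZE segments per transfer, the most one request's
 * chain of S/G blocks can describe.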
*/ if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/BUS_DMA_ALLOCNOW, &adw->buffer_dmat) != 0) { return (ENOMEM); } adw->init_level++; /* DMA tag for our ccb carrier structures */ if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) * sizeof(struct adw_carrier), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &adw->carrier_dmat) != 0) { return (ENOMEM); } adw->init_level++; /* Allocation for our ccb carrier structures */ if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers, BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) { return (ENOMEM); } adw->init_level++; /* And permanently map them */ bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap, adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) * sizeof(struct adw_carrier), adwmapmem, &adw->carrier_busbase, /*flags*/0); /* Clear them out. */ bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) * sizeof(struct adw_carrier)); /* Setup our free carrier list */ adw->free_carriers = adw->carriers; for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) { adw->carriers[i].carr_offset = carriervtobo(adw, &adw->carriers[i]); adw->carriers[i].carr_ba = carriervtob(adw, &adw->carriers[i]); adw->carriers[i].areq_ba = 0; adw->carriers[i].next_ba = carriervtobo(adw, &adw->carriers[i+1]); } /* Terminal carrier. Never leaves the freelist */ adw->carriers[i].carr_offset = carriervtobo(adw, &adw->carriers[i]); adw->carriers[i].carr_ba = carriervtob(adw, &adw->carriers[i]); adw->carriers[i].areq_ba = 0; adw->carriers[i].next_ba = ~0; adw->init_level++; /* DMA tag for our acb structures */ if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, adw->max_acbs * sizeof(struct acb), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &adw->acb_dmat) != 0) { return (ENOMEM); } adw->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs, BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0) return (ENOMEM); adw->init_level++; /* And permanently map them */ bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap, adw->acbs, adw->max_acbs * sizeof(struct acb), adwmapmem, &adw->acb_busbase, /*flags*/0); /* Clear them out. */ bzero(adw->acbs, adw->max_acbs * sizeof(struct acb)); /* DMA tag for our S/G structures. 
We allocate in page sized chunks */ if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &adw->sg_dmat) != 0) { return (ENOMEM); } adw->init_level++; /* Allocate our first batch of ccbs */ if (adwallocacbs(adw) == 0) return (ENOMEM); if (adw_init_chip(adw, scsicfg1) != 0) return (ENXIO); printf("Queue Depth %d\n", adw->max_acbs); return (0); } /* * Attach all the sub-devices we can find */ int adw_attach(struct adw_softc *adw) { struct ccb_setasync csa; struct cam_devq *devq; int s; int error; error = 0; s = splcam(); /* Hook up our interrupt handler */ if ((error = bus_setup_intr(adw->device, adw->irq, INTR_TYPE_CAM | INTR_ENTROPY, adw_intr, adw, &adw->ih)) != 0) { device_printf(adw->device, "bus_setup_intr() failed: %d\n", error); goto fail; } /* Start the Risc processor now that we are fully configured. */ adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN); /* * Create the device queue for our SIM. */ devq = cam_simq_alloc(adw->max_acbs); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry. */ adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit, 1, adw->max_acbs, devq); if (adw->sim == NULL) { error = ENOMEM; goto fail; } /* * Register the bus. */ if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) { cam_sim_free(adw->sim, /*free devq*/TRUE); error = ENOMEM; goto fail; } if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = adw_async; csa.callback_arg = adw; xpt_action((union ccb *)&csa); } fail: splx(s); return (error); } void adw_intr(void *arg) { struct adw_softc *adw; u_int int_stat; adw = (struct adw_softc *)arg; if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0) return; /* Reading the register clears the interrupt. */ int_stat = adw_inb(adw, ADW_INTR_STATUS_REG); if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) { u_int intrb_code; /* Async Microcode Event */ intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE); switch (intrb_code) { case ADW_ASYNC_CARRIER_READY_FAILURE: /* * The RISC missed our update of * the commandq. */ if (LIST_FIRST(&adw->pending_ccbs) != NULL) adw_tickle_risc(adw, ADW_TICKLE_A); break; case ADW_ASYNC_SCSI_BUS_RESET_DET: /* * The firmware detected a SCSI Bus reset. */ printf("Someone Reset the Bus\n"); adw_handle_bus_reset(adw, /*initiated*/FALSE); break; case ADW_ASYNC_RDMA_FAILURE: /* * Handle RDMA failure by resetting the * SCSI Bus and chip. */ #if XXX AdvResetChipAndSB(adv_dvc_varp); #endif break; case ADW_ASYNC_HOST_SCSI_BUS_RESET: /* * Host generated SCSI bus reset occurred. */ adw_handle_bus_reset(adw, /*initiated*/TRUE); break; default: printf("adw_intr: unknown async code 0x%x\n", intrb_code); break; } } /* * Run down the RequestQ. */ while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) { struct adw_carrier *free_carrier; struct acb *acb; union ccb *ccb; #if 0 printf("0x%x, 0x%x, 0x%x, 0x%x\n", adw->responseq->carr_offset, adw->responseq->carr_ba, adw->responseq->areq_ba, adw->responseq->next_ba); #endif /* * The firmware copies the adw_scsi_req_q.acb_baddr * field into the areq_ba field of the carrier. 
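 * acbbotov() is the inverse of the acbvtob() translation used when
 * the request was queued; it recovers our kernel virtual ACB
 * pointer from the bus address the firmware handed back.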
*/ acb = acbbotov(adw, adw->responseq->areq_ba); /* * The least significant four bits of the next_ba * field are used as flags. Mask them out and then * advance through the list. */ free_carrier = adw->responseq; adw->responseq = carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK); free_carrier->next_ba = adw->free_carriers->carr_offset; adw->free_carriers = free_carrier; /* Process CCB */ ccb = acb->ccb; untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); ccb->csio.resid = acb->queue.data_cnt; } else ccb->csio.resid = 0; /* Common Cases inline... */ if (acb->queue.host_status == QHSTA_NO_ERROR && (acb->queue.done_status == QD_NO_ERROR || acb->queue.done_status == QD_WITH_ERROR)) { ccb->csio.scsi_status = acb->queue.scsi_status; ccb->ccb_h.status = 0; switch (ccb->csio.scsi_status) { case SCSI_STATUS_OK: ccb->ccb_h.status |= CAM_REQ_CMP; break; case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: bcopy(&acb->sense_data, &ccb->csio.sense_data, ccb->csio.sense_len); ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.sense_resid = acb->queue.sense_len; /* FALLTHROUGH */ default: ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); break; } adwfreeacb(adw, acb); xpt_done(ccb); } else { adwprocesserror(adw, acb); } } } static void adwprocesserror(struct adw_softc *adw, struct acb *acb) { union ccb *ccb; ccb = acb->ccb; if (acb->queue.done_status == QD_ABORTED_BY_HOST) { ccb->ccb_h.status = CAM_REQ_ABORTED; } else { switch (acb->queue.host_status) { case QHSTA_M_SEL_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case QHSTA_M_SXFR_OFF_UFLW: case QHSTA_M_SXFR_OFF_OFLW: case QHSTA_M_DATA_OVER_RUN: ccb->ccb_h.status = CAM_DATA_RUN_ERR; break; case QHSTA_M_SXFR_DESELECTED: case QHSTA_M_UNEXPECTED_BUS_FREE: ccb->ccb_h.status = CAM_UNEXP_BUSFREE; break; case QHSTA_M_SCSI_BUS_RESET: case QHSTA_M_SCSI_BUS_RESET_UNSOL: ccb->ccb_h.status = CAM_SCSI_BUS_RESET; break; case QHSTA_M_BUS_DEVICE_RESET: ccb->ccb_h.status = CAM_BDR_SENT; break; case QHSTA_M_QUEUE_ABORTED: /* BDR or Bus Reset */ printf("Saw Queue Aborted\n"); ccb->ccb_h.status = adw->last_reset; break; case QHSTA_M_SXFR_SDMA_ERR: case QHSTA_M_SXFR_SXFR_PERR: case QHSTA_M_RDMA_PERR: ccb->ccb_h.status = CAM_UNCOR_PARITY; break; case QHSTA_M_WTM_TIMEOUT: case QHSTA_M_SXFR_WD_TMO: { /* The SCSI bus hung in a phase */ xpt_print_path(adw->path); printf("Watch Dog timer expired. Resetting bus\n"); adw_reset_bus(adw); break; } case QHSTA_M_SXFR_XFR_PH_ERR: ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_M_SXFR_UNKNOWN_ERROR: break; case QHSTA_M_BAD_CMPL_STATUS_IN: /* No command complete after a status message */ ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_M_AUTO_REQ_SENSE_FAIL: ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case QHSTA_M_INVALID_DEVICE: ccb->ccb_h.status = CAM_PATH_INVALID; break; case QHSTA_M_NO_AUTO_REQ_SENSE: /* * User didn't request sense, but we got a * check condition.
*/ ccb->csio.scsi_status = acb->queue.scsi_status; ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; default: panic("%s: Unhandled Host status error %x", adw_name(adw), acb->queue.host_status); /* NOTREACHED */ } } if ((acb->state & ACB_RECOVERY_ACB) != 0) { if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET || ccb->ccb_h.status == CAM_BDR_SENT) ccb->ccb_h.status = CAM_CMD_TIMEOUT; } if (ccb->ccb_h.status != CAM_REQ_CMP) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } adwfreeacb(adw, acb); xpt_done(ccb); } static void adwtimeout(void *arg) { struct acb *acb; union ccb *ccb; struct adw_softc *adw; adw_idle_cmd_status_t status; int target_id; int s; acb = (struct acb *)arg; ccb = acb->ccb; adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; xpt_print_path(ccb->ccb_h.path); printf("ACB %p - timed out\n", (void *)acb); s = splcam(); if ((acb->state & ACB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("ACB %p - timed out CCB already completed\n", (void *)acb); splx(s); return; } acb->state |= ACB_RECOVERY_ACB; target_id = ccb->ccb_h.target_id; /* Attempt a BDR first */ status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, ccb->ccb_h.target_id); splx(s); if (status == ADW_IDLE_CMD_SUCCESS) { printf("%s: BDR Delivered. No longer in timeout\n", adw_name(adw)); adw_handle_device_reset(adw, target_id); } else { adw_reset_bus(adw); xpt_print_path(adw->path); printf("Bus Reset Delivered. No longer in timeout\n"); } } static void adw_handle_device_reset(struct adw_softc *adw, u_int target) { struct cam_path *path; cam_status error; error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim), target, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) { xpt_async(AC_SENT_BDR, path, NULL); xpt_free_path(path); } adw->last_reset = CAM_BDR_SENT; } static void adw_handle_bus_reset(struct adw_softc *adw, int initiated) { if (initiated) { /* * The microcode currently sets the SCSI Bus Reset signal * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET * command above. But the SCSI Bus Reset Hold Time in the * microcode is not deterministic (it may in fact be for less * than the SCSI Spec. minimum of 25 us). Therefore on return * the Adv Library sets the SCSI Bus Reset signal for * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater * than 25 us. */ u_int scsi_ctrl; scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT; adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT); DELAY(ADW_SCSI_RESET_HOLD_TIME_US); adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl); /* * We will perform the async notification when the * SCSI Reset interrupt occurs. */ } else xpt_async(AC_BUS_RESET, adw->path, NULL); adw->last_reset = CAM_SCSI_BUS_RESET; } Index: head/sys/dev/aha/aha.c =================================================================== --- head/sys/dev/aha/aha.c (revision 110231) +++ head/sys/dev/aha/aha.c (revision 110232) @@ -1,1945 +1,1941 @@ /* * Generic register and struct definitions for the Adaptec 154x/164x * SCSI host adapters. Product specific probe and attach routines can * be found in: * aha 1540/1542B/1542C/1542CF/1542CP aha_isa.c * * Copyright (c) 1998 M. Warner Losh. * All Rights Reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file.
* 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Derived from bt.c written by: * * Copyright (c) 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define PRVERB(x) if (bootverbose) printf x /* Macro to determine that a rev is potentially a new valid one * so that the driver doesn't keep breaking on new revs as it * did for the CF and CP. */ #define PROBABLY_NEW_BOARD(REV) (REV > 0x43 && REV < 0x56) -#ifndef MAX -#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) -#endif - /* MailBox Management functions */ static __inline void ahanextinbox(struct aha_softc *aha); static __inline void ahanextoutbox(struct aha_softc *aha); static __inline void ahanextinbox(struct aha_softc *aha) { if (aha->cur_inbox == aha->last_inbox) aha->cur_inbox = aha->in_boxes; else aha->cur_inbox++; } static __inline void ahanextoutbox(struct aha_softc *aha) { if (aha->cur_outbox == aha->last_outbox) aha->cur_outbox = aha->out_boxes; else aha->cur_outbox++; } #define ahautoa24(u,s3) \ (s3)[0] = ((u) >> 16) & 0xff; \ (s3)[1] = ((u) >> 8) & 0xff; \ (s3)[2] = (u) & 0xff; #define aha_a24tou(s3) \ (((s3)[0] << 16) | ((s3)[1] << 8) | (s3)[2]) /* CCB Management functions */ static __inline u_int32_t ahaccbvtop(struct aha_softc *aha, struct aha_ccb *accb); static __inline struct aha_ccb* ahaccbptov(struct aha_softc *aha, u_int32_t ccb_addr); static __inline u_int32_t ahaccbvtop(struct aha_softc *aha, struct aha_ccb *accb) { return (aha->aha_ccb_physbase + (u_int32_t)((caddr_t)accb - (caddr_t)aha->aha_ccb_array)); } static __inline struct aha_ccb * ahaccbptov(struct aha_softc *aha, u_int32_t ccb_addr) { return (aha->aha_ccb_array + ((struct aha_ccb*)(uintptr_t)ccb_addr - (struct aha_ccb*)(uintptr_t)aha->aha_ccb_physbase)); } static struct aha_ccb* ahagetccb(struct aha_softc *aha); static __inline void ahafreeccb(struct aha_softc *aha, struct aha_ccb *accb); static void ahaallocccbs(struct aha_softc *aha); static bus_dmamap_callback_t ahaexecuteccb; static void ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code); /* Host adapter command functions */ static int ahareset(struct aha_softc* aha, int hard_reset); /* Initialization functions */ static int ahainitmboxes(struct aha_softc *aha); static bus_dmamap_callback_t ahamapmboxes; static bus_dmamap_callback_t ahamapccbs; static bus_dmamap_callback_t ahamapsgs; /* Transfer Negotiation Functions */ static void ahafetchtransinfo(struct aha_softc *aha, struct ccb_trans_settings *cts); /* CAM SIM entry points */ #define ccb_accb_ptr spriv_ptr0 #define ccb_aha_ptr spriv_ptr1 static void ahaaction(struct cam_sim *sim, union ccb *ccb); static void ahapoll(struct cam_sim *sim); /* Our timeout handler */ static timeout_t ahatimeout; u_long aha_unit = 0; /* * Do our own re-probe protection until a configuration * manager can do it for us. This ensures that we don't * reprobe a card already found by the EISA or PCI probes. */ static struct aha_isa_port aha_isa_ports[] = { { 0x130, 4 }, { 0x134, 5 }, { 0x230, 2 }, { 0x234, 3 }, { 0x330, 0 }, { 0x334, 1 } }; /* * I/O ports listed in the order enumerated by the * card for certain op codes. */
static u_int16_t aha_board_ports[] = { 0x330, 0x334, 0x230, 0x234, 0x130, 0x134 }; /* Exported functions */ struct aha_softc * aha_alloc(int unit, bus_space_tag_t tag, bus_space_handle_t bsh) { struct aha_softc *aha; aha = malloc(sizeof(struct aha_softc), M_DEVBUF, M_NOWAIT | M_ZERO); if (!aha) { printf("aha%d: cannot malloc!\n", unit); return NULL; } SLIST_INIT(&aha->free_aha_ccbs); LIST_INIT(&aha->pending_ccbs); SLIST_INIT(&aha->sg_maps); aha->unit = unit; aha->tag = tag; aha->bsh = bsh; aha->ccb_sg_opcode = INITIATOR_SG_CCB_WRESID; aha->ccb_ccb_opcode = INITIATOR_CCB_WRESID; return (aha); } void aha_free(struct aha_softc *aha) { switch (aha->init_level) { default: case 8: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&aha->sg_maps)) != NULL) { SLIST_REMOVE_HEAD(&aha->sg_maps, links); bus_dmamap_unload(aha->sg_dmat, sg_map->sg_dmamap); bus_dmamem_free(aha->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } bus_dma_tag_destroy(aha->sg_dmat); } case 7: bus_dmamap_unload(aha->ccb_dmat, aha->ccb_dmamap); case 6: bus_dmamap_destroy(aha->ccb_dmat, aha->ccb_dmamap); bus_dmamem_free(aha->ccb_dmat, aha->aha_ccb_array, aha->ccb_dmamap); case 5: bus_dma_tag_destroy(aha->ccb_dmat); case 4: bus_dmamap_unload(aha->mailbox_dmat, aha->mailbox_dmamap); case 3: bus_dmamem_free(aha->mailbox_dmat, aha->in_boxes, aha->mailbox_dmamap); bus_dmamap_destroy(aha->mailbox_dmat, aha->mailbox_dmamap); case 2: bus_dma_tag_destroy(aha->buffer_dmat); case 1: bus_dma_tag_destroy(aha->mailbox_dmat); case 0: break; } free(aha, M_DEVBUF); } /* * Probe the adapter and verify that the card is an Adaptec. */ int aha_probe(struct aha_softc* aha) { u_int status; u_int intstat; int error; board_id_data_t board_id; /* * See if the three I/O ports look reasonable. * Touch the minimal number of registers in the * failure case. */ status = aha_inb(aha, STATUS_REG); if ((status == 0) || (status & (DIAG_ACTIVE|CMD_REG_BUSY| STATUS_REG_RSVD)) != 0) { PRVERB(("%s: status reg test failed %x\n", aha_name(aha), status)); return (ENXIO); } intstat = aha_inb(aha, INTSTAT_REG); if ((intstat & INTSTAT_REG_RSVD) != 0) { PRVERB(("%s: Failed Intstat Reg Test\n", aha_name(aha))); return (ENXIO); } /* * Looking good so far. Final test is to reset the * adapter and fetch the board ID and ensure we aren't * looking at a BusLogic. */ if ((error = ahareset(aha, /*hard_reset*/TRUE)) != 0) { PRVERB(("%s: Failed Reset\n", aha_name(aha))); return (ENXIO); } /* * Get the board ID. We use this to see if we're dealing with * a buslogic card or an aha card (or clone). */ error = aha_cmd(aha, AOP_INQUIRE_BOARD_ID, NULL, /*parmlen*/0, (u_int8_t*)&board_id, sizeof(board_id), DEFAULT_CMD_TIMEOUT); if (error != 0) { PRVERB(("%s: INQUIRE failed %x\n", aha_name(aha), error)); return (ENXIO); } aha->fw_major = board_id.firmware_rev_major; aha->fw_minor = board_id.firmware_rev_minor; aha->boardid = board_id.board_type; /* * The Buslogic cards have an id of either 0x41 or 0x42. So * if those come up in the probe, we test the geometry register * of the board. Adaptec boards that are this old will not have * this register, and return 0xff, while buslogic cards will return * something different. * * It appears that for reasons unknown, for the * aha-1542B cards, we need to wait a little bit before trying * to read the geometry register. I picked 10ms since we have * reports that a for loop to 1000 did the trick, and this * errs on the side of conservatism.
Besides, no one will * notice a 10ms delay here, even the 1542B card users :-) * * Some compatible cards return 0 here. Some cards also * seem to return 0x7f. * * XXX I'm not sure how this will impact other cloned cards * * This really should be replaced with the esetup command, since * that appears to be more reliable. This becomes more and more * true over time as we discover more cards that don't read the * geometry register consistently. */ if (aha->boardid <= 0x42) { /* Wait 10ms before reading */ DELAY(10000); status = aha_inb(aha, GEOMETRY_REG); if (status != 0xff && status != 0x00 && status != 0x7f) { PRVERB(("%s: Geometry Register test failed 0x%x\n", aha_name(aha), status)); return (ENXIO); } } return (0); } /* * Pull the board's setup information and record it in our softc. */ int aha_fetch_adapter_info(struct aha_softc *aha) { setup_data_t setup_info; config_data_t config_data; u_int8_t length_param; int error; struct aha_extbios extbios; switch (aha->boardid) { case BOARD_1540_16HEAD_BIOS: snprintf(aha->model, sizeof(aha->model), "1540 16 head BIOS"); break; case BOARD_1540_64HEAD_BIOS: snprintf(aha->model, sizeof(aha->model), "1540 64 head BIOS"); break; case BOARD_1542: snprintf(aha->model, sizeof(aha->model), "1540/1542 64 head BIOS"); break; case BOARD_1640: snprintf(aha->model, sizeof(aha->model), "1640"); break; case BOARD_1740: snprintf(aha->model, sizeof(aha->model), "1740A/1742A/1744"); break; case BOARD_1542C: snprintf(aha->model, sizeof(aha->model), "1542C"); break; case BOARD_1542CF: snprintf(aha->model, sizeof(aha->model), "1542CF"); break; case BOARD_1542CP: snprintf(aha->model, sizeof(aha->model), "1542CP"); break; default: snprintf(aha->model, sizeof(aha->model), "Unknown"); break; } /* * If we are a new type of 1542 board (anything newer than a 1542C) * then disable the extended bios so that the * mailbox interface is unlocked. * This is also true for the 1542B Version 3.20. First Adaptec * board that supports >1Gb drives. * No need to check the extended bios flags as some of the * extensions that cause us problems are not flagged in that byte.
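 * The unlock is the two step sequence below: read the extended
 * BIOS information, then hand its first two bytes back via the
 * mailbox interface enable command.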
*/ if (PROBABLY_NEW_BOARD(aha->boardid) || (aha->boardid == 0x41 && aha->fw_major == 0x31 && aha->fw_minor >= 0x34)) { error = aha_cmd(aha, AOP_RETURN_EXT_BIOS_INFO, NULL, /*paramlen*/0, (u_char *)&extbios, sizeof(extbios), DEFAULT_CMD_TIMEOUT); error = aha_cmd(aha, AOP_MBOX_IF_ENABLE, (u_int8_t *)&extbios, /*paramlen*/2, NULL, 0, DEFAULT_CMD_TIMEOUT); } if (aha->boardid < 0x41) printf("%s: Warning: aha-1542A won't likely work.\n", aha_name(aha)); aha->max_sg = 17; /* Need >= 17 to do 64k I/O */ aha->diff_bus = 0; aha->extended_lun = 0; aha->extended_trans = 0; aha->max_ccbs = 16; /* Determine Sync/Wide/Disc settings */ length_param = sizeof(setup_info); error = aha_cmd(aha, AOP_INQUIRE_SETUP_INFO, &length_param, /*paramlen*/1, (u_int8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { printf("%s: aha_fetch_adapter_info - Failed " "Get Setup Info\n", aha_name(aha)); return (error); } if (setup_info.initiate_sync != 0) { aha->sync_permitted = ALL_TARGETS; } aha->disc_permitted = ALL_TARGETS; /* We need as many mailboxes as we can have ccbs */ aha->num_boxes = aha->max_ccbs; /* Determine our SCSI ID */ error = aha_cmd(aha, AOP_INQUIRE_CONFIG, NULL, /*parmlen*/0, (u_int8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { printf("%s: aha_fetch_adapter_info - Failed Get Config\n", aha_name(aha)); return (error); } aha->scsi_id = config_data.scsi_id; return (0); } /* * Start the board, ready for normal operation */ int aha_init(struct aha_softc* aha) { /* Announce the Adapter */ printf("%s: AHA-%s FW Rev. %c.%c (ID=%x) ", aha_name(aha), aha->model, aha->fw_major, aha->fw_minor, aha->boardid); if (aha->diff_bus != 0) printf("Diff "); printf("SCSI Host Adapter, SCSI ID %d, %d CCBs\n", aha->scsi_id, aha->max_ccbs); /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for mapping buffers into device visible space. 
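 * Note the BUS_SPACE_MAXSIZE_24BIT segment limits on the tags
 * below: the aha is an ISA bus master, and addresses and counts
 * travel to the card as the 3 byte quantities built by
 * ahautoa24() above.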
*/ if (bus_dma_tag_create(aha->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/MAXBSIZE, /*nsegments*/AHA_NSEG, /*maxsegsz*/BUS_SPACE_MAXSIZE_24BIT, /*flags*/BUS_DMA_ALLOCNOW, &aha->buffer_dmat) != 0) { goto error_exit; } aha->init_level++; /* DMA tag for our mailboxes */ if (bus_dma_tag_create(aha->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, aha->num_boxes * (sizeof(aha_mbox_in_t) + sizeof(aha_mbox_out_t)), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_24BIT, /*flags*/0, &aha->mailbox_dmat) != 0) { goto error_exit; } aha->init_level++; /* Allocation for our mailboxes */ if (bus_dmamem_alloc(aha->mailbox_dmat, (void **)&aha->out_boxes, BUS_DMA_NOWAIT, &aha->mailbox_dmamap) != 0) { goto error_exit; } aha->init_level++; /* And permanently map them */ bus_dmamap_load(aha->mailbox_dmat, aha->mailbox_dmamap, aha->out_boxes, aha->num_boxes * (sizeof(aha_mbox_in_t) + sizeof(aha_mbox_out_t)), ahamapmboxes, aha, /*flags*/0); aha->init_level++; aha->in_boxes = (aha_mbox_in_t *)&aha->out_boxes[aha->num_boxes]; ahainitmboxes(aha); /* DMA tag for our ccb structures */ if (bus_dma_tag_create(aha->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, aha->max_ccbs * sizeof(struct aha_ccb), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_24BIT, /*flags*/0, &aha->ccb_dmat) != 0) { goto error_exit; } aha->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(aha->ccb_dmat, (void **)&aha->aha_ccb_array, BUS_DMA_NOWAIT, &aha->ccb_dmamap) != 0) { goto error_exit; } aha->init_level++; /* And permanently map them */ bus_dmamap_load(aha->ccb_dmat, aha->ccb_dmamap, aha->aha_ccb_array, aha->max_ccbs * sizeof(struct aha_ccb), ahamapccbs, aha, /*flags*/0); aha->init_level++; /* DMA tag for our S/G structures. We allocate in page sized chunks */ if (bus_dma_tag_create(aha->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_24BIT, /*flags*/0, &aha->sg_dmat) != 0) { goto error_exit; } aha->init_level++; /* Perform initial CCB allocation */ bzero(aha->aha_ccb_array, aha->max_ccbs * sizeof(struct aha_ccb)); ahaallocccbs(aha); if (aha->num_ccbs == 0) { printf("%s: aha_init - Unable to allocate initial ccbs\n", aha_name(aha)); goto error_exit; } /* * Note that we are done and return (to probe) */ return 0; error_exit: return (ENXIO); } int aha_attach(struct aha_softc *aha) { int tagged_dev_openings; struct cam_devq *devq; /* * We don't do tagged queueing, since the aha cards don't * support it. */ tagged_dev_openings = 0; /* * Create the device queue for our SIM.
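 * The devq is sized to max_ccbs - 1; one CCB is held out of
 * circulation as the recovery_accb reserved in ahaallocccbs().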
*/ devq = cam_simq_alloc(aha->max_ccbs - 1); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ aha->sim = cam_sim_alloc(ahaaction, ahapoll, "aha", aha, aha->unit, 2, tagged_dev_openings, devq); if (aha->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } if (xpt_bus_register(aha->sim, 0) != CAM_SUCCESS) { cam_sim_free(aha->sim, /*free_devq*/TRUE); return (ENXIO); } if (xpt_create_path(&aha->path, /*periph*/NULL, cam_sim_path(aha->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(aha->sim)); cam_sim_free(aha->sim, /*free_devq*/TRUE); return (ENXIO); } return (0); } char * aha_name(struct aha_softc *aha) { static char name[10]; snprintf(name, sizeof(name), "aha%d", aha->unit); return (name); } void aha_find_probe_range(int ioport, int *port_index, int *max_port_index) { if (ioport > 0) { int i; for (i = 0;i < AHA_NUM_ISAPORTS; i++) if (ioport <= aha_isa_ports[i].addr) break; if ((i >= AHA_NUM_ISAPORTS) || (ioport != aha_isa_ports[i].addr)) { printf("\n" "aha_isa_probe: Invalid baseport of 0x%x specified.\n" "aha_isa_probe: Nearest valid baseport is 0x%x.\n" "aha_isa_probe: Failing probe.\n", ioport, (i < AHA_NUM_ISAPORTS) ? aha_isa_ports[i].addr : aha_isa_ports[AHA_NUM_ISAPORTS - 1].addr); *port_index = *max_port_index = -1; return; } *port_index = *max_port_index = aha_isa_ports[i].bio; } else { *port_index = 0; *max_port_index = AHA_NUM_ISAPORTS - 1; } } int aha_iop_from_bio(isa_compat_io_t bio_index) { if (bio_index >= 0 && bio_index < AHA_NUM_ISAPORTS) return (aha_board_ports[bio_index]); return (-1); } static void ahaallocccbs(struct aha_softc *aha) { struct aha_ccb *next_ccb; struct sg_map_node *sg_map; bus_addr_t physaddr; aha_sg_t *segs; int newcount; int i; next_ccb = &aha->aha_ccb_array[aha->num_ccbs]; sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) return; /* Allocate S/G space for the next batch of CCBS */ if (bus_dmamem_alloc(aha->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); return; } SLIST_INSERT_HEAD(&aha->sg_maps, sg_map, links); bus_dmamap_load(aha->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, ahamapsgs, aha, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (AHA_NSEG * sizeof(aha_sg_t))); for (i = 0; aha->num_ccbs < aha->max_ccbs && i < newcount; i++) { int error; next_ccb->sg_list = segs; next_ccb->sg_list_phys = physaddr; next_ccb->flags = ACCB_FREE; error = bus_dmamap_create(aha->buffer_dmat, /*flags*/0, &next_ccb->dmamap); if (error != 0) break; SLIST_INSERT_HEAD(&aha->free_aha_ccbs, next_ccb, links); segs += AHA_NSEG; physaddr += (AHA_NSEG * sizeof(aha_sg_t)); next_ccb++; aha->num_ccbs++; } /* Reserve a CCB for error recovery */ if (aha->recovery_accb == NULL) { aha->recovery_accb = SLIST_FIRST(&aha->free_aha_ccbs); SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); } } static __inline void ahafreeccb(struct aha_softc *aha, struct aha_ccb *accb) { int s; s = splcam(); if ((accb->flags & ACCB_ACTIVE) != 0) LIST_REMOVE(&accb->ccb->ccb_h, sim_links.le); if (aha->resource_shortage != 0 && (accb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { accb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; aha->resource_shortage = FALSE; } accb->flags = ACCB_FREE; SLIST_INSERT_HEAD(&aha->free_aha_ccbs, accb, links); aha->active_ccbs--; splx(s); } static struct aha_ccb* ahagetccb(struct aha_softc *aha) { struct aha_ccb* accb; int s; s = splcam(); if ((accb = SLIST_FIRST(&aha->free_aha_ccbs)) != 
NULL) { SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); aha->active_ccbs++; } else if (aha->num_ccbs < aha->max_ccbs) { ahaallocccbs(aha); accb = SLIST_FIRST(&aha->free_aha_ccbs); if (accb == NULL) printf("%s: Can't malloc ACCB\n", aha_name(aha)); else { SLIST_REMOVE_HEAD(&aha->free_aha_ccbs, links); aha->active_ccbs++; } } splx(s); return (accb); } static void ahaaction(struct cam_sim *sim, union ccb *ccb) { struct aha_softc *aha; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahaaction\n")); aha = (struct aha_softc *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct aha_ccb *accb; struct aha_hccb *hccb; /* * Get an accb to use. */ if ((accb = ahagetccb(aha)) == NULL) { int s; s = splcam(); aha->resource_shortage = TRUE; splx(s); xpt_freeze_simq(aha->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } hccb = &accb->hccb; /* * So we can find the ACCB when an abort is requested */ accb->ccb = ccb; ccb->ccb_h.ccb_accb_ptr = accb; ccb->ccb_h.ccb_aha_ptr = aha; /* * Put all the arguments for the xfer in the accb */ hccb->target = ccb->ccb_h.target_id; hccb->lun = ccb->ccb_h.target_lun; hccb->ahastat = 0; hccb->sdstat = 0; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; csio = &ccb->csio; ccbh = &csio->ccb_h; hccb->opcode = aha->ccb_ccb_opcode; hccb->datain = (ccb->ccb_h.flags & CAM_DIR_IN) != 0; hccb->dataout = (ccb->ccb_h.flags & CAM_DIR_OUT) != 0; hccb->cmd_len = csio->cdb_len; if (hccb->cmd_len > sizeof(hccb->scsi_cdb)) { ccb->ccb_h.status = CAM_REQ_INVALID; ahafreeccb(aha, accb); xpt_done(ccb); return; } hccb->sense_len = csio->sense_len; if ((ccbh->flags & CAM_CDB_POINTER) != 0) { if ((ccbh->flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, hccb->scsi_cdb, hccb->cmd_len); } else { /* I guess I could map it in... */ ccbh->status = CAM_REQ_INVALID; ahafreeccb(aha, accb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, hccb->scsi_cdb, hccb->cmd_len); } /* * If we have any data to send with this command, * map it into bus space. */ /* Only use S/G if there is a transfer */ if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { /* * We've been given a pointer * to a single buffer. */ if ((ccbh->flags & CAM_DATA_PHYS)==0) { int s; int error; s = splsoftvm(); error = bus_dmamap_load( aha->buffer_dmat, accb->dmamap, csio->data_ptr, csio->dxfer_len, ahaexecuteccb, accb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain * ordering, freeze the * controller queue * until our mapping is * returned. 
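 * bus_dmamap_load() returns EINPROGRESS when the mapping had to be
 * deferred (e.g. while busdma waits for bounce pages); the callback,
 * ahaexecuteccb() here, then runs later. In sketch form (names are
 * generic, not this driver's):
 *
 *	error = bus_dmamap_load(dmat, map, buf, len, callback, arg, 0);
 *	if (error == EINPROGRESS) {
 *		... callback will run later; stall the queue ...
 *	} else if (error == 0) {
 *		... callback has already run ...
 *	}
 *
 * The CAM_RELEASE_SIMQ flag set below tells the XPT to thaw the
 * queue again once this CCB completes.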
*/ xpt_freeze_simq(aha->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } splx(s); } else { struct bus_dma_segment seg; /* Pointer to physical buffer */ seg.ds_addr = (bus_addr_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; ahaexecuteccb(accb, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((ccbh->flags & CAM_DATA_PHYS) != 0) panic("ahaaction - Physical " "segment pointers " "unsupported"); if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) panic("ahaaction - Virtual " "segment addresses " "unsupported"); /* Just use the segments provided */ segs = (struct bus_dma_segment *) csio->data_ptr; ahaexecuteccb(accb, segs, csio->sglist_cnt, 0); } } else { ahaexecuteccb(accb, NULL, 0, 0); } } else { hccb->opcode = INITIATOR_BUS_DEV_RESET; /* No data transfer */ hccb->datain = TRUE; hccb->dataout = TRUE; hccb->cmd_len = 0; hccb->sense_len = 0; ahaexecuteccb(accb, NULL, 0, 0); } break; } case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { /* XXX Implement */ ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts; u_int target_mask; cts = &ccb->cts; target_mask = 0x01 << ccb->ccb_h.target_id; if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { cts->flags = 0; if ((aha->disc_permitted & target_mask) != 0) cts->flags |= CCB_TRANS_DISC_ENB; cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if ((aha->sync_permitted & target_mask) != 0) { if (aha->boardid >= BOARD_1542CF) cts->sync_period = 25; else cts->sync_period = 50; } else cts->sync_period = 0; if (cts->sync_period != 0) cts->sync_offset = 15; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; } else { ahafetchtransinfo(aha, cts); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb >= 1024 && (aha->extended_trans != 0)) { if (size_mb >= 2048) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 128; ccg->secs_per_track = 32; } } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { ahareset(aha, /*hardreset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? 
*/ cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = aha->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void ahaexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct aha_ccb *accb; union ccb *ccb; struct aha_softc *aha; int s; u_int32_t paddr; accb = (struct aha_ccb *)arg; ccb = accb->ccb; aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr; if (error != 0) { if (error != EFBIG) printf("%s: Unexpected error 0x%x returned from " "bus_dmamap_load\n", aha_name(aha), error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } ahafreeccb(aha, accb); xpt_done(ccb); return; } if (nseg != 0) { aha_sg_t *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ sg = accb->sg_list; while (dm_segs < end_seg) { ahautoa24(dm_segs->ds_len, sg->len); ahautoa24(dm_segs->ds_addr, sg->addr); sg++; dm_segs++; } if (nseg > 1) { accb->hccb.opcode = aha->ccb_sg_opcode; ahautoa24((sizeof(aha_sg_t) * nseg), accb->hccb.data_len); ahautoa24(accb->sg_list_phys, accb->hccb.data_addr); } else { bcopy(accb->sg_list->len, accb->hccb.data_len, 3); bcopy(accb->sg_list->addr, accb->hccb.data_addr, 3); } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op); } else { accb->hccb.opcode = INITIATOR_CCB; ahautoa24(0, accb->hccb.data_len); ahautoa24(0, accb->hccb.data_addr); } s = splcam(); /* * Last time we need to check if this CCB needs to * be aborted. */ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); ahafreeccb(aha, accb); xpt_done(ccb); splx(s); return; } accb->flags = ACCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&aha->pending_ccbs, &ccb->ccb_h, sim_links.le); ccb->ccb_h.timeout_ch = timeout(ahatimeout, (caddr_t)accb, (ccb->ccb_h.timeout * hz) / 1000); /* Tell the adapter about this command */ if (aha->cur_outbox->action_code != AMBO_FREE) { /* * We should never encounter a busy mailbox. * If we do, warn the user, and treat it as * a resource shortage. If the controller is * hung, one of the pending transactions will * time out, causing us to start recovery operations.
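 * (The out mailboxes form a ring: ahanextoutbox(), not shown here,
 * presumably advances cur_outbox and wraps from last_outbox back to
 * out_boxes, while the adapter marks each slot it has consumed
 * AMBO_FREE again. Seeing anything other than AMBO_FREE here thus
 * means the ring is genuinely full.)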
*/ printf("%s: Encountered busy mailbox with %d out of %d " "commands active!!!", aha_name(aha), aha->active_ccbs, aha->max_ccbs); untimeout(ahatimeout, accb, ccb->ccb_h.timeout_ch); if (nseg != 0) bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); ahafreeccb(aha, accb); aha->resource_shortage = TRUE; xpt_freeze_simq(aha->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } paddr = ahaccbvtop(aha, accb); ahautoa24(paddr, aha->cur_outbox->ccb_addr); aha->cur_outbox->action_code = AMBO_START; aha_outb(aha, COMMAND_REG, AOP_START_MBOX); ahanextoutbox(aha); splx(s); } void aha_intr(void *arg) { struct aha_softc *aha; u_int intstat; aha = (struct aha_softc *)arg; while (((intstat = aha_inb(aha, INTSTAT_REG)) & INTR_PENDING) != 0) { if ((intstat & CMD_COMPLETE) != 0) { aha->latched_status = aha_inb(aha, STATUS_REG); aha->command_cmp = TRUE; } aha_outb(aha, CONTROL_REG, RESET_INTR); if ((intstat & IMB_LOADED) != 0) { while (aha->cur_inbox->comp_code != AMBI_FREE) { u_int32_t paddr; paddr = aha_a24tou(aha->cur_inbox->ccb_addr); ahadone(aha, ahaccbptov(aha, paddr), aha->cur_inbox->comp_code); aha->cur_inbox->comp_code = AMBI_FREE; ahanextinbox(aha); } } if ((intstat & SCSI_BUS_RESET) != 0) { ahareset(aha, /*hardreset*/FALSE); } } } static void ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code) { union ccb *ccb; struct ccb_scsiio *csio; ccb = accb->ccb; csio = &accb->ccb->csio; if ((accb->flags & ACCB_ACTIVE) == 0) { printf("%s: ahadone - Attempt to free non-active ACCB %p\n", aha_name(aha), (void *)accb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op); bus_dmamap_unload(aha->buffer_dmat, accb->dmamap); } if (accb == aha->recovery_accb) { /* * The recovery ACCB does not have a CCB associated * with it, so short circuit the normal error handling. * We now traverse our list of pending CCBs and process * any that were terminated by the recovery CCBs action. * We also reinstate timeouts for all remaining, pending, * CCBs. 
*/ struct cam_path *path; struct ccb_hdr *ccb_h; cam_status error; /* Notify all clients that a BDR occurred */ error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(aha->sim), accb->hccb.target, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) xpt_async(AC_SENT_BDR, path, NULL); ccb_h = LIST_FIRST(&aha->pending_ccbs); while (ccb_h != NULL) { struct aha_ccb *pending_accb; pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr; if (pending_accb->hccb.target == accb->hccb.target) { pending_accb->hccb.ahastat = AHASTAT_HA_BDR; ccb_h = LIST_NEXT(ccb_h, sim_links.le); ahadone(aha, pending_accb, AMBI_ERROR); } else { ccb_h->timeout_ch = timeout(ahatimeout, (caddr_t)pending_accb, (ccb_h->timeout * hz) / 1000); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } printf("%s: No longer in timeout\n", aha_name(aha)); return; } untimeout(ahatimeout, accb, ccb->ccb_h.timeout_ch); switch (comp_code) { case AMBI_FREE: printf("%s: ahadone - CCB completed with free status!\n", aha_name(aha)); break; case AMBI_NOT_FOUND: printf("%s: ahadone - CCB Abort failed to find CCB\n", aha_name(aha)); break; case AMBI_ABORT: case AMBI_ERROR: /* An error occurred */ if (accb->hccb.opcode < INITIATOR_CCB_WRESID) csio->resid = 0; else csio->resid = aha_a24tou(accb->hccb.data_len); switch(accb->hccb.ahastat) { case AHASTAT_DATARUN_ERROR: { if (csio->resid <= 0) { csio->ccb_h.status = CAM_DATA_RUN_ERR; break; } /* FALLTHROUGH */ } case AHASTAT_NOERROR: csio->scsi_status = accb->hccb.sdstat; csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; switch(csio->scsi_status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: csio->ccb_h.status |= CAM_AUTOSNS_VALID; /* * The aha writes the sense data at different * offsets based on the scsi cmd len */ bcopy((caddr_t) &accb->hccb.scsi_cdb + accb->hccb.cmd_len, (caddr_t) &csio->sense_data, accb->hccb.sense_len); break; default: break; case SCSI_STATUS_OK: csio->ccb_h.status = CAM_REQ_CMP; break; } break; case AHASTAT_SELTIMEOUT: csio->ccb_h.status = CAM_SEL_TIMEOUT; break; case AHASTAT_UNEXPECTED_BUSFREE: csio->ccb_h.status = CAM_UNEXP_BUSFREE; break; case AHASTAT_INVALID_PHASE: csio->ccb_h.status = CAM_SEQUENCE_FAIL; break; case AHASTAT_INVALID_ACTION_CODE: panic("%s: Invalid Action code", aha_name(aha)); break; case AHASTAT_INVALID_OPCODE: if (accb->hccb.opcode < INITIATOR_CCB_WRESID) panic("%s: Invalid CCB Opcode %x hccb = %p", aha_name(aha), accb->hccb.opcode, &accb->hccb); printf("%s: AHA-1540A detected, compensating\n", aha_name(aha)); aha->ccb_sg_opcode = INITIATOR_SG_CCB; aha->ccb_ccb_opcode = INITIATOR_CCB; xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); csio->ccb_h.status = CAM_REQUEUE_REQ; break; case AHASTAT_LINKED_CCB_LUN_MISMATCH: /* We don't even support linked commands... */ panic("%s: Linked CCB Lun Mismatch", aha_name(aha)); break; case AHASTAT_INVALID_CCB_OR_SG_PARAM: panic("%s: Invalid CCB or SG list", aha_name(aha)); break; case AHASTAT_HA_SCSI_BUS_RESET: if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_CMD_TIMEOUT) csio->ccb_h.status = CAM_SCSI_BUS_RESET; break; case AHASTAT_HA_BDR: if ((accb->flags & ACCB_DEVICE_RESET) == 0) csio->ccb_h.status = CAM_BDR_SENT; else csio->ccb_h.status = CAM_CMD_TIMEOUT; break; } if (csio->ccb_h.status != CAM_REQ_CMP) { xpt_freeze_devq(csio->ccb_h.path, /*count*/1); csio->ccb_h.status |= CAM_DEV_QFRZN; } if ((accb->flags & ACCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; ahafreeccb(aha, accb); xpt_done(ccb); break; case AMBI_OK: /* All completed without incident */ /* XXX DO WE NEED TO COPY SENSE BYTES HERE????
XXX */ /* I don't think so since it works???? */ ccb->ccb_h.status |= CAM_REQ_CMP; if ((accb->flags & ACCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; ahafreeccb(aha, accb); xpt_done(ccb); break; } } static int ahareset(struct aha_softc* aha, int hard_reset) { struct ccb_hdr *ccb_h; u_int status; u_int timeout; u_int8_t reset_type; if (hard_reset != 0) reset_type = HARD_RESET; else reset_type = SOFT_RESET; aha_outb(aha, CONTROL_REG, reset_type); /* Wait 5sec. for Diagnostic start */ timeout = 5 * 10000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & DIAG_ACTIVE) != 0) break; DELAY(100); } if (timeout == 0) { PRVERB(("%s: ahareset - Diagnostic Active failed to " "assert. status = 0x%x\n", aha_name(aha), status)); return (ETIMEDOUT); } /* Wait 10sec. for Diagnostic end */ timeout = 10 * 10000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & DIAG_ACTIVE) == 0) break; DELAY(100); } if (timeout == 0) { panic("%s: ahareset - Diagnostic Active failed to drop. " "status = 0x%x\n", aha_name(aha), status); return (ETIMEDOUT); } /* Wait for the host adapter to become ready or report a failure */ timeout = 10000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & (DIAG_FAIL|HA_READY|DATAIN_REG_READY)) != 0) break; DELAY(100); } if (timeout == 0) { printf("%s: ahareset - Host adapter failed to come ready. " "status = 0x%x\n", aha_name(aha), status); return (ETIMEDOUT); } /* If the diagnostics failed, tell the user */ if ((status & DIAG_FAIL) != 0 || (status & HA_READY) == 0) { printf("%s: ahareset - Adapter failed diagnostics\n", aha_name(aha)); if ((status & DATAIN_REG_READY) != 0) printf("%s: ahareset - Host Adapter Error " "code = 0x%x\n", aha_name(aha), aha_inb(aha, DATAIN_REG)); return (ENXIO); } /* If we've allocated mailboxes, initialize them */ if (aha->init_level > 4) ahainitmboxes(aha); /* If we've attached to the XPT, tell it about the event */ if (aha->path != NULL) xpt_async(AC_BUS_RESET, aha->path, NULL); /* * Perform completion processing for all outstanding CCBs. */ while ((ccb_h = LIST_FIRST(&aha->pending_ccbs)) != NULL) { struct aha_ccb *pending_accb; pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr; pending_accb->hccb.ahastat = AHASTAT_HA_SCSI_BUS_RESET; ahadone(aha, pending_accb, AMBI_ERROR); } return (0); } /* * Send a command to the adapter. */ int aha_cmd(struct aha_softc *aha, aha_op_t opcode, u_int8_t *params, u_int param_len, u_int8_t *reply_data, u_int reply_len, u_int cmd_timeout) { u_int timeout; u_int status; u_int saved_status; u_int intstat; u_int reply_buf_size; int s; int cmd_complete; int error; /* No data returned to start */ reply_buf_size = reply_len; reply_len = 0; intstat = 0; cmd_complete = 0; saved_status = 0; error = 0; /* * All commands except for the "start mailbox" and the "enable * outgoing mailbox read interrupt" commands cannot be issued * while there are pending transactions. Freeze our SIMQ * and wait for all completions to occur if necessary. */ timeout = 100000; s = splcam(); while (LIST_FIRST(&aha->pending_ccbs) != NULL && --timeout) { /* Fire the interrupt handler in case interrupts are blocked */ aha_intr(aha); splx(s); DELAY(100); s = splcam(); } splx(s); if (timeout == 0) { printf("%s: aha_cmd: Timeout waiting for adapter idle\n", aha_name(aha)); return (ETIMEDOUT); } aha->command_cmp = 0; /* * Wait up to 10 sec. for the adapter to become * ready to accept commands. 
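 * (The figure comes from the loop bounds: 100000 iterations with a
 * DELAY(100), i.e. 100us each, is 10 seconds of polling. The 5 and
 * 10 second diagnostic waits in ahareset() above use the same
 * arithmetic with 50000 and 100000 iterations.)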
*/ timeout = 100000; while (--timeout) { status = aha_inb(aha, STATUS_REG); if ((status & HA_READY) != 0 && (status & CMD_REG_BUSY) == 0) break; /* * Throw away any pending data which may be * left over from earlier commands that we * timed out on. */ if ((status & DATAIN_REG_READY) != 0) (void)aha_inb(aha, DATAIN_REG); DELAY(100); } if (timeout == 0) { printf("%s: aha_cmd: Timeout waiting for adapter ready, " "status = 0x%x\n", aha_name(aha), status); return (ETIMEDOUT); } /* * Send the opcode followed by any necessary parameter bytes. */ aha_outb(aha, COMMAND_REG, opcode); /* * Wait for up to 1sec to get the parameter list sent */ timeout = 10000; while (param_len && --timeout) { DELAY(100); s = splcam(); status = aha_inb(aha, STATUS_REG); intstat = aha_inb(aha, INTSTAT_REG); splx(s); if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { saved_status = status; cmd_complete = 1; break; } if (aha->command_cmp != 0) { saved_status = aha->latched_status; cmd_complete = 1; break; } if ((status & DATAIN_REG_READY) != 0) break; if ((status & CMD_REG_BUSY) == 0) { aha_outb(aha, COMMAND_REG, *params++); param_len--; timeout = 10000; } } if (timeout == 0) { printf("%s: aha_cmd: Timeout sending parameters, " "status = 0x%x\n", aha_name(aha), status); error = ETIMEDOUT; } /* * For all other commands, we wait for any output data * and the final command completion interrupt. */ while (cmd_complete == 0 && --cmd_timeout) { s = splcam(); status = aha_inb(aha, STATUS_REG); intstat = aha_inb(aha, INTSTAT_REG); splx(s); if (aha->command_cmp != 0) { cmd_complete = 1; saved_status = aha->latched_status; } else if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { /* * Our poll (in case interrupts are blocked) * saw the CMD_COMPLETE interrupt. */ cmd_complete = 1; saved_status = status; } if ((status & DATAIN_REG_READY) != 0) { u_int8_t data; data = aha_inb(aha, DATAIN_REG); if (reply_len < reply_buf_size) { *reply_data++ = data; } else { printf("%s: aha_cmd - Discarded reply data " "byte for opcode 0x%x\n", aha_name(aha), opcode); } /* * Reset timeout to ensure at least a second * between response bytes. */ cmd_timeout = MAX(cmd_timeout, 10000); reply_len++; } DELAY(100); } if (cmd_timeout == 0) { printf("%s: aha_cmd: Timeout waiting for reply data and " "command complete.\n%s: status = 0x%x, intstat = 0x%x, " "reply_len = %d\n", aha_name(aha), aha_name(aha), status, intstat, reply_len); return (ETIMEDOUT); } /* * Clear any pending interrupts. Block interrupts so our * interrupt handler is not re-entered. */ s = splcam(); aha_intr(aha); splx(s); if (error != 0) return (error); /* * If the command was rejected by the controller, tell the caller. */ if ((saved_status & CMD_INVALID) != 0) { PRVERB(("%s: Invalid Command 0x%x\n", aha_name(aha), opcode)); /* * Some early adapters may not recover properly from * an invalid command. If it appears that the controller * has wedged (i.e. status was not cleared by our interrupt * reset above), perform a soft reset.
*/ DELAY(1000); status = aha_inb(aha, STATUS_REG); if ((status & (CMD_INVALID|STATUS_REG_RSVD|DATAIN_REG_READY| CMD_REG_BUSY|DIAG_FAIL|DIAG_ACTIVE)) != 0 || (status & (HA_READY|INIT_REQUIRED)) != (HA_READY|INIT_REQUIRED)) { ahareset(aha, /*hard_reset*/FALSE); } return (EINVAL); } if (param_len > 0) { /* The controller did not accept the full argument list */ PRVERB(("%s: Controller did not accept full argument list " "(%d > 0)\n", aha_name(aha), param_len)); return (E2BIG); } if (reply_len != reply_buf_size) { /* Too much or too little data received */ PRVERB(("%s: Too much or too little data received (%d != %d)\n", aha_name(aha), reply_len, reply_buf_size)); return (EMSGSIZE); } /* We were successful */ return (0); } static int ahainitmboxes(struct aha_softc *aha) { int error; init_24b_mbox_params_t init_mbox; bzero(aha->in_boxes, sizeof(aha_mbox_in_t) * aha->num_boxes); bzero(aha->out_boxes, sizeof(aha_mbox_out_t) * aha->num_boxes); aha->cur_inbox = aha->in_boxes; aha->last_inbox = aha->in_boxes + aha->num_boxes - 1; aha->cur_outbox = aha->out_boxes; aha->last_outbox = aha->out_boxes + aha->num_boxes - 1; /* Tell the adapter about them */ init_mbox.num_mboxes = aha->num_boxes; ahautoa24(aha->mailbox_physbase, init_mbox.base_addr); error = aha_cmd(aha, AOP_INITIALIZE_MBOX, (u_int8_t *)&init_mbox, /*paramlen*/sizeof(init_mbox), /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); if (error != 0) printf("ahainitmboxes: Initialization command failed\n"); return (error); } /* * Update the XPT's idea of the negotiated transfer * parameters for a particular target. */ static void ahafetchtransinfo(struct aha_softc *aha, struct ccb_trans_settings* cts) { setup_data_t setup_info; u_int target; u_int targ_offset; u_int sync_period; int error; u_int8_t param; targ_syncinfo_t sync_info; target = cts->ccb_h.target_id; targ_offset = (target & 0x7); /* * Inquire Setup Information. This command retrieves * the sync info for older models.
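 * (Worked example for the conversion below: on a board older than
 * the 1542CF with sync_info.period == 2, sync_period becomes
 * 2000 + 500 * 2 = 3000, which scsi_calc_syncparam() then folds
 * into the CAM sync-rate encoding; a zero sync offset means async
 * and bypasses the conversion entirely.)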
*/ param = sizeof(setup_info); error = aha_cmd(aha, AOP_INQUIRE_SETUP_INFO, &param, /*paramlen*/1, (u_int8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { printf("%s: ahafetchtransinfo - Inquire Setup Info Failed %d\n", aha_name(aha), error); return; } sync_info = setup_info.syncinfo[targ_offset]; if (sync_info.sync == 0) cts->sync_offset = 0; else cts->sync_offset = sync_info.offset; cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if (aha->boardid >= BOARD_1542CF) sync_period = 1000; else sync_period = 2000; sync_period += 500 * sync_info.period; /* Convert ns value to standard SCSI sync rate */ if (cts->sync_offset != 0) cts->sync_period = scsi_calc_syncparam(sync_period); else cts->sync_period = 0; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID; xpt_async(AC_TRANSFER_NEG, cts->ccb_h.path, cts); } static void ahamapmboxes(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aha_softc* aha; aha = (struct aha_softc*)arg; aha->mailbox_physbase = segs->ds_addr; } static void ahamapccbs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aha_softc* aha; aha = (struct aha_softc*)arg; aha->aha_ccb_physbase = segs->ds_addr; } static void ahamapsgs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aha_softc* aha; aha = (struct aha_softc*)arg; SLIST_FIRST(&aha->sg_maps)->sg_physaddr = segs->ds_addr; } static void ahapoll(struct cam_sim *sim) { aha_intr(cam_sim_softc(sim)); } static void ahatimeout(void *arg) { struct aha_ccb *accb; union ccb *ccb; struct aha_softc *aha; int s; u_int32_t paddr; accb = (struct aha_ccb *)arg; ccb = accb->ccb; aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr; xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out\n", (void *)accb); s = splcam(); if ((accb->flags & ACCB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out CCB already completed\n", (void *)accb); splx(s); return; } /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends. */ if ((accb->flags & ACCB_DEVICE_RESET) == 0) { struct ccb_hdr *ccb_h; if ((accb->flags & ACCB_RELEASE_SIMQ) == 0) { xpt_freeze_simq(aha->sim, /*count*/1); accb->flags |= ACCB_RELEASE_SIMQ; } ccb_h = LIST_FIRST(&aha->pending_ccbs); while (ccb_h != NULL) { struct aha_ccb *pending_accb; pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr; untimeout(ahatimeout, pending_accb, ccb_h->timeout_ch); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } if ((accb->flags & ACCB_DEVICE_RESET) != 0 || aha->cur_outbox->action_code != AMBO_FREE) { /* * Try a full host adapter/SCSI bus reset. * We do this only if we have already attempted * to clear the condition with a BDR, or we cannot * attempt a BDR for lack of mailbox resources.
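 * (This is the second rung of the recovery ladder: the first timeout
 * takes the else branch below, queueing a BDR through recovery_accb
 * and re-arming the timeout for two seconds. Only when that has
 * already been tried, or no free mailbox slot exists to try it, do
 * we escalate to the full reset.)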
*/ ccb->ccb_h.status = CAM_CMD_TIMEOUT; ahareset(aha, /*hardreset*/TRUE); printf("%s: No longer in timeout\n", aha_name(aha)); } else { /* * Send a Bus Device Reset message: * The target that is holding up the bus may not * be the same as the one that triggered this timeout * (different commands have different timeout lengths), * but we have no way of determining this from our * timeout handler. Our strategy here is to queue a * BDR message to the target of the timed out command. * If this fails, we'll get another timeout 2 seconds * later which will attempt a bus reset. */ accb->flags |= ACCB_DEVICE_RESET; ccb->ccb_h.timeout_ch = timeout(ahatimeout, (caddr_t)accb, 2 * hz); aha->recovery_accb->hccb.opcode = INITIATOR_BUS_DEV_RESET; /* No Data Transfer */ aha->recovery_accb->hccb.datain = TRUE; aha->recovery_accb->hccb.dataout = TRUE; aha->recovery_accb->hccb.ahastat = 0; aha->recovery_accb->hccb.sdstat = 0; aha->recovery_accb->hccb.target = ccb->ccb_h.target_id; /* Tell the adapter about this command */ paddr = ahaccbvtop(aha, aha->recovery_accb); ahautoa24(paddr, aha->cur_outbox->ccb_addr); aha->cur_outbox->action_code = AMBO_START; aha_outb(aha, COMMAND_REG, AOP_START_MBOX); ahanextoutbox(aha); } splx(s); } int aha_detach(struct aha_softc *aha) { xpt_async(AC_LOST_DEVICE, aha->path, NULL); xpt_free_path(aha->path); xpt_bus_deregister(cam_sim_path(aha->sim)); cam_sim_free(aha->sim, /*free_devq*/TRUE); return (0); } Index: head/sys/dev/ahb/ahb.c =================================================================== --- head/sys/dev/ahb/ahb.c (revision 110231) +++ head/sys/dev/ahb/ahb.c (revision 110232) @@ -1,1357 +1,1355 @@ /* * CAM SCSI device driver for the Adaptec 174X SCSI Host adapter * * Copyright (c) 1998 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ccb_ecb_ptr spriv_ptr0 #define ccb_ahb_ptr spriv_ptr1 -#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) - #define ahb_inb(ahb, port) \ bus_space_read_1((ahb)->tag, (ahb)->bsh, port) #define ahb_inl(ahb, port) \ bus_space_read_4((ahb)->tag, (ahb)->bsh, port) #define ahb_outb(ahb, port, value) \ bus_space_write_1((ahb)->tag, (ahb)->bsh, port, value) #define ahb_outl(ahb, port, value) \ bus_space_write_4((ahb)->tag, (ahb)->bsh, port, value) static const char *ahbmatch(eisa_id_t type); static struct ahb_softc *ahballoc(u_long unit, struct resource *res); static void ahbfree(struct ahb_softc *ahb); static int ahbreset(struct ahb_softc *ahb); static void ahbmapecbs(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int ahbxptattach(struct ahb_softc *ahb); static void ahbhandleimmed(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat); static void ahbcalcresid(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb); static __inline void ahbdone(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat); static void ahbintr(void *arg); static bus_dmamap_callback_t ahbexecuteecb; static void ahbaction(struct cam_sim *sim, union ccb *ccb); static void ahbpoll(struct cam_sim *sim); /* Our timeout handler */ static timeout_t ahbtimeout; static __inline struct ecb* ahbecbget(struct ahb_softc *ahb); static __inline void ahbecbfree(struct ahb_softc* ahb, struct ecb* ecb); static __inline u_int32_t ahbecbvtop(struct ahb_softc *ahb, struct ecb *ecb); static __inline struct ecb* ahbecbptov(struct ahb_softc *ahb, u_int32_t ecb_addr); static __inline u_int32_t ahbstatuspaddr(u_int32_t ecb_paddr); static __inline u_int32_t ahbsensepaddr(u_int32_t ecb_paddr); static __inline u_int32_t ahbsgpaddr(u_int32_t ecb_paddr); static __inline void ahbqueuembox(struct ahb_softc *ahb, u_int32_t mboxval, u_int attn_code); static __inline struct ecb* ahbecbget(struct ahb_softc *ahb) { struct ecb* ecb; int s; s = splcam(); if ((ecb = SLIST_FIRST(&ahb->free_ecbs)) != NULL) SLIST_REMOVE_HEAD(&ahb->free_ecbs, links); splx(s); return (ecb); } static __inline void ahbecbfree(struct ahb_softc* ahb, struct ecb* ecb) { int s; s = splcam(); ecb->state = ECB_FREE; SLIST_INSERT_HEAD(&ahb->free_ecbs, ecb, links); splx(s); } static __inline u_int32_t ahbecbvtop(struct ahb_softc *ahb, struct ecb *ecb) { return (ahb->ecb_physbase + (u_int32_t)((caddr_t)ecb - (caddr_t)ahb->ecb_array)); } static __inline struct ecb* ahbecbptov(struct ahb_softc *ahb, u_int32_t ecb_addr) { return (ahb->ecb_array + ((struct ecb*)ecb_addr - (struct ecb*)ahb->ecb_physbase)); } static __inline u_int32_t ahbstatuspaddr(u_int32_t ecb_paddr) { return (ecb_paddr + offsetof(struct ecb, status)); } static __inline u_int32_t ahbsensepaddr(u_int32_t ecb_paddr) { return (ecb_paddr + offsetof(struct ecb, sense)); } static __inline u_int32_t ahbsgpaddr(u_int32_t ecb_paddr) { return (ecb_paddr + offsetof(struct ecb, sg_list)); } static __inline void ahbqueuembox(struct ahb_softc *ahb, u_int32_t mboxval, u_int attn_code) { u_int loopmax = 300; while (--loopmax) { u_int status; status = ahb_inb(ahb, HOSTSTAT); if ((status & (HOSTSTAT_MBOX_EMPTY|HOSTSTAT_BUSY)) == HOSTSTAT_MBOX_EMPTY) break; DELAY(20); } if (loopmax == 0) panic("ahb%ld: adapter not taking commands\n", ahb->unit); ahb_outl(ahb, MBOXOUT0, mboxval); ahb_outb(ahb, ATTN, attn_code); } static const char * ahbmatch(eisa_id_t type) { switch(type & 0xfffffe00) { case EISA_DEVICE_ID_ADAPTEC_1740: return ("Adaptec 174x SCSI host adapter"); break; default: break; } return (NULL); } static int ahbprobe(device_t dev) { const char *desc; u_int32_t iobase; u_int32_t irq; u_int8_t intdef; int shared; desc = 
ahbmatch(eisa_get_id(dev)); if (!desc) return (ENXIO); device_set_desc(dev, desc); iobase = (eisa_get_slot(dev) * EISA_SLOT_SIZE) + AHB_EISA_SLOT_OFFSET; eisa_add_iospace(dev, iobase, AHB_EISA_IOSIZE, RESVADDR_NONE); intdef = inb(INTDEF + iobase); switch (intdef & 0x7) { case INT9: irq = 9; break; case INT10: irq = 10; break; case INT11: irq = 11; break; case INT12: irq = 12; break; case INT14: irq = 14; break; case INT15: irq = 15; break; default: printf("Adaptec 174X at slot %d: illegal " "irq setting %d\n", eisa_get_slot(dev), (intdef & 0x7)); irq = 0; break; } if (irq == 0) return ENXIO; shared = (inb(INTDEF + iobase) & INTLEVEL) ? EISA_TRIGGER_LEVEL : EISA_TRIGGER_EDGE; eisa_add_intr(dev, irq, shared); return 0; } static int ahbattach(device_t dev) { /* * find unit and check we have that many defined */ struct ahb_softc *ahb; struct ecb* next_ecb; struct resource *io = 0; struct resource *irq = 0; int rid; void *ih; rid = 0; io = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1, RF_ACTIVE); if (!io) { device_printf(dev, "No I/O space?!\n"); return ENOMEM; } if ((ahb = ahballoc(device_get_unit(dev), io)) == NULL) { goto error_exit2; } if (ahbreset(ahb) != 0) goto error_exit; rid = 0; irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_ACTIVE); if (!irq) { device_printf(dev, "Can't allocate interrupt\n"); goto error_exit; } /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. */ /* DMA tag for mapping buffers into device visible space. */ /* XXX Should be a child of the EISA bus dma tag */ if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/MAXBSIZE, /*nsegments*/AHB_NSEG, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/BUS_DMA_ALLOCNOW, &ahb->buffer_dmat) != 0) goto error_exit; ahb->init_level++; /* DMA tag for our ccb structures and ha inquiry data */ if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, (AHB_NECB * sizeof(struct ecb)) + sizeof(*ahb->ha_inq_data), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ahb->ecb_dmat) != 0) goto error_exit; ahb->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(ahb->ecb_dmat, (void **)&ahb->ecb_array, BUS_DMA_NOWAIT, &ahb->ecb_dmamap) != 0) goto error_exit; ahb->ha_inq_data = (struct ha_inquiry_data *)&ahb->ecb_array[AHB_NECB]; ahb->init_level++; /* And permanently map them */ bus_dmamap_load(ahb->ecb_dmat, ahb->ecb_dmamap, ahb->ecb_array, AHB_NSEG * sizeof(struct ecb), ahbmapecbs, ahb, /*flags*/0); ahb->init_level++; /* Allocate the buffer dmamaps for each of our ECBs */ bzero(ahb->ecb_array, (AHB_NECB * sizeof(struct ecb)) + sizeof(*ahb->ha_inq_data)); next_ecb = ahb->ecb_array; while (ahb->num_ecbs < AHB_NECB) { u_int32_t ecb_paddr; if (bus_dmamap_create(ahb->buffer_dmat, /*flags*/0, &next_ecb->dmamap)) break; ecb_paddr = ahbecbvtop(ahb, next_ecb); next_ecb->hecb.status_ptr = ahbstatuspaddr(ecb_paddr); next_ecb->hecb.sense_ptr = ahbsensepaddr(ecb_paddr); ahb->num_ecbs++; ahbecbfree(ahb, next_ecb); next_ecb++; } if (ahb->num_ecbs == 0) goto error_exit; ahb->init_level++; /* * Now that we know we own the resources we need, register * our bus with the XPT. 
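 * ahbxptattach() performs the canonical CAM SIM registration
 * sequence, roughly (sketch only, error unwinding omitted;
 * "openings" stands in for the real queue depth):
 *
 *	devq = cam_simq_alloc(openings);
 *	sim = cam_sim_alloc(ahbaction, ahbpoll, "ahb", ahb,
 *	    ahb->unit, 2, openings, devq);
 *	xpt_bus_register(sim, 0);
 *	xpt_create_path(&path, NULL, cam_sim_path(sim),
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 *
 * with each step's failure tearing down the steps before it.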
*/ if (ahbxptattach(ahb)) goto error_exit; /* Enable our interrupt */ bus_setup_intr(dev, irq, INTR_TYPE_CAM|INTR_ENTROPY, ahbintr, ahb, &ih); return (0); error_exit: /* * The board's IRQ line will not be left enabled * if we can't initialize correctly, so it's safe * to release the irq. */ ahbfree(ahb); error_exit2: if (io) bus_release_resource(dev, SYS_RES_IOPORT, 0, io); if (irq) bus_release_resource(dev, SYS_RES_IRQ, 0, irq); return (-1); } static struct ahb_softc * ahballoc(u_long unit, struct resource *res) { struct ahb_softc *ahb; /* * Allocate a storage area for us */ ahb = malloc(sizeof(struct ahb_softc), M_DEVBUF, M_NOWAIT | M_ZERO); if (!ahb) { printf("ahb%ld: cannot malloc!\n", unit); return (NULL); } SLIST_INIT(&ahb->free_ecbs); LIST_INIT(&ahb->pending_ccbs); ahb->unit = unit; ahb->tag = rman_get_bustag(res); ahb->bsh = rman_get_bushandle(res); ahb->disc_permitted = ~0; ahb->tags_permitted = ~0; return (ahb); } static void ahbfree(struct ahb_softc *ahb) { switch (ahb->init_level) { default: case 4: bus_dmamap_unload(ahb->ecb_dmat, ahb->ecb_dmamap); case 3: bus_dmamem_free(ahb->ecb_dmat, ahb->ecb_array, ahb->ecb_dmamap); bus_dmamap_destroy(ahb->ecb_dmat, ahb->ecb_dmamap); case 2: bus_dma_tag_destroy(ahb->ecb_dmat); case 1: bus_dma_tag_destroy(ahb->buffer_dmat); case 0: break; } free(ahb, M_DEVBUF); } /* * reset board; if it doesn't respond, return failure */ static int ahbreset(struct ahb_softc *ahb) { int wait = 1000; /* 1 sec enough? */ int test; if ((ahb_inb(ahb, PORTADDR) & PORTADDR_ENHANCED) == 0) { printf("ahb_reset: Controller not in enhanced mode\n"); return (-1); } ahb_outb(ahb, CONTROL, CNTRL_HARD_RST); DELAY(1000); ahb_outb(ahb, CONTROL, 0); while (--wait) { DELAY(1000); if ((ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_BUSY) == 0) break; } if (wait == 0) { printf("ahbreset: No answer from aha1742 board\n"); return (-1); } if ((test = ahb_inb(ahb, MBOXIN0)) != 0) { printf("ahb_reset: self test failed, val = 0x%x\n", test); return (-1); } while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) { ahb_outb(ahb, CONTROL, CNTRL_CLRINT); DELAY(10000); } return (0); } static void ahbmapecbs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct ahb_softc* ahb; ahb = (struct ahb_softc*)arg; ahb->ecb_physbase = segs->ds_addr; /* * Space for adapter inquiry information is on the * tail of the ecb array. */ ahb->ha_inq_physbase = ahbecbvtop(ahb, &ahb->ecb_array[AHB_NECB]); } static int ahbxptattach(struct ahb_softc *ahb) { struct cam_devq *devq; struct ecb *ecb; u_int i; /* Remember who we are on the SCSI bus */ ahb->scsi_id = ahb_inb(ahb, SCSIDEF) & HSCSIID; /* Use extended translation?? */ ahb->extended_trans = ahb_inb(ahb, RESV1) & EXTENDED_TRANS; /* Fetch adapter inquiry data */ ecb = ahbecbget(ahb); /* Always succeeds - no outstanding commands */ ecb->hecb.opcode = ECBOP_READ_HA_INQDATA; ecb->hecb.flag_word1 = FW1_SUPPRESS_URUN_ERR|FW1_ERR_STATUS_BLK_ONLY; ecb->hecb.data_ptr = ahb->ha_inq_physbase; ecb->hecb.data_len = sizeof(struct ha_inquiry_data); ecb->hecb.sense_ptr = 0; ecb->state = ECB_ACTIVE; /* Tell the adapter about this command */ ahbqueuembox(ahb, ahbecbvtop(ahb, ecb), ATTN_STARTECB|ahb->scsi_id); /* Poll for interrupt completion */ for (i = 1000; ecb->state != ECB_FREE && i != 0; i--) { ahbintr(ahb); DELAY(1000); } ahb->num_ecbs = MIN(ahb->num_ecbs, ahb->ha_inq_data->scsi_data.reserved[1]); printf("ahb%ld: %.8s %s SCSI Adapter, FW Rev. %.4s, ID=%d, %d ECBs\n", ahb->unit, ahb->ha_inq_data->scsi_data.product, (ahb->ha_inq_data->scsi_data.flags & 0x4) ?
"Differential" : "Single Ended", ahb->ha_inq_data->scsi_data.revision, ahb->scsi_id, ahb->num_ecbs); /* Restore sense paddr for future CCB clients */ ecb->hecb.sense_ptr = ahbsensepaddr(ahbecbvtop(ahb, ecb)); ahbecbfree(ahb, ecb); /* * Create the device queue for our SIM. */ devq = cam_simq_alloc(ahb->num_ecbs); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ ahb->sim = cam_sim_alloc(ahbaction, ahbpoll, "ahb", ahb, ahb->unit, 2, ahb->num_ecbs, devq); if (ahb->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } if (xpt_bus_register(ahb->sim, 0) != CAM_SUCCESS) { cam_sim_free(ahb->sim, /*free_devq*/TRUE); return (ENXIO); } if (xpt_create_path(&ahb->path, /*periph*/NULL, cam_sim_path(ahb->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(ahb->sim)); cam_sim_free(ahb->sim, /*free_devq*/TRUE); return (ENXIO); } /* * Allow the board to generate interrupts. */ ahb_outb(ahb, INTDEF, ahb_inb(ahb, INTDEF) | INTEN); return (0); } static void ahbhandleimmed(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat) { struct ccb_hdr *ccb_h; u_int target_id; if (ahb->immed_cmd == 0) { printf("ahb%ld: Immediate Command complete with no " " pending command\n", ahb->unit); return; } target_id = intstat & INTSTAT_TARGET_MASK; ccb_h = LIST_FIRST(&ahb->pending_ccbs); while (ccb_h != NULL) { struct ecb *pending_ecb; union ccb *ccb; pending_ecb = (struct ecb *)ccb_h->ccb_ecb_ptr; ccb = pending_ecb->ccb; ccb_h = LIST_NEXT(ccb_h, sim_links.le); if (ccb->ccb_h.target_id == target_id || target_id == ahb->scsi_id) { untimeout(ahbtimeout, pending_ecb, ccb->ccb_h.timeout_ch); LIST_REMOVE(&ccb->ccb_h, sim_links.le); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) bus_dmamap_unload(ahb->buffer_dmat, pending_ecb->dmamap); if (pending_ecb == ahb->immed_ecb) ccb->ccb_h.status = CAM_CMD_TIMEOUT|CAM_RELEASE_SIMQ; else if (target_id == ahb->scsi_id) ccb->ccb_h.status = CAM_SCSI_BUS_RESET; else ccb->ccb_h.status = CAM_BDR_SENT; ahbecbfree(ahb, pending_ecb); xpt_done(ccb); } else if (ahb->immed_ecb != NULL) { /* Re-instate timeout */ ccb->ccb_h.timeout_ch = timeout(ahbtimeout, (caddr_t)pending_ecb, (ccb->ccb_h.timeout * hz) / 1000); } } if (ahb->immed_ecb != NULL) { ahb->immed_ecb = NULL; printf("ahb%ld: No longer in timeout\n", ahb->unit); } else if (target_id == ahb->scsi_id) printf("ahb%ld: SCSI Bus Reset Delivered\n", ahb->unit); else printf("ahb%ld: Bus Device Reset Delibered to target %d\n", ahb->unit, target_id); ahb->immed_cmd = 0; } static void ahbcalcresid(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb) { if (ecb->status.data_overrun != 0) { /* * Overrun Condition. The hardware doesn't * provide a meaningful byte count in this case * (the residual is always 0). Tell the XPT * layer about the error. */ ccb->ccb_h.status = CAM_DATA_RUN_ERR; } else { ccb->csio.resid = ecb->status.resid_count; if ((ecb->hecb.flag_word1 & FW1_SG_ECB) != 0) { /* * For S/G transfers, the adapter provides a pointer * to the address in the last S/G element used and a * residual for that element. So, we need to sum up * the elements that follow it in order to get a real * residual number. If we have an overrun, the residual * reported will be 0 and we already know that all S/G * segments have been exhausted, so we can skip this * step. 
*/ ahb_sg_t *sg; int num_sg; num_sg = ecb->hecb.data_len / sizeof(ahb_sg_t); /* Find the S/G the adapter was working on */ for (sg = ecb->sg_list; num_sg != 0 && sg->addr != ecb->status.resid_addr; num_sg--, sg++) ; /* Skip it */ num_sg--; sg++; /* Sum the rest */ for (; num_sg != 0; num_sg--, sg++) ccb->csio.resid += sg->len; } /* Underruns are not errors */ ccb->ccb_h.status = CAM_REQ_CMP; } } static void ahbprocesserror(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb) { struct hardware_ecb *hecb; struct ecb_status *status; hecb = &ecb->hecb; status = &ecb->status; switch (status->ha_status) { case HS_OK: ccb->csio.scsi_status = status->scsi_status; if (status->scsi_status != 0) { ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; if (status->sense_stored) { ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.sense_resid = ccb->csio.sense_len - status->sense_len; bcopy(&ecb->sense, &ccb->csio.sense_data, status->sense_len); } } break; case HS_TARGET_NOT_ASSIGNED: ccb->ccb_h.status = CAM_PATH_INVALID; break; case HS_SEL_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case HS_DATA_RUN_ERR: ahbcalcresid(ahb, ecb, ccb); break; case HS_UNEXPECTED_BUSFREE: ccb->ccb_h.status = CAM_UNEXP_BUSFREE; break; case HS_INVALID_PHASE: ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case HS_REQUEST_SENSE_FAILED: ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case HS_TAG_MSG_REJECTED: { struct ccb_trans_settings neg; xpt_print_path(ccb->ccb_h.path); printf("refuses tagged commands. Performing " "non-tagged I/O\n"); neg.flags = 0; neg.valid = CCB_TRANS_TQ_VALID; xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); ahb->tags_permitted &= ~(0x01 << ccb->ccb_h.target_id); ccb->ccb_h.status = CAM_MSG_REJECT_REC; break; } case HS_FIRMWARE_LOAD_REQ: case HS_HARDWARE_ERR: /* * Tell the system that the Adapter * is no longer functional. 
*/ ccb->ccb_h.status = CAM_NO_HBA; break; case HS_CMD_ABORTED_HOST: case HS_CMD_ABORTED_ADAPTER: case HS_ATN_TARGET_FAILED: case HS_SCSI_RESET_ADAPTER: case HS_SCSI_RESET_INCOMING: ccb->ccb_h.status = CAM_SCSI_BUS_RESET; break; case HS_INVALID_ECB_PARAM: printf("ahb%ld: opcode 0x%02x, flag_word1 0x%02x, flag_word2 0x%02x\n", ahb->unit, hecb->opcode, hecb->flag_word1, hecb->flag_word2); ccb->ccb_h.status = CAM_SCSI_BUS_RESET; break; case HS_DUP_TCB_RECEIVED: case HS_INVALID_OPCODE: case HS_INVALID_CMD_LINK: case HS_PROGRAM_CKSUM_ERROR: panic("ahb%ld: Can't happen host status %x occurred", ahb->unit, status->ha_status); break; } if (ccb->ccb_h.status != CAM_REQ_CMP) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } } static void ahbdone(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat) { struct ecb *ecb; union ccb *ccb; ecb = ahbecbptov(ahb, mbox); if ((ecb->state & ECB_ACTIVE) == 0) panic("ecb not active"); ccb = ecb->ccb; if (ccb != NULL) { untimeout(ahbtimeout, ecb, ccb->ccb_h.timeout_ch); LIST_REMOVE(&ccb->ccb_h, sim_links.le); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(ahb->buffer_dmat, ecb->dmamap, op); bus_dmamap_unload(ahb->buffer_dmat, ecb->dmamap); } if ((intstat & INTSTAT_MASK) == INTSTAT_ECB_OK) { ccb->ccb_h.status = CAM_REQ_CMP; ccb->csio.resid = 0; } else { ahbprocesserror(ahb, ecb, ccb); } ahbecbfree(ahb, ecb); xpt_done(ccb); } else { /* Non CCB Command */ if ((intstat & INTSTAT_MASK) != INTSTAT_ECB_OK) { printf("ahb%ld: Command 0x%x Failed %x:%x:%x\n", ahb->unit, ecb->hecb.opcode, *((u_int16_t*)&ecb->status), ecb->status.ha_status, ecb->status.resid_count); } /* Client owns this ECB and will release it. */ } } /* * Catch an interrupt from the adaptor */ static void ahbintr(void *arg) { struct ahb_softc *ahb; u_int intstat; u_int32_t mbox; ahb = (struct ahb_softc *)arg; while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) { /* * Fetch information about this interrupt. */ intstat = ahb_inb(ahb, INTSTAT); mbox = ahb_inl(ahb, MBOXIN0); /* * Reset interrupt latch.
*/ ahb_outb(ahb, CONTROL, CNTRL_CLRINT); /* * Process the completed operation */ switch (intstat & INTSTAT_MASK) { case INTSTAT_ECB_OK: case INTSTAT_ECB_CMPWRETRY: case INTSTAT_ECB_CMPWERR: ahbdone(ahb, mbox, intstat); break; case INTSTAT_AEN_OCCURED: if ((intstat & INTSTAT_TARGET_MASK) == ahb->scsi_id) { /* Bus Reset */ xpt_print_path(ahb->path); switch (mbox) { case HS_SCSI_RESET_ADAPTER: printf("Host Adapter Initiated " "Bus Reset occurred\n"); break; case HS_SCSI_RESET_INCOMING: printf("Bus Reset Initiated " "by another device occurred\n"); break; } /* Notify the XPT */ xpt_async(AC_BUS_RESET, ahb->path, NULL); break; } printf("Unsupported initiator selection AEN occurred\n"); break; case INTSTAT_IMMED_OK: case INTSTAT_IMMED_ERR: ahbhandleimmed(ahb, mbox, intstat); break; case INTSTAT_HW_ERR: panic("Unrecoverable hardware Error Occurred\n"); } } } static void ahbexecuteecb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct ecb *ecb; union ccb *ccb; struct ahb_softc *ahb; u_int32_t ecb_paddr; int s; ecb = (struct ecb *)arg; ccb = ecb->ccb; ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr; if (error != 0) { if (error != EFBIG) printf("ahb%ld: Unexpected error 0x%x returned from " "bus_dmamap_load\n", ahb->unit, error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } ahbecbfree(ahb, ecb); xpt_done(ccb); return; } ecb_paddr = ahbecbvtop(ahb, ecb); if (nseg != 0) { ahb_sg_t *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ sg = ecb->sg_list; while (dm_segs < end_seg) { sg->addr = dm_segs->ds_addr; sg->len = dm_segs->ds_len; sg++; dm_segs++; } if (nseg > 1) { ecb->hecb.flag_word1 |= FW1_SG_ECB; ecb->hecb.data_ptr = ahbsgpaddr(ecb_paddr); ecb->hecb.data_len = sizeof(ahb_sg_t) * nseg; } else { ecb->hecb.data_ptr = ecb->sg_list->addr; ecb->hecb.data_len = ecb->sg_list->len; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { /* ecb->hecb.flag_word2 |= FW2_DATA_DIR_IN; */ op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } /* ecb->hecb.flag_word2 |= FW2_CHECK_DATA_DIR; */ bus_dmamap_sync(ahb->buffer_dmat, ecb->dmamap, op); } else { ecb->hecb.data_ptr = 0; ecb->hecb.data_len = 0; } s = splcam(); /* * Last time we need to check if this CCB needs to * be aborted. */ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(ahb->buffer_dmat, ecb->dmamap); ahbecbfree(ahb, ecb); xpt_done(ccb); splx(s); return; } ecb->state = ECB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&ahb->pending_ccbs, &ccb->ccb_h, sim_links.le); /* Tell the adapter about this command */ ahbqueuembox(ahb, ecb_paddr, ATTN_STARTECB|ccb->ccb_h.target_id); ccb->ccb_h.timeout_ch = timeout(ahbtimeout, (caddr_t)ecb, (ccb->ccb_h.timeout * hz) / 1000); splx(s); } static void ahbaction(struct cam_sim *sim, union ccb *ccb) { struct ahb_softc *ahb; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahbaction\n")); ahb = (struct ahb_softc *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { struct ecb *ecb; struct hardware_ecb *hecb; /* * get an ecb to use.
*/ if ((ecb = ahbecbget(ahb)) == NULL) { /* Should never occur */ panic("Failed to get an ecb"); } /* * So we can find the ECB when an abort is requested */ ecb->ccb = ccb; ccb->ccb_h.ccb_ecb_ptr = ecb; ccb->ccb_h.ccb_ahb_ptr = ahb; /* * Put all the arguments for the xfer in the ecb */ hecb = &ecb->hecb; hecb->opcode = ECBOP_INITIATOR_SCSI_CMD; hecb->flag_word1 = FW1_AUTO_REQUEST_SENSE | FW1_ERR_STATUS_BLK_ONLY; hecb->flag_word2 = ccb->ccb_h.target_lun | FW2_NO_RETRY_ON_BUSY; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { hecb->flag_word2 |= FW2_TAG_ENB | ((ccb->csio.tag_action & 0x3) << FW2_TAG_TYPE_SHIFT); } if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) hecb->flag_word2 |= FW2_DISABLE_DISC; hecb->sense_len = ccb->csio.sense_len; hecb->cdb_len = ccb->csio.cdb_len; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { bcopy(ccb->csio.cdb_io.cdb_ptr, hecb->cdb, hecb->cdb_len); } else { /* I guess I could map it in... */ ccb->ccb_h.status = CAM_REQ_INVALID; ahbecbfree(ahb, ecb); xpt_done(ccb); return; } } else { bcopy(ccb->csio.cdb_io.cdb_bytes, hecb->cdb, hecb->cdb_len); } /* * If we have any data to send with this command, * map it into bus space. */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { /* * We've been given a pointer * to a single buffer. */ if ((ccb->ccb_h.flags & CAM_DATA_PHYS)==0) { int s; int error; s = splsoftvm(); error = bus_dmamap_load( ahb->buffer_dmat, ecb->dmamap, ccb->csio.data_ptr, ccb->csio.dxfer_len, ahbexecuteecb, ecb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ xpt_freeze_simq(ahb->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } splx(s); } else { struct bus_dma_segment seg; /* Pointer to physical buffer */ seg.ds_addr = (bus_addr_t)ccb->csio.data_ptr; seg.ds_len = ccb->csio.dxfer_len; ahbexecuteecb(ecb, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) panic("ahbaction - Physical segment " "pointers unsupported"); if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) panic("ahbaction - Virtual segment " "addresses unsupported"); /* Just use the segments provided */ segs = (struct bus_dma_segment *) ccb->csio.data_ptr; ahbexecuteecb(ecb, segs, ccb->csio.sglist_cnt, 0); } } else { ahbexecuteecb(ecb, NULL, 0, 0); } break; } case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts; u_int target_mask; cts = &ccb->cts; target_mask = 0x01 << ccb->ccb_h.target_id; if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { cts->flags = 0; if ((ahb->disc_permitted & target_mask) != 0) cts->flags |= CCB_TRANS_DISC_ENB; if ((ahb->tags_permitted & target_mask) != 0) cts->flags |= CCB_TRANS_TAG_ENB; cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; cts->sync_period = 25; /* 10MHz */ if (cts->sync_period != 0) cts->sync_offset = 15; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID |
CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; } xpt_done(ccb); break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { int i; int s; s = splcam(); ahb->immed_cmd = IMMED_RESET; ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id); /* Poll for interrupt completion */ for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--) { DELAY(1000); ahbintr(cam_sim_softc(sim)); } splx(s); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb > 1024 && (ahb->extended_trans != 0)) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { int i; ahb->immed_cmd = IMMED_RESET; ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id); /* Poll for interrupt completion */ for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--) DELAY(1000); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = ahb->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } #if 0 /* Need these??? */ case XPT_IMMED_NOTIFY: /* Notify Host Target driver of event */ case XPT_NOTIFY_ACK: /* Acknowledgement of event */ #endif default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void ahbpoll(struct cam_sim *sim) { ahbintr(cam_sim_softc(sim)); } static void ahbtimeout(void *arg) { struct ecb *ecb; union ccb *ccb; struct ahb_softc *ahb; int s; ecb = (struct ecb *)arg; ccb = ecb->ccb; ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr; xpt_print_path(ccb->ccb_h.path); printf("ECB %p - timed out\n", (void *)ecb); s = splcam(); if ((ecb->state & ECB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("ECB %p - timed out ECB already completed\n", (void *)ecb); splx(s); return; } /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends.
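* In outline (a reading of the recovery code that follows, not a spec): the first timeout queues a Bus Device Reset to the timed-out command's target; if that BDR does not clear the condition, a later timeout escalates to a SCSI bus reset, and a further timeout falls back to a full adapter reset via ahbreset().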
*/ if ((ecb->state & ECB_DEVICE_RESET) == 0) { struct ccb_hdr *ccb_h; if ((ecb->state & ECB_RELEASE_SIMQ) == 0) { xpt_freeze_simq(ahb->sim, /*count*/1); ecb->state |= ECB_RELEASE_SIMQ; } ccb_h = LIST_FIRST(&ahb->pending_ccbs); while (ccb_h != NULL) { struct ecb *pending_ecb; pending_ecb = (struct ecb *)ccb_h->ccb_ecb_ptr; untimeout(ahbtimeout, pending_ecb, ccb_h->timeout_ch); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } /* Store for our interrupt handler */ ahb->immed_ecb = ecb; /* * Send a Bus Device Reset message: * The target that is holding up the bus may not * be the same as the one that triggered this timeout * (different commands have different timeout lengths), * but we have no way of determining this from our * timeout handler. Our strategy here is to queue a * BDR message to the target of the timed out command. * If this fails, we'll get another timeout 2 seconds * later which will attempt a bus reset. */ xpt_print_path(ccb->ccb_h.path); printf("Queuing BDR\n"); ecb->state |= ECB_DEVICE_RESET; ccb->ccb_h.timeout_ch = timeout(ahbtimeout, (caddr_t)ecb, 2 * hz); ahb->immed_cmd = IMMED_RESET; ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id); } else if ((ecb->state & ECB_SCSIBUS_RESET) == 0) { /* * Try a SCSI bus reset. We do this only if we * have already attempted to clear the condition with a BDR. */ xpt_print_path(ccb->ccb_h.path); printf("Attempting SCSI Bus reset\n"); ecb->state |= ECB_SCSIBUS_RESET; ccb->ccb_h.timeout_ch = timeout(ahbtimeout, (caddr_t)ecb, 2 * hz); ahb->immed_cmd = IMMED_RESET; ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id); } else { /* Bring out the hammer... */ ahbreset(ahb); /* Simulate the reset complete interrupt */ ahbhandleimmed(ahb, 0, ahb->scsi_id|INTSTAT_IMMED_OK); } splx(s); } static device_method_t ahb_eisa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ahbprobe), DEVMETHOD(device_attach, ahbattach), { 0, 0 } }; static driver_t ahb_eisa_driver = { "ahb", ahb_eisa_methods, 1, /* unused */ }; static devclass_t ahb_devclass; DRIVER_MODULE(ahb, eisa, ahb_eisa_driver, ahb_devclass, 0, 0); Index: head/sys/dev/buslogic/bt.c =================================================================== --- head/sys/dev/buslogic/bt.c (revision 110231) +++ head/sys/dev/buslogic/bt.c (revision 110232) @@ -1,2460 +1,2456 @@ /* * Generic driver for the BusLogic MultiMaster SCSI host adapters * Product specific probe and attach routines can be found in: * sys/dev/buslogic/bt_isa.c BT-54X, BT-445 cards * sys/dev/buslogic/bt_mca.c BT-64X, SDC3211B, SDC3211F * sys/dev/buslogic/bt_eisa.c BT-74X, BT-75x cards, SDC3222F * sys/dev/buslogic/bt_pci.c BT-946, BT-948, BT-956, BT-958 cards * * Copyright (c) 1998, 1999 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Special thanks to Leonard N. Zubkoff for writing such a complete and * well documented Mylex/BusLogic MultiMaster driver for Linux. Support * in this driver for the wide range of MultiMaster controllers and * firmware revisions, with their otherwise undocumented quirks, would not * have been possible without his efforts. */ #include #include #include #include #include #include /* * XXX It appears that BusLogic PCI adapters go out to lunch if you * attempt to perform memory mapped I/O. */ #if 0 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include -#ifndef MAX -#define MAX(a, b) ((a) > (b) ? (a) : (b)) -#endif - /* MailBox Management functions */ static __inline void btnextinbox(struct bt_softc *bt); static __inline void btnextoutbox(struct bt_softc *bt); static __inline void btnextinbox(struct bt_softc *bt) { if (bt->cur_inbox == bt->last_inbox) bt->cur_inbox = bt->in_boxes; else bt->cur_inbox++; } static __inline void btnextoutbox(struct bt_softc *bt) { if (bt->cur_outbox == bt->last_outbox) bt->cur_outbox = bt->out_boxes; else bt->cur_outbox++; } /* CCB Management functions */ static __inline u_int32_t btccbvtop(struct bt_softc *bt, struct bt_ccb *bccb); static __inline struct bt_ccb* btccbptov(struct bt_softc *bt, u_int32_t ccb_addr); static __inline u_int32_t btsensepaddr(struct bt_softc *bt, struct bt_ccb *bccb); static __inline struct scsi_sense_data* btsensevaddr(struct bt_softc *bt, struct bt_ccb *bccb); static __inline u_int32_t btccbvtop(struct bt_softc *bt, struct bt_ccb *bccb) { return (bt->bt_ccb_physbase + (u_int32_t)((caddr_t)bccb - (caddr_t)bt->bt_ccb_array)); } static __inline struct bt_ccb * btccbptov(struct bt_softc *bt, u_int32_t ccb_addr) { return (bt->bt_ccb_array + ((struct bt_ccb*)(uintptr_t)ccb_addr - (struct bt_ccb*)(uintptr_t)bt->bt_ccb_physbase)); } static __inline u_int32_t btsensepaddr(struct bt_softc *bt, struct bt_ccb *bccb) { u_int index; index = (u_int)(bccb - bt->bt_ccb_array); return (bt->sense_buffers_physbase + (index * sizeof(struct scsi_sense_data))); } static __inline struct scsi_sense_data * btsensevaddr(struct bt_softc *bt, struct bt_ccb *bccb) { u_int index; index = (u_int)(bccb - bt->bt_ccb_array); return (bt->sense_buffers + index); } static __inline struct bt_ccb* btgetccb(struct bt_softc *bt); static __inline void btfreeccb(struct bt_softc *bt, struct bt_ccb *bccb); static void btallocccbs(struct bt_softc *bt); static bus_dmamap_callback_t btexecuteccb; static void btdone(struct bt_softc *bt, struct bt_ccb *bccb, bt_mbi_comp_code_t comp_code); /* Host adapter command functions */ static int btreset(struct bt_softc* bt, int hard_reset); /* Initialization functions */ static int btinitmboxes(struct bt_softc *bt); static bus_dmamap_callback_t btmapmboxes; static bus_dmamap_callback_t btmapccbs; static bus_dmamap_callback_t btmapsgs; /* Transfer Negotiation Functions */ static void btfetchtransinfo(struct bt_softc *bt, struct ccb_trans_settings
*cts); /* CAM SIM entry points */ #define ccb_bccb_ptr spriv_ptr0 #define ccb_bt_ptr spriv_ptr1 static void btaction(struct cam_sim *sim, union ccb *ccb); static void btpoll(struct cam_sim *sim); /* Our timeout handler */ timeout_t bttimeout; u_long bt_unit = 0; /* * XXX * Do our own re-probe protection until a configuration * manager can do it for us. This ensures that we don't * reprobe a card already found by the EISA or PCI probes. */ struct bt_isa_port bt_isa_ports[] = { { 0x130, 0, 4 }, { 0x134, 0, 5 }, { 0x230, 0, 2 }, { 0x234, 0, 3 }, { 0x330, 0, 0 }, { 0x334, 0, 1 } }; /* * I/O ports listed in the order enumerated by the * card for certain op codes. */ u_int16_t bt_board_ports[] = { 0x330, 0x334, 0x230, 0x234, 0x130, 0x134 }; /* Exported functions */ void bt_init_softc(device_t dev, struct resource *port, struct resource *irq, struct resource *drq) { struct bt_softc *bt = device_get_softc(dev); SLIST_INIT(&bt->free_bt_ccbs); LIST_INIT(&bt->pending_ccbs); SLIST_INIT(&bt->sg_maps); bt->dev = dev; bt->unit = device_get_unit(dev); bt->port = port; bt->irq = irq; bt->drq = drq; bt->tag = rman_get_bustag(port); bt->bsh = rman_get_bushandle(port); } void bt_free_softc(device_t dev) { struct bt_softc *bt = device_get_softc(dev); switch (bt->init_level) { default: case 11: bus_dmamap_unload(bt->sense_dmat, bt->sense_dmamap); case 10: bus_dmamem_free(bt->sense_dmat, bt->sense_buffers, bt->sense_dmamap); case 9: bus_dma_tag_destroy(bt->sense_dmat); case 8: { struct sg_map_node *sg_map; while ((sg_map = SLIST_FIRST(&bt->sg_maps))!= NULL) { SLIST_REMOVE_HEAD(&bt->sg_maps, links); bus_dmamap_unload(bt->sg_dmat, sg_map->sg_dmamap); bus_dmamem_free(bt->sg_dmat, sg_map->sg_vaddr, sg_map->sg_dmamap); free(sg_map, M_DEVBUF); } bus_dma_tag_destroy(bt->sg_dmat); } case 7: bus_dmamap_unload(bt->ccb_dmat, bt->ccb_dmamap); case 6: bus_dmamem_free(bt->ccb_dmat, bt->bt_ccb_array, bt->ccb_dmamap); bus_dmamap_destroy(bt->ccb_dmat, bt->ccb_dmamap); case 5: bus_dma_tag_destroy(bt->ccb_dmat); case 4: bus_dmamap_unload(bt->mailbox_dmat, bt->mailbox_dmamap); case 3: bus_dmamem_free(bt->mailbox_dmat, bt->in_boxes, bt->mailbox_dmamap); bus_dmamap_destroy(bt->mailbox_dmat, bt->mailbox_dmamap); case 2: bus_dma_tag_destroy(bt->buffer_dmat); case 1: bus_dma_tag_destroy(bt->mailbox_dmat); case 0: break; } } int bt_port_probe(device_t dev, struct bt_probe_info *info) { struct bt_softc *bt = device_get_softc(dev); config_data_t config_data; int error; /* See if there is really a card present */ if (bt_probe(dev) || bt_fetch_adapter_info(dev)) return(1); /* * Determine our IRQ, and DMA settings and * export them to the configuration system. 
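* (The irq/drq values recorded in bt_probe_info below are presumably consumed by the bus-specific front ends, e.g. bt_isa.c, to reserve the matching interrupt and DMA resources before attach.)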
*/ error = bt_cmd(bt, BOP_INQUIRE_CONFIG, NULL, /*parmlen*/0, (u_int8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { printf("bt_port_probe: Could not determine IRQ or DMA " "settings for adapter.\n"); return (1); } if (bt->model[0] == '5') { /* DMA settings only make sense for ISA cards */ switch (config_data.dma_chan) { case DMA_CHAN_5: info->drq = 5; break; case DMA_CHAN_6: info->drq = 6; break; case DMA_CHAN_7: info->drq = 7; break; default: printf("bt_port_probe: Invalid DMA setting " "detected for adapter.\n"); return (1); } } else { /* VL/EISA/PCI DMA */ info->drq = -1; } switch (config_data.irq) { case IRQ_9: case IRQ_10: case IRQ_11: case IRQ_12: case IRQ_14: case IRQ_15: info->irq = ffs(config_data.irq) + 8; break; default: printf("bt_port_probe: Invalid IRQ setting %x " "detected for adapter.\n", config_data.irq); return (1); } return (0); } /* * Probe the adapter and verify that the card is a BusLogic. */ int bt_probe(device_t dev) { struct bt_softc *bt = device_get_softc(dev); esetup_info_data_t esetup_info; u_int status; u_int intstat; u_int geometry; int error; u_int8_t param; /* * See if the three I/O ports look reasonable. * Touch the minimal number of registers in the * failure case. */ status = bt_inb(bt, STATUS_REG); if ((status == 0) || (status & (DIAG_ACTIVE|CMD_REG_BUSY| STATUS_REG_RSVD|CMD_INVALID)) != 0) { if (bootverbose) device_printf(dev, "Failed Status Reg Test - %x\n", status); return (ENXIO); } intstat = bt_inb(bt, INTSTAT_REG); if ((intstat & INTSTAT_REG_RSVD) != 0) { device_printf(dev, "Failed Intstat Reg Test\n"); return (ENXIO); } geometry = bt_inb(bt, GEOMETRY_REG); if (geometry == 0xFF) { if (bootverbose) device_printf(dev, "Failed Geometry Reg Test\n"); return (ENXIO); } /* * Looking good so far. Final test is to reset the * adapter and attempt to fetch the extended setup * information. This should filter out all 1542 cards. */ if ((error = btreset(bt, /*hard_reset*/TRUE)) != 0) { if (bootverbose) device_printf(dev, "Failed Reset\n"); return (ENXIO); } param = sizeof(esetup_info); error = bt_cmd(bt, BOP_INQUIRE_ESETUP_INFO, &param, /*parmlen*/1, (u_int8_t*)&esetup_info, sizeof(esetup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { return (ENXIO); } return (0); } /* * Pull the board's setup information and record it in our softc. */ int bt_fetch_adapter_info(device_t dev) { struct bt_softc *bt = device_get_softc(dev); board_id_data_t board_id; esetup_info_data_t esetup_info; config_data_t config_data; int error; u_int8_t length_param; /* First record the firmware version */ error = bt_cmd(bt, BOP_INQUIRE_BOARD_ID, NULL, /*parmlen*/0, (u_int8_t*)&board_id, sizeof(board_id), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(dev, "bt_fetch_adapter_info - Failed Get Board Info\n"); return (error); } bt->firmware_ver[0] = board_id.firmware_rev_major; bt->firmware_ver[1] = '.'; bt->firmware_ver[2] = board_id.firmware_rev_minor; bt->firmware_ver[3] = '\0'; /* * Depending on the firmware major and minor version, * we may be able to fetch additional minor version info.
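* For example (illustrative values only): a board reporting major '4' and minor '2' starts out as "4.2"; a successful third-digit inquiry can extend that to "4.22", which is exactly the string the tagged-queuing test further down compares against with strcmp().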
*/ if (bt->firmware_ver[0] > '0') { error = bt_cmd(bt, BOP_INQUIRE_FW_VER_3DIG, NULL, /*parmlen*/0, (u_int8_t*)&bt->firmware_ver[3], 1, DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(dev, "bt_fetch_adapter_info - Failed Get " "Firmware 3rd Digit\n"); return (error); } if (bt->firmware_ver[3] == ' ') bt->firmware_ver[3] = '\0'; bt->firmware_ver[4] = '\0'; } if (strcmp(bt->firmware_ver, "3.3") >= 0) { error = bt_cmd(bt, BOP_INQUIRE_FW_VER_4DIG, NULL, /*parmlen*/0, (u_int8_t*)&bt->firmware_ver[4], 1, DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(dev, "bt_fetch_adapter_info - Failed Get " "Firmware 4th Digit\n"); return (error); } if (bt->firmware_ver[4] == ' ') bt->firmware_ver[4] = '\0'; bt->firmware_ver[5] = '\0'; } /* * Some boards do not handle the "recently documented" * Inquire Board Model Number command correctly or do not give * exact information. Use the Firmware and Extended Setup * information in these cases to come up with the right answer. * The major firmware revision number indicates: * * 5.xx BusLogic "W" Series Host Adapters: * BT-948/958/958D * 4.xx BusLogic "C" Series Host Adapters: * BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF * 3.xx BusLogic "S" Series Host Adapters: * BT-747S/747D/757S/757D/445S/545S/542D * BT-542B/742A (revision H) * 2.xx BusLogic "A" Series Host Adapters: * BT-542B/742A (revision G and below) * 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter */ length_param = sizeof(esetup_info); error = bt_cmd(bt, BOP_INQUIRE_ESETUP_INFO, &length_param, /*parmlen*/1, (u_int8_t*)&esetup_info, sizeof(esetup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { return (error); } bt->bios_addr = esetup_info.bios_addr << 12; bt->mailbox_addrlimit = BUS_SPACE_MAXADDR; if (esetup_info.bus_type == 'A' && bt->firmware_ver[0] == '2') { snprintf(bt->model, sizeof(bt->model), "542B"); } else if (esetup_info.bus_type == 'E' && bt->firmware_ver[0] == '2') { /* * The 742A seems to object if its mailboxes are * allocated above the 16MB mark. */ bt->mailbox_addrlimit = BUS_SPACE_MAXADDR_24BIT; snprintf(bt->model, sizeof(bt->model), "742A"); } else if (esetup_info.bus_type == 'E' && bt->firmware_ver[0] == '0') { /* AMI FastDisk EISA Series 441 0.x */ snprintf(bt->model, sizeof(bt->model), "747A"); } else { ha_model_data_t model_data; int i; length_param = sizeof(model_data); error = bt_cmd(bt, BOP_INQUIRE_MODEL, &length_param, 1, (u_int8_t*)&model_data, sizeof(model_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(dev, "bt_fetch_adapter_info - Failed Inquire " "Model Number\n"); return (error); } for (i = 0; i < sizeof(model_data.ascii_model); i++) { bt->model[i] = model_data.ascii_model[i]; if (bt->model[i] == ' ') break; } bt->model[i] = '\0'; } bt->level_trigger_ints = esetup_info.level_trigger_ints ? 1 : 0; /* SG element limits */ bt->max_sg = esetup_info.max_sg; /* Set feature flags */ bt->wide_bus = esetup_info.wide_bus; bt->diff_bus = esetup_info.diff_bus; bt->ultra_scsi = esetup_info.ultra_scsi; if ((bt->firmware_ver[0] == '5') || (bt->firmware_ver[0] == '4' && bt->wide_bus)) bt->extended_lun = TRUE; bt->strict_rr = (strcmp(bt->firmware_ver, "3.31") >= 0); bt->extended_trans = ((bt_inb(bt, GEOMETRY_REG) & EXTENDED_TRANSLATION) != 0); /* * Determine max CCB count and whether tagged queuing is * available based on controller type. Tagged queuing * only works on 'W' series adapters, 'C' series adapters * with firmware of rev 4.42 and higher, and 'S' series * adapters with firmware of rev 3.35 and higher. 
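* (Note: the code below gates 'C' series tagged queuing on firmware "4.22", which appears to disagree with the 4.42 figure quoted here; one of the two is presumably a typo.)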
The * maximum CCB counts are as follows: * * 192 BT-948/958/958D * 100 BT-946C/956C/956CD/747C/757C/757CD/445C * 50 BT-545C/540CF * 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A */ if (bt->firmware_ver[0] == '5') { bt->max_ccbs = 192; bt->tag_capable = TRUE; } else if (bt->firmware_ver[0] == '4') { if (bt->model[0] == '5') bt->max_ccbs = 50; else bt->max_ccbs = 100; bt->tag_capable = (strcmp(bt->firmware_ver, "4.22") >= 0); } else { bt->max_ccbs = 30; if (bt->firmware_ver[0] == '3' && (strcmp(bt->firmware_ver, "3.35") >= 0)) bt->tag_capable = TRUE; else bt->tag_capable = FALSE; } if (bt->tag_capable != FALSE) bt->tags_permitted = ALL_TARGETS; /* Determine Sync/Wide/Disc settings */ if (bt->firmware_ver[0] >= '4') { auto_scsi_data_t auto_scsi_data; fetch_lram_params_t fetch_lram_params; int error; /* * These settings are stored in the * AutoSCSI data in LRAM of 'W' and 'C' * adapters. */ fetch_lram_params.offset = AUTO_SCSI_BYTE_OFFSET; fetch_lram_params.response_len = sizeof(auto_scsi_data); error = bt_cmd(bt, BOP_FETCH_LRAM, (u_int8_t*)&fetch_lram_params, sizeof(fetch_lram_params), (u_int8_t*)&auto_scsi_data, sizeof(auto_scsi_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(dev, "bt_fetch_adapter_info - Failed " "Get Auto SCSI Info\n"); return (error); } bt->disc_permitted = auto_scsi_data.low_disc_permitted | (auto_scsi_data.high_disc_permitted << 8); bt->sync_permitted = auto_scsi_data.low_sync_permitted | (auto_scsi_data.high_sync_permitted << 8); bt->fast_permitted = auto_scsi_data.low_fast_permitted | (auto_scsi_data.high_fast_permitted << 8); bt->ultra_permitted = auto_scsi_data.low_ultra_permitted | (auto_scsi_data.high_ultra_permitted << 8); bt->wide_permitted = auto_scsi_data.low_wide_permitted | (auto_scsi_data.high_wide_permitted << 8); if (bt->ultra_scsi == FALSE) bt->ultra_permitted = 0; if (bt->wide_bus == FALSE) bt->wide_permitted = 0; } else { /* * 'S' and 'A' series have this information in the setup * information structure. */ setup_data_t setup_info; length_param = sizeof(setup_info); error = bt_cmd(bt, BOP_INQUIRE_SETUP_INFO, &length_param, /*paramlen*/1, (u_int8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(dev, "bt_fetch_adapter_info - Failed " "Get Setup Info\n"); return (error); } if (setup_info.initiate_sync != 0) { bt->sync_permitted = ALL_TARGETS; if (bt->model[0] == '7') { if (esetup_info.sync_neg10MB != 0) bt->fast_permitted = ALL_TARGETS; if (strcmp(bt->model, "757") == 0) bt->wide_permitted = ALL_TARGETS; } } bt->disc_permitted = ALL_TARGETS; } /* We need as many mailboxes as we can have ccbs */ bt->num_boxes = bt->max_ccbs; /* Determine our SCSI ID */ error = bt_cmd(bt, BOP_INQUIRE_CONFIG, NULL, /*parmlen*/0, (u_int8_t*)&config_data, sizeof(config_data), DEFAULT_CMD_TIMEOUT); if (error != 0) { device_printf(dev, "bt_fetch_adapter_info - Failed Get Config\n"); return (error); } bt->scsi_id = config_data.scsi_id; return (0); } /* * Start the board, ready for normal operation */ int bt_init(device_t dev) { struct bt_softc *bt = device_get_softc(dev); /* Announce the Adapter */ device_printf(dev, "BT-%s FW Rev. %s ", bt->model, bt->firmware_ver); if (bt->ultra_scsi != 0) printf("Ultra "); if (bt->wide_bus != 0) printf("Wide "); else printf("Narrow "); if (bt->diff_bus != 0) printf("Diff "); printf("SCSI Host Adapter, SCSI ID %d, %d CCBs\n", bt->scsi_id, bt->max_ccbs); /* * Create our DMA tags. 
These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for mapping buffers into device visible space. */ if (bus_dma_tag_create(bt->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/MAXBSIZE, /*nsegments*/BT_NSEG, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/BUS_DMA_ALLOCNOW, &bt->buffer_dmat) != 0) { goto error_exit; } bt->init_level++; /* DMA tag for our mailboxes */ if (bus_dma_tag_create(bt->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/bt->mailbox_addrlimit, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, bt->num_boxes * (sizeof(bt_mbox_in_t) + sizeof(bt_mbox_out_t)), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &bt->mailbox_dmat) != 0) { goto error_exit; } bt->init_level++; /* Allocation for our mailboxes */ if (bus_dmamem_alloc(bt->mailbox_dmat, (void **)&bt->out_boxes, BUS_DMA_NOWAIT, &bt->mailbox_dmamap) != 0) { goto error_exit; } bt->init_level++; /* And permanently map them */ bus_dmamap_load(bt->mailbox_dmat, bt->mailbox_dmamap, bt->out_boxes, bt->num_boxes * (sizeof(bt_mbox_in_t) + sizeof(bt_mbox_out_t)), btmapmboxes, bt, /*flags*/0); bt->init_level++; bt->in_boxes = (bt_mbox_in_t *)&bt->out_boxes[bt->num_boxes]; btinitmboxes(bt); /* DMA tag for our ccb structures */ if (bus_dma_tag_create(bt->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, bt->max_ccbs * sizeof(struct bt_ccb), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &bt->ccb_dmat) != 0) { goto error_exit; } bt->init_level++; /* Allocation for our ccbs */ if (bus_dmamem_alloc(bt->ccb_dmat, (void **)&bt->bt_ccb_array, BUS_DMA_NOWAIT, &bt->ccb_dmamap) != 0) { goto error_exit; } bt->init_level++; /* And permanently map them */ bus_dmamap_load(bt->ccb_dmat, bt->ccb_dmamap, bt->bt_ccb_array, bt->max_ccbs * sizeof(struct bt_ccb), btmapccbs, bt, /*flags*/0); bt->init_level++; /* DMA tag for our S/G structures. We allocate in page sized chunks */ if (bus_dma_tag_create(bt->parent_dmat, /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &bt->sg_dmat) != 0) { goto error_exit; } bt->init_level++; /* Perform initial CCB allocation */ bzero(bt->bt_ccb_array, bt->max_ccbs * sizeof(struct bt_ccb)); btallocccbs(bt); if (bt->num_ccbs == 0) { device_printf(dev, "bt_init - Unable to allocate initial ccbs\n"); goto error_exit; } /* * Initialization succeeded; return success (to probe). */ return 0; error_exit: return (ENXIO); } int bt_attach(device_t dev) { struct bt_softc *bt = device_get_softc(dev); int tagged_dev_openings; struct cam_devq *devq; int error; /* * We reserve 1 ccb for error recovery, so don't * tell the XPT about it. */ if (bt->tag_capable != 0) tagged_dev_openings = bt->max_ccbs - 1; else tagged_dev_openings = 0; /* * Create the device queue for our SIM.
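* The devq is sized to max_ccbs - 1 because one CCB is held back for error recovery (see the recovery_bccb handling in btallocccbs()), so the XPT must never be offered the full CCB count.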
*/ devq = cam_simq_alloc(bt->max_ccbs - 1); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry */ bt->sim = cam_sim_alloc(btaction, btpoll, "bt", bt, bt->unit, 2, tagged_dev_openings, devq); if (bt->sim == NULL) { cam_simq_free(devq); return (ENOMEM); } if (xpt_bus_register(bt->sim, 0) != CAM_SUCCESS) { cam_sim_free(bt->sim, /*free_devq*/TRUE); return (ENXIO); } if (xpt_create_path(&bt->path, /*periph*/NULL, cam_sim_path(bt->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(bt->sim)); cam_sim_free(bt->sim, /*free_devq*/TRUE); return (ENXIO); } /* * Setup interrupt. */ error = bus_setup_intr(dev, bt->irq, INTR_TYPE_CAM|INTR_ENTROPY, bt_intr, bt, &bt->ih); if (error) { device_printf(dev, "bus_setup_intr() failed: %d\n", error); return (error); } return (0); } int bt_check_probed_iop(u_int ioport) { u_int i; for (i = 0; i < BT_NUM_ISAPORTS; i++) { if (bt_isa_ports[i].addr == ioport) { if (bt_isa_ports[i].probed != 0) return (1); else { return (0); } } } return (1); } void bt_mark_probed_bio(isa_compat_io_t port) { if (port < BIO_DISABLED) bt_mark_probed_iop(bt_board_ports[port]); } void bt_mark_probed_iop(u_int ioport) { u_int i; for (i = 0; i < BT_NUM_ISAPORTS; i++) { if (ioport == bt_isa_ports[i].addr) { bt_isa_ports[i].probed = 1; break; } } } void bt_find_probe_range(int ioport, int *port_index, int *max_port_index) { if (ioport > 0) { int i; for (i = 0;i < BT_NUM_ISAPORTS; i++) if (ioport <= bt_isa_ports[i].addr) break; if ((i >= BT_NUM_ISAPORTS) || (ioport != bt_isa_ports[i].addr)) { printf( "bt_isa_probe: Invalid baseport of 0x%x specified.\n" "bt_isa_probe: Nearest valid baseport is 0x%x.\n" "bt_isa_probe: Failing probe.\n", ioport, (i < BT_NUM_ISAPORTS) ? bt_isa_ports[i].addr : bt_isa_ports[BT_NUM_ISAPORTS - 1].addr); *port_index = *max_port_index = -1; return; } *port_index = *max_port_index = bt_isa_ports[i].bio; } else { *port_index = 0; *max_port_index = BT_NUM_ISAPORTS - 1; } } int bt_iop_from_bio(isa_compat_io_t bio_index) { if (bio_index >= 0 && bio_index < BT_NUM_ISAPORTS) return (bt_board_ports[bio_index]); return (-1); } static void btallocccbs(struct bt_softc *bt) { struct bt_ccb *next_ccb; struct sg_map_node *sg_map; bus_addr_t physaddr; bt_sg_t *segs; int newcount; int i; if (bt->num_ccbs >= bt->max_ccbs) /* Can't allocate any more */ return; next_ccb = &bt->bt_ccb_array[bt->num_ccbs]; sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); if (sg_map == NULL) goto error_exit; /* Allocate S/G space for the next batch of CCBS */ if (bus_dmamem_alloc(bt->sg_dmat, (void **)&sg_map->sg_vaddr, BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { free(sg_map, M_DEVBUF); goto error_exit; } SLIST_INSERT_HEAD(&bt->sg_maps, sg_map, links); bus_dmamap_load(bt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, PAGE_SIZE, btmapsgs, bt, /*flags*/0); segs = sg_map->sg_vaddr; physaddr = sg_map->sg_physaddr; newcount = (PAGE_SIZE / (BT_NSEG * sizeof(bt_sg_t))); for (i = 0; bt->num_ccbs < bt->max_ccbs && i < newcount; i++) { int error; next_ccb->sg_list = segs; next_ccb->sg_list_phys = physaddr; next_ccb->flags = BCCB_FREE; error = bus_dmamap_create(bt->buffer_dmat, /*flags*/0, &next_ccb->dmamap); if (error != 0) break; SLIST_INSERT_HEAD(&bt->free_bt_ccbs, next_ccb, links); segs += BT_NSEG; physaddr += (BT_NSEG * sizeof(bt_sg_t)); next_ccb++; bt->num_ccbs++; } /* Reserve a CCB for error recovery */ if (bt->recovery_bccb == NULL) { bt->recovery_bccb = SLIST_FIRST(&bt->free_bt_ccbs); SLIST_REMOVE_HEAD(&bt->free_bt_ccbs, links); } if 
(SLIST_FIRST(&bt->free_bt_ccbs) != NULL) return; error_exit: device_printf(bt->dev, "Can't malloc BCCBs\n"); } static __inline void btfreeccb(struct bt_softc *bt, struct bt_ccb *bccb) { int s; s = splcam(); if ((bccb->flags & BCCB_ACTIVE) != 0) LIST_REMOVE(&bccb->ccb->ccb_h, sim_links.le); if (bt->resource_shortage != 0 && (bccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) { bccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; bt->resource_shortage = FALSE; } bccb->flags = BCCB_FREE; SLIST_INSERT_HEAD(&bt->free_bt_ccbs, bccb, links); bt->active_ccbs--; splx(s); } static __inline struct bt_ccb* btgetccb(struct bt_softc *bt) { struct bt_ccb* bccb; int s; s = splcam(); if ((bccb = SLIST_FIRST(&bt->free_bt_ccbs)) != NULL) { SLIST_REMOVE_HEAD(&bt->free_bt_ccbs, links); bt->active_ccbs++; } else { btallocccbs(bt); bccb = SLIST_FIRST(&bt->free_bt_ccbs); if (bccb != NULL) { SLIST_REMOVE_HEAD(&bt->free_bt_ccbs, links); bt->active_ccbs++; } } splx(s); return (bccb); } static void btaction(struct cam_sim *sim, union ccb *ccb) { struct bt_softc *bt; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("btaction\n")); bt = (struct bt_softc *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ { struct bt_ccb *bccb; struct bt_hccb *hccb; /* * get a bccb to use. */ if ((bccb = btgetccb(bt)) == NULL) { int s; s = splcam(); bt->resource_shortage = TRUE; splx(s); xpt_freeze_simq(bt->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } hccb = &bccb->hccb; /* * So we can find the BCCB when an abort is requested */ bccb->ccb = ccb; ccb->ccb_h.ccb_bccb_ptr = bccb; ccb->ccb_h.ccb_bt_ptr = bt; /* * Put all the arguments for the xfer in the bccb */ hccb->target_id = ccb->ccb_h.target_id; hccb->target_lun = ccb->ccb_h.target_lun; hccb->btstat = 0; hccb->sdstat = 0; if (ccb->ccb_h.func_code == XPT_SCSI_IO) { struct ccb_scsiio *csio; struct ccb_hdr *ccbh; csio = &ccb->csio; ccbh = &csio->ccb_h; hccb->opcode = INITIATOR_CCB_WRESID; hccb->datain = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0; hccb->dataout =(ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0; hccb->cmd_len = csio->cdb_len; if (hccb->cmd_len > sizeof(hccb->scsi_cdb)) { ccb->ccb_h.status = CAM_REQ_INVALID; btfreeccb(bt, bccb); xpt_done(ccb); return; } hccb->sense_len = csio->sense_len; if ((ccbh->flags & CAM_TAG_ACTION_VALID) != 0 && ccb->csio.tag_action != CAM_TAG_ACTION_NONE) { hccb->tag_enable = TRUE; hccb->tag_type = (ccb->csio.tag_action & 0x3); } else { hccb->tag_enable = FALSE; hccb->tag_type = 0; } if ((ccbh->flags & CAM_CDB_POINTER) != 0) { if ((ccbh->flags & CAM_CDB_PHYS) == 0) { bcopy(csio->cdb_io.cdb_ptr, hccb->scsi_cdb, hccb->cmd_len); } else { /* I guess I could map it in... */ ccbh->status = CAM_REQ_INVALID; btfreeccb(bt, bccb); xpt_done(ccb); return; } } else { bcopy(csio->cdb_io.cdb_bytes, hccb->scsi_cdb, hccb->cmd_len); } /* If need be, bounce our sense buffer */ if (bt->sense_buffers != NULL) { hccb->sense_addr = btsensepaddr(bt, bccb); } else { hccb->sense_addr = vtophys(&csio->sense_data); } /* * If we have any data to send with this command, * map it into bus space. */ /* Only use S/G if there is a transfer */ if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { /* * We've been given a pointer * to a single buffer. 
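* bus_dmamap_load() may invoke btexecuteccb() synchronously, or defer it and return EINPROGRESS (e.g. when bounce pages must be reserved); the simq freeze below covers the deferred case so that command ordering is preserved while the mapping is outstanding.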
*/ if ((ccbh->flags & CAM_DATA_PHYS)==0) { int s; int error; s = splsoftvm(); error = bus_dmamap_load( bt->buffer_dmat, bccb->dmamap, csio->data_ptr, csio->dxfer_len, btexecuteccb, bccb, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain * ordering, freeze the * controller queue * until our mapping is * returned. */ xpt_freeze_simq(bt->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } splx(s); } else { struct bus_dma_segment seg; /* Pointer to physical buffer */ seg.ds_addr = (bus_addr_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; btexecuteccb(bccb, &seg, 1, 0); } } else { struct bus_dma_segment *segs; if ((ccbh->flags & CAM_DATA_PHYS) != 0) panic("btaction - Physical " "segment pointers " "unsupported"); if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) panic("btaction - Virtual " "segment addresses " "unsupported"); /* Just use the segments provided */ segs = (struct bus_dma_segment *) csio->data_ptr; btexecuteccb(bccb, segs, csio->sglist_cnt, 0); } } else { btexecuteccb(bccb, NULL, 0, 0); } } else { hccb->opcode = INITIATOR_BUS_DEV_RESET; /* No data transfer */ hccb->datain = TRUE; hccb->dataout = TRUE; hccb->cmd_len = 0; hccb->sense_len = 0; hccb->tag_enable = FALSE; hccb->tag_type = 0; btexecuteccb(bccb, NULL, 0, 0); } break; } case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_TRAN_SETTINGS: { /* XXX Implement */ ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts; u_int target_mask; cts = &ccb->cts; target_mask = 0x01 << ccb->ccb_h.target_id; #ifdef CAM_NEW_TRAN_CODE if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if ((bt->disc_permitted & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((bt->tags_permitted & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; if ((bt->ultra_permitted & target_mask) != 0) spi->sync_period = 12; else if ((bt->fast_permitted & target_mask) != 0) spi->sync_period = 25; else if ((bt->sync_permitted & target_mask) != 0) spi->sync_period = 50; else spi->sync_period = 0; if (spi->sync_period != 0) spi->sync_offset = 15; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if ((bt->wide_permitted & target_mask) != 0) spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; else spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else scsi->valid = 0; } else { #else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { cts->flags = 0; if ((bt->disc_permitted & target_mask) != 0) cts->flags |= CCB_TRANS_DISC_ENB; if ((bt->tags_permitted & target_mask) != 0) cts->flags |= CCB_TRANS_TAG_ENB; if ((bt->wide_permitted & target_mask) != 0) cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; else cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; if 
((bt->ultra_permitted & target_mask) != 0) cts->sync_period = 12; else if ((bt->fast_permitted & target_mask) != 0) cts->sync_period = 25; else if ((bt->sync_permitted & target_mask) != 0) cts->sync_period = 50; else cts->sync_period = 0; if (cts->sync_period != 0) cts->sync_offset = 15; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID | CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; } else { #endif btfetchtransinfo(bt, cts); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u_int32_t size_mb; u_int32_t secs_per_cylinder; ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); if (size_mb >= 1024 && (bt->extended_trans != 0)) { if (size_mb >= 2048) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 128; ccg->secs_per_track = 32; } } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { btreset(bt, /*hardreset*/TRUE); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; if (bt->tag_capable != 0) cpi->hba_inquiry |= PI_TAG_ABLE; if (bt->wide_bus != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = bt->wide_bus ? 15 : 7; cpi->max_lun = 7; cpi->initiator_id = bt->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "BusLogic", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; #ifdef CAM_NEW_TRAN_CODE cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; #endif xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } static void btexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { struct bt_ccb *bccb; union ccb *ccb; struct bt_softc *bt; int s; bccb = (struct bt_ccb *)arg; ccb = bccb->ccb; bt = (struct bt_softc *)ccb->ccb_h.ccb_bt_ptr; if (error != 0) { if (error != EFBIG) device_printf(bt->dev, "Unexpected error 0x%x returned from " "bus_dmamap_load\n", error); if (ccb->ccb_h.status == CAM_REQ_INPROG) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; } btfreeccb(bt, bccb); xpt_done(ccb); return; } if (nseg != 0) { bt_sg_t *sg; bus_dma_segment_t *end_seg; bus_dmasync_op_t op; end_seg = dm_segs + nseg; /* Copy the segments into our SG list */ sg = bccb->sg_list; while (dm_segs < end_seg) { sg->len = dm_segs->ds_len; sg->addr = dm_segs->ds_addr; sg++; dm_segs++; } if (nseg > 1) { bccb->hccb.opcode = INITIATOR_SG_CCB_WRESID; bccb->hccb.data_len = sizeof(bt_sg_t) * nseg; bccb->hccb.data_addr = bccb->sg_list_phys; } else { bccb->hccb.data_len = bccb->sg_list->len; bccb->hccb.data_addr = bccb->sg_list->addr; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE;
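/* Pre-transfer sync: BUS_DMASYNC_PREREAD prepares the buffer for a device-to-host transfer, while BUS_DMASYNC_PREWRITE flushes any CPU writes before a host-to-device transfer (standard bus_dma usage). */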
bus_dmamap_sync(bt->buffer_dmat, bccb->dmamap, op); } else { bccb->hccb.opcode = INITIATOR_CCB; bccb->hccb.data_len = 0; bccb->hccb.data_addr = 0; } s = splcam(); /* * Last chance to check whether this CCB needs to * be aborted. */ if (ccb->ccb_h.status != CAM_REQ_INPROG) { if (nseg != 0) bus_dmamap_unload(bt->buffer_dmat, bccb->dmamap); btfreeccb(bt, bccb); xpt_done(ccb); splx(s); return; } bccb->flags = BCCB_ACTIVE; ccb->ccb_h.status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&bt->pending_ccbs, &ccb->ccb_h, sim_links.le); ccb->ccb_h.timeout_ch = timeout(bttimeout, (caddr_t)bccb, (ccb->ccb_h.timeout * hz) / 1000); /* Tell the adapter about this command */ bt->cur_outbox->ccb_addr = btccbvtop(bt, bccb); if (bt->cur_outbox->action_code != BMBO_FREE) { /* * We should never encounter a busy mailbox. * If we do, warn the user, and treat it as * a resource shortage. If the controller is * hung, one of the pending transactions will * timeout causing us to start recovery operations. */ device_printf(bt->dev, "Encountered busy mailbox with %d out of %d " "commands active!!!\n", bt->active_ccbs, bt->max_ccbs); untimeout(bttimeout, bccb, ccb->ccb_h.timeout_ch); if (nseg != 0) bus_dmamap_unload(bt->buffer_dmat, bccb->dmamap); btfreeccb(bt, bccb); bt->resource_shortage = TRUE; xpt_freeze_simq(bt->sim, /*count*/1); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); return; } bt->cur_outbox->action_code = BMBO_START; bt_outb(bt, COMMAND_REG, BOP_START_MBOX); btnextoutbox(bt); splx(s); } void bt_intr(void *arg) { struct bt_softc *bt; u_int intstat; bt = (struct bt_softc *)arg; while (((intstat = bt_inb(bt, INTSTAT_REG)) & INTR_PENDING) != 0) { if ((intstat & CMD_COMPLETE) != 0) { bt->latched_status = bt_inb(bt, STATUS_REG); bt->command_cmp = TRUE; } bt_outb(bt, CONTROL_REG, RESET_INTR); if ((intstat & IMB_LOADED) != 0) { while (bt->cur_inbox->comp_code != BMBI_FREE) { btdone(bt, btccbptov(bt, bt->cur_inbox->ccb_addr), bt->cur_inbox->comp_code); bt->cur_inbox->comp_code = BMBI_FREE; btnextinbox(bt); } } if ((intstat & SCSI_BUS_RESET) != 0) { btreset(bt, /*hardreset*/FALSE); } } } static void btdone(struct bt_softc *bt, struct bt_ccb *bccb, bt_mbi_comp_code_t comp_code) { union ccb *ccb; struct ccb_scsiio *csio; ccb = bccb->ccb; csio = &bccb->ccb->csio; if ((bccb->flags & BCCB_ACTIVE) == 0) { device_printf(bt->dev, "btdone - Attempt to free non-active BCCB %p\n", (void *)bccb); return; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(bt->buffer_dmat, bccb->dmamap, op); bus_dmamap_unload(bt->buffer_dmat, bccb->dmamap); } if (bccb == bt->recovery_bccb) { /* * The recovery BCCB does not have a CCB associated * with it, so short circuit the normal error handling. * We now traverse our list of pending CCBs and process * any that were terminated by the recovery CCB's action. * We also reinstate timeouts for all remaining pending * CCBs.
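* Concretely (per the loop below): pending CCBs addressed to the reset target are completed through btdone() with BTSTAT_HA_BDR, while CCBs for other targets simply have their timeout handlers re-armed.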
*/ struct cam_path *path; struct ccb_hdr *ccb_h; cam_status error; /* Notify all clients that a BDR occurred */ error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(bt->sim), bccb->hccb.target_id, CAM_LUN_WILDCARD); if (error == CAM_REQ_CMP) xpt_async(AC_SENT_BDR, path, NULL); ccb_h = LIST_FIRST(&bt->pending_ccbs); while (ccb_h != NULL) { struct bt_ccb *pending_bccb; pending_bccb = (struct bt_ccb *)ccb_h->ccb_bccb_ptr; if (pending_bccb->hccb.target_id == bccb->hccb.target_id) { pending_bccb->hccb.btstat = BTSTAT_HA_BDR; ccb_h = LIST_NEXT(ccb_h, sim_links.le); btdone(bt, pending_bccb, BMBI_ERROR); } else { ccb_h->timeout_ch = timeout(bttimeout, (caddr_t)pending_bccb, (ccb_h->timeout * hz) / 1000); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } device_printf(bt->dev, "No longer in timeout\n"); return; } untimeout(bttimeout, bccb, ccb->ccb_h.timeout_ch); switch (comp_code) { case BMBI_FREE: device_printf(bt->dev, "btdone - CCB completed with free status!\n"); break; case BMBI_NOT_FOUND: device_printf(bt->dev, "btdone - CCB Abort failed to find CCB\n"); break; case BMBI_ABORT: case BMBI_ERROR: if (bootverbose) { printf("bt: ccb %p - error %x occurred. " "btstat = %x, sdstat = %x\n", (void *)bccb, comp_code, bccb->hccb.btstat, bccb->hccb.sdstat); } /* An error occurred */ switch(bccb->hccb.btstat) { case BTSTAT_DATARUN_ERROR: if (bccb->hccb.data_len == 0) { /* * At least firmware 4.22 does this * for a QUEUE FULL condition. */ bccb->hccb.sdstat = SCSI_STATUS_QUEUE_FULL; } else if (bccb->hccb.data_len < 0) { csio->ccb_h.status = CAM_DATA_RUN_ERR; break; } /* FALLTHROUGH */ case BTSTAT_NOERROR: case BTSTAT_LINKED_CMD_COMPLETE: case BTSTAT_LINKED_CMD_FLAG_COMPLETE: case BTSTAT_DATAUNDERUN_ERROR: csio->scsi_status = bccb->hccb.sdstat; csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; switch(csio->scsi_status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: csio->ccb_h.status |= CAM_AUTOSNS_VALID; /* Bounce sense back if necessary */ if (bt->sense_buffers != NULL) { csio->sense_data = *btsensevaddr(bt, bccb); } break; default: break; case SCSI_STATUS_OK: csio->ccb_h.status = CAM_REQ_CMP; break; } csio->resid = bccb->hccb.data_len; break; case BTSTAT_SELTIMEOUT: csio->ccb_h.status = CAM_SEL_TIMEOUT; break; case BTSTAT_UNEXPECTED_BUSFREE: csio->ccb_h.status = CAM_UNEXP_BUSFREE; break; case BTSTAT_INVALID_PHASE: csio->ccb_h.status = CAM_SEQUENCE_FAIL; break; case BTSTAT_INVALID_ACTION_CODE: panic("%s: Invalid Action code", bt_name(bt)); break; case BTSTAT_INVALID_OPCODE: panic("%s: Invalid CCB Opcode", bt_name(bt)); break; case BTSTAT_LINKED_CCB_LUN_MISMATCH: /* We don't even support linked commands... */ panic("%s: Linked CCB Lun Mismatch", bt_name(bt)); break; case BTSTAT_INVALID_CCB_OR_SG_PARAM: panic("%s: Invalid CCB or SG list", bt_name(bt)); break; case BTSTAT_AUTOSENSE_FAILED: csio->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case BTSTAT_TAGGED_MSG_REJECTED: { struct ccb_trans_settings neg; #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi; neg.protocol = PROTO_SCSI; neg.protocol_version = SCSI_REV_2; neg.transport = XPORT_SPI; neg.transport_version = 2; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = 0; #else neg.flags = 0; neg.valid = CCB_TRANS_TQ_VALID; #endif xpt_print_path(csio->ccb_h.path); printf("refuses tagged commands.
Performing " "non-tagged I/O\n"); xpt_setup_ccb(&neg.ccb_h, csio->ccb_h.path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, csio->ccb_h.path, &neg); bt->tags_permitted &= ~(0x01 << csio->ccb_h.target_id); csio->ccb_h.status = CAM_MSG_REJECT_REC; break; } case BTSTAT_UNSUPPORTED_MSG_RECEIVED: /* * XXX You would think that this is * a recoverable error... Hmmm. */ csio->ccb_h.status = CAM_REQ_CMP_ERR; break; case BTSTAT_HA_SOFTWARE_ERROR: case BTSTAT_HA_WATCHDOG_ERROR: case BTSTAT_HARDWARE_FAILURE: /* Hardware reset ??? Can we recover ??? */ csio->ccb_h.status = CAM_NO_HBA; break; case BTSTAT_TARGET_IGNORED_ATN: case BTSTAT_OTHER_SCSI_BUS_RESET: case BTSTAT_HA_SCSI_BUS_RESET: if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_CMD_TIMEOUT) csio->ccb_h.status = CAM_SCSI_BUS_RESET; break; case BTSTAT_HA_BDR: if ((bccb->flags & BCCB_DEVICE_RESET) == 0) csio->ccb_h.status = CAM_BDR_SENT; else csio->ccb_h.status = CAM_CMD_TIMEOUT; break; case BTSTAT_INVALID_RECONNECT: case BTSTAT_ABORT_QUEUE_GENERATED: csio->ccb_h.status = CAM_REQ_TERMIO; break; case BTSTAT_SCSI_PERROR_DETECTED: csio->ccb_h.status = CAM_UNCOR_PARITY; break; } if (csio->ccb_h.status != CAM_REQ_CMP) { xpt_freeze_devq(csio->ccb_h.path, /*count*/1); csio->ccb_h.status |= CAM_DEV_QFRZN; } if ((bccb->flags & BCCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; btfreeccb(bt, bccb); xpt_done(ccb); break; case BMBI_OK: /* All completed without incident */ ccb->ccb_h.status |= CAM_REQ_CMP; if ((bccb->flags & BCCB_RELEASE_SIMQ) != 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; btfreeccb(bt, bccb); xpt_done(ccb); break; } } static int btreset(struct bt_softc* bt, int hard_reset) { struct ccb_hdr *ccb_h; u_int status; u_int timeout; u_int8_t reset_type; if (hard_reset != 0) reset_type = HARD_RESET; else reset_type = SOFT_RESET; bt_outb(bt, CONTROL_REG, reset_type); /* Wait 5sec. for Diagnostic start */ timeout = 5 * 10000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & DIAG_ACTIVE) != 0) break; DELAY(100); } if (timeout == 0) { if (bootverbose) printf("%s: btreset - Diagnostic Active failed to " "assert. status = 0x%x\n", bt_name(bt), status); return (ETIMEDOUT); } /* Wait 10sec. for Diagnostic end */ timeout = 10 * 10000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & DIAG_ACTIVE) == 0) break; DELAY(100); } if (timeout == 0) { panic("%s: btreset - Diagnostic Active failed to drop. " "status = 0x%x\n", bt_name(bt), status); return (ETIMEDOUT); } /* Wait for the host adapter to become ready or report a failure */ timeout = 10000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & (DIAG_FAIL|HA_READY|DATAIN_REG_READY)) != 0) break; DELAY(100); } if (timeout == 0) { printf("%s: btreset - Host adapter failed to come ready. " "status = 0x%x\n", bt_name(bt), status); return (ETIMEDOUT); } /* If the diagnostics failed, tell the user */ if ((status & DIAG_FAIL) != 0 || (status & HA_READY) == 0) { printf("%s: btreset - Adapter failed diagnostics\n", bt_name(bt)); if ((status & DATAIN_REG_READY) != 0) printf("%s: btreset - Host Adapter Error code = 0x%x\n", bt_name(bt), bt_inb(bt, DATAIN_REG)); return (ENXIO); } /* If we've allocated mailboxes, initialize them */ if (bt->init_level > 4) btinitmboxes(bt); /* If we've attached to the XPT, tell it about the event */ if (bt->path != NULL) xpt_async(AC_BUS_RESET, bt->path, NULL); /* * Perform completion processing for all outstanding CCBs. 
*/ while ((ccb_h = LIST_FIRST(&bt->pending_ccbs)) != NULL) { struct bt_ccb *pending_bccb; pending_bccb = (struct bt_ccb *)ccb_h->ccb_bccb_ptr; pending_bccb->hccb.btstat = BTSTAT_HA_SCSI_BUS_RESET; btdone(bt, pending_bccb, BMBI_ERROR); } return (0); } /* * Send a command to the adapter. */ int bt_cmd(struct bt_softc *bt, bt_op_t opcode, u_int8_t *params, u_int param_len, u_int8_t *reply_data, u_int reply_len, u_int cmd_timeout) { u_int timeout; u_int status; u_int saved_status; u_int intstat; u_int reply_buf_size; int s; int cmd_complete; int error; /* No data returned to start */ reply_buf_size = reply_len; reply_len = 0; intstat = 0; cmd_complete = 0; saved_status = 0; error = 0; bt->command_cmp = 0; /* * Wait up to 10 sec. for the adapter to become * ready to accept commands. */ timeout = 100000; while (--timeout) { status = bt_inb(bt, STATUS_REG); if ((status & HA_READY) != 0 && (status & CMD_REG_BUSY) == 0) break; /* * Throw away any pending data which may be * left over from earlier commands that we * timed out on. */ if ((status & DATAIN_REG_READY) != 0) (void)bt_inb(bt, DATAIN_REG); DELAY(100); } if (timeout == 0) { printf("%s: bt_cmd: Timeout waiting for adapter ready, " "status = 0x%x\n", bt_name(bt), status); return (ETIMEDOUT); } /* * Send the opcode followed by any necessary parameter bytes. */ bt_outb(bt, COMMAND_REG, opcode); /* * Wait for up to 1sec for each byte of the * parameter list to be sent. */ timeout = 10000; while (param_len && --timeout) { DELAY(100); s = splcam(); status = bt_inb(bt, STATUS_REG); intstat = bt_inb(bt, INTSTAT_REG); splx(s); if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { saved_status = status; cmd_complete = 1; break; } if (bt->command_cmp != 0) { saved_status = bt->latched_status; cmd_complete = 1; break; } if ((status & DATAIN_REG_READY) != 0) break; if ((status & CMD_REG_BUSY) == 0) { bt_outb(bt, COMMAND_REG, *params++); param_len--; timeout = 10000; } } if (timeout == 0) { printf("%s: bt_cmd: Timeout sending parameters, " "status = 0x%x\n", bt_name(bt), status); cmd_complete = 1; saved_status = status; error = ETIMEDOUT; } /* * Wait for the command to complete. */ while (cmd_complete == 0 && --cmd_timeout) { s = splcam(); status = bt_inb(bt, STATUS_REG); intstat = bt_inb(bt, INTSTAT_REG); /* * It may be that this command was issued with * controller interrupts disabled. We'll never * get to our command if an incoming mailbox * interrupt is pending, so take care of completed * mailbox commands by calling our interrupt handler. */ if ((intstat & (INTR_PENDING|IMB_LOADED)) == (INTR_PENDING|IMB_LOADED)) bt_intr(bt); splx(s); if (bt->command_cmp != 0) { /* * Our interrupt handler saw CMD_COMPLETE * status before we did. */ cmd_complete = 1; saved_status = bt->latched_status; } else if ((intstat & (INTR_PENDING|CMD_COMPLETE)) == (INTR_PENDING|CMD_COMPLETE)) { /* * Our poll (in case interrupts are blocked) * saw the CMD_COMPLETE interrupt. */ cmd_complete = 1; saved_status = status; } else if (opcode == BOP_MODIFY_IO_ADDR && (status & CMD_REG_BUSY) == 0) { /* * The BOP_MODIFY_IO_ADDR does not issue a CMD_COMPLETE, * but it should update the status register. So, we * consider this command complete when the CMD_REG_BUSY * status clears.
*/ saved_status = status; cmd_complete = 1; } else if ((status & DATAIN_REG_READY) != 0) { u_int8_t data; data = bt_inb(bt, DATAIN_REG); if (reply_len < reply_buf_size) { *reply_data++ = data; } else { printf("%s: bt_cmd - Discarded reply data byte " "for opcode 0x%x\n", bt_name(bt), opcode); } /* * Reset timeout to ensure at least a second * between response bytes. */ cmd_timeout = MAX(cmd_timeout, 10000); reply_len++; } else if ((opcode == BOP_FETCH_LRAM) && (status & HA_READY) != 0) { saved_status = status; cmd_complete = 1; } DELAY(100); } if (cmd_timeout == 0) { printf("%s: bt_cmd: Timeout waiting for command (%x) " "to complete.\n%s: status = 0x%x, intstat = 0x%x, " "rlen %d\n", bt_name(bt), opcode, bt_name(bt), status, intstat, reply_len); error = (ETIMEDOUT); } /* * Clear any pending interrupts. Block interrupts so our * interrupt handler is not re-entered. */ s = splcam(); bt_intr(bt); splx(s); if (error != 0) return (error); /* * If the command was rejected by the controller, tell the caller. */ if ((saved_status & CMD_INVALID) != 0) { /* * Some early adapters may not recover properly from * an invalid command. If it appears that the controller * has wedged (i.e. status was not cleared by our interrupt * reset above), perform a soft reset. */ if (bootverbose) printf("%s: Invalid Command 0x%x\n", bt_name(bt), opcode); DELAY(1000); status = bt_inb(bt, STATUS_REG); if ((status & (CMD_INVALID|STATUS_REG_RSVD|DATAIN_REG_READY| CMD_REG_BUSY|DIAG_FAIL|DIAG_ACTIVE)) != 0 || (status & (HA_READY|INIT_REQUIRED)) != (HA_READY|INIT_REQUIRED)) { btreset(bt, /*hard_reset*/FALSE); } return (EINVAL); } if (param_len > 0) { /* The controller did not accept the full argument list */ return (E2BIG); } if (reply_len != reply_buf_size) { /* Too much or too little data received */ return (EMSGSIZE); } /* We were successful */ return (0); } static int btinitmboxes(struct bt_softc *bt) { init_32b_mbox_params_t init_mbox; int error; bzero(bt->in_boxes, sizeof(bt_mbox_in_t) * bt->num_boxes); bzero(bt->out_boxes, sizeof(bt_mbox_out_t) * bt->num_boxes); bt->cur_inbox = bt->in_boxes; bt->last_inbox = bt->in_boxes + bt->num_boxes - 1; bt->cur_outbox = bt->out_boxes; bt->last_outbox = bt->out_boxes + bt->num_boxes - 1; /* Tell the adapter about them */ init_mbox.num_boxes = bt->num_boxes; init_mbox.base_addr[0] = bt->mailbox_physbase & 0xFF; init_mbox.base_addr[1] = (bt->mailbox_physbase >> 8) & 0xFF; init_mbox.base_addr[2] = (bt->mailbox_physbase >> 16) & 0xFF; init_mbox.base_addr[3] = (bt->mailbox_physbase >> 24) & 0xFF; error = bt_cmd(bt, BOP_INITIALIZE_32BMBOX, (u_int8_t *)&init_mbox, /*parmlen*/sizeof(init_mbox), /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); if (error != 0) printf("btinitmboxes: Initialization command failed\n"); else if (bt->strict_rr != 0) { /* * If the controller supports * strict round robin mode, * enable it */ u_int8_t param; param = 0; error = bt_cmd(bt, BOP_ENABLE_STRICT_RR, &param, 1, /*reply_buf*/NULL, /*reply_len*/0, DEFAULT_CMD_TIMEOUT); if (error != 0) { printf("btinitmboxes: Unable to enable strict RR\n"); error = 0; } else if (bootverbose) { printf("%s: Using Strict Round Robin Mailbox Mode\n", bt_name(bt)); } } return (error); } /* * Update the XPT's idea of the negotiated transfer * parameters for a particular target.
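* Called from the XPT_GET_TRAN_SETTINGS path when current (rather than user) settings are requested; the results are also broadcast via the xpt_async(AC_TRANSFER_NEG) call at the end of this function.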
*/ static void btfetchtransinfo(struct bt_softc *bt, struct ccb_trans_settings *cts) { setup_data_t setup_info; u_int target; u_int targ_offset; u_int targ_mask; u_int sync_period; u_int sync_offset; u_int bus_width; int error; u_int8_t param; targ_syncinfo_t sync_info; #ifdef CAM_NEW_TRAN_CODE struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; spi->valid = 0; scsi->valid = 0; #else cts->valid = 0; #endif target = cts->ccb_h.target_id; targ_offset = (target & 0x7); targ_mask = (0x01 << targ_offset); /* * Inquire Setup Information. This command retrieves the * Wide negotiation status for recent adapters as well as * the sync info for older models. */ param = sizeof(setup_info); error = bt_cmd(bt, BOP_INQUIRE_SETUP_INFO, &param, /*paramlen*/1, (u_int8_t*)&setup_info, sizeof(setup_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { printf("%s: btfetchtransinfo - Inquire Setup Info Failed %x\n", bt_name(bt), error); return; } sync_info = (target < 8) ? setup_info.low_syncinfo[targ_offset] : setup_info.high_syncinfo[targ_offset]; if (sync_info.sync == 0) sync_offset = 0; else sync_offset = sync_info.offset; bus_width = MSG_EXT_WDTR_BUS_8_BIT; if (strcmp(bt->firmware_ver, "5.06L") >= 0) { u_int wide_active; wide_active = (target < 8) ? (setup_info.low_wide_active & targ_mask) : (setup_info.high_wide_active & targ_mask); if (wide_active) bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else if ((bt->wide_permitted & targ_mask) != 0) { struct ccb_getdev cgd; /* * Prior to rev 5.06L, wide status isn't provided, * so we "guess" that wide transfers are in effect * if the user settings allow for wide and the inquiry * data for the device indicates that it can handle * wide transfers. */ xpt_setup_ccb(&cgd.ccb_h, cts->ccb_h.path, /*priority*/1); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if ((cgd.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (cgd.inq_data.flags & SID_WBus16) != 0) bus_width = MSG_EXT_WDTR_BUS_16_BIT; } if (bt->firmware_ver[0] >= '3') { /* * For adapters that can do fast or ultra speeds, * use the more exact Target Sync Information command.
*/ target_sync_info_data_t sync_info; param = sizeof(sync_info); error = bt_cmd(bt, BOP_TARG_SYNC_INFO, &param, /*paramlen*/1, (u_int8_t*)&sync_info, sizeof(sync_info), DEFAULT_CMD_TIMEOUT); if (error != 0) { printf("%s: btfetchtransinfo - Inquire Sync " "Info Failed 0x%x\n", bt_name(bt), error); return; } sync_period = sync_info.sync_rate[target] * 100; } else { sync_period = 2000 + (500 * sync_info.period); } #ifdef CAM_NEW_TRAN_CODE cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->sync_period = sync_period; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->sync_offset = sync_offset; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; spi->bus_width = bus_width; if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; spi->valid |= CTS_SPI_VALID_DISC; } else scsi->valid = 0; #else /* Convert ns value to standard SCSI sync rate */ if (cts->sync_offset != 0) cts->sync_period = scsi_calc_syncparam(sync_period); else cts->sync_period = 0; cts->sync_offset = sync_offset; cts->bus_width = bus_width; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID; #endif xpt_async(AC_TRANSFER_NEG, cts->ccb_h.path, cts); } static void btmapmboxes(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bt_softc* bt; bt = (struct bt_softc*)arg; bt->mailbox_physbase = segs->ds_addr; } static void btmapccbs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bt_softc* bt; bt = (struct bt_softc*)arg; bt->bt_ccb_physbase = segs->ds_addr; } static void btmapsgs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bt_softc* bt; bt = (struct bt_softc*)arg; SLIST_FIRST(&bt->sg_maps)->sg_physaddr = segs->ds_addr; } static void btpoll(struct cam_sim *sim) { bt_intr(cam_sim_softc(sim)); } void bttimeout(void *arg) { struct bt_ccb *bccb; union ccb *ccb; struct bt_softc *bt; int s; bccb = (struct bt_ccb *)arg; ccb = bccb->ccb; bt = (struct bt_softc *)ccb->ccb_h.ccb_bt_ptr; xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out\n", (void *)bccb); s = splcam(); if ((bccb->flags & BCCB_ACTIVE) == 0) { xpt_print_path(ccb->ccb_h.path); printf("CCB %p - timed out CCB already completed\n", (void *)bccb); splx(s); return; } /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends. */ if ((bccb->flags & BCCB_DEVICE_RESET) == 0) { struct ccb_hdr *ccb_h; if ((bccb->flags & BCCB_RELEASE_SIMQ) == 0) { xpt_freeze_simq(bt->sim, /*count*/1); bccb->flags |= BCCB_RELEASE_SIMQ; } ccb_h = LIST_FIRST(&bt->pending_ccbs); while (ccb_h != NULL) { struct bt_ccb *pending_bccb; pending_bccb = (struct bt_ccb *)ccb_h->ccb_bccb_ptr; untimeout(bttimeout, pending_bccb, ccb_h->timeout_ch); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } } if ((bccb->flags & BCCB_DEVICE_RESET) != 0 || bt->cur_outbox->action_code != BMBO_FREE || ((bccb->hccb.tag_enable == TRUE) && (bt->firmware_ver[0] < '5'))) { /* * Try a full host adapter/SCSI bus reset.
* We do this only if we have already attempted * to clear the condition with a BDR, or we cannot * attempt a BDR for lack of mailbox resources * or because of faulty firmware. It turns out * that firmware versions prior to 5.xx treat BDRs * as untagged commands that cannot be sent until * all outstanding tagged commands have been processed. * This makes it somewhat difficult to use a BDR to * clear up a problem with an uncompleted tagged command. */ ccb->ccb_h.status = CAM_CMD_TIMEOUT; btreset(bt, /*hardreset*/TRUE); printf("%s: No longer in timeout\n", bt_name(bt)); } else { /* * Send a Bus Device Reset message: * The target that is holding up the bus may not * be the same as the one that triggered this timeout * (different commands have different timeout lengths), * but we have no way of determining this from our * timeout handler. Our strategy here is to queue a * BDR message to the target of the timed out command. * If this fails, we'll get another timeout 2 seconds * later which will attempt a bus reset. */ bccb->flags |= BCCB_DEVICE_RESET; ccb->ccb_h.timeout_ch = timeout(bttimeout, (caddr_t)bccb, 2 * hz); bt->recovery_bccb->hccb.opcode = INITIATOR_BUS_DEV_RESET; /* No Data Transfer */ bt->recovery_bccb->hccb.datain = TRUE; bt->recovery_bccb->hccb.dataout = TRUE; bt->recovery_bccb->hccb.btstat = 0; bt->recovery_bccb->hccb.sdstat = 0; bt->recovery_bccb->hccb.target_id = ccb->ccb_h.target_id; /* Tell the adapter about this command */ bt->cur_outbox->ccb_addr = btccbvtop(bt, bt->recovery_bccb); bt->cur_outbox->action_code = BMBO_START; bt_outb(bt, COMMAND_REG, BOP_START_MBOX); btnextoutbox(bt); } splx(s); } Index: head/sys/dev/sound/pci/cs4281.c =================================================================== --- head/sys/dev/sound/pci/cs4281.c (revision 110231) +++ head/sys/dev/sound/pci/cs4281.c (revision 110232) @@ -1,982 +1,979 @@ /* * Copyright (c) 2000 Orion Hodson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The order of pokes in the initiation sequence is based on Linux * driver by Thomas Sailer, gw boynton (wesb@crystal.cirrus.com), tom * woller (twoller@crystal.cirrus.com). Shingo Watanabe (nabe@nabechan.org) * contributed towards power management.
*/ #include #include #include #include #include SND_DECLARE_FILE("$FreeBSD$"); #define CS4281_DEFAULT_BUFSZ 16384 /* Max fifo size for full duplex is 64 */ #define CS4281_FIFO_SIZE 15 /* DMA Engine Indices */ #define CS4281_DMA_PLAY 0 #define CS4281_DMA_REC 1 /* Misc */ -#define MIN(x,y) (x) < (y) ? (x) : (y) -#define MAX(x,y) (x) > (y) ? (x) : (y) - #define inline __inline #ifndef DEB #define DEB(x) /* x */ #endif /* DEB */ /* ------------------------------------------------------------------------- */ /* Structures */ struct sc_info; /* channel registers */ struct sc_chinfo { struct sc_info *parent; struct snd_dbuf *buffer; struct pcm_channel *channel; u_int32_t spd, fmt, bps, blksz; int dma_setup, dma_active, dma_chan; }; /* device private data */ struct sc_info { device_t dev; u_int32_t type; bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; struct resource *reg, *irq, *mem; int regtype, regid, irqid, memid; void *ih; int power; unsigned long bufsz; struct sc_chinfo pch; struct sc_chinfo rch; }; /* -------------------------------------------------------------------- */ /* prototypes */ /* ADC/DAC control */ static u_int32_t adcdac_go(struct sc_chinfo *ch, u_int32_t go); static void adcdac_prog(struct sc_chinfo *ch); /* power management and interrupt control */ static void cs4281_intr(void *); static int cs4281_power(struct sc_info *, int); static int cs4281_init(struct sc_info *); /* talk to the card */ static u_int32_t cs4281_rd(struct sc_info *, int); static void cs4281_wr(struct sc_info *, int, u_int32_t); /* misc */ static u_int8_t cs4281_rate_to_rv(u_int32_t); static u_int32_t cs4281_format_to_dmr(u_int32_t); static u_int32_t cs4281_format_to_bps(u_int32_t); /* -------------------------------------------------------------------- */ /* formats (do not add formats without editing cs_fmt_tab) */ static u_int32_t cs4281_fmts[] = { AFMT_U8, AFMT_U8 | AFMT_STEREO, AFMT_S8, AFMT_S8 | AFMT_STEREO, AFMT_S16_LE, AFMT_S16_LE | AFMT_STEREO, AFMT_U16_LE, AFMT_U16_LE | AFMT_STEREO, AFMT_S16_BE, AFMT_S16_BE | AFMT_STEREO, AFMT_U16_BE, AFMT_U16_BE | AFMT_STEREO, 0 }; static struct pcmchan_caps cs4281_caps = {6024, 48000, cs4281_fmts, 0}; /* -------------------------------------------------------------------- */ /* Hardware */ static inline u_int32_t cs4281_rd(struct sc_info *sc, int regno) { return bus_space_read_4(sc->st, sc->sh, regno); } static inline void cs4281_wr(struct sc_info *sc, int regno, u_int32_t data) { bus_space_write_4(sc->st, sc->sh, regno, data); DELAY(100); } static inline void cs4281_clr4(struct sc_info *sc, int regno, u_int32_t mask) { u_int32_t r; r = cs4281_rd(sc, regno); cs4281_wr(sc, regno, r & ~mask); } static inline void cs4281_set4(struct sc_info *sc, int regno, u_int32_t mask) { u_int32_t v; v = cs4281_rd(sc, regno); cs4281_wr(sc, regno, v | mask); } static int cs4281_waitset(struct sc_info *sc, int regno, u_int32_t mask, int tries) { u_int32_t v; while(tries > 0) { DELAY(100); v = cs4281_rd(sc, regno); if ((v & mask) == mask) break; tries --; } return tries; } static int cs4281_waitclr(struct sc_info *sc, int regno, u_int32_t mask, int tries) { u_int32_t v; while(tries > 0) { DELAY(100); v = ~ cs4281_rd(sc, regno); if (v & mask) break; tries --; } return tries; } /* ------------------------------------------------------------------------- */ /* Register value mapping functions */ static u_int32_t cs4281_rates[] = {48000, 44100, 22050, 16000, 11025, 8000}; #define CS4281_NUM_RATES sizeof(cs4281_rates)/sizeof(cs4281_rates[0]) static u_int8_t 
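/*
 * The rate mapping defined here and in cs4281_rv_to_rate() just below
 * treats values 0..5 as indices into cs4281_rates[] and anything else
 * as a divisor of the chip's 1.536 MHz clock. A standalone sketch of
 * the reverse mapping (same table assumed):
 *
 *	static u_int32_t
 *	rv_to_rate(u_int8_t rv)
 *	{
 *		static const u_int32_t rates[] =
 *		    { 48000, 44100, 22050, 16000, 11025, 8000 };
 *		return (rv < 6) ? rates[rv] : 1536000 / rv;
 *	}
 */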
cs4281_rate_to_rv(u_int32_t rate) { u_int32_t v; for (v = 0; v < CS4281_NUM_RATES; v++) { if (rate == cs4281_rates[v]) return v; } v = 1536000 / rate; if (v > 255 || v < 32) v = 5; /* default to 8k */ return v; } static u_int32_t cs4281_rv_to_rate(u_int8_t rv) { u_int32_t r; if (rv < CS4281_NUM_RATES) return cs4281_rates[rv]; r = 1536000 / rv; return r; } static inline u_int32_t cs4281_format_to_dmr(u_int32_t format) { u_int32_t dmr = 0; if (AFMT_8BIT & format) dmr |= CS4281PCI_DMR_SIZE8; if (!(AFMT_STEREO & format)) dmr |= CS4281PCI_DMR_MONO; if (AFMT_BIGENDIAN & format) dmr |= CS4281PCI_DMR_BEND; if (!(AFMT_SIGNED & format)) dmr |= CS4281PCI_DMR_USIGN; return dmr; } static inline u_int32_t cs4281_format_to_bps(u_int32_t format) { return ((AFMT_8BIT & format) ? 1 : 2) * ((AFMT_STEREO & format) ? 2 : 1); } /* -------------------------------------------------------------------- */ /* ac97 codec */ static u_int32_t cs4281_rdcd(kobj_t obj, void *devinfo, int regno) { struct sc_info *sc = (struct sc_info *)devinfo; int codecno; codecno = regno >> 8; regno &= 0xff; /* Remove old state */ cs4281_rd(sc, CS4281PCI_ACSDA); /* Fill in AC97 register value request form */ cs4281_wr(sc, CS4281PCI_ACCAD, regno); cs4281_wr(sc, CS4281PCI_ACCDA, 0); cs4281_wr(sc, CS4281PCI_ACCTL, CS4281PCI_ACCTL_ESYN | CS4281PCI_ACCTL_VFRM | CS4281PCI_ACCTL_DCV | CS4281PCI_ACCTL_CRW); /* Wait for read to complete */ if (cs4281_waitclr(sc, CS4281PCI_ACCTL, CS4281PCI_ACCTL_DCV, 250) == 0) { device_printf(sc->dev, "cs4281_rdcd: DCV did not go\n"); return 0xffffffff; } /* Wait for valid status */ if (cs4281_waitset(sc, CS4281PCI_ACSTS, CS4281PCI_ACSTS_VSTS, 250) == 0) { device_printf(sc->dev,"cs4281_rdcd: VSTS did not come\n"); return 0xffffffff; } return cs4281_rd(sc, CS4281PCI_ACSDA); } static void cs4281_wrcd(kobj_t obj, void *devinfo, int regno, u_int32_t data) { struct sc_info *sc = (struct sc_info *)devinfo; int codecno; codecno = regno >> 8; regno &= 0xff; cs4281_wr(sc, CS4281PCI_ACCAD, regno); cs4281_wr(sc, CS4281PCI_ACCDA, data); cs4281_wr(sc, CS4281PCI_ACCTL, CS4281PCI_ACCTL_ESYN | CS4281PCI_ACCTL_VFRM | CS4281PCI_ACCTL_DCV); if (cs4281_waitclr(sc, CS4281PCI_ACCTL, CS4281PCI_ACCTL_DCV, 250) == 0) { device_printf(sc->dev,"cs4281_wrcd: DCV did not go\n"); } } static kobj_method_t cs4281_ac97_methods[] = { KOBJMETHOD(ac97_read, cs4281_rdcd), KOBJMETHOD(ac97_write, cs4281_wrcd), { 0, 0 } }; AC97_DECLARE(cs4281_ac97); /* ------------------------------------------------------------------------- */ /* shared rec/play channel interface */ static void * cs4281chan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_chinfo *ch = (dir == PCMDIR_PLAY) ? &sc->pch : &sc->rch; ch->buffer = b; if (sndbuf_alloc(ch->buffer, sc->parent_dmat, sc->bufsz) != 0) { return NULL; } ch->parent = sc; ch->channel = c; ch->fmt = AFMT_U8; ch->spd = DSP_DEFAULT_SPEED; ch->bps = 1; ch->blksz = sndbuf_getsize(ch->buffer); ch->dma_chan = (dir == PCMDIR_PLAY) ? CS4281_DMA_PLAY : CS4281_DMA_REC; ch->dma_setup = 0; adcdac_go(ch, 0); adcdac_prog(ch); return ch; } static int cs4281chan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t go; go = adcdac_go(ch, 0); /* 2 interrupts are possible and used in buffer (half-empty,empty), * hence factor of 2. 
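 *
 * A minimal restatement of the clamp this implies (sketch only):
 *
 *	blksz = (blocksize < bufsz / 2) ? blocksize : bufsz / 2;
 *	sndbuf_resize(buffer, 2, blksz);	-- two blocks per buffer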
*/ ch->blksz = MIN(blocksize, sc->bufsz / 2); sndbuf_resize(ch->buffer, 2, ch->blksz); ch->dma_setup = 0; adcdac_prog(ch); adcdac_go(ch, go); DEB(printf("cs4281chan_setblocksize: blksz %d Setting %d\n", blocksize, ch->blksz)); return ch->blksz; } static int cs4281chan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t go, v, r; go = adcdac_go(ch, 0); /* pause */ r = (ch->dma_chan == CS4281_DMA_PLAY) ? CS4281PCI_DACSR : CS4281PCI_ADCSR; v = cs4281_rate_to_rv(speed); cs4281_wr(sc, r, v); adcdac_go(ch, go); /* unpause */ ch->spd = cs4281_rv_to_rate(v); return ch->spd; } static int cs4281chan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t v, go; go = adcdac_go(ch, 0); /* pause */ if (ch->dma_chan == CS4281_DMA_PLAY) v = CS4281PCI_DMR_TR_PLAY; else v = CS4281PCI_DMR_TR_REC; v |= CS4281PCI_DMR_DMA | CS4281PCI_DMR_AUTO; v |= cs4281_format_to_dmr(format); cs4281_wr(sc, CS4281PCI_DMR(ch->dma_chan), v); adcdac_go(ch, go); /* unpause */ ch->fmt = format; ch->bps = cs4281_format_to_bps(format); ch->dma_setup = 0; return 0; } static int cs4281chan_getptr(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t dba, dca, ptr; int sz; sz = sndbuf_getsize(ch->buffer); dba = cs4281_rd(sc, CS4281PCI_DBA(ch->dma_chan)); dca = cs4281_rd(sc, CS4281PCI_DCA(ch->dma_chan)); ptr = (dca - dba + sz) % sz; return ptr; } static int cs4281chan_trigger(kobj_t obj, void *data, int go) { struct sc_chinfo *ch = data; switch(go) { case PCMTRIG_START: adcdac_prog(ch); adcdac_go(ch, 1); break; case PCMTRIG_ABORT: adcdac_go(ch, 0); break; default: break; } /* return 0 if ok */ return 0; } static struct pcmchan_caps * cs4281chan_getcaps(kobj_t obj, void *data) { return &cs4281_caps; } static kobj_method_t cs4281chan_methods[] = { KOBJMETHOD(channel_init, cs4281chan_init), KOBJMETHOD(channel_setformat, cs4281chan_setformat), KOBJMETHOD(channel_setspeed, cs4281chan_setspeed), KOBJMETHOD(channel_setblocksize, cs4281chan_setblocksize), KOBJMETHOD(channel_trigger, cs4281chan_trigger), KOBJMETHOD(channel_getptr, cs4281chan_getptr), KOBJMETHOD(channel_getcaps, cs4281chan_getcaps), { 0, 0 } }; CHANNEL_DECLARE(cs4281chan); /* -------------------------------------------------------------------- */ /* ADC/DAC control */ /* adcdac_go enables/disable DMA channel, returns non-zero if DMA was * active before call */ static u_int32_t adcdac_go(struct sc_chinfo *ch, u_int32_t go) { struct sc_info *sc = ch->parent; u_int32_t going; going = !(cs4281_rd(sc, CS4281PCI_DCR(ch->dma_chan)) & CS4281PCI_DCR_MSK); if (go) cs4281_clr4(sc, CS4281PCI_DCR(ch->dma_chan), CS4281PCI_DCR_MSK); else cs4281_set4(sc, CS4281PCI_DCR(ch->dma_chan), CS4281PCI_DCR_MSK); cs4281_wr(sc, CS4281PCI_HICR, CS4281PCI_HICR_EOI); return going; } static void adcdac_prog(struct sc_chinfo *ch) { struct sc_info *sc = ch->parent; u_int32_t go; if (!ch->dma_setup) { go = adcdac_go(ch, 0); cs4281_wr(sc, CS4281PCI_DBA(ch->dma_chan), vtophys(sndbuf_getbuf(ch->buffer))); cs4281_wr(sc, CS4281PCI_DBC(ch->dma_chan), sndbuf_getsize(ch->buffer) / ch->bps - 1); ch->dma_setup = 1; adcdac_go(ch, go); } } /* -------------------------------------------------------------------- */ /* The interrupt handler */ static void cs4281_intr(void *p) { struct sc_info *sc = (struct sc_info *)p; u_int32_t hisr; hisr = cs4281_rd(sc, CS4281PCI_HISR); if (hisr == 0) return; if (hisr & CS4281PCI_HISR_DMA(CS4281_DMA_PLAY)) { 
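/*
 * Acknowledgement is implicit in this handler: reading a channel's
 * HDSR register clears that channel's interrupt, and the single HICR
 * write at the end signals end-of-interrupt to the chip. Both DMA
 * branches follow the same pattern (chan is the play or record index):
 *
 *	chn_intr(ch->channel);			-- notify the pcm layer
 *	cs4281_rd(sc, CS4281PCI_HDSR(chan));	-- read-to-clear
 */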
chn_intr(sc->pch.channel); cs4281_rd(sc, CS4281PCI_HDSR(CS4281_DMA_PLAY)); /* Clear interrupt */ } if (hisr & CS4281PCI_HISR_DMA(CS4281_DMA_REC)) { chn_intr(sc->rch.channel); cs4281_rd(sc, CS4281PCI_HDSR(CS4281_DMA_REC)); /* Clear interrupt */ } /* Signal End-of-Interrupt */ cs4281_wr(sc, CS4281PCI_HICR, CS4281PCI_HICR_EOI); } /* -------------------------------------------------------------------- */ /* power management related */ static int cs4281_power(struct sc_info *sc, int state) { switch (state) { case 0: /* Permit r/w access to all BA0 registers */ cs4281_wr(sc, CS4281PCI_CWPR, CS4281PCI_CWPR_MAGIC); /* Power on */ cs4281_clr4(sc, CS4281PCI_EPPMC, CS4281PCI_EPPMC_FPDN); break; case 3: /* Power off card and codec */ cs4281_set4(sc, CS4281PCI_EPPMC, CS4281PCI_EPPMC_FPDN); cs4281_clr4(sc, CS4281PCI_SPMC, CS4281PCI_SPMC_RSTN); break; } DEB(printf("cs4281_power %d -> %d\n", sc->power, state)); sc->power = state; return 0; } static int cs4281_init(struct sc_info *sc) { u_int32_t i, v; /* (0) Blast clock register and serial port */ cs4281_wr(sc, CS4281PCI_CLKCR1, 0); cs4281_wr(sc, CS4281PCI_SERMC, 0); /* (1) Make ESYN 0 to turn sync pulse on AC97 link */ cs4281_wr(sc, CS4281PCI_ACCTL, 0); DELAY(50); /* (2) Effect Reset */ cs4281_wr(sc, CS4281PCI_SPMC, 0); DELAY(100); cs4281_wr(sc, CS4281PCI_SPMC, CS4281PCI_SPMC_RSTN); /* Wait 50ms for ABITCLK to become stable */ DELAY(50000); /* (3) Enable Sound System Clocks */ cs4281_wr(sc, CS4281PCI_CLKCR1, CS4281PCI_CLKCR1_DLLP); DELAY(50000); /* Wait for PLL to stabilize */ cs4281_wr(sc, CS4281PCI_CLKCR1, CS4281PCI_CLKCR1_DLLP | CS4281PCI_CLKCR1_SWCE); /* (4) Power Up - this combination is essential. */ cs4281_set4(sc, CS4281PCI_SSPM, CS4281PCI_SSPM_ACLEN | CS4281PCI_SSPM_PSRCEN | CS4281PCI_SSPM_CSRCEN | CS4281PCI_SSPM_MIXEN); /* (5) Wait for clock stabilization */ if (cs4281_waitset(sc, CS4281PCI_CLKCR1, CS4281PCI_CLKCR1_DLLRDY, 250) == 0) { device_printf(sc->dev, "Clock stabilization failed\n"); return -1; } /* (6) Enable ASYNC generation. 
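 *
 * Steps (5), (7) and (11) poll through cs4281_waitset() above, which
 * re-reads a register every 100us until all bits in the mask are set,
 * giving up after `tries' attempts (a return of 0 means timeout):
 *
 *	while (tries > 0) {
 *		DELAY(100);
 *		if ((cs4281_rd(sc, regno) & mask) == mask)
 *			break;
 *		tries--;
 *	}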
*/ cs4281_wr(sc, CS4281PCI_ACCTL,CS4281PCI_ACCTL_ESYN); /* Wait to allow AC97 to start generating clock bit */ DELAY(50000); /* Set AC97 timing */ cs4281_wr(sc, CS4281PCI_SERMC, CS4281PCI_SERMC_PTC_AC97); /* (7) Wait for AC97 ready signal */ if (cs4281_waitset(sc, CS4281PCI_ACSTS, CS4281PCI_ACSTS_CRDY, 250) == 0) { device_printf(sc->dev, "codec did not avail\n"); return -1; } /* (8) Assert valid frame signal to begin sending commands to * AC97 codec */ cs4281_wr(sc, CS4281PCI_ACCTL, CS4281PCI_ACCTL_VFRM | CS4281PCI_ACCTL_ESYN); /* (9) Wait for codec calibration */ for(i = 0 ; i < 1000; i++) { DELAY(10000); v = cs4281_rdcd(0, sc, AC97_REG_POWER); if ((v & 0x0f) == 0x0f) { break; } } if (i == 1000) { device_printf(sc->dev, "codec failed to calibrate\n"); return -1; } /* (10) Set AC97 timing */ cs4281_wr(sc, CS4281PCI_SERMC, CS4281PCI_SERMC_PTC_AC97); /* (11) Wait for valid data to arrive */ if (cs4281_waitset(sc, CS4281PCI_ACISV, CS4281PCI_ACISV_ISV(3) | CS4281PCI_ACISV_ISV(4), 10000) == 0) { device_printf(sc->dev, "cs4281 never got valid data\n"); return -1; } /* (12) Start digital data transfer of audio data to codec */ cs4281_wr(sc, CS4281PCI_ACOSV, CS4281PCI_ACOSV_SLV(3) | CS4281PCI_ACOSV_SLV(4)); /* Set Master and headphone to max */ cs4281_wrcd(0, sc, AC97_MIX_AUXOUT, 0); cs4281_wrcd(0, sc, AC97_MIX_MASTER, 0); /* Power on the DAC */ v = cs4281_rdcd(0, sc, AC97_REG_POWER) & 0xfdff; cs4281_wrcd(0, sc, AC97_REG_POWER, v); /* Wait until DAC state ready */ for(i = 0; i < 320; i++) { DELAY(100); v = cs4281_rdcd(0, sc, AC97_REG_POWER); if (v & 0x02) break; } /* Power on the ADC */ v = cs4281_rdcd(0, sc, AC97_REG_POWER) & 0xfeff; cs4281_wrcd(0, sc, AC97_REG_POWER, v); /* Wait until ADC state ready */ for(i = 0; i < 320; i++) { DELAY(100); v = cs4281_rdcd(0, sc, AC97_REG_POWER); if (v & 0x01) break; } /* FIFO configuration (driver is DMA orientated, implicit FIFO) */ /* Play FIFO */ v = CS4281PCI_FCR_RS(CS4281PCI_RPCM_PLAY_SLOT) | CS4281PCI_FCR_LS(CS4281PCI_LPCM_PLAY_SLOT) | CS4281PCI_FCR_SZ(CS4281_FIFO_SIZE)| CS4281PCI_FCR_OF(0); cs4281_wr(sc, CS4281PCI_FCR(CS4281_DMA_PLAY), v); cs4281_wr(sc, CS4281PCI_FCR(CS4281_DMA_PLAY), v | CS4281PCI_FCR_FEN); /* Record FIFO */ v = CS4281PCI_FCR_RS(CS4281PCI_RPCM_REC_SLOT) | CS4281PCI_FCR_LS(CS4281PCI_LPCM_REC_SLOT) | CS4281PCI_FCR_SZ(CS4281_FIFO_SIZE)| CS4281PCI_FCR_OF(CS4281_FIFO_SIZE + 1); cs4281_wr(sc, CS4281PCI_FCR(CS4281_DMA_REC), v | CS4281PCI_FCR_PSH); cs4281_wr(sc, CS4281PCI_FCR(CS4281_DMA_REC), v | CS4281PCI_FCR_FEN); /* Match AC97 slots to FIFOs */ v = CS4281PCI_SRCSA_PLSS(CS4281PCI_LPCM_PLAY_SLOT) | CS4281PCI_SRCSA_PRSS(CS4281PCI_RPCM_PLAY_SLOT) | CS4281PCI_SRCSA_CLSS(CS4281PCI_LPCM_REC_SLOT) | CS4281PCI_SRCSA_CRSS(CS4281PCI_RPCM_REC_SLOT); cs4281_wr(sc, CS4281PCI_SRCSA, v); /* Set Auto-Initialize and set directions */ cs4281_wr(sc, CS4281PCI_DMR(CS4281_DMA_PLAY), CS4281PCI_DMR_DMA | CS4281PCI_DMR_AUTO | CS4281PCI_DMR_TR_PLAY); cs4281_wr(sc, CS4281PCI_DMR(CS4281_DMA_REC), CS4281PCI_DMR_DMA | CS4281PCI_DMR_AUTO | CS4281PCI_DMR_TR_REC); /* Enable half and empty buffer interrupts keeping DMA paused */ cs4281_wr(sc, CS4281PCI_DCR(CS4281_DMA_PLAY), CS4281PCI_DCR_TCIE | CS4281PCI_DCR_HTCIE | CS4281PCI_DCR_MSK); cs4281_wr(sc, CS4281PCI_DCR(CS4281_DMA_REC), CS4281PCI_DCR_TCIE | CS4281PCI_DCR_HTCIE | CS4281PCI_DCR_MSK); /* Enable Interrupts */ cs4281_clr4(sc, CS4281PCI_HIMR, CS4281PCI_HIMR_DMAI | CS4281PCI_HIMR_DMA(CS4281_DMA_PLAY) | CS4281PCI_HIMR_DMA(CS4281_DMA_REC)); /* Set playback volume */ cs4281_wr(sc, CS4281PCI_PPLVC, 7); cs4281_wr(sc, 
CS4281PCI_PPRVC, 7); return 0; } /* -------------------------------------------------------------------- */ /* Probe and attach the card */ static int cs4281_pci_probe(device_t dev) { char *s = NULL; switch (pci_get_devid(dev)) { case CS4281_PCI_ID: s = "Crystal Semiconductor CS4281"; break; } if (s) device_set_desc(dev, s); return s ? 0 : ENXIO; } static int cs4281_pci_attach(device_t dev) { struct sc_info *sc; struct ac97_info *codec = NULL; u_int32_t data; char status[SND_STATUSLEN]; if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "cannot allocate softc\n"); return ENXIO; } sc->dev = dev; sc->type = pci_get_devid(dev); data = pci_read_config(dev, PCIR_COMMAND, 2); data |= (PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, data, 2); #if __FreeBSD_version > 500000 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { /* Reset the power state. */ device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); } #else data = pci_read_config(dev, CS4281PCI_PMCS_OFFSET, 4); if (data & CS4281PCI_PMCS_PS_MASK) { /* Reset the power state. */ device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", data & CS4281PCI_PMCS_PS_MASK); pci_write_config(dev, CS4281PCI_PMCS_OFFSET, data & ~CS4281PCI_PMCS_PS_MASK, 4); } #endif sc->regid = PCIR_MAPS; sc->regtype = SYS_RES_MEMORY; sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid, 0, ~0, CS4281PCI_BA0_SIZE, RF_ACTIVE); if (!sc->reg) { sc->regtype = SYS_RES_IOPORT; sc->reg = bus_alloc_resource(dev, sc->regtype, &sc->regid, 0, ~0, CS4281PCI_BA0_SIZE, RF_ACTIVE); if (!sc->reg) { device_printf(dev, "unable to allocate register space\n"); goto bad; } } sc->st = rman_get_bustag(sc->reg); sc->sh = rman_get_bushandle(sc->reg); sc->memid = PCIR_MAPS + 4; sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid, 0, ~0, CS4281PCI_BA1_SIZE, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "unable to allocate fifo space\n"); goto bad; } sc->irqid = 0; sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); if (!sc->irq) { device_printf(dev, "unable to allocate interrupt\n"); goto bad; } if (snd_setup_intr(dev, sc->irq, 0, cs4281_intr, sc, &sc->ih)) { device_printf(dev, "unable to setup interrupt\n"); goto bad; } sc->bufsz = pcm_getbuffersize(dev, 4096, CS4281_DEFAULT_BUFSZ, 65536); if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, &sc->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto bad; } /* power up */ cs4281_power(sc, 0); /* init chip */ if (cs4281_init(sc) == -1) { device_printf(dev, "unable to initialize the card\n"); goto bad; } /* create/init mixer */ codec = AC97_CREATE(dev, sc, cs4281_ac97); if (codec == NULL) goto bad; mixer_init(dev, ac97_getmixerclass(), codec); if (pcm_register(dev, sc, 1, 1)) goto bad; pcm_addchan(dev, PCMDIR_PLAY, &cs4281chan_class, sc); pcm_addchan(dev, PCMDIR_REC, &cs4281chan_class, sc); snprintf(status, SND_STATUSLEN, "at %s 0x%lx irq %ld", (sc->regtype == SYS_RES_IOPORT)? 
"io" : "memory", rman_get_start(sc->reg), rman_get_start(sc->irq)); pcm_setstatus(dev, status); return 0; bad: if (codec) ac97_destroy(codec); if (sc->reg) bus_release_resource(dev, sc->regtype, sc->regid, sc->reg); if (sc->mem) bus_release_resource(dev, SYS_RES_MEMORY, sc->memid, sc->mem); if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); if (sc->irq) bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); if (sc->parent_dmat) bus_dma_tag_destroy(sc->parent_dmat); free(sc, M_DEVBUF); return ENXIO; } static int cs4281_pci_detach(device_t dev) { int r; struct sc_info *sc; r = pcm_unregister(dev); if (r) return r; sc = pcm_getdevinfo(dev); /* power off */ cs4281_power(sc, 3); bus_release_resource(dev, sc->regtype, sc->regid, sc->reg); bus_release_resource(dev, SYS_RES_MEMORY, sc->memid, sc->mem); bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); bus_dma_tag_destroy(sc->parent_dmat); free(sc, M_DEVBUF); return 0; } static int cs4281_pci_suspend(device_t dev) { struct sc_info *sc; sc = pcm_getdevinfo(dev); sc->rch.dma_active = adcdac_go(&sc->rch, 0); sc->pch.dma_active = adcdac_go(&sc->pch, 0); cs4281_power(sc, 3); return 0; } static int cs4281_pci_resume(device_t dev) { struct sc_info *sc; sc = pcm_getdevinfo(dev); /* power up */ cs4281_power(sc, 0); /* initialize chip */ if (cs4281_init(sc) == -1) { device_printf(dev, "unable to reinitialize the card\n"); return ENXIO; } /* restore mixer state */ if (mixer_reinit(dev) == -1) { device_printf(dev, "unable to reinitialize the mixer\n"); return ENXIO; } /* restore chip state */ cs4281chan_setspeed(NULL, &sc->rch, sc->rch.spd); cs4281chan_setblocksize(NULL, &sc->rch, sc->rch.blksz); cs4281chan_setformat(NULL, &sc->rch, sc->rch.fmt); adcdac_go(&sc->rch, sc->rch.dma_active); cs4281chan_setspeed(NULL, &sc->pch, sc->pch.spd); cs4281chan_setblocksize(NULL, &sc->pch, sc->pch.blksz); cs4281chan_setformat(NULL, &sc->pch, sc->pch.fmt); adcdac_go(&sc->pch, sc->pch.dma_active); return 0; } static device_method_t cs4281_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cs4281_pci_probe), DEVMETHOD(device_attach, cs4281_pci_attach), DEVMETHOD(device_detach, cs4281_pci_detach), DEVMETHOD(device_suspend, cs4281_pci_suspend), DEVMETHOD(device_resume, cs4281_pci_resume), { 0, 0 } }; static driver_t cs4281_driver = { "pcm", cs4281_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_cs4281, pci, cs4281_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_cs4281, snd_pcm, PCM_MINVER, PCM_PREFVER, PCM_MAXVER); MODULE_VERSION(snd_cs4281, 1); Index: head/sys/dev/sym/sym_hipd.c =================================================================== --- head/sys/dev/sym/sym_hipd.c (revision 110231) +++ head/sys/dev/sym/sym_hipd.c (revision 110232) @@ -1,10414 +1,10407 @@ /* * Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010 * PCI-SCSI controllers. * * Copyright (C) 1999-2001 Gerard Roudier * * This driver also supports the following Symbios/LSI PCI-SCSI chips: * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895, * 53C810, 53C815, 53C825 and the 53C1510D is 53C8XX mode. * * * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-1999 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. 
* * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier * Stefan Esser * Copyright (C) 1994 Wolfgang Stanglmeier * * The initialisation code, and part of the code that addresses * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM * written by Justin T. Gibbs. * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham * *----------------------------------------------------------------------------- * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $FreeBSD$ */ #define SYM_DRIVER_NAME "sym-1.6.5-20000902" /* #define SYM_DEBUG_GENERIC_SUPPORT */ /* #define CAM_NEW_TRAN_CODE */ #include /* * Only use the BUS stuff for PCI under FreeBSD 4 and later versions. * Note that the old BUS stuff also works for FreeBSD 4 and spares * about 1 KB for the driver object file. */ #if __FreeBSD_version >= 400000 #define FreeBSD_Bus_Dma_Abstraction #define FreeBSD_Bus_Io_Abstraction #define FreeBSD_Bus_Space_Abstraction #endif /* * Driver configuration options. */ #include "opt_sym.h" #include #ifndef FreeBSD_Bus_Io_Abstraction #include "ncr.h" /* To know if the ncr has been configured */ #endif #include #include #include #include #ifdef FreeBSD_Bus_Io_Abstraction #include #include #endif #include #include #include #ifdef FreeBSD_Bus_Space_Abstraction #include /* * Only include bus_pio if needed. * This avoids bus space primitives to be uselessly bloated * by out-of-age PIO operations. */ #ifdef SYM_CONF_IOMAPPED #include #endif #endif #include #ifdef FreeBSD_Bus_Io_Abstraction #include #include #endif #include #include #include #include #include #include #include #include #include #include /* Short and quite clear integer types */ typedef int8_t s8; typedef int16_t s16; typedef int32_t s32; typedef u_int8_t u8; typedef u_int16_t u16; typedef u_int32_t u32; /* * From 'cam.error_recovery_diffs.20010313.context' patch. */ #ifdef CAM_NEW_TRAN_CODE #define FreeBSD_New_Tran_Settings #endif /* CAM_NEW_TRAN_CODE */ /* * Driver definitions. */ #include #include /* * IA32 architecture does not reorder STORES and prevents * LOADS from passing STORES. 
It is called `program order' * by Intel and allows device drivers to deal with memory * ordering by only ensuring that the code is not reordered * by the compiler when ordering is required. * Other architectures implement a weaker ordering that * requires memory barriers (and also IO barriers when they * make sense) to be used. */ #if defined __i386__ #define MEMORY_BARRIER() do { ; } while(0) #elif defined __alpha__ #define MEMORY_BARRIER() alpha_mb() #elif defined __powerpc__ #define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory") #elif defined __ia64__ #define MEMORY_BARRIER() __asm__ volatile("mf.a; mf" : : : "memory") #elif defined __sparc64__ #define MEMORY_BARRIER() __asm__ volatile("membar #Sync" : : : "memory") #else #error "Not supported platform" #endif /* * Portable but silly implemented byte order primitives. * We define the primitives we need, since FreeBSD doesn't * seem to have them yet. */ #if BYTE_ORDER == BIG_ENDIAN #define __revb16(x) ( (((u16)(x) & (u16)0x00ffU) << 8) | \ (((u16)(x) & (u16)0xff00U) >> 8) ) #define __revb32(x) ( (((u32)(x) & 0x000000ffU) << 24) | \ (((u32)(x) & 0x0000ff00U) << 8) | \ (((u32)(x) & 0x00ff0000U) >> 8) | \ (((u32)(x) & 0xff000000U) >> 24) ) #define __htole16(v) __revb16(v) #define __htole32(v) __revb32(v) #define __le16toh(v) __htole16(v) #define __le32toh(v) __htole32(v) static __inline u16 _htole16(u16 v) { return __htole16(v); } static __inline u32 _htole32(u32 v) { return __htole32(v); } #define _le16toh _htole16 #define _le32toh _htole32 #else /* LITTLE ENDIAN */ #define __htole16(v) (v) #define __htole32(v) (v) #define __le16toh(v) (v) #define __le32toh(v) (v) #define _htole16(v) (v) #define _htole32(v) (v) #define _le16toh(v) (v) #define _le32toh(v) (v) #endif /* BYTE_ORDER */ /* * A la VMS/CAM-3 queue management. */ typedef struct sym_quehead { struct sym_quehead *flink; /* Forward pointer */ struct sym_quehead *blink; /* Backward pointer */ } SYM_QUEHEAD; #define sym_que_init(ptr) do { \ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \ } while (0) static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head) { return (head->flink == head) ? 0 : head->flink; } static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head) { return (head->blink == head) ? 
0 : head->blink; } static __inline void __sym_que_add(struct sym_quehead * new, struct sym_quehead * blink, struct sym_quehead * flink) { flink->blink = new; new->flink = flink; new->blink = blink; blink->flink = new; } static __inline void __sym_que_del(struct sym_quehead * blink, struct sym_quehead * flink) { flink->blink = blink; blink->flink = flink; } static __inline int sym_que_empty(struct sym_quehead *head) { return head->flink == head; } static __inline void sym_que_splice(struct sym_quehead *list, struct sym_quehead *head) { struct sym_quehead *first = list->flink; if (first != list) { struct sym_quehead *last = list->blink; struct sym_quehead *at = head->flink; first->blink = head; head->flink = first; last->flink = at; at->blink = last; } } #define sym_que_entry(ptr, type, member) \ ((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member))) #define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink) #define sym_remque(el) __sym_que_del((el)->blink, (el)->flink) #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink) static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head) { struct sym_quehead *elem = head->flink; if (elem != head) __sym_que_del(head, elem->flink); else elem = 0; return elem; } #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head) static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head) { struct sym_quehead *elem = head->blink; if (elem != head) __sym_que_del(elem->blink, head); else elem = 0; return elem; } /* * This one may be useful. */ #define FOR_EACH_QUEUED_ELEMENT(head, qp) \ for (qp = (head)->flink; qp != (head); qp = qp->flink) /* * FreeBSD does not offer our kind of queue in the CAM CCB. * So, we have to cast. */ #define sym_qptr(p) ((struct sym_quehead *) (p)) /* * Simple bitmap operations. */ #define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f))) #define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f))) #define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f))) /* * Number of tasks per device we want to handle. */ #if SYM_CONF_MAX_TAG_ORDER > 8 #error "more than 256 tags per logical unit not allowed." #endif #define SYM_CONF_MAX_TASK (1<<SYM_CONF_MAX_TAG_ORDER) #if SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK #undef SYM_CONF_MAX_TAG #define SYM_CONF_MAX_TAG SYM_CONF_MAX_TASK #endif /* * This one means 'NO TAG for this job' */ #define NO_TAG (256) /* * Number of SCSI targets. */ #if SYM_CONF_MAX_TARGET > 16 #error "more than 16 targets not allowed." #endif /* * Number of logical units per target. */ #if SYM_CONF_MAX_LUN > 64 #error "more than 64 logical units per target not allowed." #endif /* * Asynchronous pre-scaler (ns). Shall be 40 for * the SCSI timings to be compliant. */ #define SYM_CONF_MIN_ASYNC (40) /* * Number of entries in the START and DONE queues. * * We limit to 1 PAGE in order to succeed allocation of * these queues. Each entry is 8 bytes long (2 DWORDS). */ #ifdef SYM_CONF_MAX_START #define SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2) #else #define SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2) #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2) #endif #if SYM_CONF_MAX_QUEUE > PAGE_SIZE/8 #undef SYM_CONF_MAX_QUEUE #define SYM_CONF_MAX_QUEUE PAGE_SIZE/8 #undef SYM_CONF_MAX_START #define SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2) #endif /* * For this one, we want a short name :-) */ #define MAX_QUEUE SYM_CONF_MAX_QUEUE /* - * These ones should have been already defined. - */ -#ifndef MIN -#define MIN(a, b) (((a) < (b)) ?
(a) : (b)) -#endif - -/* * Active debugging tags and verbosity. */ #define DEBUG_ALLOC (0x0001) #define DEBUG_PHASE (0x0002) #define DEBUG_POLL (0x0004) #define DEBUG_QUEUE (0x0008) #define DEBUG_RESULT (0x0010) #define DEBUG_SCATTER (0x0020) #define DEBUG_SCRIPT (0x0040) #define DEBUG_TINY (0x0080) #define DEBUG_TIMING (0x0100) #define DEBUG_NEGO (0x0200) #define DEBUG_TAGS (0x0400) #define DEBUG_POINTER (0x0800) #if 0 static int sym_debug = 0; #define DEBUG_FLAGS sym_debug #else /* #define DEBUG_FLAGS (0x0631) */ #define DEBUG_FLAGS (0x0000) #endif #define sym_verbose (np->verbose) /* * Insert a delay in micro-seconds and milli-seconds. */ static void UDELAY(int us) { DELAY(us); } static void MDELAY(int ms) { while (ms--) UDELAY(1000); } /* * Simple power of two buddy-like allocator. * * This simple code is not intended to be fast, but to * provide power of 2 aligned memory allocations. * Since the SCRIPTS processor only supplies 8 bit arithmetic, * this allocator allows simple and fast address calculations * from the SCRIPTS code. In addition, cache line alignment * is guaranteed for power of 2 cache line size. * * This allocator has been developped for the Linux sym53c8xx * driver, since this O/S does not provide naturally aligned * allocations. * It has the advantage of allowing the driver to use private * pages of memory that will be useful if we ever need to deal * with IO MMUs for PCI. */ #define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */ #define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */ #if 0 #define MEMO_FREE_UNUSED /* Free unused pages immediately */ #endif #define MEMO_WARN 1 #define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER) #define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT) #define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1) #define get_pages() malloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_NOWAIT) #define free_pages(p) free((p), M_DEVBUF) typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */ typedef struct m_link { /* Link between free memory chunks */ struct m_link *next; } m_link_s; #ifdef FreeBSD_Bus_Dma_Abstraction typedef struct m_vtob { /* Virtual to Bus address translation */ struct m_vtob *next; bus_dmamap_t dmamap; /* Map for this chunk */ m_addr_t vaddr; /* Virtual address */ m_addr_t baddr; /* Bus physical address */ } m_vtob_s; /* Hash this stuff a bit to speed up translations */ #define VTOB_HASH_SHIFT 5 #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT) #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1) #define VTOB_HASH_CODE(m) \ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK) #endif typedef struct m_pool { /* Memory pool of a given kind */ #ifdef FreeBSD_Bus_Dma_Abstraction bus_dma_tag_t dev_dmat; /* Identifies the pool */ bus_dma_tag_t dmat; /* Tag for our fixed allocations */ m_addr_t (*getp)(struct m_pool *); #ifdef MEMO_FREE_UNUSED void (*freep)(struct m_pool *, m_addr_t); #endif #define M_GETP() mp->getp(mp) #define M_FREEP(p) mp->freep(mp, p) int nump; m_vtob_s *(vtob[VTOB_HASH_SIZE]); struct m_pool *next; #else #define M_GETP() get_pages() #define M_FREEP(p) free_pages(p) #endif /* FreeBSD_Bus_Dma_Abstraction */ struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1]; } m_pool_s; static void *___sym_malloc(m_pool_s *mp, int size) { int i = 0; int s = (1 << MEMO_SHIFT); int j; m_addr_t a; m_link_s *h = mp->h; if (size > MEMO_CLUSTER_SIZE) return 0; while (size > s) { s <<= 1; ++i; } j = i; while (!h[j].next) { if (s == MEMO_CLUSTER_SIZE) { h[j].next = (m_link_s *) M_GETP(); if (h[j].next) h[j].next->next = 0; break; } ++j; s <<= 1; } a = 
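/*
 * At this point h[j] is the smallest non-empty free list whose chunk
 * size s covers the request (order i). The chunk is unlinked and, in
 * the loop below, repeatedly split in half: each upper half (at
 * address a + s after s is halved) is pushed onto the next smaller
 * list until the remaining piece matches the requested size.
 */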
(m_addr_t) h[j].next; if (a) { h[j].next = h[j].next->next; while (j > i) { j -= 1; s >>= 1; h[j].next = (m_link_s *) (a+s); h[j].next->next = 0; } } #ifdef DEBUG printf("___sym_malloc(%d) = %p\n", size, (void *) a); #endif return (void *) a; } static void ___sym_mfree(m_pool_s *mp, void *ptr, int size) { int i = 0; int s = (1 << MEMO_SHIFT); m_link_s *q; m_addr_t a, b; m_link_s *h = mp->h; #ifdef DEBUG printf("___sym_mfree(%p, %d)\n", ptr, size); #endif if (size > MEMO_CLUSTER_SIZE) return; while (size > s) { s <<= 1; ++i; } a = (m_addr_t) ptr; while (1) { #ifdef MEMO_FREE_UNUSED if (s == MEMO_CLUSTER_SIZE) { M_FREEP(a); break; } #endif b = a ^ s; q = &h[i]; while (q->next && q->next != (m_link_s *) b) { q = q->next; } if (!q->next) { ((m_link_s *) a)->next = h[i].next; h[i].next = (m_link_s *) a; break; } q->next = q->next->next; a = a & b; s <<= 1; ++i; } } static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags) { void *p; p = ___sym_malloc(mp, size); if (DEBUG_FLAGS & DEBUG_ALLOC) printf ("new %-10s[%4d] @%p.\n", name, size, p); if (p) bzero(p, size); else if (uflags & MEMO_WARN) printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size); return p; } #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, MEMO_WARN) static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name) { if (DEBUG_FLAGS & DEBUG_ALLOC) printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr); ___sym_mfree(mp, ptr, size); } /* * Default memory pool we donnot need to involve in DMA. */ #ifndef FreeBSD_Bus_Dma_Abstraction /* * Without the `bus dma abstraction', all the memory is assumed * DMAable and a single pool is all what we need. */ static m_pool_s mp0; #else /* * With the `bus dma abstraction', we use a separate pool for * memory we donnot need to involve in DMA. */ static m_addr_t ___mp0_getp(m_pool_s *mp) { m_addr_t m = (m_addr_t) get_pages(); if (m) ++mp->nump; return m; } #ifdef MEMO_FREE_UNUSED static void ___mp0_freep(m_pool_s *mp, m_addr_t m) { free_pages(m); --mp->nump; } #endif #ifdef MEMO_FREE_UNUSED static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep}; #else static m_pool_s mp0 = {0, 0, ___mp0_getp}; #endif #endif /* FreeBSD_Bus_Dma_Abstraction */ /* * Actual memory allocation routine for non-DMAed memory. */ static void *sym_calloc(int size, char *name) { void *m; /* Lock */ m = __sym_calloc(&mp0, size, name); /* Unlock */ return m; } /* * Actual memory allocation routine for non-DMAed memory. */ static void sym_mfree(void *ptr, int size, char *name) { /* Lock */ __sym_mfree(&mp0, ptr, size, name); /* Unlock */ } /* * DMAable pools. */ #ifndef FreeBSD_Bus_Dma_Abstraction /* * Without `bus dma abstraction', all the memory is DMAable, and * only a single pool is needed (vtophys() is our friend). */ #define __sym_calloc_dma(b, s, n) sym_calloc(s, n) #define __sym_mfree_dma(b, p, s, n) sym_mfree(p, s, n) #ifdef __alpha__ #define __vtobus(b, p) alpha_XXX_dmamap((vm_offset_t)(p)) #else /*__i386__, __sparc64__*/ #define __vtobus(b, p) vtophys(p) #endif #else /* * With `bus dma abstraction', we use a separate pool per parent * BUS handle. A reverse table (hashed) is maintained for virtual * to BUS address translation. 
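 *
 * A sketch of the lookup __vtobus() performs further down (field
 * names as in m_vtob_s): hash the cluster-aligned base of the
 * virtual address, walk the chain, then re-apply the offset:
 *
 *	a  = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
 *	vp = mp->vtob[VTOB_HASH_CODE(m)];
 *	while (vp && vp->vaddr != a)
 *		vp = vp->next;
 *	baddr = vp->baddr + (((m_addr_t) m) - a);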
*/ static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; baddr = (bus_addr_t *)arg; *baddr = segs->ds_addr; } static m_addr_t ___dma_getp(m_pool_s *mp) { m_vtob_s *vbp; void *vaddr = 0; bus_addr_t baddr = 0; vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB"); if (!vbp) goto out_err; if (bus_dmamem_alloc(mp->dmat, &vaddr, BUS_DMA_NOWAIT, &vbp->dmamap)) goto out_err; bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr, MEMO_CLUSTER_SIZE, getbaddrcb, &baddr, 0); if (baddr) { int hc = VTOB_HASH_CODE(vaddr); vbp->vaddr = (m_addr_t) vaddr; vbp->baddr = (m_addr_t) baddr; vbp->next = mp->vtob[hc]; mp->vtob[hc] = vbp; ++mp->nump; return (m_addr_t) vaddr; } out_err: if (baddr) bus_dmamap_unload(mp->dmat, vbp->dmamap); if (vaddr) bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap); if (vbp->dmamap) bus_dmamap_destroy(mp->dmat, vbp->dmamap); if (vbp) __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); return 0; } #ifdef MEMO_FREE_UNUSED static void ___dma_freep(m_pool_s *mp, m_addr_t m) { m_vtob_s **vbpp, *vbp; int hc = VTOB_HASH_CODE(m); vbpp = &mp->vtob[hc]; while (*vbpp && (*vbpp)->vaddr != m) vbpp = &(*vbpp)->next; if (*vbpp) { vbp = *vbpp; *vbpp = (*vbpp)->next; bus_dmamap_unload(mp->dmat, vbp->dmamap); bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap); bus_dmamap_destroy(mp->dmat, vbp->dmamap); __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); --mp->nump; } } #endif static __inline m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat) { m_pool_s *mp; for (mp = mp0.next; mp && mp->dev_dmat != dev_dmat; mp = mp->next); return mp; } static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat) { m_pool_s *mp = 0; mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL"); if (mp) { mp->dev_dmat = dev_dmat; if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR_32BIT, NULL, NULL, MEMO_CLUSTER_SIZE, 1, MEMO_CLUSTER_SIZE, 0, &mp->dmat)) { mp->getp = ___dma_getp; #ifdef MEMO_FREE_UNUSED mp->freep = ___dma_freep; #endif mp->next = mp0.next; mp0.next = mp; return mp; } } if (mp) __sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL"); return 0; } #ifdef MEMO_FREE_UNUSED static void ___del_dma_pool(m_pool_s *p) { struct m_pool **pp = &mp0.next; while (*pp && *pp != p) pp = &(*pp)->next; if (*pp) { *pp = (*pp)->next; bus_dma_tag_destroy(p->dmat); __sym_mfree(&mp0, p, sizeof(*p), "MPOOL"); } } #endif static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name) { struct m_pool *mp; void *m = 0; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (!mp) mp = ___cre_dma_pool(dev_dmat); if (mp) m = __sym_calloc(mp, size, name); #ifdef MEMO_FREE_UNUSED if (mp && !mp->nump) ___del_dma_pool(mp); #endif /* Unlock */ return m; } static void __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name) { struct m_pool *mp; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (mp) __sym_mfree(mp, m, size, name); #ifdef MEMO_FREE_UNUSED if (mp && !mp->nump) ___del_dma_pool(mp); #endif /* Unlock */ } static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m) { m_pool_s *mp; int hc = VTOB_HASH_CODE(m); m_vtob_s *vp = 0; m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK; /* Lock */ mp = ___get_dma_pool(dev_dmat); if (mp) { vp = mp->vtob[hc]; while (vp && (m_addr_t) vp->vaddr != a) vp = vp->next; } /* Unlock */ if (!vp) panic("sym: VTOBUS FAILED!\n"); return vp ? vp->baddr + (((m_addr_t) m) - a) : 0; } #endif /* FreeBSD_Bus_Dma_Abstraction */ /* * Verbs for DMAable memory handling. * The _uvptv_ macro avoids a nasty warning about pointer to volatile * being discarded. 
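 *
 * For example (hypothetical pointer), handing a volatile pointer
 * straight to a `void *' parameter draws the warning, while
 * laundering it through an integral vm_offset_t does not:
 *
 *	volatile struct sym_ccb *cp;
 *	__sym_mfree_dma(np->bus_dmat, _uvptv_(cp), sizeof(*cp), "CCB");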
*/ #define _uvptv_(p) ((void *)((vm_offset_t)(p))) #define _sym_calloc_dma(np, s, n) __sym_calloc_dma(np->bus_dmat, s, n) #define _sym_mfree_dma(np, p, s, n) \ __sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n) #define sym_calloc_dma(s, n) _sym_calloc_dma(np, s, n) #define sym_mfree_dma(p, s, n) _sym_mfree_dma(np, p, s, n) #define _vtobus(np, p) __vtobus(np->bus_dmat, _uvptv_(p)) #define vtobus(p) _vtobus(np, p) /* * Print a buffer in hexadecimal format. */ static void sym_printb_hex (u_char *p, int n) { while (n-- > 0) printf (" %x", *p++); } /* * Same with a label at beginning and .\n at end. */ static void sym_printl_hex (char *label, u_char *p, int n) { printf ("%s", label); sym_printb_hex (p, n); printf (".\n"); } /* * Return a string for SCSI BUS mode. */ static char *sym_scsi_bus_mode(int mode) { switch(mode) { case SMODE_HVD: return "HVD"; case SMODE_SE: return "SE"; case SMODE_LVD: return "LVD"; } return "??"; } /* * Some poor and bogus sync table that refers to Tekram NVRAM layout. */ #ifdef SYM_CONF_NVRAM_SUPPORT static u_char Tekram_sync[16] = {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10}; #endif /* * Union of supported NVRAM formats. */ struct sym_nvram { int type; #define SYM_SYMBIOS_NVRAM (1) #define SYM_TEKRAM_NVRAM (2) #ifdef SYM_CONF_NVRAM_SUPPORT union { Symbios_nvram Symbios; Tekram_nvram Tekram; } data; #endif }; /* * This one is hopefully useless, but actually useful. :-) */ #ifndef assert #define assert(expression) { \ if (!(expression)) { \ (void)panic( \ "assertion \"%s\" failed: file \"%s\", line %d\n", \ #expression, \ __FILE__, __LINE__); \ } \ } #endif /* * Some provision for a possible big endian mode supported by * Symbios chips (never seen, by the way). * For now, this stuff does not deserve any comments. :) */ #define sym_offb(o) (o) #define sym_offw(o) (o) /* * Some provision for support for BIG ENDIAN CPU. * Btw, FreeBSD does not seem to be ready yet for big endian. */ #if BYTE_ORDER == BIG_ENDIAN #define cpu_to_scr(dw) _htole32(dw) #define scr_to_cpu(dw) _le32toh(dw) #else #define cpu_to_scr(dw) (dw) #define scr_to_cpu(dw) (dw) #endif /* * Access to the chip IO registers and on-chip RAM. * We use the `bus space' interface under FreeBSD-4 and * later kernel versions. */ #ifdef FreeBSD_Bus_Space_Abstraction #if defined(SYM_CONF_IOMAPPED) #define INB_OFF(o) bus_space_read_1(np->io_tag, np->io_bsh, o) #define INW_OFF(o) bus_space_read_2(np->io_tag, np->io_bsh, o) #define INL_OFF(o) bus_space_read_4(np->io_tag, np->io_bsh, o) #define OUTB_OFF(o, v) bus_space_write_1(np->io_tag, np->io_bsh, o, (v)) #define OUTW_OFF(o, v) bus_space_write_2(np->io_tag, np->io_bsh, o, (v)) #define OUTL_OFF(o, v) bus_space_write_4(np->io_tag, np->io_bsh, o, (v)) #else /* Memory mapped IO */ #define INB_OFF(o) bus_space_read_1(np->mmio_tag, np->mmio_bsh, o) #define INW_OFF(o) bus_space_read_2(np->mmio_tag, np->mmio_bsh, o) #define INL_OFF(o) bus_space_read_4(np->mmio_tag, np->mmio_bsh, o) #define OUTB_OFF(o, v) bus_space_write_1(np->mmio_tag, np->mmio_bsh, o, (v)) #define OUTW_OFF(o, v) bus_space_write_2(np->mmio_tag, np->mmio_bsh, o, (v)) #define OUTL_OFF(o, v) bus_space_write_4(np->mmio_tag, np->mmio_bsh, o, (v)) #endif /* SYM_CONF_IOMAPPED */ #define OUTRAM_OFF(o, a, l) \ bus_space_write_region_1(np->ram_tag, np->ram_bsh, o, (a), (l)) #else /* not defined FreeBSD_Bus_Space_Abstraction */ #if BYTE_ORDER == BIG_ENDIAN #error "BIG ENDIAN support requires bus space kernel interface" #endif /* * Access to the chip IO registers and on-chip RAM. 
* We use legacy MMIO and IO interface for FreeBSD 3.X versions. */ /* * Define some understable verbs for IO and MMIO. */ #define io_read8(p) scr_to_cpu(inb((p))) #define io_read16(p) scr_to_cpu(inw((p))) #define io_read32(p) scr_to_cpu(inl((p))) #define io_write8(p, v) outb((p), cpu_to_scr(v)) #define io_write16(p, v) outw((p), cpu_to_scr(v)) #define io_write32(p, v) outl((p), cpu_to_scr(v)) #ifdef __alpha__ #define mmio_read8(a) readb(a) #define mmio_read16(a) readw(a) #define mmio_read32(a) readl(a) #define mmio_write8(a, b) writeb(a, b) #define mmio_write16(a, b) writew(a, b) #define mmio_write32(a, b) writel(a, b) #define memcpy_to_pci(d, s, n) memcpy_toio((u32)(d), (void *)(s), (n)) #else /*__i386__, __sparc64__*/ #define mmio_read8(a) scr_to_cpu((*(volatile unsigned char *) (a))) #define mmio_read16(a) scr_to_cpu((*(volatile unsigned short *) (a))) #define mmio_read32(a) scr_to_cpu((*(volatile unsigned int *) (a))) #define mmio_write8(a, b) (*(volatile unsigned char *) (a)) = cpu_to_scr(b) #define mmio_write16(a, b) (*(volatile unsigned short *) (a)) = cpu_to_scr(b) #define mmio_write32(a, b) (*(volatile unsigned int *) (a)) = cpu_to_scr(b) #define memcpy_to_pci(d, s, n) bcopy((s), (void *)(d), (n)) #endif /* * Normal IO */ #if defined(SYM_CONF_IOMAPPED) #define INB_OFF(o) io_read8(np->io_port + sym_offb(o)) #define OUTB_OFF(o, v) io_write8(np->io_port + sym_offb(o), (v)) #define INW_OFF(o) io_read16(np->io_port + sym_offw(o)) #define OUTW_OFF(o, v) io_write16(np->io_port + sym_offw(o), (v)) #define INL_OFF(o) io_read32(np->io_port + (o)) #define OUTL_OFF(o, v) io_write32(np->io_port + (o), (v)) #else /* Memory mapped IO */ #define INB_OFF(o) mmio_read8(np->mmio_va + sym_offb(o)) #define OUTB_OFF(o, v) mmio_write8(np->mmio_va + sym_offb(o), (v)) #define INW_OFF(o) mmio_read16(np->mmio_va + sym_offw(o)) #define OUTW_OFF(o, v) mmio_write16(np->mmio_va + sym_offw(o), (v)) #define INL_OFF(o) mmio_read32(np->mmio_va + (o)) #define OUTL_OFF(o, v) mmio_write32(np->mmio_va + (o), (v)) #endif #define OUTRAM_OFF(o, a, l) memcpy_to_pci(np->ram_va + (o), (a), (l)) #endif /* FreeBSD_Bus_Space_Abstraction */ /* * Common definitions for both bus space and legacy IO methods. */ #define INB(r) INB_OFF(offsetof(struct sym_reg,r)) #define INW(r) INW_OFF(offsetof(struct sym_reg,r)) #define INL(r) INL_OFF(offsetof(struct sym_reg,r)) #define OUTB(r, v) OUTB_OFF(offsetof(struct sym_reg,r), (v)) #define OUTW(r, v) OUTW_OFF(offsetof(struct sym_reg,r), (v)) #define OUTL(r, v) OUTL_OFF(offsetof(struct sym_reg,r), (v)) #define OUTONB(r, m) OUTB(r, INB(r) | (m)) #define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m)) #define OUTONW(r, m) OUTW(r, INW(r) | (m)) #define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m)) #define OUTONL(r, m) OUTL(r, INL(r) | (m)) #define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m)) /* * We normally want the chip to have a consistent view * of driver internal data structures when we restart it. * Thus these macros. */ #define OUTL_DSP(v) \ do { \ MEMORY_BARRIER(); \ OUTL (nc_dsp, (v)); \ } while (0) #define OUTONB_STD() \ do { \ MEMORY_BARRIER(); \ OUTONB (nc_dcntl, (STD|NOCOM)); \ } while (0) /* * Command control block states. 
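 *
 * The encoding below packs a done flag and a reason into one byte:
 * values with HS_DONEMASK set denote a finished job and the low bits
 * give the completion code, e.g. (a sketch of the test, not a quote
 * from the driver):
 *
 *	if (cp->host_status & HS_DONEMASK)
 *		-- complete; low bits say why (HS_COMPLETE, ...)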
*/ #define HS_IDLE (0) #define HS_BUSY (1) #define HS_NEGOTIATE (2) /* sync/wide data transfer*/ #define HS_DISCONNECT (3) /* Disconnected by target */ #define HS_WAIT (4) /* waiting for resource */ #define HS_DONEMASK (0x80) #define HS_COMPLETE (4|HS_DONEMASK) #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */ #define HS_UNEXPECTED (6|HS_DONEMASK) /* Unexpected disconnect */ #define HS_COMP_ERR (7|HS_DONEMASK) /* Completed with error */ /* * Software Interrupt Codes */ #define SIR_BAD_SCSI_STATUS (1) #define SIR_SEL_ATN_NO_MSG_OUT (2) #define SIR_MSG_RECEIVED (3) #define SIR_MSG_WEIRD (4) #define SIR_NEGO_FAILED (5) #define SIR_NEGO_PROTO (6) #define SIR_SCRIPT_STOPPED (7) #define SIR_REJECT_TO_SEND (8) #define SIR_SWIDE_OVERRUN (9) #define SIR_SODL_UNDERRUN (10) #define SIR_RESEL_NO_MSG_IN (11) #define SIR_RESEL_NO_IDENTIFY (12) #define SIR_RESEL_BAD_LUN (13) #define SIR_TARGET_SELECTED (14) #define SIR_RESEL_BAD_I_T_L (15) #define SIR_RESEL_BAD_I_T_L_Q (16) #define SIR_ABORT_SENT (17) #define SIR_RESEL_ABORTED (18) #define SIR_MSG_OUT_DONE (19) #define SIR_COMPLETE_ERROR (20) #define SIR_DATA_OVERRUN (21) #define SIR_BAD_PHASE (22) #define SIR_MAX (22) /* * Extended error bit codes. * xerr_status field of struct sym_ccb. */ #define XE_EXTRA_DATA (1) /* unexpected data phase */ #define XE_BAD_PHASE (1<<1) /* illegal phase (4/5) */ #define XE_PARITY_ERR (1<<2) /* unrecovered SCSI parity error */ #define XE_SODL_UNRUN (1<<3) /* ODD transfer in DATA OUT phase */ #define XE_SWIDE_OVRUN (1<<4) /* ODD transfer in DATA IN phase */ /* * Negotiation status. * nego_status field of struct sym_ccb. */ #define NS_SYNC (1) #define NS_WIDE (2) #define NS_PPR (3) /* * A CCB hashed table is used to retrieve CCB address * from DSA value. */ #define CCB_HASH_SHIFT 8 #define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT) #define CCB_HASH_MASK (CCB_HASH_SIZE-1) #define CCB_HASH_CODE(dsa) (((dsa) >> 9) & CCB_HASH_MASK) /* * Device flags. */ #define SYM_DISC_ENABLED (1) #define SYM_TAGS_ENABLED (1<<1) #define SYM_SCAN_BOOT_DISABLED (1<<2) #define SYM_SCAN_LUNS_DISABLED (1<<3) /* * Host adapter miscellaneous flags. */ #define SYM_AVOID_BUS_RESET (1) #define SYM_SCAN_TARGETS_HILO (1<<1) /* * Device quirks. * Some devices, for example the CHEETAH 2 LVD, disconnects without * saving the DATA POINTER then reselects and terminates the IO. * On reselection, the automatic RESTORE DATA POINTER makes the * CURRENT DATA POINTER not point at the end of the IO. * This behaviour just breaks our calculation of the residual. * For now, we just force an AUTO SAVE on disconnection and will * fix that in a further driver version. */ #define SYM_QUIRK_AUTOSAVE 1 /* * Misc. */ #define SYM_SNOOP_TIMEOUT (10000000) #define SYM_PCI_IO PCIR_MAPS #define SYM_PCI_MMIO (PCIR_MAPS + 4) #define SYM_PCI_RAM (PCIR_MAPS + 8) #define SYM_PCI_RAM64 (PCIR_MAPS + 12) /* * Back-pointer from the CAM CCB to our data structures. */ #define sym_hcb_ptr spriv_ptr0 /* #define sym_ccb_ptr spriv_ptr1 */ /* * We mostly have to deal with pointers. * Thus these typedef's. 
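 *
 * One consumer of these typedefs is the DSA-to-CCB lookup built on
 * CCB_HASH_CODE() above; a sketch with assumed field names (a hash
 * chain head array in the HCB, link_ccbh as the chain link):
 *
 *	ccb_p cp = np->ccbh[CCB_HASH_CODE(dsa)];
 *	while (cp != NULL && cp->ccb_ba != dsa)
 *		cp = cp->link_ccbh;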
*/ typedef struct sym_tcb *tcb_p; typedef struct sym_lcb *lcb_p; typedef struct sym_ccb *ccb_p; typedef struct sym_hcb *hcb_p; /* * Gather negotiable parameters value */ struct sym_trans { #ifdef FreeBSD_New_Tran_Settings u8 scsi_version; u8 spi_version; #endif u8 period; u8 offset; u8 width; u8 options; /* PPR options */ }; struct sym_tinfo { struct sym_trans current; struct sym_trans goal; struct sym_trans user; }; #define BUS_8_BIT MSG_EXT_WDTR_BUS_8_BIT #define BUS_16_BIT MSG_EXT_WDTR_BUS_16_BIT /* * Global TCB HEADER. * * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the TCB to a global * address after selection. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_tcbh { /* * Scripts bus addresses of LUN table accessed from scripts. * LUN #0 is a special case, since multi-lun devices are rare, * and we we want to speed-up the general case and not waste * resources. */ u32 luntbl_sa; /* bus address of this table */ u32 lun0_sa; /* bus address of LCB #0 */ /* * Actual SYNC/WIDE IO registers value for this target. * 'sval', 'wval' and 'uval' are read from SCRIPTS and * so have alignment constraints. */ /*0*/ u_char uval; /* -> SCNTL4 register */ /*1*/ u_char sval; /* -> SXFER io register */ /*2*/ u_char filler1; /*3*/ u_char wval; /* -> SCNTL3 io register */ }; /* * Target Control Block */ struct sym_tcb { /* * TCB header. * Assumed at offset 0. */ /*0*/ struct sym_tcbh head; /* * LUN table used by the SCRIPTS processor. * An array of bus addresses is used on reselection. */ u32 *luntbl; /* LCBs bus address table */ /* * LUN table used by the C code. */ lcb_p lun0p; /* LCB of LUN #0 (usual case) */ #if SYM_CONF_MAX_LUN > 1 lcb_p *lunmp; /* Other LCBs [1..MAX_LUN] */ #endif /* * Bitmap that tells about LUNs that succeeded at least * 1 IO and therefore assumed to be a real device. * Avoid useless allocation of the LCB structure. */ u32 lun_map[(SYM_CONF_MAX_LUN+31)/32]; /* * Bitmap that tells about LUNs that haven't yet an LCB * allocated (not discovered or LCB allocation failed). */ u32 busy0_map[(SYM_CONF_MAX_LUN+31)/32]; /* * Transfer capabilities (SIP) */ struct sym_tinfo tinfo; /* * Keep track of the CCB used for the negotiation in order * to ensure that only 1 negotiation is queued at a time. */ ccb_p nego_cp; /* CCB used for the nego */ /* * Set when we want to reset the device. */ u_char to_reset; /* * Other user settable limits and options. * These limits are read from the NVRAM if present. */ u_char usrflags; u_short usrtags; }; /* * Global LCB HEADER. * * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the LCB to a global * address after selection. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_lcbh { /* * SCRIPTS address jumped by SCRIPTS on reselection. * For not probed logical units, this address points to * SCRIPTS that deal with bad LU handling (must be at * offset zero of the LCB for that reason). */ /*0*/ u32 resel_sa; /* * Task (bus address of a CCB) read from SCRIPTS that points * to the unique ITL nexus allowed to be disconnected. */ u32 itl_task_sa; /* * Task table bus address (read from SCRIPTS). */ u32 itlq_tbl_sa; }; /* * Logical Unit Control Block */ struct sym_lcb { /* * TCB header. * Assumed at offset 0. */ /*0*/ struct sym_lcbh head; /* * Task table read from SCRIPTS that contains pointers to * ITLQ nexuses. 
The bus address read from SCRIPTS is * inside the header. */ u32 *itlq_tbl; /* Kernel virtual address */ /* * Busy CCBs management. */ u_short busy_itlq; /* Number of busy tagged CCBs */ u_short busy_itl; /* Number of busy untagged CCBs */ /* * Circular tag allocation buffer. */ u_short ia_tag; /* Tag allocation index */ u_short if_tag; /* Tag release index */ u_char *cb_tags; /* Circular tags buffer */ /* * Set when we want to clear all tasks. */ u_char to_clear; /* * Capabilities. */ u_char user_flags; u_char current_flags; }; /* * Action from SCRIPTS on a task. * Is part of the CCB, but is also used separately to plug * error handling action to perform from SCRIPTS. */ struct sym_actscr { u32 start; /* Jumped by SCRIPTS after selection */ u32 restart; /* Jumped by SCRIPTS on relection */ }; /* * Phase mismatch context. * * It is part of the CCB and is used as parameters for the * DATA pointer. We need two contexts to handle correctly the * SAVED DATA POINTER. */ struct sym_pmc { struct sym_tblmove sg; /* Updated interrupted SG block */ u32 ret; /* SCRIPT return address */ }; /* * LUN control block lookup. * We use a direct pointer for LUN #0, and a table of * pointers which is only allocated for devices that support * LUN(s) > 0. */ #if SYM_CONF_MAX_LUN <= 1 #define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : 0 #else #define sym_lp(np, tp, lun) \ (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0 #endif /* * Status are used by the host and the script processor. * * The last four bytes (status[4]) are copied to the * scratchb register (declared as scr0..scr3) just after the * select/reselect, and copied back just after disconnecting. * Inside the script the XX_REG are used. */ /* * Last four bytes (script) */ #define QU_REG scr0 #define HS_REG scr1 #define HS_PRT nc_scr1 #define SS_REG scr2 #define SS_PRT nc_scr2 #define HF_REG scr3 #define HF_PRT nc_scr3 /* * Last four bytes (host) */ #define actualquirks phys.head.status[0] #define host_status phys.head.status[1] #define ssss_status phys.head.status[2] #define host_flags phys.head.status[3] /* * Host flags */ #define HF_IN_PM0 1u #define HF_IN_PM1 (1u<<1) #define HF_ACT_PM (1u<<2) #define HF_DP_SAVED (1u<<3) #define HF_SENSE (1u<<4) #define HF_EXT_ERR (1u<<5) #define HF_DATA_IN (1u<<6) #ifdef SYM_CONF_IARB_SUPPORT #define HF_HINT_IARB (1u<<7) #endif /* * Global CCB HEADER. * * Due to lack of indirect addressing on earlier NCR chips, * this substructure is copied from the ccb to a global * address after selection (or reselection) and copied back * before disconnect. * For SYMBIOS chips that support LOAD/STORE this copy is * not needed and thus not performed. */ struct sym_ccbh { /* * Start and restart SCRIPTS addresses (must be at 0). */ /*0*/ struct sym_actscr go; /* * SCRIPTS jump address that deal with data pointers. * 'savep' points to the position in the script responsible * for the actual transfer of data. * It's written on reception of a SAVE_DATA_POINTER message. */ u32 savep; /* Jump address to saved data pointer */ u32 lastp; /* SCRIPTS address at end of data */ u32 goalp; /* Not accessed for now from SCRIPTS */ /* * Status fields. */ u8 status[4]; }; /* * Data Structure Block * * During execution of a ccb by the script processor, the * DSA (data structure address) register points to this * substructure of the ccb. */ struct sym_dsb { /* * CCB header. * Also assumed at offset 0 of the sym_ccb structure. */ /*0*/ struct sym_ccbh head; /* * Phase mismatch contexts. 
* We need two to handle correctly the SAVED DATA POINTER. * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic * for address calculation from SCRIPTS. */ struct sym_pmc pm0; struct sym_pmc pm1; /* * Table data for Script */ struct sym_tblsel select; struct sym_tblmove smsg; struct sym_tblmove smsg_ext; struct sym_tblmove cmd; struct sym_tblmove sense; struct sym_tblmove wresid; struct sym_tblmove data [SYM_CONF_MAX_SG]; }; /* * Our Command Control Block */ struct sym_ccb { /* * This is the data structure which is pointed by the DSA * register when it is executed by the script processor. * It must be the first entry. */ struct sym_dsb phys; /* * Pointer to CAM ccb and related stuff. */ union ccb *cam_ccb; /* CAM scsiio ccb */ u8 cdb_buf[16]; /* Copy of CDB */ u8 *sns_bbuf; /* Bounce buffer for sense data */ #define SYM_SNS_BBUF_LEN sizeof(struct scsi_sense_data) int data_len; /* Total data length */ int segments; /* Number of SG segments */ /* * Miscellaneous status'. */ u_char nego_status; /* Negotiation status */ u_char xerr_status; /* Extended error flags */ u32 extra_bytes; /* Extraneous bytes transferred */ /* * Message areas. * We prepare a message to be sent after selection. * We may use a second one if the command is rescheduled * due to CHECK_CONDITION or COMMAND TERMINATED. * Contents are IDENTIFY and SIMPLE_TAG. * While negotiating sync or wide transfer, * a SDTR or WDTR message is appended. */ u_char scsi_smsg [12]; u_char scsi_smsg2[12]; /* * Auto request sense related fields. */ u_char sensecmd[6]; /* Request Sense command */ u_char sv_scsi_status; /* Saved SCSI status */ u_char sv_xerr_status; /* Saved extended status */ int sv_resid; /* Saved residual */ /* * Map for the DMA of user data. */ #ifdef FreeBSD_Bus_Dma_Abstraction void *arg; /* Argument for some callback */ bus_dmamap_t dmamap; /* DMA map for user data */ u_char dmamapped; #define SYM_DMA_NONE 0 #define SYM_DMA_READ 1 #define SYM_DMA_WRITE 2 #endif /* * Other fields. */ u32 ccb_ba; /* BUS address of this CCB */ u_short tag; /* Tag for this transfer */ /* NO_TAG means no tag */ u_char target; u_char lun; ccb_p link_ccbh; /* Host adapter CCB hash chain */ SYM_QUEHEAD link_ccbq; /* Link to free/busy CCB queue */ u32 startp; /* Initial data pointer */ int ext_sg; /* Extreme data pointer, used */ int ext_ofs; /* to calculate the residual. */ u_char to_abort; /* Want this IO to be aborted */ }; #define CCB_BA(cp,lbl) (cp->ccb_ba + offsetof(struct sym_ccb, lbl)) /* * Host Control Block */ struct sym_hcb { /* * Global headers. * Due to poorness of addressing capabilities, earlier * chips (810, 815, 825) copy part of the data structures * (CCB, TCB and LCB) in fixed areas. */ #ifdef SYM_CONF_GENERIC_SUPPORT struct sym_ccbh ccb_head; struct sym_tcbh tcb_head; struct sym_lcbh lcb_head; #endif /* * Idle task and invalid task actions and * their bus addresses. */ struct sym_actscr idletask, notask, bad_itl, bad_itlq; vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba; /* * Dummy lun table to protect us against target * returning bad lun number on reselection. */ u32 *badluntbl; /* Table physical address */ u32 badlun_sa; /* SCRIPT handler BUS address */ /* * Bus address of this host control block. */ u32 hcb_ba; /* * Bit 32-63 of the on-chip RAM bus address in LE format. * The START_RAM64 script loads the MMRS and MMWS from this * field. */ u32 scr_ram_seg; /* * Chip and controller indentification. 
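 */

/*
 * Editorial illustration (not part of the original source): CCB_BA()
 * above turns a host-side field reference into the bus address that
 * SCRIPTS must use.  A sketch, assuming 'cp' points to a CCB whose
 * 'ccb_ba' has already been set:
 */
#if 0
	/* Bus address of the first phase-mismatch return field: */
	u32 pm0_ret_ba = CCB_BA(cp, phys.pm0.ret);
	/* i.e. cp->ccb_ba + offsetof(struct sym_ccb, phys.pm0.ret) */
#endif

/*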
*/ #ifdef FreeBSD_Bus_Io_Abstraction device_t device; #else pcici_t pci_tag; #endif int unit; char inst_name[8]; /* * Initial value of some IO register bits. * These values are assumed to have been set by BIOS, and may * be used to probe adapter implementation differences. */ u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4, sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4, sv_stest1; /* * Actual initial value of IO register bits used by the * driver. They are loaded at initialisation according to * features that are to be enabled/disabled. */ u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4, rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4; /* * Target data. */ struct sym_tcb target[SYM_CONF_MAX_TARGET]; /* * Target control block bus address array used by the SCRIPT * on reselection. */ u32 *targtbl; u32 targtbl_ba; /* * CAM SIM information for this instance. */ struct cam_sim *sim; struct cam_path *path; /* * Allocated hardware resources. */ #ifdef FreeBSD_Bus_Io_Abstraction struct resource *irq_res; struct resource *io_res; struct resource *mmio_res; struct resource *ram_res; int ram_id; void *intr; #endif /* * Bus stuff. * * My understanding of PCI is that all agents must share the * same addressing range and model. * But some hardware architecture guys provide complex and * brain-deaded stuff that makes shit. * This driver only support PCI compliant implementations and * deals with part of the BUS stuff complexity only to fit O/S * requirements. */ #ifdef FreeBSD_Bus_Io_Abstraction bus_space_handle_t io_bsh; bus_space_tag_t io_tag; bus_space_handle_t mmio_bsh; bus_space_tag_t mmio_tag; bus_space_handle_t ram_bsh; bus_space_tag_t ram_tag; #endif /* * DMA stuff. */ #ifdef FreeBSD_Bus_Dma_Abstraction bus_dma_tag_t bus_dmat; /* DMA tag from parent BUS */ bus_dma_tag_t data_dmat; /* DMA tag for user data */ #endif /* * Virtual and physical bus addresses of the chip. */ vm_offset_t mmio_va; /* MMIO kernel virtual address */ vm_offset_t mmio_pa; /* MMIO CPU physical address */ vm_offset_t mmio_ba; /* MMIO BUS address */ int mmio_ws; /* MMIO Window size */ vm_offset_t ram_va; /* RAM kernel virtual address */ vm_offset_t ram_pa; /* RAM CPU physical address */ vm_offset_t ram_ba; /* RAM BUS address */ int ram_ws; /* RAM window size */ u32 io_port; /* IO port address */ /* * SCRIPTS virtual and physical bus addresses. * 'script' is loaded in the on-chip RAM if present. * 'scripth' stays in main memory for all chips except the * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM. */ u_char *scripta0; /* Copies of script and scripth */ u_char *scriptb0; /* Copies of script and scripth */ vm_offset_t scripta_ba; /* Actual script and scripth */ vm_offset_t scriptb_ba; /* bus addresses. */ vm_offset_t scriptb0_ba; u_short scripta_sz; /* Actual size of script A */ u_short scriptb_sz; /* Actual size of script B */ /* * Bus addresses, setup and patch methods for * the selected firmware. */ struct sym_fwa_ba fwa_bas; /* Useful SCRIPTA bus addresses */ struct sym_fwb_ba fwb_bas; /* Useful SCRIPTB bus addresses */ void (*fw_setup)(hcb_p np, struct sym_fw *fw); void (*fw_patch)(hcb_p np); char *fw_name; /* * General controller parameters and configuration. 
*/ u_short device_id; /* PCI device id */ u_char revision_id; /* PCI device revision id */ u_int features; /* Chip features map */ u_char myaddr; /* SCSI id of the adapter */ u_char maxburst; /* log base 2 of dwords burst */ u_char maxwide; /* Maximum transfer width */ u_char minsync; /* Min sync period factor (ST) */ u_char maxsync; /* Max sync period factor (ST) */ u_char maxoffs; /* Max scsi offset (ST) */ u_char minsync_dt; /* Min sync period factor (DT) */ u_char maxsync_dt; /* Max sync period factor (DT) */ u_char maxoffs_dt; /* Max scsi offset (DT) */ u_char multiplier; /* Clock multiplier (1,2,4) */ u_char clock_divn; /* Number of clock divisors */ u32 clock_khz; /* SCSI clock frequency in KHz */ u32 pciclk_khz; /* Estimated PCI clock in KHz */ /* * Start queue management. * It is filled up by the host processor and accessed by the * SCRIPTS processor in order to start SCSI commands. */ volatile /* Prevent code optimizations */ u32 *squeue; /* Start queue virtual address */ u32 squeue_ba; /* Start queue BUS address */ u_short squeueput; /* Next free slot of the queue */ u_short actccbs; /* Number of allocated CCBs */ /* * Command completion queue. * It is the same size as the start queue to avoid overflow. */ u_short dqueueget; /* Next position to scan */ volatile /* Prevent code optimizations */ u32 *dqueue; /* Completion (done) queue */ u32 dqueue_ba; /* Done queue BUS address */ /* * Miscellaneous buffers accessed by the scripts-processor. * They shall be DWORD aligned, because they may be read or * written with a script command. */ u_char msgout[8]; /* Buffer for MESSAGE OUT */ u_char msgin [8]; /* Buffer for MESSAGE IN */ u32 lastmsg; /* Last SCSI message sent */ u_char scratch; /* Scratch for SCSI receive */ /* * Miscellaneous configuration and status parameters. */ u_char usrflags; /* Miscellaneous user flags */ u_char scsi_mode; /* Current SCSI BUS mode */ u_char verbose; /* Verbosity for this controller*/ u32 cache; /* Used for cache test at init. */ /* * CCB lists and queue. */ ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */ SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */ SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */ /* * During error handling and/or recovery, * active CCBs that are to be completed with * error or requeued are moved from the busy_ccbq * to the comp_ccbq prior to completion. */ SYM_QUEHEAD comp_ccbq; /* * CAM CCB pending queue. */ SYM_QUEHEAD cam_ccbq; /* * IMMEDIATE ARBITRATION (IARB) control. * * We keep track in 'last_cp' of the last CCB that has been * queued to the SCRIPTS processor and clear 'last_cp' when * this CCB completes. If last_cp is not zero at the moment * we queue a new CCB, we set a flag in 'last_cp' that is * used by the SCRIPTS as a hint for setting IARB. * We donnot set more than 'iarb_max' consecutive hints for * IARB in order to leave devices a chance to reselect. * By the way, any non zero value of 'iarb_max' is unfair. :) */ #ifdef SYM_CONF_IARB_SUPPORT u_short iarb_max; /* Max. # consecutive IARB hints*/ u_short iarb_count; /* Actual # of these hints */ ccb_p last_cp; #endif /* * Command abort handling. * We need to synchronize tightly with the SCRIPTS * processor in order to handle things correctly. 
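 */

/*
 * Editorial illustration (not part of the original source): the start
 * queue described above is a ring of MAX_QUEUE slot pairs (MAX_QUEUE
 * is defined elsewhere in the driver); each pair holds a job and a
 * link, so the put index advances by 2 and wraps:
 */
#if 0
	u_short qidx = np->squeueput + 2;	/* next slot pair */
	if (qidx >= MAX_QUEUE*2)		/* wrap around the ring */
		qidx = 0;
	/* sym_put_start_queue() below uses exactly this arithmetic. */
#endif

/*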
*/ u_char abrt_msg[4]; /* Message to send buffer */ struct sym_tblmove abrt_tbl; /* Table for the MOV of it */ struct sym_tblsel abrt_sel; /* Sync params for selection */ u_char istat_sem; /* Tells the chip to stop (SEM) */ }; #define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl)) /* * Return the name of the controller. */ static __inline char *sym_name(hcb_p np) { return np->inst_name; } /*--------------------------------------------------------------------------*/ /*------------------------------ FIRMWARES ---------------------------------*/ /*--------------------------------------------------------------------------*/ /* * This stuff will be moved to a separate source file when * the driver will be broken into several source modules. */ /* * Macros used for all firmwares. */ #define SYM_GEN_A(s, label) ((short) offsetof(s, label)), #define SYM_GEN_B(s, label) ((short) offsetof(s, label)), #define PADDR_A(label) SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label) #define PADDR_B(label) SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label) #ifdef SYM_CONF_GENERIC_SUPPORT /* * Allocate firmware #1 script area. */ #define SYM_FWA_SCR sym_fw1a_scr #define SYM_FWB_SCR sym_fw1b_scr #include struct sym_fwa_ofs sym_fw1a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; struct sym_fwb_ofs sym_fw1b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Allocate firmware #2 script area. */ #define SYM_FWA_SCR sym_fw2a_scr #define SYM_FWB_SCR sym_fw2b_scr #include struct sym_fwa_ofs sym_fw2a_ofs = { SYM_GEN_FW_A(struct SYM_FWA_SCR) }; struct sym_fwb_ofs sym_fw2b_ofs = { SYM_GEN_FW_B(struct SYM_FWB_SCR) SYM_GEN_B(struct SYM_FWB_SCR, start64) SYM_GEN_B(struct SYM_FWB_SCR, pm_handle) }; #undef SYM_FWA_SCR #undef SYM_FWB_SCR #undef SYM_GEN_A #undef SYM_GEN_B #undef PADDR_A #undef PADDR_B #ifdef SYM_CONF_GENERIC_SUPPORT /* * Patch routine for firmware #1. */ static void sym_fw1_patch(hcb_p np) { struct sym_fw1a_scr *scripta0; struct sym_fw1b_scr *scriptb0; scripta0 = (struct sym_fw1a_scr *) np->scripta0; scriptb0 = (struct sym_fw1b_scr *) np->scriptb0; /* * Remove LED support if not needed. */ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some data in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); } #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Patch routine for firmware #2. */ static void sym_fw2_patch(hcb_p np) { struct sym_fw2a_scr *scripta0; struct sym_fw2b_scr *scriptb0; scripta0 = (struct sym_fw2a_scr *) np->scripta0; scriptb0 = (struct sym_fw2b_scr *) np->scriptb0; /* * Remove LED support if not needed. 
*/ if (!(np->features & FE_LED0)) { scripta0->idle[0] = cpu_to_scr(SCR_NO_OP); scripta0->reselected[0] = cpu_to_scr(SCR_NO_OP); scripta0->start[0] = cpu_to_scr(SCR_NO_OP); } #ifdef SYM_CONF_IARB_SUPPORT /* * If user does not want to use IMMEDIATE ARBITRATION * when we are reselected while attempting to arbitrate, * patch the SCRIPTS accordingly with a SCRIPT NO_OP. */ if (!SYM_CONF_SET_IARB_ON_ARB_LOST) scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP); #endif /* * Patch some variable in SCRIPTS. * - start and done queue initial bus address. * - target bus address table bus address. */ scriptb0->startpos[0] = cpu_to_scr(np->squeue_ba); scriptb0->done_pos[0] = cpu_to_scr(np->dqueue_ba); scriptb0->targtbl[0] = cpu_to_scr(np->targtbl_ba); /* * Remove the load of SCNTL4 on reselection if not a C10. */ if (!(np->features & FE_C10)) { scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP); scripta0->resel_scntl4[1] = cpu_to_scr(0); } /* * Remove a couple of work-arounds specific to C1010 if * they are not desirable. See `sym_fw2.h' for more details. */ if (!(np->device_id == PCI_ID_LSI53C1010_2 && np->revision_id < 0x1 && np->pciclk_khz < 60000)) { scripta0->datao_phase[0] = cpu_to_scr(SCR_NO_OP); scripta0->datao_phase[1] = cpu_to_scr(0); } if (!(np->device_id == PCI_ID_LSI53C1010 && /* np->revision_id < 0xff */ 1)) { scripta0->sel_done[0] = cpu_to_scr(SCR_NO_OP); scripta0->sel_done[1] = cpu_to_scr(0); } /* * Patch some other variables in SCRIPTS. * These ones are loaded by the SCRIPTS processor. */ scriptb0->pm0_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm0_data)); scriptb0->pm1_data_addr[0] = cpu_to_scr(np->scripta_ba + offsetof(struct sym_fw2a_scr, pm1_data)); } /* * Fill the data area in scripts. * To be done for all firmwares. */ static void sym_fw_fill_data (u32 *in, u32 *out) { int i; for (i = 0; i < SYM_CONF_MAX_SG; i++) { *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN; *in++ = offsetof (struct sym_dsb, data[i]); *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT; *out++ = offsetof (struct sym_dsb, data[i]); } } /* * Setup useful script bus addresses. * To be done for all firmwares. */ static void sym_fw_setup_bus_addresses(hcb_p np, struct sym_fw *fw) { u32 *pa; u_short *po; int i; /* * Build the bus address table for script A * from the script A offset table. */ po = (u_short *) fw->a_ofs; pa = (u32 *) &np->fwa_bas; for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++) pa[i] = np->scripta_ba + po[i]; /* * Same for script B. */ po = (u_short *) fw->b_ofs; pa = (u32 *) &np->fwb_bas; for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++) pa[i] = np->scriptb_ba + po[i]; } #ifdef SYM_CONF_GENERIC_SUPPORT /* * Setup routine for firmware #1. */ static void sym_fw1_setup(hcb_p np, struct sym_fw *fw) { struct sym_fw1a_scr *scripta0; struct sym_fw1b_scr *scriptb0; scripta0 = (struct sym_fw1a_scr *) np->scripta0; scriptb0 = (struct sym_fw1b_scr *) np->scriptb0; /* * Fill variable parts in scripts. */ sym_fw_fill_data(scripta0->data_in, scripta0->data_out); /* * Setup bus addresses used from the C code.. */ sym_fw_setup_bus_addresses(np, fw); } #endif /* SYM_CONF_GENERIC_SUPPORT */ /* * Setup routine for firmware #2. */ static void sym_fw2_setup(hcb_p np, struct sym_fw *fw) { struct sym_fw2a_scr *scripta0; struct sym_fw2b_scr *scriptb0; scripta0 = (struct sym_fw2a_scr *) np->scripta0; scriptb0 = (struct sym_fw2b_scr *) np->scriptb0; /* * Fill variable parts in scripts. */ sym_fw_fill_data(scripta0->data_in, scripta0->data_out); /* * Setup bus addresses used from the C code.. 
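 */

/*
 * Editorial illustration (not part of the original source): what
 * sym_fw_fill_data() above generates.  Each scatter/gather slot of
 * the DSB becomes one table-indirect CHMOV instruction pair:
 */
#if 0
	/* First entries of the 'data_in' script table: */
	in[0] = SCR_CHMOV_TBL ^ SCR_DATA_IN;		/* opcode */
	in[1] = offsetof (struct sym_dsb, data[0]);	/* table offset */
	in[2] = SCR_CHMOV_TBL ^ SCR_DATA_IN;
	in[3] = offsetof (struct sym_dsb, data[1]);
	/* ...and so on, up to data[SYM_CONF_MAX_SG-1]. */
#endif

/*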
*/ sym_fw_setup_bus_addresses(np, fw); } /* * Allocate firmware descriptors. */ #ifdef SYM_CONF_GENERIC_SUPPORT static struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic"); #endif /* SYM_CONF_GENERIC_SUPPORT */ static struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based"); /* * Find the most appropriate firmware for a chip. */ static struct sym_fw * sym_find_firmware(struct sym_pci_chip *chip) { if (chip->features & FE_LDSTR) return &sym_fw2; #ifdef SYM_CONF_GENERIC_SUPPORT else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_DAC))) return &sym_fw1; #endif else return 0; } /* * Bind a script to physical addresses. */ static void sym_fw_bind_script (hcb_p np, u32 *start, int len) { u32 opcode, new, old, tmp1, tmp2; u32 *end, *cur; int relocs; cur = start; end = start + len/4; while (cur < end) { opcode = *cur; /* * If we forget to change the length * in scripts, a field will be * padded with 0. This is an illegal * command. */ if (opcode == 0) { printf ("%s: ERROR0 IN SCRIPT at %d.\n", sym_name(np), (int) (cur-start)); MDELAY (10000); ++cur; continue; }; /* * We use the bogus value 0xf00ff00f ;-) * to reserve data area in SCRIPTS. */ if (opcode == SCR_DATA_ZERO) { *cur++ = 0; continue; } if (DEBUG_FLAGS & DEBUG_SCRIPT) printf ("%d: <%x>\n", (int) (cur-start), (unsigned)opcode); /* * We don't have to decode ALL commands */ switch (opcode >> 28) { case 0xf: /* * LOAD / STORE DSA relative, don't relocate. */ relocs = 0; break; case 0xe: /* * LOAD / STORE absolute. */ relocs = 1; break; case 0xc: /* * COPY has TWO arguments. */ relocs = 2; tmp1 = cur[1]; tmp2 = cur[2]; if ((tmp1 ^ tmp2) & 3) { printf ("%s: ERROR1 IN SCRIPT at %d.\n", sym_name(np), (int) (cur-start)); MDELAY (10000); } /* * If PREFETCH feature not enabled, remove * the NO FLUSH bit if present. */ if ((opcode & SCR_NO_FLUSH) && !(np->features & FE_PFEN)) { opcode = (opcode & ~SCR_NO_FLUSH); } break; case 0x0: /* * MOVE/CHMOV (absolute address) */ if (!(np->features & FE_WIDE)) opcode = (opcode | OPC_MOVE); relocs = 1; break; case 0x1: /* * MOVE/CHMOV (table indirect) */ if (!(np->features & FE_WIDE)) opcode = (opcode | OPC_MOVE); relocs = 0; break; case 0x8: /* * JUMP / CALL * dont't relocate if relative :-) */ if (opcode & 0x00800000) relocs = 0; else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/ relocs = 2; else relocs = 1; break; case 0x4: case 0x5: case 0x6: case 0x7: relocs = 1; break; default: relocs = 0; break; }; /* * Scriptify:) the opcode. */ *cur++ = cpu_to_scr(opcode); /* * If no relocation, assume 1 argument * and just scriptize:) it. */ if (!relocs) { *cur = cpu_to_scr(*cur); ++cur; continue; } /* * Otherwise performs all needed relocations. */ while (relocs--) { old = *cur; switch (old & RELOC_MASK) { case RELOC_REGISTER: new = (old & ~RELOC_MASK) + np->mmio_ba; break; case RELOC_LABEL_A: new = (old & ~RELOC_MASK) + np->scripta_ba; break; case RELOC_LABEL_B: new = (old & ~RELOC_MASK) + np->scriptb_ba; break; case RELOC_SOFTC: new = (old & ~RELOC_MASK) + np->hcb_ba; break; case 0: /* * Don't relocate a 0 address. * They are mostly used for patched or * script self-modified areas. 
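 */

/*
 * Editorial illustration (not part of the original source): the
 * relocation performed above for a script A label, with an invented
 * offset value:
 */
#if 0
	u32 old = RELOC_LABEL_A | 0x0120;	/* tagged offset into script A */
	u32 new = (old & ~RELOC_MASK) + np->scripta_ba;
	/* 'new' is now an absolute bus address the chip can use. */
#endif

/*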
*/ if (old == 0) { new = old; break; } /* fall through */ default: new = 0; panic("sym_fw_bind_script: " "weird relocation %x\n", old); break; } *cur++ = cpu_to_scr(new); } }; } /*--------------------------------------------------------------------------*/ /*--------------------------- END OF FIRMARES -----------------------------*/ /*--------------------------------------------------------------------------*/ /* * Function prototypes. */ static void sym_save_initial_setting (hcb_p np); static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram); static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr); static void sym_put_start_queue (hcb_p np, ccb_p cp); static void sym_chip_reset (hcb_p np); static void sym_soft_reset (hcb_p np); static void sym_start_reset (hcb_p np); static int sym_reset_scsi_bus (hcb_p np, int enab_int); static int sym_wakeup_done (hcb_p np); static void sym_flush_busy_queue (hcb_p np, int cam_status); static void sym_flush_comp_queue (hcb_p np, int cam_status); static void sym_init (hcb_p np, int reason); static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp); static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak); static void sym_setwide (hcb_p np, ccb_p cp, u_char wide); static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak); static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak); static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat); static void sym_intr (void *arg); static void sym_poll (struct cam_sim *sim); static void sym_recover_scsi_int (hcb_p np, u_char hsts); static void sym_int_sto (hcb_p np); static void sym_int_udc (hcb_p np); static void sym_int_sbmc (hcb_p np); static void sym_int_par (hcb_p np, u_short sist); static void sym_int_ma (hcb_p np); static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task); static void sym_sir_bad_scsi_status (hcb_p np, int num, ccb_p cp); static int sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task); static void sym_sir_task_recovery (hcb_p np, int num); static int sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs); static void sym_modify_dp (hcb_p np, tcb_p tp, ccb_p cp, int ofs); static int sym_compute_residual (hcb_p np, ccb_p cp); static int sym_show_msg (u_char * msg); static void sym_print_msg (ccb_p cp, char *label, u_char *msg); static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp); static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp); static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp); static void sym_int_sir (hcb_p np); static void sym_free_ccb (hcb_p np, ccb_p cp); static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order); static ccb_p sym_alloc_ccb (hcb_p np); static ccb_p sym_ccb_from_dsa (hcb_p np, u32 dsa); static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln); static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln); static int sym_snooptest (hcb_p np); static void sym_selectclock(hcb_p np, u_char scntl3); static void sym_getclock (hcb_p np, int mult); static int sym_getpciclock (hcb_p np); static void sym_complete_ok (hcb_p np, ccb_p cp); static void sym_complete_error (hcb_p np, ccb_p cp); static void sym_timeout (void *arg); static int sym_abort_scsiio (hcb_p np, 
union ccb *ccb, int timed_out); static void sym_reset_dev (hcb_p np, union ccb *ccb); static void sym_action (struct cam_sim *sim, union ccb *ccb); static void sym_action1 (struct cam_sim *sim, union ccb *ccb); static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); #ifdef FreeBSD_Bus_Dma_Abstraction static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs); #else static int sym_scatter_virtual (hcb_p np, ccb_p cp, vm_offset_t vaddr, vm_size_t len); static int sym_scatter_sg_virtual (hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs); static int sym_scatter_physical (hcb_p np, ccb_p cp, vm_offset_t paddr, vm_size_t len); #endif static int sym_scatter_sg_physical (hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs); static void sym_action2 (struct cam_sim *sim, union ccb *ccb); static void sym_update_trans (hcb_p np, tcb_p tp, struct sym_trans *tip, struct ccb_trans_settings *cts); static void sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts); #ifdef FreeBSD_Bus_Io_Abstraction static struct sym_pci_chip *sym_find_pci_chip (device_t dev); static int sym_pci_probe (device_t dev); static int sym_pci_attach (device_t dev); #else static struct sym_pci_chip *sym_find_pci_chip (pcici_t tag); static const char *sym_pci_probe (pcici_t tag, pcidi_t type); static void sym_pci_attach (pcici_t tag, int unit); static int sym_pci_attach2 (pcici_t tag, int unit); #endif static void sym_pci_free (hcb_p np); static int sym_cam_attach (hcb_p np); static void sym_cam_free (hcb_p np); static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram); static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp); static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp); /* * Print something which allows to retrieve the controler type, * unit, target, lun concerned by a kernel message. */ static void PRINT_TARGET (hcb_p np, int target) { printf ("%s:%d:", sym_name(np), target); } static void PRINT_LUN(hcb_p np, int target, int lun) { printf ("%s:%d:%d:", sym_name(np), target, lun); } static void PRINT_ADDR (ccb_p cp) { if (cp && cp->cam_ccb) xpt_print_path(cp->cam_ccb->ccb_h.path); } /* * Take into account this ccb in the freeze count. */ static void sym_freeze_cam_ccb(union ccb *ccb) { if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) { if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } } } /* * Set the status field of a CAM CCB. */ static __inline void sym_set_cam_status(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } /* * Get the status field of a CAM CCB. */ static __inline int sym_get_cam_status(union ccb *ccb) { return ccb->ccb_h.status & CAM_STATUS_MASK; } /* * Enqueue a CAM CCB. */ static void sym_enqueue_cam_ccb(hcb_p np, union ccb *ccb) { assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED)); ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb, ccb->ccb_h.timeout*hz/1000); ccb->ccb_h.status |= CAM_SIM_QUEUED; ccb->ccb_h.sym_hcb_ptr = np; sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq); } /* * Complete a pending CAM CCB. 
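 */

/*
 * Editorial illustration (not part of the original source): the two
 * helpers above change only the status code and keep the CAM flag
 * bits (CAM_DEV_QFRZN, CAM_SIM_QUEUED, ...) intact:
 */
#if 0
	sym_set_cam_status(ccb, CAM_REQ_CMP);
	if (sym_get_cam_status(ccb) == CAM_REQ_CMP) {
		/* flags outside CAM_STATUS_MASK were preserved */
	}
#endif

/*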
*/ static void sym_xpt_done(hcb_p np, union ccb *ccb) { if (ccb->ccb_h.status & CAM_SIM_QUEUED) { untimeout(sym_timeout, (caddr_t) ccb, ccb->ccb_h.timeout_ch); sym_remque(sym_qptr(&ccb->ccb_h.sim_links)); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.sym_hcb_ptr = 0; } if (ccb->ccb_h.flags & CAM_DEV_QFREEZE) sym_freeze_cam_ccb(ccb); xpt_done(ccb); } static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status) { sym_set_cam_status(ccb, cam_status); sym_xpt_done(np, ccb); } /* * SYMBIOS chip clock divisor table. * * Divisors are multiplied by 10,000,000 in order to make * calculations more simple. */ #define _5M 5000000 static u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /* * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, * 128 transfers. All chips support at least 16 transfers * bursts. The 825A, 875 and 895 chips support bursts of up * to 128 transfers and the 895A and 896 support bursts of up * to 64 transfers. All other chips support up to 16 * transfers bursts. * * For PCI 32 bit data transfers each transfer is a DWORD. * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. * * We use log base 2 (burst length) as internal code, with * value 0 meaning "burst disabled". */ /* * Burst length from burst code. */ #define burst_length(bc) (!(bc))? 0 : 1 << (bc) /* * Burst code from io register bits. */ #define burst_code(dmode, ctest4, ctest5) \ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 /* * Set initial io register bits from burst code. */ static __inline void sym_init_burst(hcb_p np, u_char bc) { np->rv_ctest4 &= ~0x80; np->rv_dmode &= ~(0x3 << 6); np->rv_ctest5 &= ~0x4; if (!bc) { np->rv_ctest4 |= 0x80; } else { --bc; np->rv_dmode |= ((bc & 0x3) << 6); np->rv_ctest5 |= (bc & 0x4); } } /* * Print out the list of targets that have some flag disabled by user. */ static void sym_print_targets_flag(hcb_p np, int mask, char *msg) { int cnt; int i; for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { if (i == np->myaddr) continue; if (np->target[i].usrflags & mask) { if (!cnt++) printf("%s: %s disabled for targets", sym_name(np), msg); printf(" %d", i); } } if (cnt) printf(".\n"); } /* * Save initial settings of some IO registers. * Assumed to have been set by BIOS. * We cannot reset the chip prior to reading the * IO registers, since informations will be lost. * Since the SCRIPTS processor may be running, this * is not safe on paper, but it seems to work quite * well. :) */ static void sym_save_initial_setting (hcb_p np) { np->sv_scntl0 = INB(nc_scntl0) & 0x0a; np->sv_scntl3 = INB(nc_scntl3) & 0x07; np->sv_dmode = INB(nc_dmode) & 0xce; np->sv_dcntl = INB(nc_dcntl) & 0xa8; np->sv_ctest3 = INB(nc_ctest3) & 0x01; np->sv_ctest4 = INB(nc_ctest4) & 0x80; np->sv_gpcntl = INB(nc_gpcntl); np->sv_stest1 = INB(nc_stest1); np->sv_stest2 = INB(nc_stest2) & 0x20; np->sv_stest4 = INB(nc_stest4); if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ np->sv_scntl4 = INB(nc_scntl4); np->sv_ctest5 = INB(nc_ctest5) & 0x04; } else np->sv_ctest5 = INB(nc_ctest5) & 0x24; } /* * Prepare io register values used by sym_init() according * to selected and supported features. */ static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram) { u_char burst_max; u32 period; int i; /* * Wide ? */ np->maxwide = (np->features & FE_WIDE)? 1 : 0; /* * Get the frequency of the chip's clock. 
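 */

/*
 * Editorial illustration (not part of the original source): a worked
 * round trip through the burst macros above, with CTEST4 bit 7 clear,
 * DMODE == 0xc0 and CTEST5 bit 2 clear:
 */
#if 0
	u_char bc  = burst_code(0xc0, 0x00, 0x00); /* (0xc0>>6) + 0 + 1 == 4 */
	int    len = burst_length(bc);		   /* 1 << 4 == 16 transfers */

	sym_init_burst(np, bc);	/* writes code 4 back: rv_dmode |= 3 << 6 */
#endif

/*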
*/ if (np->features & FE_QUAD) np->multiplier = 4; else if (np->features & FE_DBLR) np->multiplier = 2; else np->multiplier = 1; np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000; np->clock_khz *= np->multiplier; if (np->clock_khz != 40000) sym_getclock(np, np->multiplier); /* * Divisor to be used for async (timer pre-scaler). */ i = np->clock_divn - 1; while (--i >= 0) { if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { ++i; break; } } np->rv_scntl3 = i+1; /* * The C1010 uses hardwired divisors for async. * So, we just throw away, the async. divisor.:-) */ if (np->features & FE_C10) np->rv_scntl3 = 0; /* * Minimum synchronous period factor supported by the chip. * Btw, 'period' is in tenths of nanoseconds. */ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; if (period <= 250) np->minsync = 10; else if (period <= 303) np->minsync = 11; else if (period <= 500) np->minsync = 12; else np->minsync = (period + 40 - 1) / 40; /* * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). */ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) np->minsync = 25; else if (np->minsync < 12 && !(np->features & (FE_ULTRA2|FE_ULTRA3))) np->minsync = 12; /* * Maximum synchronous period factor supported by the chip. */ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); np->maxsync = period > 2540 ? 254 : period / 10; /* * If chip is a C1010, guess the sync limits in DT mode. */ if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { if (np->clock_khz == 160000) { np->minsync_dt = 9; np->maxsync_dt = 50; np->maxoffs_dt = 62; } } /* * 64 bit addressing (895A/896/1010) ? */ if (np->features & FE_DAC) #if BITS_PER_LONG > 32 np->rv_ccntl1 |= (XTIMOD | EXTIBMV); #else np->rv_ccntl1 |= (DDAC); #endif /* * Phase mismatch handled by SCRIPTS (895A/896/1010) ? */ if (np->features & FE_NOPM) np->rv_ccntl0 |= (ENPMJ); /* * C1010 Errata. * In dual channel mode, contention occurs if internal cycles * are used. Disable internal cycles. */ if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x2) np->rv_ccntl0 |= DILS; /* * Select burst length (dwords) */ burst_max = SYM_SETUP_BURST_ORDER; if (burst_max == 255) burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5); if (burst_max > 7) burst_max = 7; if (burst_max > np->maxburst) burst_max = np->maxburst; /* * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. * This chip and the 860 Rev 1 may wrongly use PCI cache line * based transactions on LOAD/STORE instructions. So we have * to prevent these chips from using such PCI transactions in * this driver. The generic ncr driver that does not use * LOAD/STORE instructions does not need this work-around. */ if ((np->device_id == PCI_ID_SYM53C810 && np->revision_id >= 0x10 && np->revision_id <= 0x11) || (np->device_id == PCI_ID_SYM53C860 && np->revision_id <= 0x1)) np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); /* * Select all supported special features. * If we are using on-board RAM for scripts, prefetch (PFEN) * does not help, but burst op fetch (BOF) does. * Disabling PFEN makes sure BOF will be used. 
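 */

/*
 * Editorial illustration (not part of the original source): the
 * minimum sync factor computation above with sample numbers.  For a
 * quadrupled 40 MHz clock (clock_khz == 160000):
 */
#if 0
	period = (4 * div_10M[0] + 160000 - 1) / 160000; /* == 250 tenths of ns */
	/* period <= 250, so minsync == 10, i.e. a 25 ns ST period. */
#endif

/*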
*/ if (np->features & FE_ERL) np->rv_dmode |= ERL; /* Enable Read Line */ if (np->features & FE_BOF) np->rv_dmode |= BOF; /* Burst Opcode Fetch */ if (np->features & FE_ERMP) np->rv_dmode |= ERMP; /* Enable Read Multiple */ #if 1 if ((np->features & FE_PFEN) && !np->ram_ba) #else if (np->features & FE_PFEN) #endif np->rv_dcntl |= PFEN; /* Prefetch Enable */ if (np->features & FE_CLSE) np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ if (np->features & FE_WRIE) np->rv_ctest3 |= WRIE; /* Write and Invalidate */ if (np->features & FE_DFS) np->rv_ctest5 |= DFS; /* Dma Fifo Size */ /* * Select some other */ if (SYM_SETUP_PCI_PARITY) np->rv_ctest4 |= MPEE; /* Master parity checking */ if (SYM_SETUP_SCSI_PARITY) np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ /* * Get parity checking, host ID and verbose mode from NVRAM */ np->myaddr = 255; sym_nvram_setup_host (np, nvram); /* * Get SCSI addr of host adapter (set by bios?). */ if (np->myaddr == 255) { np->myaddr = INB(nc_scid) & 0x07; if (!np->myaddr) np->myaddr = SYM_SETUP_HOST_ID; } /* * Prepare initial io register bits for burst length */ sym_init_burst(np, burst_max); /* * Set SCSI BUS mode. * - LVD capable chips (895/895A/896/1010) report the * current BUS mode through the STEST4 IO register. * - For previous generation chips (825/825A/875), * user has to tell us how to check against HVD, * since a 100% safe algorithm is not possible. */ np->scsi_mode = SMODE_SE; if (np->features & (FE_ULTRA2|FE_ULTRA3)) np->scsi_mode = (np->sv_stest4 & SMODE); else if (np->features & FE_DIFF) { if (SYM_SETUP_SCSI_DIFF == 1) { if (np->sv_scntl3) { if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; } else if (nvram->type == SYM_SYMBIOS_NVRAM) { if (!(INB(nc_gpreg) & 0x08)) np->scsi_mode = SMODE_HVD; } } else if (SYM_SETUP_SCSI_DIFF == 2) np->scsi_mode = SMODE_HVD; } if (np->scsi_mode == SMODE_HVD) np->rv_stest2 |= 0x20; /* * Set LED support from SCRIPTS. * Ignore this feature for boards known to use a * specific GPIO wiring and for the 895A, 896 * and 1010 that drive the LED directly. */ if ((SYM_SETUP_SCSI_LED || (nvram->type == SYM_SYMBIOS_NVRAM || (nvram->type == SYM_TEKRAM_NVRAM && np->device_id == PCI_ID_SYM53C895))) && !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) np->features |= FE_LED0; /* * Set irq mode. */ switch(SYM_SETUP_IRQ_MODE & 3) { case 2: np->rv_dcntl |= IRQM; break; case 1: np->rv_dcntl |= (np->sv_dcntl & IRQM); break; default: break; } /* * Configure targets according to driver setup. * If NVRAM present get targets setup from NVRAM. */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { tcb_p tp = &np->target[i]; #ifdef FreeBSD_New_Tran_Settings tp->tinfo.user.scsi_version = tp->tinfo.current.scsi_version= 2; tp->tinfo.user.spi_version = tp->tinfo.current.spi_version = 2; #endif tp->tinfo.user.period = np->minsync; tp->tinfo.user.offset = np->maxoffs; tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT; tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); tp->usrtags = SYM_SETUP_MAX_TAG; sym_nvram_setup_target (np, i, nvram); /* * For now, guess PPR/DT support from the period * and BUS width. */ if (np->features & FE_ULTRA3) { if (tp->tinfo.user.period <= 9 && tp->tinfo.user.width == BUS_16_BIT) { tp->tinfo.user.options |= PPR_OPT_DT; tp->tinfo.user.offset = np->maxoffs_dt; #ifdef FreeBSD_New_Tran_Settings tp->tinfo.user.spi_version = 3; #endif } } if (!tp->usrtags) tp->usrflags &= ~SYM_TAGS_ENABLED; } /* * Let user know about the settings. 
*/ i = nvram->type; printf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np), i == SYM_SYMBIOS_NVRAM ? "Symbios" : (i == SYM_TEKRAM_NVRAM ? "Tekram" : "No"), np->myaddr, (np->features & FE_ULTRA3) ? 80 : (np->features & FE_ULTRA2) ? 40 : (np->features & FE_ULTRA) ? 20 : 10, sym_scsi_bus_mode(np->scsi_mode), (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); /* * Tell him more on demand. */ if (sym_verbose) { printf("%s: %s IRQ line driver%s\n", sym_name(np), np->rv_dcntl & IRQM ? "totem pole" : "open drain", np->ram_ba ? ", using on-chip SRAM" : ""); printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); if (np->features & FE_NOPM) printf("%s: handling phase mismatch from SCRIPTS.\n", sym_name(np)); } /* * And still more. */ if (sym_verbose > 1) { printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } /* * Let user be aware of targets that have some disable flags set. */ sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT"); if (sym_verbose) sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED, "SCAN FOR LUNS"); return 0; } /* * Prepare the next negotiation message if needed. * * Fill in the part of message buffer that contains the * negotiation and the nego_status field of the CCB. * Returns the size of the message in bytes. */ static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr) { tcb_p tp = &np->target[cp->target]; int msglen = 0; /* * Early C1010 chips need a work-around for DT * data transfer to work. */ if (!(np->features & FE_U3EN)) tp->tinfo.goal.options = 0; /* * negotiate using PPR ? */ if (tp->tinfo.goal.options & PPR_OPT_MASK) nego = NS_PPR; /* * negotiate wide transfers ? */ else if (tp->tinfo.current.width != tp->tinfo.goal.width) nego = NS_WIDE; /* * negotiate synchronous transfers? */ else if (tp->tinfo.current.period != tp->tinfo.goal.period || tp->tinfo.current.offset != tp->tinfo.goal.offset) nego = NS_SYNC; switch (nego) { case NS_SYNC: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 3; msgptr[msglen++] = M_X_SYNC_REQ; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = tp->tinfo.goal.offset; break; case NS_WIDE: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 2; msgptr[msglen++] = M_X_WIDE_REQ; msgptr[msglen++] = tp->tinfo.goal.width; break; case NS_PPR: msgptr[msglen++] = M_EXTENDED; msgptr[msglen++] = 6; msgptr[msglen++] = M_X_PPR_REQ; msgptr[msglen++] = tp->tinfo.goal.period; msgptr[msglen++] = 0; msgptr[msglen++] = tp->tinfo.goal.offset; msgptr[msglen++] = tp->tinfo.goal.width; msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT; break; }; cp->nego_status = nego; if (nego) { tp->nego_cp = cp; /* Keep track a nego will be performed */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" : nego == NS_WIDE ? "wide msgout" : "ppr msgout", msgptr); }; }; return msglen; } /* * Insert a job into the start queue. */ static void sym_put_start_queue(hcb_p np, ccb_p cp) { u_short qidx; #ifdef SYM_CONF_IARB_SUPPORT /* * If the previously queued CCB is not yet done, * set the IARB hint. The SCRIPTS will go with IARB * for this job when starting the previous one. 
* We leave devices a chance to win arbitration by * not using more than 'iarb_max' consecutive * immediate arbitrations. */ if (np->last_cp && np->iarb_count < np->iarb_max) { np->last_cp->host_flags |= HF_HINT_IARB; ++np->iarb_count; } else np->iarb_count = 0; np->last_cp = cp; #endif /* * Insert first the idle task and then our job. * The MB should ensure proper ordering. */ qidx = np->squeueput + 2; if (qidx >= MAX_QUEUE*2) qidx = 0; np->squeue [qidx] = cpu_to_scr(np->idletask_ba); MEMORY_BARRIER(); np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); np->squeueput = qidx; if (DEBUG_FLAGS & DEBUG_QUEUE) printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput); /* * Script processor may be waiting for reselect. * Wake it up. */ MEMORY_BARRIER(); OUTB (nc_istat, SIGP|np->istat_sem); } /* * Soft reset the chip. * * Raising SRST when the chip is running may cause * problems on dual function chips (see below). * On the other hand, LVD devices need some delay * to settle and report actual BUS mode in STEST4. */ static void sym_chip_reset (hcb_p np) { OUTB (nc_istat, SRST); UDELAY (10); OUTB (nc_istat, 0); UDELAY(2000); /* For BUS MODE to settle */ } /* * Soft reset the chip. * * Some 896 and 876 chip revisions may hang-up if we set * the SRST (soft reset) bit at the wrong time when SCRIPTS * are running. * So, we need to abort the current operation prior to * soft resetting the chip. */ static void sym_soft_reset (hcb_p np) { u_char istat; int i; OUTB (nc_istat, CABRT); for (i = 1000000 ; i ; --i) { istat = INB (nc_istat); if (istat & SIP) { INW (nc_sist); continue; } if (istat & DIP) { OUTB (nc_istat, 0); INB (nc_dstat); break; } } if (!i) printf("%s: unable to abort current chip operation.\n", sym_name(np)); sym_chip_reset (np); } /* * Start reset process. * * The interrupt handler will reinitialize the chip. */ static void sym_start_reset(hcb_p np) { (void) sym_reset_scsi_bus(np, 1); } static int sym_reset_scsi_bus(hcb_p np, int enab_int) { u32 term; int retv = 0; sym_soft_reset(np); /* Soft reset the chip */ if (enab_int) OUTW (nc_sien, RST); /* * Enable Tolerant, reset IRQD if present and * properly set IRQ mode, prior to resetting the bus. */ OUTB (nc_stest3, TE); OUTB (nc_dcntl, (np->rv_dcntl & IRQM)); OUTB (nc_scntl1, CRST); UDELAY (200); if (!SYM_SETUP_SCSI_BUS_CHECK) goto out; /* * Check for no terminators or SCSI bus shorts to ground. * Read SCSI data bus, data parity bits and control signals. * We are expecting RESET to be TRUE and other signals to be * FALSE. */ term = INB(nc_sstat0); term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */ ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */ ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */ INB(nc_sbcl); /* req ack bsy sel atn msg cd io */ if (!(np->features & FE_WIDE)) term &= 0x3ffff; if (term != (2<<7)) { printf("%s: suspicious SCSI data while resetting the BUS.\n", sym_name(np)); printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " "0x%lx, expecting 0x%lx\n", sym_name(np), (np->features & FE_WIDE) ? "dp1,d15-8," : "", (u_long)term, (u_long)(2<<7)); if (SYM_SETUP_SCSI_BUS_CHECK == 1) retv = 1; } out: OUTB (nc_scntl1, 0); /* MDELAY(100); */ return retv; } /* * The chip may have completed jobs. Look at the DONE QUEUE. * * On architectures that may reorder LOAD/STORE operations, * a memory barrier may be needed after the reading of the * so-called `flag' and prior to dealing with the data. 
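 */

/*
 * Editorial illustration (not part of the original source): the bus
 * check in sym_reset_scsi_bus() above packs RST, SDP0/1, the data
 * lines and the control signals into one word.  While the driver is
 * driving RST, only the RST bit should be seen:
 */
#if 0
	u32 expected = 2 << 7;		/* 0x100: RST high, all else low */
	/* 'term' is masked to 0x3ffff on narrow buses; any other set
	   bit suggests missing terminators or a short to ground. */
#endif

/*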
 */
static int
sym_wakeup_done (hcb_p np)
{
	ccb_p cp;
	int i, n;
	u32 dsa;

	n = 0;
	i = np->dqueueget;
	while (1) {
		dsa = scr_to_cpu(np->dqueue[i]);
		if (!dsa)
			break;
		np->dqueue[i] = 0;
		if ((i = i+2) >= MAX_QUEUE*2)
			i = 0;

		cp = sym_ccb_from_dsa(np, dsa);
		if (cp) {
			MEMORY_BARRIER();
			sym_complete_ok (np, cp);
			++n;
		}
		else
			printf ("%s: bad DSA (%x) in done queue.\n",
				sym_name(np), (u_int) dsa);
	}
	np->dqueueget = i;

	return n;
}

/*
 * Complete all active CCBs with error.
 * Used on CHIP/SCSI RESET.
 */
static void sym_flush_busy_queue (hcb_p np, int cam_status)
{
	/*
	 * Move all active CCBs to the COMP queue
	 * and flush this queue.
	 */
	sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_flush_comp_queue(np, cam_status);
}

/*
 * Start chip.
 *
 * 'reason' means:
 *   0: initialisation.
 *   1: SCSI BUS RESET delivered or received.
 *   2: SCSI BUS MODE changed.
 */
static void sym_init (hcb_p np, int reason)
{
	int	i;
	u32	phys;

	/*
	 * Reset chip if asked, otherwise just clear fifos.
	 */
	if (reason == 1)
		sym_soft_reset(np);
	else {
		OUTB (nc_stest3, TE|CSF);
		OUTONB (nc_ctest3, CLF);
	}

	/*
	 * Clear Start Queue
	 */
	phys = np->squeue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->squeue[i]   = cpu_to_scr(np->idletask_ba);
		np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 * Start at first entry.
	 */
	np->squeueput = 0;

	/*
	 * Clear Done Queue
	 */
	phys = np->dqueue_ba;
	for (i = 0; i < MAX_QUEUE*2; i += 2) {
		np->dqueue[i]   = 0;
		np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
	}
	np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);

	/*
	 * Start at first entry.
	 */
	np->dqueueget = 0;

	/*
	 * Install patches in scripts.
	 * This also points the start and done queue pointers
	 * used from SCRIPTS at their first positions.
	 */
	np->fw_patch(np);

	/*
	 * Wakeup all pending jobs.
	 */
	sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET);

	/*
	 * Init chip.
	 */
	OUTB (nc_istat,  0x00);			/* Remove Reset, abort */
	UDELAY (2000);	/* The 895 needs time for the bus mode to settle */

	OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
					/* full arb., ena parity, par->ATN */
	OUTB (nc_scntl1, 0x00);		/* odd parity, and remove CRST!! */

	sym_selectclock(np, np->rv_scntl3);	/* Select SCSI clock */

	OUTB (nc_scid  , RRE|np->myaddr);	/* Adapter SCSI address */
	OUTW (nc_respid, 1ul<<np->myaddr);	/* Id to respond to */
	OUTB (nc_istat , SIGP);			/* Signal Process */
	OUTB (nc_dmode , np->rv_dmode);		/* Burst length, dma mode */
	OUTB (nc_ctest5, np->rv_ctest5);	/* Large fifo + large burst */

	OUTB (nc_dcntl , NOCOM|np->rv_dcntl);	/* Protect SFBR */
	OUTB (nc_ctest3, np->rv_ctest3);	/* Write and invalidate */
	OUTB (nc_ctest4, np->rv_ctest4);	/* Master parity checking */

	/* Extended Sreq/Sack filtering not supported on the C10 */
	if (np->features & FE_C10)
		OUTB (nc_stest2, np->rv_stest2);
	else
		OUTB (nc_stest2, EXT|np->rv_stest2);

	OUTB (nc_stest3, TE);			/* TolerANT enable */
	OUTB (nc_stime0, 0x0c);			/* HTH disabled  STO 0.25 sec */

	/*
	 * For now, disable AIP generation on C1010-66.
	 */
	if (np->device_id == PCI_ID_LSI53C1010_2)
		OUTB (nc_aipcntl1, DISAIP);

	/*
	 * C1010 Errata.
	 * Errant SGE's when in narrow. Write bits 4 & 5 of
	 * STEST1 register to disable SGE. We probably should do
	 * that from SCRIPTS for each selection/reselection, but
	 * I just don't want. :)
	 */
	if (np->device_id == PCI_ID_LSI53C1010 &&
	    /* np->revision_id < 0xff */ 1)
		OUTB (nc_stest1, INB(nc_stest1) | 0x30);

	/*
	 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
	 * Disable overlapped arbitration for some dual function devices,
	 * regardless of revision id (kind of post-chip-design feature. ;-))
	 */
	if (np->device_id == PCI_ID_SYM53C875)
		OUTB (nc_ctest0, (1<<5));
	else if (np->device_id == PCI_ID_SYM53C896)
		np->rv_ccntl0 |= DPR;

	/*
	 * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
	 * and/or hardware phase mismatch, since only such chips
	 * seem to support those IO registers.
	 */
	if (np->features & (FE_DAC|FE_NOPM)) {
		OUTB (nc_ccntl0, np->rv_ccntl0);
		OUTB (nc_ccntl1, np->rv_ccntl1);
	}

	/*
	 * If phase mismatch handled by scripts (895A/896/1010),
	 * set PM jump addresses.
	 */
	if (np->features & FE_NOPM) {
		OUTL (nc_pmjad1, SCRIPTB_BA (np, pm_handle));
		OUTL (nc_pmjad2, SCRIPTB_BA (np, pm_handle));
	}

	/*
	 * Enable GPIO0 pin for writing if LED support from SCRIPTS.
	 * Also set GPIO5 and clear GPIO6 if hardware LED control.
	 */
	if (np->features & FE_LED0)
		OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
	else if (np->features & FE_LEDC)
		OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);

	/*
	 * enable ints
	 */
	OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
	OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);

	/*
	 * For 895/6 enable SBMC interrupt and save current SCSI bus mode.
	 * Try to eat the spurious SBMC interrupt that may occur when
	 * we reset the chip but not the SCSI BUS (at initialization).
	 */
	if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
		OUTONW (nc_sien, SBMC);
		if (reason == 0) {
			MDELAY(100);
			INW (nc_sist);
		}
		np->scsi_mode = INB (nc_stest4) & SMODE;
	}

	/*
	 * Fill in target structure.
	 * Reinitialize usrsync.
	 * Reinitialize usrwide.
	 * Prepare sync negotiation according to actual SCSI bus mode.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		tcb_p tp = &np->target[i];

		tp->to_reset  = 0;
		tp->head.sval = 0;
		tp->head.wval = np->rv_scntl3;
		tp->head.uval = 0;

		tp->tinfo.current.period  = 0;
		tp->tinfo.current.offset  = 0;
		tp->tinfo.current.width   = BUS_8_BIT;
		tp->tinfo.current.options = 0;
	}

	/*
	 * Download SCSI SCRIPTS to on-chip RAM if present,
	 * and start script processor.
	 */
	if (np->ram_ba) {
		if (sym_verbose > 1)
			printf ("%s: Downloading SCSI SCRIPTS.\n",
				sym_name(np));
		if (np->ram_ws == 8192) {
			OUTRAM_OFF(4096, np->scriptb0, np->scriptb_sz);
			OUTL (nc_mmws, np->scr_ram_seg);
			OUTL (nc_mmrs, np->scr_ram_seg);
			OUTL (nc_sfs,  np->scr_ram_seg);
			phys = SCRIPTB_BA (np, start64);
		}
		else
			phys = SCRIPTA_BA (np, init);
		OUTRAM_OFF(0, np->scripta0, np->scripta_sz);
	}
	else
		phys = SCRIPTA_BA (np, init);

	np->istat_sem = 0;

	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (phys);

	/*
	 * Notify the XPT about the RESET condition.
	 */
	if (reason != 0)
		xpt_async(AC_BUS_RESET, np->path, NULL);
}

/*
 * Get clock factor and sync divisor for a given
 * synchronous factor period.
 */
static int
sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
{
	u32	clk = np->clock_khz;	/* SCSI clock frequency in kHz	*/
	int	div = np->clock_divn;	/* Number of divisors supported	*/
	u32	fak;			/* Sync factor in sxfer		*/
	u32	per;			/* Period in tenths of ns	*/
	u32	kpc;			/* (per * clk)			*/
	int	ret;

	/*
	 * Compute the synchronous period in tenths of nano-seconds
	 */
	if	(dt && sfac <= 9)	per = 125;
	else if	(sfac <= 10)		per = 250;
	else if	(sfac == 11)		per = 303;
	else if	(sfac == 12)		per = 500;
	else				per = 40 * sfac;
	ret = per;

	kpc = per * clk;
	if (dt)
		kpc <<= 1;

	/*
	 * For earliest C10 revision 0, we cannot use extra
	 * clocks for the setting of the SCSI clocking.
	 * Note that this limits the lowest sync data transfer
	 * to 5 Mega-transfers per second and may result in
	 * using higher clock divisors.
	 */
#if 1
	if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
		/*
		 * Look for the lowest clock divisor that allows an
		 * output speed not faster than the period.
*/ while (div > 0) { --div; if (kpc > (div_10M[div] << 2)) { ++div; break; } } fak = 0; /* No extra clocks */ if (div == np->clock_divn) { /* Are we too fast ? */ ret = -1; } *divp = div; *fakp = fak; return ret; } #endif /* * Look for the greatest clock divisor that allows an * input speed faster than the period. */ while (div-- > 0) if (kpc >= (div_10M[div] << 2)) break; /* * Calculate the lowest clock factor that allows an output * speed not faster than the period, and the max output speed. * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. */ if (dt) { fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ } else { fak = (kpc - 1) / div_10M[div] + 1 - 4; /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ } /* * Check against our hardware limits, or bugs :). */ if (fak < 0) {fak = 0; ret = -1;} if (fak > 2) {fak = 2; ret = -1;} /* * Compute and return sync parameters. */ *divp = div; *fakp = fak; return ret; } /* * Tell the SCSI layer about the new transfer parameters. */ static void sym_xpt_async_transfer_neg(hcb_p np, int target, u_int spi_valid) { struct ccb_trans_settings cts; struct cam_path *path; int sts; tcb_p tp = &np->target[target]; sts = xpt_create_path(&path, NULL, cam_sim_path(np->sim), target, CAM_LUN_WILDCARD); if (sts != CAM_REQ_CMP) return; bzero(&cts, sizeof(cts)); #ifdef FreeBSD_New_Tran_Settings #define cts__scsi (cts.proto_specific.scsi) #define cts__spi (cts.xport_specific.spi) cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.protocol = PROTO_SCSI; cts.transport = XPORT_SPI; cts.protocol_version = tp->tinfo.current.scsi_version; cts.transport_version = tp->tinfo.current.spi_version; cts__spi.valid = spi_valid; if (spi_valid & CTS_SPI_VALID_SYNC_RATE) cts__spi.sync_period = tp->tinfo.current.period; if (spi_valid & CTS_SPI_VALID_SYNC_OFFSET) cts__spi.sync_offset = tp->tinfo.current.offset; if (spi_valid & CTS_SPI_VALID_BUS_WIDTH) cts__spi.bus_width = tp->tinfo.current.width; if (spi_valid & CTS_SPI_VALID_PPR_OPTIONS) cts__spi.ppr_options = tp->tinfo.current.options; #undef cts__spi #undef cts__scsi #else cts.valid = spi_valid; if (spi_valid & CCB_TRANS_SYNC_RATE_VALID) cts.sync_period = tp->tinfo.current.period; if (spi_valid & CCB_TRANS_SYNC_OFFSET_VALID) cts.sync_offset = tp->tinfo.current.offset; if (spi_valid & CCB_TRANS_BUS_WIDTH_VALID) cts.bus_width = tp->tinfo.current.width; #endif xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1); xpt_async(AC_TRANSFER_NEG, path, &cts); xpt_free_path(path); } #ifdef FreeBSD_New_Tran_Settings #define SYM_SPI_VALID_WDTR \ CTS_SPI_VALID_BUS_WIDTH | \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET #define SYM_SPI_VALID_SDTR \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET #define SYM_SPI_VALID_PPR \ CTS_SPI_VALID_PPR_OPTIONS | \ CTS_SPI_VALID_BUS_WIDTH | \ CTS_SPI_VALID_SYNC_RATE | \ CTS_SPI_VALID_SYNC_OFFSET #else #define SYM_SPI_VALID_WDTR \ CCB_TRANS_BUS_WIDTH_VALID | \ CCB_TRANS_SYNC_RATE_VALID | \ CCB_TRANS_SYNC_OFFSET_VALID #define SYM_SPI_VALID_SDTR \ CCB_TRANS_SYNC_RATE_VALID | \ CCB_TRANS_SYNC_OFFSET_VALID #define SYM_SPI_VALID_PPR \ CCB_TRANS_BUS_WIDTH_VALID | \ CCB_TRANS_SYNC_RATE_VALID | \ CCB_TRANS_SYNC_OFFSET_VALID #endif /* * We received a WDTR. * Let everything be aware of the changes. */ static void sym_setwide(hcb_p np, ccb_p cp, u_char wide) { tcb_p tp = &np->target[cp->target]; sym_settrans(np, cp, 0, 0, 0, wide, 0, 0); /* * Tell the SCSI layer about the new transfer parameters. 
*/ tp->tinfo.goal.width = tp->tinfo.current.width = wide; tp->tinfo.current.offset = 0; tp->tinfo.current.period = 0; tp->tinfo.current.options = 0; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_WDTR); } /* * We received a SDTR. * Let everything be aware of the changes. */ static void sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak) { tcb_p tp = &np->target[cp->target]; u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0; sym_settrans(np, cp, 0, ofs, per, wide, div, fak); /* * Tell the SCSI layer about the new transfer parameters. */ tp->tinfo.goal.period = tp->tinfo.current.period = per; tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; tp->tinfo.goal.options = tp->tinfo.current.options = 0; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_SDTR); } /* * We received a PPR. * Let everything be aware of the changes. */ static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { tcb_p tp = &np->target[cp->target]; sym_settrans(np, cp, dt, ofs, per, wide, div, fak); /* * Tell the SCSI layer about the new transfer parameters. */ tp->tinfo.goal.width = tp->tinfo.current.width = wide; tp->tinfo.goal.period = tp->tinfo.current.period = per; tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; tp->tinfo.goal.options = tp->tinfo.current.options = dt; sym_xpt_async_transfer_neg(np, cp->target, SYM_SPI_VALID_PPR); } /* * Switch trans mode for current job and it's target. */ static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { SYM_QUEHEAD *qp; union ccb *ccb; tcb_p tp; u_char target = INB (nc_sdid) & 0x0f; u_char sval, wval, uval; assert (cp); if (!cp) return; ccb = cp->cam_ccb; assert (ccb); if (!ccb) return; assert (target == (cp->target & 0xf)); tp = &np->target[target]; sval = tp->head.sval; wval = tp->head.wval; uval = tp->head.uval; #if 0 printf("XXXX sval=%x wval=%x uval=%x (%x)\n", sval, wval, uval, np->rv_scntl3); #endif /* * Set the offset. */ if (!(np->features & FE_C10)) sval = (sval & ~0x1f) | ofs; else sval = (sval & ~0x3f) | ofs; /* * Set the sync divisor and extra clock factor. */ if (ofs != 0) { wval = (wval & ~0x70) | ((div+1) << 4); if (!(np->features & FE_C10)) sval = (sval & ~0xe0) | (fak << 5); else { uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT); if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT); if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT); } } /* * Set the bus width. */ wval = wval & ~EWS; if (wide != 0) wval |= EWS; /* * Set misc. ultra enable bits. */ if (np->features & FE_C10) { uval = uval & ~(U3EN|AIPCKEN); if (dt) { assert(np->features & FE_U3EN); uval |= U3EN; } } else { wval = wval & ~ULTRA; if (per <= 12) wval |= ULTRA; } /* * Stop there if sync parameters are unchanged. */ if (tp->head.sval == sval && tp->head.wval == wval && tp->head.uval == uval) return; tp->head.sval = sval; tp->head.wval = wval; tp->head.uval = uval; /* * Disable extended Sreq/Sack filtering if per < 50. * Not supported on the C1010. */ if (per < 50 && !(np->features & FE_C10)) OUTOFFB (nc_stest2, EXT); /* * set actual value and sync_status */ OUTB (nc_sxfer, tp->head.sval); OUTB (nc_scntl3, tp->head.wval); if (np->features & FE_C10) { OUTB (nc_scntl4, tp->head.uval); } /* * patch ALL busy ccbs of this target. 
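 *
 * (For reference, the register packing computed above on non-C10
 * chips, as read from this very code and not from additional chip
 * documentation: the sync offset lives in the low 5 bits of SXFER
 * (sval), the extra clock factor in SXFER bits 5-7, and the
 * divisor in SCNTL3 (wval) bits 4-6. E.g. ofs == 16, div == 2,
 * fak == 0 yields sval == (sval & ~0x1f) | 16 with bits 5-7
 * cleared, and wval == (wval & ~0x70) | ((2+1) << 4).)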
*/ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->target != target) continue; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; if (np->features & FE_C10) { cp->phys.select.sel_scntl4 = tp->head.uval; } } } /* * log message for real hard errors * * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc). * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. * * exception register: * ds: dstat * si: sist * * SCSI bus lines: * so: control lines as driven by chip. * si: control lines as seen by chip. * sd: scsi data lines as seen by chip. * * wide/fastmode: * sxfer: (see the manual) * scntl3: (see the manual) * * current script command: * dsp: script address (relative to start of script). * dbc: first word of script command. * * First 24 registers of the chip: * r0..rf */ static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat) { u32 dsp; int script_ofs; int script_size; char *script_name; u_char *script_base; int i; dsp = INL (nc_dsp); if (dsp > np->scripta_ba && dsp <= np->scripta_ba + np->scripta_sz) { script_ofs = dsp - np->scripta_ba; script_size = np->scripta_sz; script_base = (u_char *) np->scripta0; script_name = "scripta"; } else if (np->scriptb_ba < dsp && dsp <= np->scriptb_ba + np->scriptb_sz) { script_ofs = dsp - np->scriptb_ba; script_size = np->scriptb_sz; script_base = (u_char *) np->scriptb0; script_name = "scriptb"; } else { script_ofs = dsp; script_size = 0; script_base = 0; script_name = "mem"; } printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist, (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer), (unsigned)INB (nc_scntl3), script_name, script_ofs, (unsigned)INL (nc_dbc)); if (((script_ofs & 3) == 0) && (unsigned)script_ofs < script_size) { printf ("%s: script cmd = %08x\n", sym_name(np), scr_to_cpu((int) *(u32 *)(script_base + script_ofs))); } printf ("%s: regdump:", sym_name(np)); for (i=0; i<24;i++) printf (" %02x", (unsigned)INB_OFF(i)); printf (".\n"); /* * On PCI BUS error, read the PCI status register. */ if (dstat & (MDPE|BF)) { u_short pci_sts; #ifdef FreeBSD_Bus_Io_Abstraction pci_sts = pci_read_config(np->device, PCIR_STATUS, 2); #else pci_sts = pci_cfgread(np->pci_tag, PCIR_STATUS, 2); #endif if (pci_sts & 0xf900) { #ifdef FreeBSD_Bus_Io_Abstraction pci_write_config(np->device, PCIR_STATUS, pci_sts, 2); #else pci_cfgwrite(np->pci_tag, PCIR_STATUS, pci_sts, 2); #endif printf("%s: PCI STATUS = 0x%04x\n", sym_name(np), pci_sts & 0xf900); } } } /* * chip interrupt handler * * In normal situations, interrupt conditions occur one at * a time. But when something bad happens on the SCSI BUS, * the chip may raise several interrupt flags before * stopping and interrupting the CPU. The additional * interrupt flags are stacked in some extra registers * after the SIP and/or DIP flag has been raised in the * ISTAT. After the CPU has read the interrupt condition * flag from SIST or DSTAT, the chip unstacks the other * interrupt flags and sets the corresponding bits in * SIST or DSTAT. Since the chip starts stacking once the * SIP or DIP flag is set, there is a small window of time * where the stacking does not occur.
* * Typically, multiple interrupt conditions may happen in * the following situations: * * - SCSI parity error + Phase mismatch (PAR|MA) * When a parity error is detected in input phase * and the device switches to msg-in phase inside a * block MOV. * - SCSI parity error + Unexpected disconnect (PAR|UDC) * When a stupid device does not want to handle the * recovery of a SCSI parity error. * - Some combinations of STO, PAR, UDC, ... * When using non compliant SCSI stuff, when the user is * doing non compliant hot tampering on the BUS, when * something really bad happens to a device, etc ... * * The heuristic suggested by SYMBIOS to handle * multiple interrupts is to try unstacking all * interrupt conditions and to handle them in some * priority order based on error severity. * This will work when the unstacking has been * successful, but we cannot be 100 % sure of that, * since the CPU may have been faster to unstack than * the chip is able to stack. Hmmm ... But it seems that * such a situation is very unlikely to happen. * * If this happens, for example an STO caught by the CPU * and then a UDC happening before the CPU has restarted * the SCRIPTS, the driver may wrongly complete the * same command on UDC, since the SCRIPTS didn't restart * and the DSA still points to the same command. * We avoid this situation by setting the DSA to an * invalid value when the CCB is completed and before * restarting the SCRIPTS. * * Another issue is that we need some section of our * recovery procedures to be somehow uninterruptible but * the SCRIPTS processor does not provide such a * feature. For this reason, we handle recovery preferably * from the C code and check against some SCRIPTS critical * sections from the C code. * * Hopefully, the interrupt handling of the driver is now * able to resist weird BUS error conditions, but do not * ask me for any guarantee that it will never fail. :-) * Use it at your own risk. */ static void sym_intr1 (hcb_p np) { u_char istat, istatc; u_char dstat; u_short sist; /* * interrupt on the fly ? * * A `dummy read' is needed to ensure that the * clear of the INTF flag reaches the device * before the scanning of the DONE queue. */ istat = INB (nc_istat); if (istat & INTF) { OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem); istat = INB (nc_istat); /* DUMMY READ */ if (DEBUG_FLAGS & DEBUG_TINY) printf ("F "); (void)sym_wakeup_done (np); }; if (!(istat & (SIP|DIP))) return; #if 0 /* We should never get this one */ if (istat & CABRT) OUTB (nc_istat, CABRT); #endif /* * PAR and MA interrupts may occur at the same time, * and we need to know of both in order to handle * this situation properly. We try to unstack SCSI * interrupts for that reason. BTW, I dislike a LOT * such a loop inside the interrupt routine. * Even if DMA interrupt stacking is very unlikely to * happen, we also try unstacking these ones, since * this has no performance impact. */ sist = 0; dstat = 0; istatc = istat; do { if (istatc & SIP) sist |= INW (nc_sist); if (istatc & DIP) dstat |= INB (nc_dstat); istatc = INB (nc_istat); istat |= istatc; } while (istatc & (SIP|DIP)); if (DEBUG_FLAGS & DEBUG_TINY) printf ("<%d|%x:%x|%x:%x>", (int)INB(nc_scr0), dstat,sist, (unsigned)INL(nc_dsp), (unsigned)INL(nc_dbc)); /* * On paper, a memory barrier may be needed here. * And since we are paranoid ... :) */ MEMORY_BARRIER(); /* * First, interrupts we want to service cleanly. * * Phase mismatch (MA) is the most frequent interrupt * for chips earlier than the 896 and so we have to service * it as quickly as possible.
* A SCSI parity error (PAR) may be combined with a phase * mismatch condition (MA). * Programmed interrupts (SIR) are used to call the C code * from SCRIPTS. * The single step interrupt (SSI) is not used in this * driver. */ if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) && !(dstat & (MDPE|BF|ABRT|IID))) { if (sist & PAR) sym_int_par (np, sist); else if (sist & MA) sym_int_ma (np); else if (dstat & SIR) sym_int_sir (np); else if (dstat & SSI) OUTONB_STD (); else goto unknown_int; return; }; /* * Now, interrupts that donnot happen in normal * situations and that we may need to recover from. * * On SCSI RESET (RST), we reset everything. * On SCSI BUS MODE CHANGE (SBMC), we complete all * active CCBs with RESET status, prepare all devices * for negotiating again and restart the SCRIPTS. * On STO and UDC, we complete the CCB with the corres- * ponding status and restart the SCRIPTS. */ if (sist & RST) { xpt_print_path(np->path); printf("SCSI BUS reset detected.\n"); sym_init (np, 1); return; }; OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ if (!(sist & (GEN|HTH|SGE)) && !(dstat & (MDPE|BF|ABRT|IID))) { if (sist & SBMC) sym_int_sbmc (np); else if (sist & STO) sym_int_sto (np); else if (sist & UDC) sym_int_udc (np); else goto unknown_int; return; }; /* * Now, interrupts we are not able to recover cleanly. * * Log message for hard errors. * Reset everything. */ sym_log_hard_error(np, sist, dstat); if ((sist & (GEN|HTH|SGE)) || (dstat & (MDPE|BF|ABRT|IID))) { sym_start_reset(np); return; }; unknown_int: /* * We just miss the cause of the interrupt. :( * Print a message. The timeout will do the real work. */ printf( "%s: unknown interrupt(s) ignored, " "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", sym_name(np), istat, dstat, sist); } static void sym_intr(void *arg) { if (DEBUG_FLAGS & DEBUG_TINY) printf ("["); sym_intr1((hcb_p) arg); if (DEBUG_FLAGS & DEBUG_TINY) printf ("]"); return; } static void sym_poll(struct cam_sim *sim) { int s = splcam(); sym_intr(cam_sim_softc(sim)); splx(s); } /* * generic recovery from scsi interrupt * * The doc says that when the chip gets an SCSI interrupt, * it tries to stop in an orderly fashion, by completing * an instruction fetch that had started or by flushing * the DMA fifo for a write to memory that was executing. * Such a fashion is not enough to know if the instruction * that was just before the current DSP value has been * executed or not. * * There are some small SCRIPTS sections that deal with * the start queue and the done queue that may break any * assomption from the C code if we are interrupted * inside, so we reset if this happens. Btw, since these * SCRIPTS sections are executed while the SCRIPTS hasn't * started SCSI operations, it is very unlikely to happen. * * All the driver data structures are supposed to be * allocated from the same 4 GB memory window, so there * is a 1 to 1 relationship between DSA and driver data * structures. Since we are careful :) to invalidate the * DSA when we complete a command or when the SCRIPTS * pushes a DSA into a queue, we can trust it when it * points to a CCB. */ static void sym_recover_scsi_int (hcb_p np, u_char hsts) { u32 dsp = INL (nc_dsp); u32 dsa = INL (nc_dsa); ccb_p cp = sym_ccb_from_dsa(np, dsa); /* * If we haven't been interrupted inside the SCRIPTS * critical pathes, we can safely restart the SCRIPTS * and trust the DSA value if it matches a CCB. 
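 *
 * The critical-path test below reads more easily as a predicate;
 * an illustrative sketch only, not a helper the driver defines:
 *
 *	static __inline int
 *	sym_dsp_inside(u32 dsp, u32 beg, u32 end)
 *	{
 *		return (dsp > beg && dsp < end + 1);
 *	}
 *
 * We reset everything unless the DSP lies outside all four
 * critical windows: getjob_begin..getjob_end, ungetjob..reselect,
 * sel_for_abort..sel_for_abort_1 and done..done_end.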
*/ if ((!(dsp > SCRIPTA_BA (np, getjob_begin) && dsp < SCRIPTA_BA (np, getjob_end) + 1)) && (!(dsp > SCRIPTA_BA (np, ungetjob) && dsp < SCRIPTA_BA (np, reselect) + 1)) && (!(dsp > SCRIPTB_BA (np, sel_for_abort) && dsp < SCRIPTB_BA (np, sel_for_abort_1) + 1)) && (!(dsp > SCRIPTA_BA (np, done) && dsp < SCRIPTA_BA (np, done_end) + 1))) { OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ /* * If we have a CCB, let the SCRIPTS call us back for * the handling of the error with SCRATCHA filled with * STARTPOS. This way, we will be able to freeze the * device queue and requeue awaiting IOs. */ if (cp) { cp->host_status = hsts; OUTL_DSP (SCRIPTA_BA (np, complete_error)); } /* * Otherwise just restart the SCRIPTS. */ else { OUTL (nc_dsa, 0xffffff); OUTL_DSP (SCRIPTA_BA (np, start)); } } else goto reset_all; return; reset_all: sym_start_reset(np); } /* * chip exception handler for selection timeout */ static void sym_int_sto (hcb_p np) { u32 dsp = INL (nc_dsp); if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); if (dsp == SCRIPTA_BA (np, wf_sel_done) + 8) sym_recover_scsi_int(np, HS_SEL_TIMEOUT); else sym_start_reset(np); } /* * chip exception handler for unexpected disconnect */ static void sym_int_udc (hcb_p np) { printf ("%s: unexpected disconnect\n", sym_name(np)); sym_recover_scsi_int(np, HS_UNEXPECTED); } /* * chip exception handler for SCSI bus mode change * * spi2-r12 11.2.3 says a transceiver mode change must * generate a reset event and a device that detects a reset * event shall initiate a hard reset. It says also that a * device that detects a mode change shall set data transfer * mode to eight bit asynchronous, etc... * So, just reinitializing all except chip should be enough. */ static void sym_int_sbmc (hcb_p np) { u_char scsi_mode = INB (nc_stest4) & SMODE; /* * Notify user. */ xpt_print_path(np->path); printf("SCSI BUS mode change from %s to %s.\n", sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode)); /* * Should suspend command processing for a few seconds and * reinitialize all except the chip. */ sym_init (np, 2); } /* * chip exception handler for SCSI parity error. * * When the chip detects a SCSI parity error and is * currently executing a (CH)MOV instruction, it does * not interrupt immediately, but tries to finish the * transfer of the current scatter entry before * interrupting. The following situations may occur: * * - The complete scatter entry has been transferred * without the device having changed phase. * The chip will then interrupt with the DSP pointing * to the instruction that follows the MOV. * * - A phase mismatch occurs before the MOV finished * and phase errors are to be handled by the C code. * The chip will then interrupt with both PAR and MA * conditions set. * * - A phase mismatch occurs before the MOV finished and * phase errors are to be handled by SCRIPTS. * The chip will load the DSP with the phase mismatch * JUMP address and interrupt the host processor. */ static void sym_int_par (hcb_p np, u_short sist) { u_char hsts = INB (HS_PRT); u32 dsp = INL (nc_dsp); u32 dbc = INL (nc_dbc); u32 dsa = INL (nc_dsa); u_char sbcl = INB (nc_sbcl); u_char cmd = dbc >> 24; int phase = cmd & 7; ccb_p cp = sym_ccb_from_dsa(np, dsa); printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", sym_name(np), hsts, dbc, sbcl); /* * Check that the chip is connected to the SCSI BUS. 
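 *
 * (A decoding note for the checks a few lines below, derived from
 * this code itself: cmd = dbc >> 24 is the DCMD byte of the
 * interrupted instruction, so (cmd & 0xc0) == 0 means a block
 * MOVE and cmd & 7 is the SCSI phase it was moving in. Input
 * phases have bit 0 set, so e.g. cmd == 0x01 (MOVE, DATA IN)
 * passes the (phase & 1) test while cmd == 0x00 (MOVE, DATA OUT)
 * does not.)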
*/ if (!(INB (nc_scntl1) & ISCON)) { sym_recover_scsi_int(np, HS_UNEXPECTED); return; } /* * If the nexus is not clearly identified, reset the bus. * We will try to do better later. */ if (!cp) goto reset_all; /* * Check instruction was a MOV, direction was INPUT and * ATN is asserted. */ if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8)) goto reset_all; /* * Keep track of the parity error. */ OUTONB (HF_PRT, HF_EXT_ERR); cp->xerr_status |= XE_PARITY_ERR; /* * Prepare the message to send to the device. */ np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR; /* * If the old phase was DATA IN phase, we have to deal with * the 3 situations described above. * For other input phases (MSG IN and STATUS), the device * must resend the whole thing that failed parity checking * or signal error. So, jumping to dispatcher should be OK. */ if (phase == 1 || phase == 5) { /* Phase mismatch handled by SCRIPTS */ if (dsp == SCRIPTB_BA (np, pm_handle)) OUTL_DSP (dsp); /* Phase mismatch handled by the C code */ else if (sist & MA) sym_int_ma (np); /* No phase mismatch occurred */ else { OUTL (nc_temp, dsp); OUTL_DSP (SCRIPTA_BA (np, dispatch)); } } else OUTL_DSP (SCRIPTA_BA (np, clrack)); return; reset_all: sym_start_reset(np); return; } /* * chip exception handler for phase errors. * * We have to construct a new transfer descriptor, * to transfer the rest of the current block. */ static void sym_int_ma (hcb_p np) { u32 dbc; u32 rest; u32 dsp; u32 dsa; u32 nxtdsp; u32 *vdsp; u32 oadr, olen; u32 *tblp; u32 newcmd; u_int delta; u_char cmd; u_char hflags, hflags0; struct sym_pmc *pm; ccb_p cp; dsp = INL (nc_dsp); dbc = INL (nc_dbc); dsa = INL (nc_dsa); cmd = dbc >> 24; rest = dbc & 0xffffff; delta = 0; /* * locate matching cp if any. */ cp = sym_ccb_from_dsa(np, dsa); /* * Donnot take into account dma fifo and various buffers in * INPUT phase since the chip flushes everything before * raising the MA interrupt for interrupted INPUT phases. * For DATA IN phase, we will check for the SWIDE later. */ if ((cmd & 7) != 1 && (cmd & 7) != 5) { u_char ss0, ss2; if (np->features & FE_DFBC) delta = INW (nc_dfbc); else { u32 dfifo; /* * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership. */ dfifo = INL(nc_dfifo); /* * Calculate remaining bytes in DMA fifo. * (CTEST5 = dfifo >> 16) */ if (dfifo & (DFS << 16)) delta = ((((dfifo >> 8) & 0x300) | (dfifo & 0xff)) - rest) & 0x3ff; else delta = ((dfifo & 0xff) - rest) & 0x7f; } /* * The data in the dma fifo has not been transfered to * the target -> add the amount to the rest * and clear the data. * Check the sstat2 register in case of wide transfer. */ rest += delta; ss0 = INB (nc_sstat0); if (ss0 & OLF) rest++; if (!(np->features & FE_C10)) if (ss0 & ORF) rest++; if (cp && (cp->phys.select.sel_scntl3 & EWS)) { ss2 = INB (nc_sstat2); if (ss2 & OLF1) rest++; if (!(np->features & FE_C10)) if (ss2 & ORF1) rest++; }; /* * Clear fifos. */ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */ OUTB (nc_stest3, TE|CSF); /* scsi fifo */ } /* * log the information */ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE)) printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7, (unsigned) rest, (unsigned) delta); /* * try to find the interrupted script command, * and the address at which to continue. 
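 *
 * (A worked example for the FIFO byte count computed earlier in
 * this function, with hypothetical register values: with the
 * large FIFO enabled (DFS set), the byte counter is 10 bits wide,
 * built from DFIFO bits 0-7 plus CTEST5 bits 0-1. If that counter
 * reads 0x120 while rest (the low 24 bits of DBC) is 0x100, then
 * delta = (0x120 - 0x100) & 0x3ff = 0x20: 32 bytes were accepted
 * by the DMA engine but never made it onto the SCSI BUS.)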
*/ vdsp = 0; nxtdsp = 0; if (dsp > np->scripta_ba && dsp <= np->scripta_ba + np->scripta_sz) { vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8)); nxtdsp = dsp; } else if (dsp > np->scriptb_ba && dsp <= np->scriptb_ba + np->scriptb_sz) { vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8)); nxtdsp = dsp; } /* * log the information */ if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); }; if (!vdsp) { printf ("%s: interrupted SCRIPT address not found.\n", sym_name (np)); goto reset_all; } if (!cp) { printf ("%s: SCSI phase error fixup: CCB already dequeued.\n", sym_name (np)); goto reset_all; } /* * get old startaddress and old length. */ oadr = scr_to_cpu(vdsp[1]); if (cmd & 0x10) { /* Table indirect */ tblp = (u32 *) ((char*) &cp->phys + oadr); olen = scr_to_cpu(tblp[0]); oadr = scr_to_cpu(tblp[1]); } else { tblp = (u32 *) 0; olen = scr_to_cpu(vdsp[0]) & 0xffffff; }; if (DEBUG_FLAGS & DEBUG_PHASE) { printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n", (unsigned) (scr_to_cpu(vdsp[0]) >> 24), tblp, (unsigned) olen, (unsigned) oadr); }; /* * check cmd against assumed interrupted script command. * If dt data phase, the MOVE instruction hasn't bit 4 of * the phase. */ if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) { PRINT_ADDR(cp); printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n", (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24); goto reset_all; }; /* * if old phase not dataphase, leave here. */ if (cmd & 2) { PRINT_ADDR(cp); printf ("phase change %x-%x %d@%08x resid=%d.\n", cmd&7, INB(nc_sbcl)&7, (unsigned)olen, (unsigned)oadr, (unsigned)rest); goto unexpected_phase; }; /* * Choose the correct PM save area. * * Look at the PM_SAVE SCRIPT if you want to understand * this stuff. The equivalent code is implemented in * SCRIPTS for the 895A, 896 and 1010 that are able to * handle PM from the SCRIPTS processor. */ hflags0 = INB (HF_PRT); hflags = hflags0; if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) { if (hflags & HF_IN_PM0) nxtdsp = scr_to_cpu(cp->phys.pm0.ret); else if (hflags & HF_IN_PM1) nxtdsp = scr_to_cpu(cp->phys.pm1.ret); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; } if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; newcmd = SCRIPTA_BA (np, pm0_data); } else { pm = &cp->phys.pm1; newcmd = SCRIPTA_BA (np, pm1_data); } hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED); if (hflags != hflags0) OUTB (HF_PRT, hflags); /* * fillin the phase mismatch context */ pm->sg.addr = cpu_to_scr(oadr + olen - rest); pm->sg.size = cpu_to_scr(rest); pm->ret = cpu_to_scr(nxtdsp); /* * If we have a SWIDE, * - prepare the address to write the SWIDE from SCRIPTS, * - compute the SCRIPTS address to restart from, * - move current data pointer context by one byte. */ nxtdsp = SCRIPTA_BA (np, dispatch); if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) && (INB (nc_scntl2) & WSR)) { u32 tmp; /* * Set up the table indirect for the MOVE * of the residual byte and adjust the data * pointer context. */ tmp = scr_to_cpu(pm->sg.addr); cp->phys.wresid.addr = cpu_to_scr(tmp); pm->sg.addr = cpu_to_scr(tmp + 1); tmp = scr_to_cpu(pm->sg.size); cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1); pm->sg.size = cpu_to_scr(tmp - 1); /* * If only the residual byte is to be moved, * no PM context is needed. */ if ((tmp&0xffffff) == 1) newcmd = pm->ret; /* * Prepare the address of SCRIPTS that will * move the residual byte to memory. 
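 *
 * (Worked example of the SWIDE adjustment above, with made-up
 * numbers: if the recomputed context is pm->sg = { addr A,
 * size S } and one wide-residue byte sits in SWIDE, the code
 * sets cp->phys.wresid = { A, (S & 0xff000000) | 1 } so that
 * SCRIPTS can store that single byte at A, and shifts the main
 * context to pm->sg = { A + 1, S - 1 }, keeping the data pointer
 * consistent.)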
*/ nxtdsp = SCRIPTB_BA (np, wsr_ma_helper); } if (DEBUG_FLAGS & DEBUG_PHASE) { PRINT_ADDR(cp); printf ("PM %x %x %x / %x %x %x.\n", hflags0, hflags, newcmd, (unsigned)scr_to_cpu(pm->sg.addr), (unsigned)scr_to_cpu(pm->sg.size), (unsigned)scr_to_cpu(pm->ret)); } /* * Restart the SCRIPTS processor. */ OUTL (nc_temp, newcmd); OUTL_DSP (nxtdsp); return; /* * Unexpected phase changes that occurs when the current phase * is not a DATA IN or DATA OUT phase are due to error conditions. * Such event may only happen when the SCRIPTS is using a * multibyte SCSI MOVE. * * Phase change Some possible cause * * COMMAND --> MSG IN SCSI parity error detected by target. * COMMAND --> STATUS Bad command or refused by target. * MSG OUT --> MSG IN Message rejected by target. * MSG OUT --> COMMAND Bogus target that discards extended * negotiation messages. * * The code below does not care of the new phase and so * trusts the target. Why to annoy it ? * If the interrupted phase is COMMAND phase, we restart at * dispatcher. * If a target does not get all the messages after selection, * the code assumes blindly that the target discards extended * messages and clears the negotiation status. * If the target does not want all our response to negotiation, * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids * bloat for such a should_not_happen situation). * In all other situation, we reset the BUS. * Are these assumptions reasonnable ? (Wait and see ...) */ unexpected_phase: dsp -= 8; nxtdsp = 0; switch (cmd & 7) { case 2: /* COMMAND phase */ nxtdsp = SCRIPTA_BA (np, dispatch); break; #if 0 case 3: /* STATUS phase */ nxtdsp = SCRIPTA_BA (np, dispatch); break; #endif case 6: /* MSG OUT phase */ /* * If the device may want to use untagged when we want * tagged, we prepare an IDENTIFY without disc. granted, * since we will not be able to handle reselect. * Otherwise, we just don't care. */ if (dsp == SCRIPTA_BA (np, send_ident)) { if (cp->tag != NO_TAG && olen - rest <= 3) { cp->host_status = HS_BUSY; np->msgout[0] = M_IDENTIFY | cp->lun; nxtdsp = SCRIPTB_BA (np, ident_break_atn); } else nxtdsp = SCRIPTB_BA (np, ident_break); } else if (dsp == SCRIPTB_BA (np, send_wdtr) || dsp == SCRIPTB_BA (np, send_sdtr) || dsp == SCRIPTB_BA (np, send_ppr)) { nxtdsp = SCRIPTB_BA (np, nego_bad_phase); } break; #if 0 case 7: /* MSG IN phase */ nxtdsp = SCRIPTA_BA (np, clrack); break; #endif } if (nxtdsp) { OUTL_DSP (nxtdsp); return; } reset_all: sym_start_reset(np); } /* * Dequeue from the START queue all CCBs that match * a given target/lun/task condition (-1 means all), * and move them from the BUSY queue to the COMP queue * with CAM_REQUEUE_REQ status condition. * This function is used during error handling/recovery. * It is called with SCRIPTS not running. */ static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task) { int j; ccb_p cp; /* * Make sure the starting index is within range. */ assert((i >= 0) && (i < 2*MAX_QUEUE)); /* * Walk until end of START queue and dequeue every job * that matches the target/lun/task condition. 
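 *
 * The loop below is the usual two-index in-place filter, applied
 * to a ring of (DSA, next address) pairs, hence the step of 2 and
 * the wrap at MAX_QUEUE*2. As an illustrative sketch only, not
 * the exact driver code:
 *
 *	j = i;				i reads, j writes
 *	while (i != put) {
 *		if (matches(entry[i]))
 *			move entry[i] to the COMP queue;
 *		else {
 *			entry[j] = entry[i];
 *			j = (j + 2) % (MAX_QUEUE*2);
 *		}
 *		i = (i + 2) % (MAX_QUEUE*2);
 *	}
 *	put = j;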
*/ j = i; while (i != np->squeueput) { cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i])); assert(cp); #ifdef SYM_CONF_IARB_SUPPORT /* Forget hints for IARB, they may be no longer relevant */ cp->host_flags &= ~HF_HINT_IARB; #endif if ((target == -1 || cp->target == target) && (lun == -1 || cp->lun == lun) && (task == -1 || cp->tag == task)) { sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ); sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); } else { if (i != j) np->squeue[j] = np->squeue[i]; if ((j += 2) >= MAX_QUEUE*2) j = 0; } if ((i += 2) >= MAX_QUEUE*2) i = 0; } if (i != j) /* Copy back the idle task if needed */ np->squeue[j] = np->squeue[i]; np->squeueput = j; /* Update our current start queue pointer */ return (i - j) / 2; } /* * Complete all CCBs queued to the COMP queue. * * These CCBs are assumed: * - Not to be referenced either by devices or * SCRIPTS-related queues and datas. * - To have to be completed with an error condition * or requeued. * * The device queue freeze count is incremented * for each CCB that does not prevent this. * This function is called when all CCBs involved * in error handling/recovery have been reaped. */ static void sym_flush_comp_queue(hcb_p np, int cam_status) { SYM_QUEHEAD *qp; ccb_p cp; while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) { union ccb *ccb; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); /* Leave quiet CCBs waiting for resources */ if (cp->host_status == HS_WAIT) continue; ccb = cp->cam_ccb; if (cam_status) sym_set_cam_status(ccb, cam_status); sym_free_ccb(np, cp); sym_freeze_cam_ccb(ccb); sym_xpt_done(np, ccb); } } /* * chip handler for bad SCSI status condition * * In case of bad SCSI status, we unqueue all the tasks * currently queued to the controller but not yet started * and then restart the SCRIPTS processor immediately. * * QUEUE FULL and BUSY conditions are handled the same way. * Basically all the not yet started tasks are requeued in * device queue and the queue is frozen until a completion. * * For CHECK CONDITION and COMMAND TERMINATED status, we use * the CCB of the failed command to prepare a REQUEST SENSE * SCSI command and queue it to the controller queue. * * SCRATCHA is assumed to have been loaded with STARTPOS * before the SCRIPTS called the C code. */ static void sym_sir_bad_scsi_status(hcb_p np, int num, ccb_p cp) { tcb_p tp = &np->target[cp->target]; u32 startp; u_char s_status = cp->ssss_status; u_char h_flags = cp->host_flags; int msglen; int nego; int i; /* * Compute the index of the next job to start from SCRIPTS. */ i = (INL (nc_scratcha) - np->squeue_ba) / 4; /* * The last CCB queued used for IARB hint may be * no longer relevant. Forget it. */ #ifdef SYM_CONF_IARB_SUPPORT if (np->last_cp) np->last_cp = 0; #endif /* * Now deal with the SCSI status. */ switch(s_status) { case S_BUSY: case S_QUEUE_FULL: if (sym_verbose >= 2) { PRINT_ADDR(cp); printf (s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); } default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ sym_complete_error (np, cp); break; case S_TERMINATED: case S_CHECK_COND: /* * If we get an SCSI error when requesting sense, give up. */ if (h_flags & HF_SENSE) { sym_complete_error (np, cp); break; } /* * Dequeue all queued CCBs for that device not yet started, * and restart the SCRIPTS processor immediately. */ (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); OUTL_DSP (SCRIPTA_BA (np, start)); /* * Save some info of the actual IO. * Compute the data residual. 
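 *
 * (For reference, the auto-sense setup that follows builds the
 * 6-byte REQUEST SENSE CDB by hand; its layout is: byte 0 = 0x03,
 * the operation code; byte 1 bits 5-7 = LUN, only meaningful for
 * ancient pre-SCSI-3 devices, hence the conditional clearing
 * below; byte 4 = allocation length, here SYM_SNS_BBUF_LEN.)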
*/ cp->sv_scsi_status = cp->ssss_status; cp->sv_xerr_status = cp->xerr_status; cp->sv_resid = sym_compute_residual(np, cp); /* * Prepare all needed data structures for * requesting sense data. */ /* * identify message */ cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun; msglen = 1; /* * If we are currently using anything different from * async. 8 bit data transfers with that target, * start a negotiation, since the device may want * to report us a UNIT ATTENTION condition due to * a cause we currently ignore, and we donnot want * to be stuck with WIDE and/or SYNC data transfer. * * cp->nego_status is filled by sym_prepare_nego(). */ cp->nego_status = 0; nego = 0; if (tp->tinfo.current.options & PPR_OPT_MASK) nego = NS_PPR; else if (tp->tinfo.current.width != BUS_8_BIT) nego = NS_WIDE; else if (tp->tinfo.current.offset != 0) nego = NS_SYNC; if (nego) msglen += sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]); /* * Message table indirect structure. */ cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg2)); cp->phys.smsg.size = cpu_to_scr(msglen); /* * sense command */ cp->phys.cmd.addr = cpu_to_scr(CCB_BA (cp, sensecmd)); cp->phys.cmd.size = cpu_to_scr(6); /* * patch requested size into sense command */ cp->sensecmd[0] = 0x03; cp->sensecmd[1] = cp->lun << 5; #ifdef FreeBSD_New_Tran_Settings if (tp->tinfo.current.scsi_version > 2 || cp->lun > 7) cp->sensecmd[1] = 0; #endif cp->sensecmd[4] = SYM_SNS_BBUF_LEN; cp->data_len = SYM_SNS_BBUF_LEN; /* * sense data */ bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN); cp->phys.sense.addr = cpu_to_scr(vtobus(cp->sns_bbuf)); cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); /* * requeue the command. */ startp = SCRIPTB_BA (np, sdata_in); cp->phys.head.savep = cpu_to_scr(startp); cp->phys.head.goalp = cpu_to_scr(startp + 16); cp->phys.head.lastp = cpu_to_scr(startp); cp->startp = cpu_to_scr(startp); cp->actualquirks = SYM_QUIRK_AUTOSAVE; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->host_flags = (HF_SENSE|HF_DATA_IN); cp->xerr_status = 0; cp->extra_bytes = 0; cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); /* * Requeue the command. */ sym_put_start_queue(np, cp); /* * Give back to upper layer everything we have dequeued. */ sym_flush_comp_queue(np, 0); break; } } /* * After a device has accepted some management message * as BUS DEVICE RESET, ABORT TASK, etc ..., or when * a device signals a UNIT ATTENTION condition, some * tasks are thrown away by the device. We are required * to reflect that on our tasks list since the device * will never complete these tasks. * * This function move from the BUSY queue to the COMP * queue all disconnected CCBs for a given target that * match the following criteria: * - lun=-1 means any logical UNIT otherwise a given one. * - task=-1 means any task, otherwise a given one. */ static int sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task) { SYM_QUEHEAD qtmp, *qp; int i = 0; ccb_p cp; /* * Move the entire BUSY queue to our temporary queue. */ sym_que_init(&qtmp); sym_que_splice(&np->busy_ccbq, &qtmp); sym_que_init(&np->busy_ccbq); /* * Put all CCBs that matches our criteria into * the COMP queue and put back other ones into * the BUSY queue. 
*/ while ((qp = sym_remque_head(&qtmp)) != 0) { union ccb *ccb; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); ccb = cp->cam_ccb; if (cp->host_status != HS_DISCONNECT || cp->target != target || (lun != -1 && cp->lun != lun) || (task != -1 && (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); continue; } sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); /* Preserve the software timeout condition */ if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT) sym_set_cam_status(ccb, cam_status); ++i; #if 0 printf("XXXX TASK @%p CLEARED\n", cp); #endif } return i; } /* * chip handler for TASKS recovery * * We cannot safely abort a command, while the SCRIPTS * processor is running, since we just would be in race * with it. * * As long as we have tasks to abort, we keep the SEM * bit set in the ISTAT. When this bit is set, the * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) * each time it enters the scheduler. * * If we have to reset a target, clear tasks of a unit, * or to perform the abort of a disconnected job, we * restart the SCRIPTS for selecting the target. Once * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). * If it loses arbitration, the SCRIPTS will interrupt again * the next time it will enter its scheduler, and so on ... * * On SIR_TARGET_SELECTED, we scan for the more * appropriate thing to do: * * - If nothing, we just sent a M_ABORT message to the * target to get rid of the useless SCSI bus ownership. * According to the specs, no tasks shall be affected. * - If the target is to be reset, we send it a M_RESET * message. * - If a logical UNIT is to be cleared , we send the * IDENTIFY(lun) + M_ABORT. * - If an untagged task is to be aborted, we send the * IDENTIFY(lun) + M_ABORT. * - If a tagged task is to be aborted, we send the * IDENTIFY(lun) + task attributes + M_ABORT_TAG. * * Once our 'kiss of death' :) message has been accepted * by the target, the SCRIPTS interrupts again * (SIR_ABORT_SENT). On this interrupt, we complete * all the CCBs that should have been aborted by the * target according to our message. */ static void sym_sir_task_recovery(hcb_p np, int num) { SYM_QUEHEAD *qp; ccb_p cp; tcb_p tp; int target=-1, lun=-1, task; int i, k; switch(num) { /* * The SCRIPTS processor stopped before starting * the next command in order to allow us to perform * some task recovery. */ case SIR_SCRIPT_STOPPED: /* * Do we have any target to reset or unit to clear ? */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { tp = &np->target[i]; if (tp->to_reset || (tp->lun0p && tp->lun0p->to_clear)) { target = i; break; } if (!tp->lunmp) continue; for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { target = i; break; } } if (target != -1) break; } /* * If not, walk the busy queue for any * disconnected CCB to be aborted. */ if (target == -1) { FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); if (cp->host_status != HS_DISCONNECT) continue; if (cp->to_abort) { target = cp->target; break; } } } /* * If some target is to be selected, * prepare and start the selection. */ if (target != -1) { tp = &np->target[target]; np->abrt_sel.sel_id = target; np->abrt_sel.sel_scntl3 = tp->head.wval; np->abrt_sel.sel_sxfer = tp->head.sval; OUTL(nc_dsa, np->hcb_ba); OUTL_DSP (SCRIPTB_BA (np, sel_for_abort)); return; } /* * Now look for a CCB to abort that haven't started yet. * Btw, the SCRIPTS processor is still stopped, so * we are not in race. 
*/ i = 0; cp = 0; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->host_status != HS_BUSY && cp->host_status != HS_NEGOTIATE) continue; if (!cp->to_abort) continue; #ifdef SYM_CONF_IARB_SUPPORT /* * If we are using IMMEDIATE ARBITRATION, we donnot * want to cancel the last queued CCB, since the * SCRIPTS may have anticipated the selection. */ if (cp == np->last_cp) { cp->to_abort = 0; continue; } #endif i = 1; /* Means we have found some */ break; } if (!i) { /* * We are done, so we donnot need * to synchronize with the SCRIPTS anylonger. * Remove the SEM flag from the ISTAT. */ np->istat_sem = 0; OUTB (nc_istat, SIGP); break; } /* * Compute index of next position in the start * queue the SCRIPTS intends to start and dequeue * all CCBs for that device that haven't been started. */ i = (INL (nc_scratcha) - np->squeue_ba) / 4; i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); /* * Make sure at least our IO to abort has been dequeued. */ assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ); /* * Keep track in cam status of the reason of the abort. */ if (cp->to_abort == 2) sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT); else sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED); /* * Complete with error everything that we have dequeued. */ sym_flush_comp_queue(np, 0); break; /* * The SCRIPTS processor has selected a target * we may have some manual recovery to perform for. */ case SIR_TARGET_SELECTED: target = (INB (nc_sdid) & 0xf); tp = &np->target[target]; np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg)); /* * If the target is to be reset, prepare a * M_RESET message and clear the to_reset flag * since we donnot expect this operation to fail. */ if (tp->to_reset) { np->abrt_msg[0] = M_RESET; np->abrt_tbl.size = 1; tp->to_reset = 0; break; } /* * Otherwise, look for some logical unit to be cleared. */ if (tp->lun0p && tp->lun0p->to_clear) lun = 0; else if (tp->lunmp) { for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { lun = k; break; } } } /* * If a logical unit is to be cleared, prepare * an IDENTIFY(lun) + ABORT MESSAGE. */ if (lun != -1) { lcb_p lp = sym_lp(np, tp, lun); lp->to_clear = 0; /* We donnot expect to fail here */ np->abrt_msg[0] = M_IDENTIFY | lun; np->abrt_msg[1] = M_ABORT; np->abrt_tbl.size = 2; break; } /* * Otherwise, look for some disconnected job to * abort for this target. */ i = 0; cp = 0; FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->host_status != HS_DISCONNECT) continue; if (cp->target != target) continue; if (!cp->to_abort) continue; i = 1; /* Means we have some */ break; } /* * If we have none, probably since the device has * completed the command before we won abitration, * send a M_ABORT message without IDENTIFY. * According to the specs, the device must just * disconnect the BUS and not abort any task. */ if (!i) { np->abrt_msg[0] = M_ABORT; np->abrt_tbl.size = 1; break; } /* * We have some task to abort. * Set the IDENTIFY(lun) */ np->abrt_msg[0] = M_IDENTIFY | cp->lun; /* * If we want to abort an untagged command, we * will send an IDENTIFY + M_ABORT. * Otherwise (tagged command), we will send * an IDENTIFY + task attributes + ABORT TAG. 
*/ if (cp->tag == NO_TAG) { np->abrt_msg[1] = M_ABORT; np->abrt_tbl.size = 2; } else { np->abrt_msg[1] = cp->scsi_smsg[1]; np->abrt_msg[2] = cp->scsi_smsg[2]; np->abrt_msg[3] = M_ABORT_TAG; np->abrt_tbl.size = 4; } /* * Keep track of software timeout condition, since the * peripheral driver may not count retries on abort * conditions not due to timeout. */ if (cp->to_abort == 2) sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT); cp->to_abort = 0; /* We donnot expect to fail here */ break; /* * The target has accepted our message and switched * to BUS FREE phase as we expected. */ case SIR_ABORT_SENT: target = (INB (nc_sdid) & 0xf); tp = &np->target[target]; /* ** If we didn't abort anything, leave here. */ if (np->abrt_msg[0] == M_ABORT) break; /* * If we sent a M_RESET, then a hardware reset has * been performed by the target. * - Reset everything to async 8 bit * - Tell ourself to negotiate next time :-) * - Prepare to clear all disconnected CCBs for * this target from our task list (lun=task=-1) */ lun = -1; task = -1; if (np->abrt_msg[0] == M_RESET) { tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; tp->tinfo.current.period = 0; tp->tinfo.current.offset = 0; tp->tinfo.current.width = BUS_8_BIT; tp->tinfo.current.options = 0; } /* * Otherwise, check for the LUN and TASK(s) * concerned by the cancelation. * If it is not ABORT_TAG then it is CLEAR_QUEUE * or an ABORT message :-) */ else { lun = np->abrt_msg[0] & 0x3f; if (np->abrt_msg[1] == M_ABORT_TAG) task = np->abrt_msg[2]; } /* * Complete all the CCBs the device should have * aborted due to our 'kiss of death' message. */ i = (INL (nc_scratcha) - np->squeue_ba) / 4; (void) sym_dequeue_from_squeue(np, i, target, lun, -1); (void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task); sym_flush_comp_queue(np, 0); /* * If we sent a BDR, make uper layer aware of that. */ if (np->abrt_msg[0] == M_RESET) xpt_async(AC_SENT_BDR, np->path, NULL); break; } /* * Print to the log the message we intend to send. */ if (num == SIR_TARGET_SELECTED) { PRINT_TARGET(np, target); sym_printl_hex("control msgout:", np->abrt_msg, np->abrt_tbl.size); np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size); } /* * Let the SCRIPTS processor continue. */ OUTONB_STD (); } /* * Gerard's alchemy:) that deals with with the data * pointer for both MDP and the residual calculation. * * I didn't want to bloat the code by more than 200 * lignes for the handling of both MDP and the residual. * This has been achieved by using a data pointer * representation consisting in an index in the data * array (dp_sg) and a negative offset (dp_ofs) that * have the following meaning: * * - dp_sg = SYM_CONF_MAX_SG * we are at the end of the data script. * - dp_sg < SYM_CONF_MAX_SG * dp_sg points to the next entry of the scatter array * we want to transfer. * - dp_ofs < 0 * dp_ofs represents the residual of bytes of the * previous entry scatter entry we will send first. * - dp_ofs = 0 * no residual to send first. * * The function sym_evaluate_dp() accepts an arbitray * offset (basically from the MDP message) and returns * the corresponding values of dp_sg and dp_ofs. */ static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs) { u32 dp_scr; int dp_ofs, dp_sg, dp_sgmin; int tmp; struct sym_pmc *pm; /* * Compute the resulted data pointer in term of a script * address within some DATA script and a signed byte offset. 
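 *
 * A worked example of the representation, with made-up numbers:
 * assume SYM_CONF_MAX_SG == 96 and cp->segments == 3, so the
 * valid entries are data[93..95] and dp_sgmin == 93. A saved
 * pointer sitting 100 bytes before the end of entry 94 is then
 * dp_sg == 95 with dp_ofs == -100: "before starting entry 95,
 * first send the last 100 bytes of entry 94". And dp_sg == 96
 * with dp_ofs == 0 means the whole transfer has been sent.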
*/ dp_scr = scr; dp_ofs = *ofs; if (dp_scr == SCRIPTA_BA (np, pm0_data)) pm = &cp->phys.pm0; else if (dp_scr == SCRIPTA_BA (np, pm1_data)) pm = &cp->phys.pm1; else pm = 0; if (pm) { dp_scr = scr_to_cpu(pm->ret); dp_ofs -= scr_to_cpu(pm->sg.size); } /* * If we are auto-sensing, then we are done. */ if (cp->host_flags & HF_SENSE) { *ofs = dp_ofs; return 0; } /* * Deduce the index of the sg entry. * Keep track of the index of the first valid entry. * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the * end of the data. */ tmp = scr_to_cpu(cp->phys.head.goalp); dp_sg = SYM_CONF_MAX_SG; if (dp_scr != tmp) dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4); dp_sgmin = SYM_CONF_MAX_SG - cp->segments; /* * Move to the sg entry the data pointer belongs to. * * If we are inside the data area, we expect result to be: * * Either, * dp_ofs = 0 and dp_sg is the index of the sg entry * the data pointer belongs to (or the end of the data) * Or, * dp_ofs < 0 and dp_sg is the index of the sg entry * the data pointer belongs to + 1. */ if (dp_ofs < 0) { int n; while (dp_sg > dp_sgmin) { --dp_sg; tmp = scr_to_cpu(cp->phys.data[dp_sg].size); n = dp_ofs + (tmp & 0xffffff); if (n > 0) { ++dp_sg; break; } dp_ofs = n; } } else if (dp_ofs > 0) { while (dp_sg < SYM_CONF_MAX_SG) { tmp = scr_to_cpu(cp->phys.data[dp_sg].size); dp_ofs -= (tmp & 0xffffff); ++dp_sg; if (dp_ofs <= 0) break; } } /* * Make sure the data pointer is inside the data area. * If not, return some error. */ if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0)) goto out_err; else if (dp_sg > SYM_CONF_MAX_SG || (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0)) goto out_err; /* * Save the extreme pointer if needed. */ if (dp_sg > cp->ext_sg || (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { cp->ext_sg = dp_sg; cp->ext_ofs = dp_ofs; } /* * Return data. */ *ofs = dp_ofs; return dp_sg; out_err: return -1; } /* * chip handler for MODIFY DATA POINTER MESSAGE * * We also call this function on IGNORE WIDE RESIDUE * messages that do not match a SWIDE full condition. * Btw, we assume in that situation that such a message * is equivalent to a MODIFY DATA POINTER (offset=-1). */ static void sym_modify_dp(hcb_p np, tcb_p tp, ccb_p cp, int ofs) { int dp_ofs = ofs; u32 dp_scr = INL (nc_temp); u32 dp_ret; u32 tmp; u_char hflags; int dp_sg; struct sym_pmc *pm; /* * Not supported for auto-sense. */ if (cp->host_flags & HF_SENSE) goto out_reject; /* * Apply our alchemy:) (see comments in sym_evaluate_dp()), * to the resulted data pointer. */ dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs); if (dp_sg < 0) goto out_reject; /* * And our alchemy:) allows to easily calculate the data * script address we want to return for the next data phase. */ dp_ret = cpu_to_scr(cp->phys.head.goalp); dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4); /* * If offset / scatter entry is zero we donnot need * a context for the new current data pointer. */ if (dp_ofs == 0) { dp_scr = dp_ret; goto out_ok; } /* * Get a context for the new current data pointer. */ hflags = INB (HF_PRT); if (hflags & HF_DP_SAVED) hflags ^= HF_ACT_PM; if (!(hflags & HF_ACT_PM)) { pm = &cp->phys.pm0; dp_scr = SCRIPTA_BA (np, pm0_data); } else { pm = &cp->phys.pm1; dp_scr = SCRIPTA_BA (np, pm1_data); } hflags &= ~(HF_DP_SAVED); OUTB (HF_PRT, hflags); /* * Set up the new current data pointer. * ofs < 0 there, and for the next data phase, we * want to transfer part of the data of the sg entry * corresponding to index dp_sg-1 prior to returning * to the main data script. 
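 *
 * For instance (made-up numbers), if data[dp_sg-1] is
 * { addr 0x1000, size 0x800 } and dp_ofs == -0x100, the code
 * below yields pm->sg = { addr 0x1000 + 0x800 - 0x100 == 0x1700,
 * size 0x100 }: exactly the trailing 0x100 bytes of that entry,
 * after which SCRIPTS returns to dp_ret in the main data script.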
*/ pm->ret = cpu_to_scr(dp_ret); tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr); tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs; pm->sg.addr = cpu_to_scr(tmp); pm->sg.size = cpu_to_scr(-dp_ofs); out_ok: OUTL (nc_temp, dp_scr); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; out_reject: OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * chip calculation of the data residual. * * As I used to say, the requirement of data residual * in SCSI is broken, useless and cannot be achieved * without huge complexity. * But most OSes and even the official CAM require it. * When stupidity happens to be so widely spread inside * a community, it gets hard to convince. * * Anyway, I don't care, since I am not going to use * any software that considers this data residual as * a relevant information. :) */ static int sym_compute_residual(hcb_p np, ccb_p cp) { int dp_sg, dp_sgmin, resid = 0; int dp_ofs = 0; /* * Check for some data lost or just thrown away. * We are not required to be quite accurate in this * situation. Btw, if we are odd for output and the * device claims some more data, it may well happen * than our residual be zero. :-) */ if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { if (cp->xerr_status & XE_EXTRA_DATA) resid -= cp->extra_bytes; if (cp->xerr_status & XE_SODL_UNRUN) ++resid; if (cp->xerr_status & XE_SWIDE_OVRUN) --resid; } /* * If all data has been transferred, * there is no residual. */ if (cp->phys.head.lastp == cp->phys.head.goalp) return resid; /* * If no data transfer occurs, or if the data * pointer is weird, return full residual. */ if (cp->startp == cp->phys.head.lastp || sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), &dp_ofs) < 0) { return cp->data_len; } /* * If we were auto-sensing, then we are done. */ if (cp->host_flags & HF_SENSE) { return -dp_ofs; } /* * We are now full comfortable in the computation * of the data residual (2's complement). */ dp_sgmin = SYM_CONF_MAX_SG - cp->segments; resid = -cp->ext_ofs; for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); resid += (tmp & 0xffffff); } /* * Hopefully, the result is not too wrong. */ return resid; } /* * Print out the content of a SCSI message. */ static int sym_show_msg (u_char * msg) { u_char i; printf ("%x",*msg); if (*msg==M_EXTENDED) { for (i=1;i<8;i++) { if (i-1>msg[1]) break; printf ("-%x",msg[i]); }; return (i+1); } else if ((*msg & 0xf0) == 0x20) { printf ("-%x",msg[1]); return (2); }; return (1); } static void sym_print_msg (ccb_p cp, char *label, u_char *msg) { PRINT_ADDR(cp); if (label) printf ("%s: ", label); (void) sym_show_msg (msg); printf (".\n"); } /* * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. * * When we try to negotiate, we append the negotiation message * to the identify and (maybe) simple tag message. * The host status field is set to HS_NEGOTIATE to mark this * situation. * * If the target doesn't answer this message immediately * (as required by the standard), the SIR_NEGO_FAILED interrupt * will be raised eventually. * The handler removes the HS_NEGOTIATE status, and sets the * negotiated value to the default (async / nowide). * * If we receive a matching answer immediately, we check it * for validity, and set the values. * * If we receive a Reject message immediately, we assume the * negotiation has failed, and fall back to standard values. * * If we receive a negotiation message while not in HS_NEGOTIATE * state, it's a target initiated negotiation. 
We prepare a * (hopefully) valid answer, set our parameters, and send back * this answer to the target. * * If the target doesn't fetch the answer (no message out phase), * we assume the negotiation has failed, and fall back to default * settings (SIR_NEGO_PROTO interrupt). * * When we set the values, we adjust them in all ccbs belonging * to this target, in the controller's register, and in the "phys" * field of the controller's struct sym_hcb. */ /* * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message. */ static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, ofs, per, fak, div; int req = 1; /* * Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgin", np->msgin); }; /* * request or answer ? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_SYNC) goto reject_it; req = 0; } /* * get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[4]; /* * check values against our limits. */ if (ofs) { if (ofs > np->maxoffs) {chg = 1; ofs = np->maxoffs;} if (req) { if (ofs > tp->tinfo.user.offset) {chg = 1; ofs = tp->tinfo.user.offset;} } } if (ofs) { if (per < np->minsync) {chg = 1; per = np->minsync;} if (req) { if (per < tp->tinfo.user.period) {chg = 1; per = tp->tinfo.user.period;} } } div = fak = 0; if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0) goto reject_it; if (DEBUG_FLAGS & DEBUG_NEGO) { PRINT_ADDR(cp); printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n", ofs, per, div, fak, chg); } /* * This was an answer message */ if (req == 0) { if (chg) /* Answer wasn't acceptable. */ goto reject_it; sym_setsync (np, cp, ofs, per, div, fak); OUTL_DSP (SCRIPTA_BA (np, clrack)); return; } /* * It was a request. Set value and * prepare an answer message */ sym_setsync (np, cp, ofs, per, div, fak); np->msgout[0] = M_EXTENDED; np->msgout[1] = 3; np->msgout[2] = M_X_SYNC_REQ; np->msgout[3] = per; np->msgout[4] = ofs; cp->nego_status = NS_SYNC; if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "sync msgout", np->msgout); } np->msgin [0] = M_NOOP; OUTL_DSP (SCRIPTB_BA (np, sdtr_resp)); return; reject_it: sym_setsync (np, cp, 0, 0, 0, 0); OUTL_DSP (SCRIPTB_BA (np, msg_bad)); } /* * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message. */ static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp) { u_char chg, ofs, per, fak, dt, div, wide; int req = 1; /* * Synchronous request message received. */ if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_msg(cp, "ppr msgin", np->msgin); }; /* * get requested values. */ chg = 0; per = np->msgin[3]; ofs = np->msgin[5]; wide = np->msgin[6]; dt = np->msgin[7] & PPR_OPT_DT; /* * request or answer ? */ if (INB (HS_PRT) == HS_NEGOTIATE) { OUTB (HS_PRT, HS_BUSY); if (cp->nego_status && cp->nego_status != NS_PPR) goto reject_it; req = 0; } /* * check values against our limits. 
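 *
 * (For reference, the extended PPR message layout as used here:
 * byte 0 = M_EXTENDED, byte 1 = 6 (length), byte 2 = M_X_PPR_REQ,
 * byte 3 = transfer period factor, byte 4 = reserved, byte 5 =
 * REQ/ACK offset, byte 6 = transfer width exponent, byte 7 =
 * protocol options (PPR_OPT_DT selects DT clocking). The clamping
 * below negotiates each field down to both the chip's and the
 * user's limits.)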
	 */
	if (wide > np->maxwide)
		{chg = 1; wide = np->maxwide;}
	if (!wide || !(np->features & FE_ULTRA3))
		dt &= ~PPR_OPT_DT;
	if (req) {
		if (wide > tp->tinfo.user.width)
			{chg = 1; wide = tp->tinfo.user.width;}
	}

	if (!(np->features & FE_U3EN))	/* Broken U3EN bit not supported */
		dt &= ~PPR_OPT_DT;

	if (dt != (np->msgin[7] & PPR_OPT_MASK))
		chg = 1;

	if (ofs) {
		if (dt) {
			if (ofs > np->maxoffs_dt)
				{chg = 1; ofs = np->maxoffs_dt;}
		}
		else if (ofs > np->maxoffs)
			{chg = 1; ofs = np->maxoffs;}
		if (req) {
			if (ofs > tp->tinfo.user.offset)
				{chg = 1; ofs = tp->tinfo.user.offset;}
		}
	}

	if (ofs) {
		if (dt) {
			if (per < np->minsync_dt)
				{chg = 1; per = np->minsync_dt;}
		}
		else if (per < np->minsync)
			{chg = 1; per = np->minsync;}
		if (req) {
			if (per < tp->tinfo.user.period)
				{chg = 1; per = tp->tinfo.user.period;}
		}
	}

	div = fak = 0;
	if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
		goto reject_it;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("ppr: "
			"dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n",
			dt, ofs, per, wide, div, fak, chg);
	}

	/*
	 * It was an answer.
	 */
	if (req == 0) {
		if (chg)	/* Answer wasn't acceptable */
			goto reject_it;
		sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);
		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	}

	/*
	 * It was a request. Set value and
	 * prepare an answer message
	 */
	sym_setpprot (np, cp, dt, ofs, per, wide, div, fak);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 6;
	np->msgout[2] = M_X_PPR_REQ;
	np->msgout[3] = per;
	np->msgout[4] = 0;
	np->msgout[5] = ofs;
	np->msgout[6] = wide;
	np->msgout[7] = dt;

	cp->nego_status = NS_PPR;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "ppr msgout", np->msgout);
	}

	np->msgin [0] = M_NOOP;

	OUTL_DSP (SCRIPTB_BA (np, ppr_resp));
	return;
reject_it:
	sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
	/*
	 * If it was a device response that should result in
	 * ST, we may want to try a legacy negotiation later.
	 */
	if (!req && !dt) {
		tp->tinfo.goal.options = 0;
		tp->tinfo.goal.width   = wide;
		tp->tinfo.goal.period  = per;
		tp->tinfo.goal.offset  = ofs;
	}
	return;
}

/*
 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
 */
static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp)
{
	u_char	chg, wide;
	int	req = 1;

	/*
	 * Wide request message received.
	 */
	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "wide msgin", np->msgin);
	};

	/*
	 * Is it a request from the device?
	 */
	if (INB (HS_PRT) == HS_NEGOTIATE) {
		OUTB (HS_PRT, HS_BUSY);
		if (cp->nego_status && cp->nego_status != NS_WIDE)
			goto reject_it;
		req = 0;
	}

	/*
	 * get requested values.
	 */
	chg  = 0;
	wide = np->msgin[3];

	/*
	 * check values against driver limits.
	 */
	if (wide > np->maxwide)
		{chg = 1; wide = np->maxwide;}
	if (req) {
		if (wide > tp->tinfo.user.width)
			{chg = 1; wide = tp->tinfo.user.width;}
	}

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		PRINT_ADDR(cp);
		printf ("wdtr: wide=%d chg=%d.\n", wide, chg);
	}

	/*
	 * This was an answer message
	 */
	if (req == 0) {
		if (chg)	/* Answer wasn't acceptable. */
			goto reject_it;
		sym_setwide (np, cp, wide);

		/*
		 * Negotiate for SYNC immediately after WIDE response.
		 * This allows negotiating for both WIDE and SYNC on
		 * a single SCSI command (Suggested by Justin Gibbs).
		 */
		if (tp->tinfo.goal.offset) {
			np->msgout[0] = M_EXTENDED;
			np->msgout[1] = 3;
			np->msgout[2] = M_X_SYNC_REQ;
			np->msgout[3] = tp->tinfo.goal.period;
			np->msgout[4] = tp->tinfo.goal.offset;

			if (DEBUG_FLAGS & DEBUG_NEGO) {
				sym_print_msg(cp, "sync msgout", np->msgout);
			}

			cp->nego_status = NS_SYNC;
			OUTB (HS_PRT, HS_NEGOTIATE);
			OUTL_DSP (SCRIPTB_BA (np, sdtr_resp));
			return;
		}

		OUTL_DSP (SCRIPTA_BA (np, clrack));
		return;
	};

	/*
	 * It was a request, set value and
	 * prepare an answer message
	 */
	sym_setwide (np, cp, wide);

	np->msgout[0] = M_EXTENDED;
	np->msgout[1] = 2;
	np->msgout[2] = M_X_WIDE_REQ;
	np->msgout[3] = wide;

	np->msgin [0] = M_NOOP;

	cp->nego_status = NS_WIDE;

	if (DEBUG_FLAGS & DEBUG_NEGO) {
		sym_print_msg(cp, "wide msgout", np->msgout);
	}

	OUTL_DSP (SCRIPTB_BA (np, wdtr_resp));
	return;
reject_it:
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
}

/*
 * Reset SYNC or WIDE to default settings.
 *
 * Called when a negotiation does not succeed either
 * on rejection or on protocol error.
 *
 * If it was a PPR that caused problems, we may want to
 * try a legacy negotiation later.
 */
static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp)
{
	/*
	 * any error in negotiation:
	 * fall back to default mode.
	 */
	switch (cp->nego_status) {
	case NS_PPR:
#if 0
		sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0);
#else
		tp->tinfo.goal.options = 0;
		if (tp->tinfo.goal.period < np->minsync)
			tp->tinfo.goal.period = np->minsync;
		if (tp->tinfo.goal.offset > np->maxoffs)
			tp->tinfo.goal.offset = np->maxoffs;
#endif
		break;
	case NS_SYNC:
		sym_setsync (np, cp, 0, 0, 0, 0);
		break;
	case NS_WIDE:
		sym_setwide (np, cp, 0);
		break;
	};
	np->msgin [0] = M_NOOP;
	np->msgout[0] = M_NOOP;
	cp->nego_status = 0;
}

/*
 * chip handler for MESSAGE REJECT received in response to
 * a WIDE or SYNCHRONOUS negotiation.
 */
static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp)
{
	sym_nego_default(np, tp, cp);
	OUTB (HS_PRT, HS_BUSY);
}

/*
 * chip exception handler for programmed interrupts.
 */
static void sym_int_sir (hcb_p np)
{
	u_char	num	= INB (nc_dsps);
	u32	dsa	= INL (nc_dsa);
	ccb_p	cp	= sym_ccb_from_dsa(np, dsa);
	u_char	target	= INB (nc_sdid) & 0x0f;
	tcb_p	tp	= &np->target[target];
	int	tmp;

	if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);

	switch (num) {
	/*
	 * Command has been completed with error condition
	 * or has been auto-sensed.
	 */
	case SIR_COMPLETE_ERROR:
		sym_complete_error(np, cp);
		return;
	/*
	 * The C code is currently trying to recover from something.
	 * Typically, the user wants to abort some command.
	 */
	case SIR_SCRIPT_STOPPED:
	case SIR_TARGET_SELECTED:
	case SIR_ABORT_SENT:
		sym_sir_task_recovery(np, num);
		return;
	/*
	 * The device didn't go to MSG OUT phase after having
	 * been selected with ATN. We do not want to handle
	 * that.
	 */
	case SIR_SEL_ATN_NO_MSG_OUT:
		printf ("%s:%d: No MSG OUT phase after selection with ATN.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device didn't switch to MSG IN phase after
	 * having reselected the initiator.
	 */
	case SIR_RESEL_NO_MSG_IN:
		printf ("%s:%d: No MSG IN phase after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * After reselection, the device sent a message that wasn't
	 * an IDENTIFY.
	 */
	case SIR_RESEL_NO_IDENTIFY:
		printf ("%s:%d: No IDENTIFY after reselection.\n",
			sym_name (np), target);
		goto out_stuck;
	/*
	 * The device reselected a LUN we do not know about.
	 */
	case SIR_RESEL_BAD_LUN:
		np->msgout[0] = M_RESET;
		goto out;
	/*
	 * The device reselected for an untagged nexus and we
	 * haven't any.
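	 * We answer with ABORT so that the target drops the nexus.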
	 */
	case SIR_RESEL_BAD_I_T_L:
		np->msgout[0] = M_ABORT;
		goto out;
	/*
	 * The device reselected for a tagged nexus that we do not
	 * have.
	 */
	case SIR_RESEL_BAD_I_T_L_Q:
		np->msgout[0] = M_ABORT_TAG;
		goto out;
	/*
	 * The SCRIPTS let us know that the device has grabbed
	 * our message and will abort the job.
	 */
	case SIR_RESEL_ABORTED:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		printf ("%s:%d: message %x sent on bad reselection.\n",
			sym_name (np), target, np->lastmsg);
		goto out;
	/*
	 * The SCRIPTS let us know that a message has been
	 * successfully sent to the device.
	 */
	case SIR_MSG_OUT_DONE:
		np->lastmsg = np->msgout[0];
		np->msgout[0] = M_NOOP;
		/* Should we really care about that? */
		if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
			if (cp) {
				cp->xerr_status &= ~XE_PARITY_ERR;
				if (!cp->xerr_status)
					OUTOFFB (HF_PRT, HF_EXT_ERR);
			}
		}
		goto out;
	/*
	 * The device didn't send a GOOD SCSI status.
	 * We may have some work to do prior to allowing
	 * the SCRIPTS processor to continue.
	 */
	case SIR_BAD_SCSI_STATUS:
		if (!cp)
			goto out;
		sym_sir_bad_scsi_status(np, num, cp);
		return;
	/*
	 * We are asked by the SCRIPTS to prepare a
	 * REJECT message.
	 */
	case SIR_REJECT_TO_SEND:
		sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
		np->msgout[0] = M_REJECT;
		goto out;
	/*
	 * We have been ODD at the end of a DATA IN
	 * transfer and the device didn't send an
	 * IGNORE WIDE RESIDUE message.
	 * It is a data overrun condition.
	 */
	case SIR_SWIDE_OVERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SWIDE_OVRUN;
		}
		goto out;
	/*
	 * We have been ODD at the end of a DATA OUT
	 * transfer.
	 * It is a data underrun condition.
	 */
	case SIR_SODL_UNDERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_SODL_UNRUN;
		}
		goto out;
	/*
	 * The device wants us to transfer more data than
	 * expected or in the wrong direction.
	 * The number of extra bytes is in scratcha.
	 * It is a data overrun condition.
	 */
	case SIR_DATA_OVERRUN:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_EXTRA_DATA;
			cp->extra_bytes += INL (nc_scratcha);
		}
		goto out;
	/*
	 * The device switched to an illegal phase (4/5).
	 */
	case SIR_BAD_PHASE:
		if (cp) {
			OUTONB (HF_PRT, HF_EXT_ERR);
			cp->xerr_status |= XE_BAD_PHASE;
		}
		goto out;
	/*
	 * We received a message.
	 */
	case SIR_MSG_RECEIVED:
		if (!cp)
			goto out_stuck;
		switch (np->msgin [0]) {
		/*
		 * We received an extended message.
		 * We handle MODIFY DATA POINTER, SDTR, WDTR, PPR
		 * and reject all other extended messages.
		 */
		case M_EXTENDED:
			switch (np->msgin [2]) {
			case M_X_MODIFY_DP:
				if (DEBUG_FLAGS & DEBUG_POINTER)
					sym_print_msg(cp,"modify DP",np->msgin);
				tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
				      (np->msgin[5]<<8)  + (np->msgin[6]);
				sym_modify_dp(np, tp, cp, tmp);
				return;
			case M_X_SYNC_REQ:
				sym_sync_nego(np, tp, cp);
				return;
			case M_X_PPR_REQ:
				sym_ppr_nego(np, tp, cp);
				return;
			case M_X_WIDE_REQ:
				sym_wide_nego(np, tp, cp);
				return;
			default:
				goto out_reject;
			}
			break;
		/*
		 * We received a 1/2 byte message not handled from SCRIPTS.
		 * We are only expecting MESSAGE REJECT and IGNORE WIDE
		 * RESIDUE messages that haven't been anticipated by
		 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
		 * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
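		 * This works because ignoring one residual byte on a wide
		 * bus amounts to moving the data pointer back by one, which
		 * is exactly what sym_modify_dp(np, tp, cp, -1) does below.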
		 */
		case M_IGN_RESIDUE:
			if (DEBUG_FLAGS & DEBUG_POINTER)
				sym_print_msg(cp,"ign wide residue", np->msgin);
			sym_modify_dp(np, tp, cp, -1);
			return;
		case M_REJECT:
			if (INB (HS_PRT) == HS_NEGOTIATE)
				sym_nego_rejected(np, tp, cp);
			else {
				PRINT_ADDR(cp);
				printf ("M_REJECT received (%x:%x).\n",
					scr_to_cpu(np->lastmsg), np->msgout[0]);
			}
			goto out_clrack;
			break;
		default:
			goto out_reject;
		}
		break;
	/*
	 * We received an unknown message.
	 * Ignore all MSG IN phases and reject it.
	 */
	case SIR_MSG_WEIRD:
		sym_print_msg(cp, "WEIRD message received", np->msgin);
		OUTL_DSP (SCRIPTB_BA (np, msg_weird));
		return;
	/*
	 * Negotiation failed.
	 * Target does not send us the reply.
	 * Remove the HS_NEGOTIATE status.
	 */
	case SIR_NEGO_FAILED:
		OUTB (HS_PRT, HS_BUSY);
	/*
	 * Negotiation failed.
	 * Target does not want answer message.
	 */
	case SIR_NEGO_PROTO:
		sym_nego_default(np, tp, cp);
		goto out;
	};

out:
	OUTONB_STD ();
	return;
out_reject:
	OUTL_DSP (SCRIPTB_BA (np, msg_bad));
	return;
out_clrack:
	OUTL_DSP (SCRIPTA_BA (np, clrack));
	return;
out_stuck:
	return;
}

/*
 * Acquire a control block
 */
static	ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(np, tp, ln);
	u_short tag = NO_TAG;
	SYM_QUEHEAD *qp;
	ccb_p cp = (ccb_p) 0;

	/*
	 * Look for a free CCB
	 */
	if (sym_que_empty(&np->free_ccbq))
		(void) sym_alloc_ccb(np);
	qp = sym_remque_head(&np->free_ccbq);
	if (!qp)
		goto out;
	cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);

	/*
	 * If the LCB is not yet available and the LUN
	 * has been probed ok, try to allocate the LCB.
	 */
	if (!lp && sym_is_bit(tp->lun_map, ln)) {
		lp = sym_alloc_lcb(np, tn, ln);
		if (!lp)
			goto out_free;
	}

	/*
	 * If the LCB is not available here, then the
	 * logical unit is not yet discovered. For those
	 * ones only accept 1 SCSI IO per logical unit,
	 * since we cannot allow disconnections.
	 */
	if (!lp) {
		if (!sym_is_bit(tp->busy0_map, ln))
			sym_set_bit(tp->busy0_map, ln);
		else
			goto out_free;
	} else {
		/*
		 * If we have been asked for a tagged command.
		 */
		if (tag_order) {
			/*
			 * Debugging purpose.
			 */
			assert(lp->busy_itl == 0);
			/*
			 * Allocate resources for tags if not yet done.
			 */
			if (!lp->cb_tags) {
				sym_alloc_lcb_tags(np, tn, ln);
				if (!lp->cb_tags)
					goto out_free;
			}
			/*
			 * Get a tag for this SCSI IO and set up
			 * the CCB bus address for reselection,
			 * and count it for this LUN.
			 * Toggle reselect path to tagged.
			 */
			if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
				tag = lp->cb_tags[lp->ia_tag];
				if (++lp->ia_tag == SYM_CONF_MAX_TASK)
					lp->ia_tag = 0;
				lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
				++lp->busy_itlq;
				lp->head.resel_sa =
					cpu_to_scr(SCRIPTA_BA (np, resel_tag));
			}
			else
				goto out_free;
		}
		/*
		 * This command will not be tagged.
		 * If we already have either a tagged or untagged
		 * one, refuse to overlap this untagged one.
		 */
		else {
			/*
			 * Debugging purpose.
			 */
			assert(lp->busy_itl == 0 && lp->busy_itlq == 0);
			/*
			 * Count this nexus for this LUN.
			 * Set up the CCB bus address for reselection.
			 * Toggle reselect path to untagged.
			 */
			if (++lp->busy_itl == 1) {
				lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
				lp->head.resel_sa =
				      cpu_to_scr(SCRIPTA_BA (np, resel_no_tag));
			}
			else
				goto out_free;
		}
	}
	/*
	 * Put the CCB into the busy queue.
	 */
	sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);

	/*
	 * Remember all the information needed to free this CCB.
	 */
	cp->to_abort = 0;
	cp->tag	   = tag;
	cp->target = tn;
	cp->lun    = ln;

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		PRINT_LUN(np, tn, ln);
		printf ("ccb @%p using tag %d.\n", cp, tag);
	}

out:
	return cp;
out_free:
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
	return (ccb_p) 0;
}

/*
 * Release one control block
 */
static void sym_free_ccb (hcb_p np, ccb_p cp)
{
	tcb_p tp = &np->target[cp->target];
	lcb_p lp = sym_lp(np, tp, cp->lun);

	if (DEBUG_FLAGS & DEBUG_TAGS) {
		PRINT_LUN(np, cp->target, cp->lun);
		printf ("ccb @%p freeing tag %d.\n", cp, cp->tag);
	}

	/*
	 * If LCB available,
	 */
	if (lp) {
		/*
		 * If tagged, release the tag, set the reselect path.
		 */
		if (cp->tag != NO_TAG) {
			/*
			 * Free the tag value.
			 */
			lp->cb_tags[lp->if_tag] = cp->tag;
			if (++lp->if_tag == SYM_CONF_MAX_TASK)
				lp->if_tag = 0;
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
			--lp->busy_itlq;
		} else {	/* Untagged */
			/*
			 * Make the reselect path invalid,
			 * and uncount this CCB.
			 */
			lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
			--lp->busy_itl;
		}
		/*
		 * If no JOB active, make the LUN reselect path invalid.
		 */
		if (lp->busy_itlq == 0 && lp->busy_itl == 0)
			lp->head.resel_sa =
				cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));
	}
	/*
	 * Otherwise, we only accept 1 IO per LUN.
	 * Clear the bit that keeps track of this IO.
	 */
	else
		sym_clr_bit(tp->busy0_map, cp->lun);

	/*
	 * We do not queue more than 1 ccb per target
	 * with negotiation at any time. If this ccb was
	 * used for negotiation, clear this info in the tcb.
	 */
	if (cp == tp->nego_cp)
		tp->nego_cp = 0;

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If we just completed the last queued CCB,
	 * clear this info that is no longer relevant.
	 */
	if (cp == np->last_cp)
		np->last_cp = 0;
#endif

#ifdef	FreeBSD_Bus_Dma_Abstraction
	/*
	 * Unmap user data from DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_unload(np->data_dmat, cp->dmamap);
		cp->dmamapped = 0;
	}
#endif

	/*
	 * Make this CCB available.
	 */
	cp->cam_ccb = 0;
	cp->host_status = HS_IDLE;
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
}

/*
 * Allocate a CCB from memory and initialize its fixed part.
 */
static ccb_p sym_alloc_ccb(hcb_p np)
{
	ccb_p cp = 0;
	int hcode;

	/*
	 * Prevent allocating more CCBs than we can
	 * queue to the controller.
	 */
	if (np->actccbs >= SYM_CONF_MAX_START)
		return 0;

	/*
	 * Allocate memory for this CCB.
	 */
	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
	if (!cp)
		goto out_free;

	/*
	 * Allocate a bounce buffer for sense data.
	 */
	cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF");
	if (!cp->sns_bbuf)
		goto out_free;

	/*
	 * Allocate a map for the DMA of user data.
	 */
#ifdef	FreeBSD_Bus_Dma_Abstraction
	if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap))
		goto out_free;
#endif
	/*
	 * Count it.
	 */
	np->actccbs++;

	/*
	 * Compute the bus address of this ccb.
	 */
	cp->ccb_ba = vtobus(cp);

	/*
	 * Insert this ccb into the hashed list.
	 */
	hcode = CCB_HASH_CODE(cp->ccb_ba);
	cp->link_ccbh = np->ccbh[hcode];
	np->ccbh[hcode] = cp;

	/*
	 * Initialize the start and restart actions.
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA (np, idle));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l));

	/*
	 * Initialize some other fields.
	 */
	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));

	/*
	 * Chain into free ccb queue.
	 */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

	return cp;
out_free:
	if (cp) {
		if (cp->sns_bbuf)
			sym_mfree_dma(cp->sns_bbuf,SYM_SNS_BBUF_LEN,"SNS_BBUF");
		sym_mfree_dma(cp, sizeof(*cp), "CCB");
	}
	return 0;
}

/*
 * Look up a CCB from a DSA value.
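 * The DSA register holds the bus address of the CCB the SCRIPTS
 * processor is working on, so we hash that address (CCB_HASH_CODE)
 * and walk the collision chain built in sym_alloc_ccb().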
 */
static ccb_p sym_ccb_from_dsa(hcb_p np, u32 dsa)
{
	int hcode;
	ccb_p cp;

	hcode = CCB_HASH_CODE(dsa);
	cp = np->ccbh[hcode];
	while (cp) {
		if (cp->ccb_ba == dsa)
			break;
		cp = cp->link_ccbh;
	}

	return cp;
}

/*
 * Target control block initialisation.
 * Nothing important to do at the moment.
 */
static void sym_init_tcb (hcb_p np, u_char tn)
{
	/*
	 * Check some alignments required by the chip.
	 */
	assert (((offsetof(struct sym_reg, nc_sxfer) ^
		  offsetof(struct sym_tcb, head.sval)) &3) == 0);
	assert (((offsetof(struct sym_reg, nc_scntl3) ^
		  offsetof(struct sym_tcb, head.wval)) &3) == 0);
}

/*
 * Lun control block allocation and initialization.
 */
static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(np, tp, ln);

	/*
	 * Already done, just return.
	 */
	if (lp)
		return lp;
	/*
	 * Check against some race.
	 */
	assert(!sym_is_bit(tp->busy0_map, ln));

	/*
	 * Initialize the target control block if not yet.
	 */
	sym_init_tcb (np, tn);

	/*
	 * Allocate the LCB bus address array.
	 * Compute the bus address of this table.
	 */
	if (ln && !tp->luntbl) {
		int i;

		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
		if (!tp->luntbl)
			goto fail;
		for (i = 0 ; i < 64 ; i++)
			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
		tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
	}

	/*
	 * Allocate the table of pointers for LUN(s) > 0, if needed.
	 */
	if (ln && !tp->lunmp) {
		tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p),
				   "LUNMP");
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 * Allocate the lcb.
	 * Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}

	/*
	 * Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 * Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun));

	/*
	 * Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

fail:
	return lp;
}

/*
 * Allocate LCB resources for tagged command queuing.
 */
static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
{
	tcb_p tp = &np->target[tn];
	lcb_p lp = sym_lp(np, tp, ln);
	int i;

	/*
	 * If LCB not available, try to allocate it.
	 */
	if (!lp && !(lp = sym_alloc_lcb(np, tn, ln)))
		goto fail;

	/*
	 * Allocate the task table and the tag allocation
	 * circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		goto fail;
	lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
	if (!lp->cb_tags) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		lp->itlq_tbl = 0;
		goto fail;
	}

	/*
	 * Initialize the task table with invalid entries.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 * Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 * Make the task table available to SCRIPTS,
	 * and accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));

	return;
fail:
	return;
}

/*
 * Test the pci bus snoop logic :-(
 *
 * Has to be called with interrupts disabled.
 */
#ifndef SYM_CONF_IOMAPPED
static int sym_regtest (hcb_p np)
{
	register volatile u32 data;
	/*
	 * chip registers may NOT be cached.
	 * write 0xffffffff to a read only register area,
	 * and try to read it back.
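	 * If the value reads back unchanged, the register area is
	 * most likely being cached, which this driver cannot work with.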
	 */
	data = 0xffffffff;
	OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data);
	data = INL_OFF(offsetof(struct sym_reg, nc_dstat));
#if 1
	if (data == 0xffffffff) {
#else
	if ((data & 0xe2f0fffd) != 0x02000080) {
#endif
		printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
			(unsigned) data);
		return (0x10);
	};
	return (0);
}
#endif

static int sym_snooptest (hcb_p np)
{
	u32	sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
	int	i, err=0;
#ifndef SYM_CONF_IOMAPPED
	err |= sym_regtest (np);
	if (err) return (err);
#endif
restart_test:
	/*
	 * Enable Master Parity Checking as we intend
	 * to enable it for normal operations.
	 */
	OUTB (nc_ctest4, (np->rv_ctest4 & MPEE));
	/*
	 * init
	 */
	pc  = SCRIPTB0_BA (np, snooptest);
	host_wr = 1;
	sym_wr  = 2;
	/*
	 * Set memory and register.
	 */
	np->cache = cpu_to_scr(host_wr);
	OUTL (nc_temp, sym_wr);
	/*
	 * Start script (exchange values)
	 */
	OUTL (nc_dsa, np->hcb_ba);
	OUTL_DSP (pc);
	/*
	 * Wait 'til done (with timeout)
	 */
	for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
		if (INB(nc_istat) & (INTF|SIP|DIP))
			break;
	if (i>=SYM_SNOOP_TIMEOUT) {
		printf ("CACHE TEST FAILED: timeout.\n");
		return (0x20);
	};
	/*
	 * Check for fatal DMA errors.
	 */
	dstat = INB (nc_dstat);
#if 1	/* Band aid for broken hardware that fails PCI parity */
	if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
		printf ("%s: PCI DATA PARITY ERROR DETECTED - "
			"DISABLING MASTER DATA PARITY CHECKING.\n",
			sym_name(np));
		np->rv_ctest4 &= ~MPEE;
		goto restart_test;
	}
#endif
	if (dstat & (MDPE|BF|IID)) {
		printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat);
		return (0x80);
	}
	/*
	 * Save termination position.
	 */
	pc = INL (nc_dsp);
	/*
	 * Read memory and register.
	 */
	host_rd = scr_to_cpu(np->cache);
	sym_rd  = INL (nc_scratcha);
	sym_bk  = INL (nc_temp);

	/*
	 * Check termination position.
	 */
	if (pc != SCRIPTB0_BA (np, snoopend)+8) {
		printf ("CACHE TEST FAILED: script execution failed.\n");
		printf ("start=%08lx, pc=%08lx, end=%08lx\n",
			(u_long) SCRIPTB0_BA (np, snooptest), (u_long) pc,
			(u_long) SCRIPTB0_BA (np, snoopend) +8);
		return (0x40);
	};
	/*
	 * Show results.
	 */
	if (host_wr != sym_rd) {
		printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
			(int) host_wr, (int) sym_rd);
		err |= 1;
	};
	if (host_rd != sym_wr) {
		printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
			(int) sym_wr, (int) host_rd);
		err |= 2;
	};
	if (sym_bk != sym_wr) {
		printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
			(int) sym_wr, (int) sym_bk);
		err |= 4;
	};

	return (err);
}

/*
 * Determine the chip's clock frequency.
 *
 * This is essential for the negotiation of the synchronous
 * transfer rate.
 *
 * Note: we have to return the correct value.
 * THERE IS NO SAFE DEFAULT VALUE.
 *
 * Most NCR/SYMBIOS boards are delivered with a 40 MHz clock.
 * 53C860 and 53C875 rev. 1 support fast20 transfers but
 * do not have a clock doubler and so are provided with a
 * 80 MHz clock. All other fast20 boards incorporate a doubler
 * and so should be delivered with a 40 MHz clock.
 * The recent fast40 chips (895/896/895A/1010) use a 40 MHz base
 * clock and provide a clock quadrupler (160 MHz).
 */

/*
 * Select SCSI clock frequency
 */
static void sym_selectclock(hcb_p np, u_char scntl3)
{
	/*
	 * If multiplier not present or not selected, leave here.
	 */
	if (np->multiplier <= 1) {
		OUTB(nc_scntl3, scntl3);
		return;
	}

	if (sym_verbose >= 2)
		printf ("%s: enabling clock multiplier\n", sym_name(np));

	OUTB(nc_stest1, DBLEN);	   /* Enable clock multiplier */
	/*
	 * Wait for the LCKFRQ bit to be set if supported by the chip.
	 * Otherwise wait 20 micro-seconds.
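	 * (The loop below polls LCKFRQ up to 20 times with a 20
	 * micro-second delay between polls before complaining.)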
	 */
	if (np->features & FE_LCKFRQ) {
		int i = 20;
		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
			UDELAY (20);
		if (!i)
			printf("%s: the chip cannot lock the frequency\n",
				sym_name(np));
	} else
		UDELAY (20);
	OUTB(nc_stest3, HSC);		/* Halt the scsi clock	*/
	OUTB(nc_scntl3,	scntl3);
	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
	OUTB(nc_stest3, 0x00);		/* Restart scsi clock	*/
}

/*
 * calculate SCSI clock frequency (in KHz)
 */
static unsigned getfreq (hcb_p np, int gen)
{
	unsigned int ms = 0;
	unsigned int f;

	/*
	 * Measure GEN timer delay in order
	 * to calculate SCSI clock frequency
	 *
	 * This code will never execute too
	 * many loop iterations (if DELAY is
	 * reasonably correct). It could get
	 * too low a delay (too high a freq.)
	 * if the CPU is slow executing the
	 * loop for some reason (an NMI, for
	 * example). For this reason, if multiple
	 * measurements are performed, we trust
	 * the higher delay (lower frequency
	 * returned).
	 */
	OUTW (nc_sien , 0);	/* mask all scsi interrupts */
	(void) INW (nc_sist);	/* clear pending scsi interrupt */
	OUTB (nc_dien , 0);	/* mask all dma interrupts */
	(void) INW (nc_sist);	/* another one, just to be sure :) */
	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
	while (!(INW(nc_sist) & GEN) && ms++ < 100000)
		UDELAY (1000);	/* count ms */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	/*
	 * set prescaler to divide by whatever 0 means
	 * 0 ought to choose divide by 2, but appears
	 * to set divide by 3.5 mode in my 53c810 ...
	 */
	OUTB (nc_scntl3, 0);

	/*
	 * adjust for prescaler, and convert into KHz
	 */
	f = ms ? ((1 << gen) * 4340) / ms : 0;

	if (sym_verbose >= 2)
		printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
			sym_name(np), gen, ms, f);

	return f;
}

static unsigned sym_getfreq (hcb_p np)
{
	u_int f1, f2;
	int gen = 11;

	(void) getfreq (np, gen);	/* throw away first result */
	f1 = getfreq (np, gen);
	f2 = getfreq (np, gen);
	if (f1 > f2) f1 = f2;		/* trust lower result	*/
	return f1;
}

/*
 * Get/probe chip SCSI clock frequency
 */
static void sym_getclock (hcb_p np, int mult)
{
	unsigned char scntl3 = np->sv_scntl3;
	unsigned char stest1 = np->sv_stest1;
	unsigned f1;

	/*
	 * For the C10 core, assume 40 MHz.
	 */
	if (np->features & FE_C10) {
		np->multiplier = mult;
		np->clock_khz = 40000 * mult;
		return;
	}

	np->multiplier = 1;
	f1 = 40000;
	/*
	 * True with 875/895/896/895A with clock multiplier selected
	 */
	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
		if (sym_verbose >= 2)
			printf ("%s: clock multiplier found\n", sym_name(np));
		np->multiplier = mult;
	}

	/*
	 * If multiplier not found or scntl3 not 7,5,3,
	 * reset chip and get frequency from general purpose timer.
	 * Otherwise trust scntl3 BIOS setting.
	 */
	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
		f1 = sym_getfreq (np);

		if (sym_verbose)
			printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);

		if	(f1 < 45000)	f1 =  40000;
		else if (f1 < 55000)	f1 =  50000;
		else			f1 =  80000;

		if (f1 < 80000 && mult > 1) {
			if (sym_verbose >= 2)
				printf ("%s: clock multiplier assumed\n",
					sym_name(np));
			np->multiplier = mult;
		}
	} else {
		if	((scntl3 & 7) == 3)	f1 =  40000;
		else if	((scntl3 & 7) == 5)	f1 =  80000;
		else				f1 = 160000;

		f1 /= np->multiplier;
	}

	/*
	 * Compute controller synchronous parameters.
	 */
	f1		*= np->multiplier;
	np->clock_khz	 = f1;
}

/*
 * Get/probe PCI clock frequency
 */
static int sym_getpciclock (hcb_p np)
{
	int f = 0;

	/*
	 * For the C1010-33, this doesn't work.
	 * For the C1010-66, this will be tested when I'll have
	 * such a beast to play with.
	 */
	if (!(np->features & FE_C10)) {
		OUTB (nc_stest1, SCLK);	/* Use the PCI clock as SCSI clock */
		f = (int) sym_getfreq (np);
		OUTB (nc_stest1, 0);
	}
	np->pciclk_khz = f;

	return f;
}

/*============= DRIVER ACTION/COMPLETION ====================*/

/*
 * Print something that tells about extended errors.
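 * Extended errors are the XE_* bits accumulated in cp->xerr_status.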
 */
static void sym_print_xerr(ccb_p cp, int x_status)
{
	if (x_status & XE_PARITY_ERR) {
		PRINT_ADDR(cp);
		printf ("unrecovered SCSI parity error.\n");
	}
	if (x_status & XE_EXTRA_DATA) {
		PRINT_ADDR(cp);
		printf ("extraneous data discarded.\n");
	}
	if (x_status & XE_BAD_PHASE) {
		PRINT_ADDR(cp);
		printf ("illegal scsi phase (4/5).\n");
	}
	if (x_status & XE_SODL_UNRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA OUT phase.\n");
	}
	if (x_status & XE_SWIDE_OVRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA IN phase.\n");
	}
}

/*
 * Choose the most appropriate CAM status if
 * the IO encountered an extended error.
 */
static int sym_xerr_cam_status(int cam_status, int x_status)
{
	if (x_status) {
		if	(x_status & XE_PARITY_ERR)
			cam_status = CAM_UNCOR_PARITY;
		else if	(x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
			cam_status = CAM_DATA_RUN_ERR;
		else if	(x_status & XE_BAD_PHASE)
			cam_status = CAM_REQ_CMP_ERR;
		else
			cam_status = CAM_REQ_CMP_ERR;
	}
	return cam_status;
}

/*
 * Complete execution of a SCSI command with extended
 * error, SCSI status error, or having been auto-sensed.
 *
 * The SCRIPTS processor is not running there, so we
 * can safely access IO registers and remove JOBs from
 * the START queue.
 * SCRATCHA is assumed to have been loaded with STARTPOS
 * before the SCRIPTS called the C code.
 */
static void sym_complete_error (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	u_int cam_status;
	int i;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;

	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
		printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp,
			cp->host_status, cp->ssss_status, cp->host_flags,
			cp->target, cp->lun);
		MDELAY(100);
	}

	/*
	 * Get CAM command pointer.
	 */
	csio = &cp->cam_ccb->csio;

	/*
	 * Check for extended errors.
	 */
	if (cp->xerr_status) {
		if (sym_verbose)
			sym_print_xerr(cp, cp->xerr_status);
		if (cp->host_status == HS_COMPLETE)
			cp->host_status = HS_COMP_ERR;
	}

	/*
	 * Calculate the residual.
	 */
	csio->sense_resid = 0;
	csio->resid = sym_compute_residual(np, cp);
	if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */
		csio->resid  = 0;	/* throw them away. :)		*/
		cp->sv_resid = 0;
	}

	if (cp->host_flags & HF_SENSE) {		/* Auto sense     */
		csio->scsi_status = cp->sv_scsi_status;	/* Restore status */
		csio->sense_resid = csio->resid;	/* Swap residuals */
		csio->resid       = cp->sv_resid;
		cp->sv_resid	  = 0;
		if (sym_verbose && cp->sv_xerr_status)
			sym_print_xerr(cp, cp->sv_xerr_status);
		if (cp->host_status == HS_COMPLETE &&
		    cp->ssss_status == S_GOOD &&
		    cp->xerr_status == 0) {
			cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR,
							 cp->sv_xerr_status);
			cam_status |= CAM_AUTOSNS_VALID;
			/*
			 * Bounce back the sense data to user and
			 * fix the residual.
			 */
			bzero(&csio->sense_data, csio->sense_len);
			bcopy(cp->sns_bbuf, &csio->sense_data,
			      MIN(csio->sense_len, SYM_SNS_BBUF_LEN));
			csio->sense_resid += csio->sense_len;
			csio->sense_resid -= SYM_SNS_BBUF_LEN;
#if 0
			/*
			 * If the device reports a UNIT ATTENTION condition
			 * due to a RESET condition, we should consider all
			 * disconnect CCBs for this unit as aborted.
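			 * (The check below matches fixed-format sense data:
			 * response code 0x70, sense key UNIT ATTENTION (0x6)
			 * and ASC 0x29, i.e. a reset occurred.)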
			 */
			if (1) {
				u_char *p;
				p  = (u_char *) csio->sense_data;
				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
					sym_clear_tasks(np, CAM_REQ_ABORTED,
							cp->target,cp->lun, -1);
			}
#endif
		}
		else
			cam_status = CAM_AUTOSENSE_FAIL;
	}
	else if (cp->host_status == HS_COMPLETE) {	/* Bad SCSI status */
		csio->scsi_status = cp->ssss_status;
		cam_status = CAM_SCSI_STATUS_ERROR;
	}
	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
		cam_status = CAM_SEL_TIMEOUT;
	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
		cam_status = CAM_UNEXP_BUSFREE;
	else {						/* Extended error */
		if (sym_verbose) {
			PRINT_ADDR(cp);
			printf ("COMMAND FAILED (%x %x %x).\n",
				cp->host_status, cp->ssss_status,
				cp->xerr_status);
		}
		csio->scsi_status = cp->ssss_status;
		/*
		 * Set the most appropriate value for CAM status.
		 */
		cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR,
						 cp->xerr_status);
	}

	/*
	 * Dequeue all queued CCBs for that device
	 * not yet started by SCRIPTS.
	 */
	i = (INL (nc_scratcha) - np->squeue_ba) / 4;
	(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL_DSP (SCRIPTA_BA (np, start));

#ifdef	FreeBSD_Bus_Dma_Abstraction
	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
#endif
	/*
	 * Add this one to the COMP queue.
	 * Complete all those commands with either error
	 * or requeue condition.
	 */
	sym_set_cam_status((union ccb *) csio, cam_status);
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
	sym_flush_comp_queue(np, 0);
}

/*
 * Complete execution of a successful SCSI command.
 *
 * Only successful commands go to the DONE queue,
 * since we need to have the SCRIPTS processor
 * stopped on any error condition.
 * The SCRIPTS processor is running while we are
 * completing successful commands.
 */
static void sym_complete_ok (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	tcb_p tp;
	lcb_p lp;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;
	assert (cp->host_status == HS_COMPLETE);

	/*
	 * Get command, target and lun pointers.
	 */
	csio = &cp->cam_ccb->csio;
	tp = &np->target[cp->target];
	lp = sym_lp(np, tp, cp->lun);

	/*
	 * Assume device discovered on first success.
	 */
	if (!lp)
		sym_set_bit(tp->lun_map, cp->lun);

	/*
	 * If all data have been transferred, given that no
	 * extended error occurred, there is no residual.
	 */
	csio->resid = 0;
	if (cp->phys.head.lastp != cp->phys.head.goalp)
		csio->resid = sym_compute_residual(np, cp);

	/*
	 * Wrong transfer residuals may be worse than just always
	 * returning zero. User can disable this feature from
	 * sym_conf.h. Residual support is enabled by default.
	 */
	if (!SYM_CONF_RESIDUAL_SUPPORT)
		csio->resid  = 0;

#ifdef	FreeBSD_Bus_Dma_Abstraction
	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
#endif
	/*
	 * Set status and complete the command.
	 */
	csio->scsi_status = cp->ssss_status;
	sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP);
	sym_free_ccb (np, cp);
	sym_xpt_done(np, (union ccb *) csio);
}

/*
 * Our timeout handler.
 */
static void sym_timeout1(void *arg)
{
	union ccb *ccb = (union ccb *) arg;
	hcb_p np = ccb->ccb_h.sym_hcb_ptr;

	/*
	 * Check that the CAM CCB is still queued.
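	 * A NULL sym_hcb_ptr presumably means the command completed
	 * before the timeout fired, so there is nothing to abort.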
	 */
	if (!np)
		return;

	switch(ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		(void) sym_abort_scsiio(np, ccb, 1);
		break;
	default:
		break;
	}
}

static void sym_timeout(void *arg)
{
	int s = splcam();
	sym_timeout1(arg);
	splx(s);
}

/*
 * Abort a SCSI IO.
 */
static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out)
{
	ccb_p cp;
	SYM_QUEHEAD *qp;

	/*
	 * Look up our CCB control block.
	 */
	cp = 0;
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp2->cam_ccb == ccb) {
			cp = cp2;
			break;
		}
	}
	if (!cp || cp->host_status == HS_WAIT)
		return -1;

	/*
	 * If a previous abort didn't succeed in time,
	 * perform a BUS reset.
	 */
	if (cp->to_abort) {
		sym_reset_scsi_bus(np, 1);
		return 0;
	}

	/*
	 * Mark the CCB for abort and allow time for it.
	 */
	cp->to_abort = timed_out ? 2 : 1;
	ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb, 10*hz);

	/*
	 * Tell the SCRIPTS processor to stop and synchronize with us.
	 */
	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return 0;
}

/*
 * Reset a SCSI device (all LUNs of a target).
 */
static void sym_reset_dev(hcb_p np, union ccb *ccb)
{
	tcb_p tp;
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	if (ccb_h->target_id   == np->myaddr ||
	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	tp = &np->target[ccb_h->target_id];

	tp->to_reset = 1;
	sym_xpt_done2(np, ccb, CAM_REQ_CMP);

	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return;
}

/*
 * SIM action entry point.
 */
static void sym_action(struct cam_sim *sim, union ccb *ccb)
{
	int s = splcam();
	sym_action1(sim, ccb);
	splx(s);
}

static void sym_action1(struct cam_sim *sim, union ccb *ccb)
{
	hcb_p	np;
	tcb_p	tp;
	lcb_p	lp;
	ccb_p	cp;
	int	tmp;
	u_char	idmsg, *msgptr;
	u_int	msglen;
	struct	ccb_scsiio *csio;
	struct	ccb_hdr  *ccb_h;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n"));

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	/*
	 * The common case is SCSI IO.
	 * We deal with other ones elsewhere.
	 */
	if (ccb->ccb_h.func_code != XPT_SCSI_IO) {
		sym_action2(sim, ccb);
		return;
	}
	csio  = &ccb->csio;
	ccb_h = &csio->ccb_h;

	/*
	 * Work around races.
	 */
	if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		xpt_done(ccb);
		return;
	}

	/*
	 * Minimal checks, so that we will not
	 * go outside our tables.
	 */
	if (ccb_h->target_id   == np->myaddr ||
	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	/*
	 * Retrieve the target and lun descriptors.
	 */
	tp = &np->target[ccb_h->target_id];
	lp = sym_lp(np, tp, ccb_h->target_lun);

	/*
	 * Complete the 1st INQUIRY command with error
	 * condition if the device is flagged NOSCAN
	 * at BOOT in the NVRAM. This may speed up
	 * the boot and maintain coherency with BIOS
	 * device numbering. Clearing the flag allows
	 * user to rescan skipped devices later.
	 * We also return error for devices not flagged
	 * for SCAN LUNS in the NVRAM since some mono-lun
	 * devices behave badly when asked for some non
	 * zero LUN. Btw, this is an absolute hack.:-)
	 */
	if (!(ccb_h->flags & CAM_CDB_PHYS) &&
	    (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ?
		  csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) {
		if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
		    ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) &&
		     ccb_h->target_lun != 0)) {
			tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
			sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
			return;
		}
	}

	/*
	 * Get a control block for this IO.
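	 * The CAM_TAG_ACTION_VALID flag tells us whether the IO
	 * wants a tag; sym_get_ccb() then allocates tag resources.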
*/ tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0); cp = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp); if (!cp) { sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL); return; } /* * Keep track of the IO in our CCB. */ cp->cam_ccb = ccb; /* * Build the IDENTIFY message. */ idmsg = M_IDENTIFY | cp->lun; if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED))) idmsg |= 0x40; msgptr = cp->scsi_smsg; msglen = 0; msgptr[msglen++] = idmsg; /* * Build the tag message if present. */ if (cp->tag != NO_TAG) { u_char order = csio->tag_action; switch(order) { case M_ORDERED_TAG: break; case M_HEAD_TAG: break; default: order = M_SIMPLE_TAG; } msgptr[msglen++] = order; /* * For less than 128 tags, actual tags are numbered * 1,3,5,..2*MAXTAGS+1,since we may have to deal * with devices that have problems with #TAG 0 or too * great #TAG numbers. For more tags (up to 256), * we use directly our tag number. */ #if SYM_CONF_MAX_TASK > (512/4) msgptr[msglen++] = cp->tag; #else msgptr[msglen++] = (cp->tag << 1) + 1; #endif } /* * Build a negotiation message if needed. * (nego_status is filled by sym_prepare_nego()) */ cp->nego_status = 0; if (tp->tinfo.current.width != tp->tinfo.goal.width || tp->tinfo.current.period != tp->tinfo.goal.period || tp->tinfo.current.offset != tp->tinfo.goal.offset || tp->tinfo.current.options != tp->tinfo.goal.options) { if (!tp->nego_cp && lp) msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen); } /* * Fill in our ccb */ /* * Startqueue */ cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA (np, resel_dsa)); /* * select */ cp->phys.select.sel_id = cp->target; cp->phys.select.sel_scntl3 = tp->head.wval; cp->phys.select.sel_sxfer = tp->head.sval; cp->phys.select.sel_scntl4 = tp->head.uval; /* * message */ cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg)); cp->phys.smsg.size = cpu_to_scr(msglen); /* * command */ if (sym_setup_cdb(np, csio, cp) < 0) { sym_free_ccb(np, cp); sym_xpt_done(np, ccb); return; } /* * status */ #if 0 /* Provision */ cp->actualquirks = tp->quirks; #endif cp->actualquirks = SYM_QUIRK_AUTOSAVE; cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; cp->ssss_status = S_ILLEGAL; cp->xerr_status = 0; cp->host_flags = 0; cp->extra_bytes = 0; /* * extreme data pointer. * shall be positive, so -1 is lower than lowest.:) */ cp->ext_sg = -1; cp->ext_ofs = 0; /* * Build the data descriptor block * and start the IO. */ sym_setup_data_and_start(np, csio, cp); } /* * Setup buffers and pointers that address the CDB. * I bet, physical CDBs will never be used on the planet, * since they can be bounced without significant overhead. */ static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) { struct ccb_hdr *ccb_h; u32 cmd_ba; int cmd_len; ccb_h = &csio->ccb_h; /* * CDB is 16 bytes max. 
*/ if (csio->cdb_len > sizeof(cp->cdb_buf)) { sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); return -1; } cmd_len = csio->cdb_len; if (ccb_h->flags & CAM_CDB_POINTER) { /* CDB is a pointer */ if (!(ccb_h->flags & CAM_CDB_PHYS)) { /* CDB pointer is virtual */ bcopy(csio->cdb_io.cdb_ptr, cp->cdb_buf, cmd_len); cmd_ba = CCB_BA (cp, cdb_buf[0]); } else { /* CDB pointer is physical */ #if 0 cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff; #else sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); return -1; #endif } } else { /* CDB is in the CAM ccb (buffer) */ bcopy(csio->cdb_io.cdb_bytes, cp->cdb_buf, cmd_len); cmd_ba = CCB_BA (cp, cdb_buf[0]); } cp->phys.cmd.addr = cpu_to_scr(cmd_ba); cp->phys.cmd.size = cpu_to_scr(cmd_len); return 0; } /* * Set up data pointers used by SCRIPTS. */ static void __inline sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir) { u32 lastp, goalp; /* * No segments means no data. */ if (!cp->segments) dir = CAM_DIR_NONE; /* * Set the data pointer. */ switch(dir) { case CAM_DIR_OUT: goalp = SCRIPTA_BA (np, data_out2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case CAM_DIR_IN: cp->host_flags |= HF_DATA_IN; goalp = SCRIPTA_BA (np, data_in2) + 8; lastp = goalp - 8 - (cp->segments * (2*4)); break; case CAM_DIR_NONE: default: lastp = goalp = SCRIPTB_BA (np, no_data); break; } cp->phys.head.lastp = cpu_to_scr(lastp); cp->phys.head.goalp = cpu_to_scr(goalp); cp->phys.head.savep = cpu_to_scr(lastp); cp->startp = cp->phys.head.savep; } #ifdef FreeBSD_Bus_Dma_Abstraction /* * Call back routine for the DMA map service. * If bounce buffers are used (why ?), we may sleep and then * be called there in another context. */ static void sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error) { ccb_p cp; hcb_p np; union ccb *ccb; int s; s = splcam(); cp = (ccb_p) arg; ccb = cp->cam_ccb; np = (hcb_p) cp->arg; /* * Deal with weird races. */ if (sym_get_cam_status(ccb) != CAM_REQ_INPROG) goto out_abort; /* * Deal with weird errors. */ if (error) { cp->dmamapped = 0; sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED); goto out_abort; } /* * Build the data descriptor for the chip. */ if (nsegs) { int retv; /* 896 rev 1 requires to be careful about boundaries */ if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1) retv = sym_scatter_sg_physical(np, cp, psegs, nsegs); else retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs); if (retv < 0) { sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG); goto out_abort; } } /* * Synchronize the DMA map only if we have * actually mapped the data. */ if (cp->dmamapped) { bus_dmamap_sync(np->data_dmat, cp->dmamap, (bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); } /* * Set host status to busy state. * May have been set back to HS_WAIT to avoid a race. */ cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; /* * Set data pointers. */ sym_setup_data_pointers(np, cp, (ccb->ccb_h.flags & CAM_DIR_MASK)); /* * Enqueue this IO in our pending queue. */ sym_enqueue_cam_ccb(np, ccb); /* * When `#ifed 1', the code below makes the driver * panic on the first attempt to write to a SCSI device. * It is the first test we want to do after a driver * change that does not seem obviously safe. :) */ #if 0 switch (cp->cdb_buf[0]) { case 0x0A: case 0x2A: case 0xAA: panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n"); MDELAY(10000); break; default: break; } #endif /* * Activate this job. 
*/ sym_put_start_queue(np, cp); out: splx(s); return; out_abort: sym_free_ccb(np, cp); sym_xpt_done(np, ccb); goto out; } /* * How complex it gets to deal with the data in CAM. * The Bus Dma stuff makes things still more complex. */ static void sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) { struct ccb_hdr *ccb_h; int dir, retv; ccb_h = &csio->ccb_h; /* * Now deal with the data. */ cp->data_len = csio->dxfer_len; cp->arg = np; /* * No direction means no data. */ dir = (ccb_h->flags & CAM_DIR_MASK); if (dir == CAM_DIR_NONE) { sym_execute_ccb(cp, NULL, 0, 0); return; } if (!(ccb_h->flags & CAM_SCATTER_VALID)) { /* Single buffer */ if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Buffer is virtual */ int s; cp->dmamapped = (dir == CAM_DIR_IN) ? SYM_DMA_READ : SYM_DMA_WRITE; s = splsoftvm(); retv = bus_dmamap_load(np->data_dmat, cp->dmamap, csio->data_ptr, csio->dxfer_len, sym_execute_ccb, cp, 0); if (retv == EINPROGRESS) { cp->host_status = HS_WAIT; xpt_freeze_simq(np->sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; } splx(s); } else { /* Buffer is physical */ struct bus_dma_segment seg; seg.ds_addr = (bus_addr_t) csio->data_ptr; sym_execute_ccb(cp, &seg, 1, 0); } } else { /* Scatter/gather list */ struct bus_dma_segment *segs; if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) { /* The SG list pointer is physical */ sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); goto out_abort; } if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* SG buffer pointers are virtual */ sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); goto out_abort; } /* SG buffer pointers are physical */ segs = (struct bus_dma_segment *)csio->data_ptr; sym_execute_ccb(cp, segs, csio->sglist_cnt, 0); } return; out_abort: sym_free_ccb(np, cp); sym_xpt_done(np, (union ccb *) csio); } /* * Move the scatter list to our data block. */ static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) { struct sym_tblmove *data; bus_dma_segment_t *psegs2; if (nsegs > SYM_CONF_MAX_SG) return -1; data = &cp->phys.data[SYM_CONF_MAX_SG-1]; psegs2 = &psegs[nsegs-1]; cp->segments = nsegs; while (1) { data->addr = cpu_to_scr(psegs2->ds_addr); data->size = cpu_to_scr(psegs2->ds_len); if (DEBUG_FLAGS & DEBUG_SCATTER) { printf ("%s scatter: paddr=%lx len=%ld\n", sym_name(np), (long) psegs2->ds_addr, (long) psegs2->ds_len); } if (psegs2 != psegs) { --data; --psegs2; continue; } break; } return 0; } #else /* FreeBSD_Bus_Dma_Abstraction */ /* * How complex it gets to deal with the data in CAM. * Variant without the Bus Dma Abstraction option. */ static void sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp) { struct ccb_hdr *ccb_h; int dir, retv; ccb_h = &csio->ccb_h; /* * Now deal with the data. */ cp->data_len = 0; cp->segments = 0; /* * No direction means no data. 
*/ dir = (ccb_h->flags & CAM_DIR_MASK); if (dir == CAM_DIR_NONE) goto end_scatter; if (!(ccb_h->flags & CAM_SCATTER_VALID)) { /* Single buffer */ if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Buffer is virtual */ retv = sym_scatter_virtual(np, cp, (vm_offset_t) csio->data_ptr, (vm_size_t) csio->dxfer_len); } else { /* Buffer is physical */ retv = sym_scatter_physical(np, cp, (vm_offset_t) csio->data_ptr, (vm_size_t) csio->dxfer_len); } } else { /* Scatter/gather list */ int nsegs; struct bus_dma_segment *segs; segs = (struct bus_dma_segment *)csio->data_ptr; nsegs = csio->sglist_cnt; if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) { /* The SG list pointer is physical */ sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID); goto out_abort; } if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* SG buffer pointers are virtual */ retv = sym_scatter_sg_virtual(np, cp, segs, nsegs); } else { /* SG buffer pointers are physical */ retv = sym_scatter_sg_physical(np, cp, segs, nsegs); } } if (retv < 0) { sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG); goto out_abort; } end_scatter: /* * Set data pointers. */ sym_setup_data_pointers(np, cp, dir); /* * Enqueue this IO in our pending queue. */ sym_enqueue_cam_ccb(np, (union ccb *) csio); /* * Activate this job. */ sym_put_start_queue(np, cp); /* * Command is successfully queued. */ return; out_abort: sym_free_ccb(np, cp); sym_xpt_done(np, (union ccb *) csio); } /* * Scatter a virtual buffer into bus addressable chunks. */ static int sym_scatter_virtual(hcb_p np, ccb_p cp, vm_offset_t vaddr, vm_size_t len) { u_long pe, pn; u_long n, k; int s; cp->data_len += len; pe = vaddr + len; n = len; s = SYM_CONF_MAX_SG - 1 - cp->segments; while (n && s >= 0) { pn = (pe - 1) & ~PAGE_MASK; k = pe - pn; if (k > n) { k = n; pn = pe - n; } if (DEBUG_FLAGS & DEBUG_SCATTER) { printf ("%s scatter: va=%lx pa=%lx siz=%ld\n", sym_name(np), pn, (u_long) vtobus(pn), k); } cp->phys.data[s].addr = cpu_to_scr(vtobus(pn)); cp->phys.data[s].size = cpu_to_scr(k); pe = pn; n -= k; --s; } cp->segments = SYM_CONF_MAX_SG - 1 - s; return n ? -1 : 0; } /* * Scatter a SG list with virtual addresses into bus addressable chunks. */ static int sym_scatter_sg_virtual(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) { int i, retv = 0; for (i = nsegs - 1 ; i >= 0 ; --i) { retv = sym_scatter_virtual(np, cp, psegs[i].ds_addr, psegs[i].ds_len); if (retv < 0) break; } return retv; } /* * Scatter a physical buffer into bus addressable chunks. */ static int sym_scatter_physical(hcb_p np, ccb_p cp, vm_offset_t paddr, vm_size_t len) { struct bus_dma_segment seg; seg.ds_addr = paddr; seg.ds_len = len; return sym_scatter_sg_physical(np, cp, &seg, 1); } #endif /* FreeBSD_Bus_Dma_Abstraction */ /* * Scatter a SG list with physical addresses into bus addressable chunks. * We need to ensure 16MB boundaries not to be crossed during DMA of * each segment, due to some chips being flawed. 
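 * For instance, a segment covering 0x00fffff0-0x0100000f is split
 * at 0x01000000 into two chunks; the loop below works backwards
 * from the end of each segment, cutting at 16MB multiples.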
*/ #define BOUND_MASK ((1UL<<24)-1) static int sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs) { u_long ps, pe, pn; u_long k; int s, t; #ifndef FreeBSD_Bus_Dma_Abstraction s = SYM_CONF_MAX_SG - 1 - cp->segments; #else s = SYM_CONF_MAX_SG - 1; #endif t = nsegs - 1; ps = psegs[t].ds_addr; pe = ps + psegs[t].ds_len; while (s >= 0) { pn = (pe - 1) & ~BOUND_MASK; if (pn <= ps) pn = ps; k = pe - pn; if (DEBUG_FLAGS & DEBUG_SCATTER) { printf ("%s scatter: paddr=%lx len=%ld\n", sym_name(np), pn, k); } cp->phys.data[s].addr = cpu_to_scr(pn); cp->phys.data[s].size = cpu_to_scr(k); #ifndef FreeBSD_Bus_Dma_Abstraction cp->data_len += k; #endif --s; if (pn == ps) { if (--t < 0) break; ps = psegs[t].ds_addr; pe = ps + psegs[t].ds_len; } else pe = pn; } cp->segments = SYM_CONF_MAX_SG - 1 - s; return t >= 0 ? -1 : 0; } #undef BOUND_MASK /* * SIM action for non performance critical stuff. */ static void sym_action2(struct cam_sim *sim, union ccb *ccb) { hcb_p np; tcb_p tp; lcb_p lp; struct ccb_hdr *ccb_h; /* * Retrieve our controller data structure. */ np = (hcb_p) cam_sim_softc(sim); ccb_h = &ccb->ccb_h; switch (ccb_h->func_code) { case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; cts = &ccb->cts; tp = &np->target[ccb_h->target_id]; /* * Update SPI transport settings in TARGET control block. * Update SCSI device settings in LUN control block. */ lp = sym_lp(np, tp, ccb_h->target_lun); #ifdef FreeBSD_New_Tran_Settings if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { #else if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { #endif sym_update_trans(np, tp, &tp->tinfo.goal, cts); if (lp) sym_update_dflags(np, &lp->current_flags, cts); } #ifdef FreeBSD_New_Tran_Settings if (cts->type == CTS_TYPE_USER_SETTINGS) { #else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { #endif sym_update_trans(np, tp, &tp->tinfo.user, cts); if (lp) sym_update_dflags(np, &lp->user_flags, cts); } sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct sym_trans *tip; u_char dflags; cts = &ccb->cts; tp = &np->target[ccb_h->target_id]; lp = sym_lp(np, tp, ccb_h->target_lun); #ifdef FreeBSD_New_Tran_Settings #define cts__scsi (&cts->proto_specific.scsi) #define cts__spi (&cts->xport_specific.spi) if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tip = &tp->tinfo.current; dflags = lp ? lp->current_flags : 0; } else { tip = &tp->tinfo.user; dflags = lp ? lp->user_flags : tp->usrflags; } cts->protocol = PROTO_SCSI; cts->transport = XPORT_SPI; cts->protocol_version = tip->scsi_version; cts->transport_version = tip->spi_version; cts__spi->sync_period = tip->period; cts__spi->sync_offset = tip->offset; cts__spi->bus_width = tip->width; cts__spi->ppr_options = tip->options; cts__spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_PPR_OPTIONS; cts__spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (dflags & SYM_DISC_ENABLED) cts__spi->flags |= CTS_SPI_FLAGS_DISC_ENB; cts__spi->valid |= CTS_SPI_VALID_DISC; cts__scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; if (dflags & SYM_TAGS_ENABLED) cts__scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; cts__scsi->valid |= CTS_SCSI_VALID_TQ; #undef cts__spi #undef cts__scsi #else if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { tip = &tp->tinfo.current; dflags = lp ? lp->current_flags : 0; } else { tip = &tp->tinfo.user; dflags = lp ? 
lp->user_flags : tp->usrflags; } cts->sync_period = tip->period; cts->sync_offset = tip->offset; cts->bus_width = tip->width; cts->valid = CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID | CCB_TRANS_BUS_WIDTH_VALID; cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); if (dflags & SYM_DISC_ENABLED) cts->flags |= CCB_TRANS_DISC_ENB; if (dflags & SYM_TAGS_ENABLED) cts->flags |= CCB_TRANS_TAG_ENB; cts->valid |= CCB_TRANS_DISC_VALID; cts->valid |= CCB_TRANS_TQ_VALID; #endif sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; u32 size_mb; u32 secs_per_cylinder; int extended; /* * Silly DOS geometry. */ ccg = &ccb->ccg; size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); extended = 1; if (size_mb > 1024 && extended) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; } case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE; if ((np->features & FE_WIDE) != 0) cpi->hba_inquiry |= PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = 0; if (np->usrflags & SYM_SCAN_TARGETS_HILO) cpi->hba_misc |= PIM_SCANHILO; if (np->usrflags & SYM_AVOID_BUS_RESET) cpi->hba_misc |= PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = (np->features & FE_WIDE) ? 15 : 7; /* Semantic problem:)LUN number max = max number of LUNs - 1 */ cpi->max_lun = SYM_CONF_MAX_LUN-1; if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN) cpi->max_lun = SYM_SETUP_MAX_LUN-1; cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = np->myaddr; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); #ifdef FreeBSD_New_Tran_Settings cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST; if (np->features & FE_ULTRA3) { cpi->transport_version = 3; cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST; } #endif sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; } case XPT_ABORT: { union ccb *abort_ccb = ccb->cab.abort_ccb; switch(abort_ccb->ccb_h.func_code) { case XPT_SCSI_IO: if (sym_abort_scsiio(np, abort_ccb, 0) == 0) { sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; } default: sym_xpt_done2(np, ccb, CAM_UA_ABORT); break; } break; } case XPT_RESET_DEV: { sym_reset_dev(np, ccb); break; } case XPT_RESET_BUS: { sym_reset_scsi_bus(np, 0); if (sym_verbose) { xpt_print_path(np->path); printf("SCSI BUS reset delivered.\n"); } sym_init (np, 1); sym_xpt_done2(np, ccb, CAM_REQ_CMP); break; } case XPT_ACCEPT_TARGET_IO: case XPT_CONT_TARGET_IO: case XPT_EN_LUN: case XPT_NOTIFY_ACK: case XPT_IMMED_NOTIFY: case XPT_TERM_IO: default: sym_xpt_done2(np, ccb, CAM_REQ_INVALID); break; } } /* * Asynchronous notification handler. 
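 * Only AC_LOST_DEVICE is handled: transfer settings for the
 * lost target are reset to async, narrow, default options.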
*/ static void sym_async(void *cb_arg, u32 code, struct cam_path *path, void *arg) { hcb_p np; struct cam_sim *sim; u_int tn; tcb_p tp; int s; s = splcam(); sim = (struct cam_sim *) cb_arg; np = (hcb_p) cam_sim_softc(sim); switch (code) { case AC_LOST_DEVICE: tn = xpt_path_target_id(path); if (tn >= SYM_CONF_MAX_TARGET) break; tp = &np->target[tn]; tp->to_reset = 0; tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; tp->tinfo.current.period = tp->tinfo.goal.period = 0; tp->tinfo.current.offset = tp->tinfo.goal.offset = 0; tp->tinfo.current.width = tp->tinfo.goal.width = BUS_8_BIT; tp->tinfo.current.options = tp->tinfo.goal.options = 0; break; default: break; } splx(s); } /* * Update transfer settings of a target. */ static void sym_update_trans(hcb_p np, tcb_p tp, struct sym_trans *tip, struct ccb_trans_settings *cts) { /* * Update the infos. */ #ifdef FreeBSD_New_Tran_Settings #define cts__spi (&cts->xport_specific.spi) if ((cts__spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) tip->width = cts__spi->bus_width; if ((cts__spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) tip->offset = cts__spi->sync_offset; if ((cts__spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) tip->period = cts__spi->sync_period; if ((cts__spi->valid & CTS_SPI_VALID_PPR_OPTIONS) != 0) tip->options = (cts__spi->ppr_options & PPR_OPT_DT); if (cts->protocol_version != PROTO_VERSION_UNSPECIFIED && cts->protocol_version != PROTO_VERSION_UNKNOWN) tip->scsi_version = cts->protocol_version; if (cts->transport_version != XPORT_VERSION_UNSPECIFIED && cts->transport_version != XPORT_VERSION_UNKNOWN) tip->spi_version = cts->transport_version; #undef cts__spi #else if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) tip->width = cts->bus_width; if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0) tip->offset = cts->sync_offset; if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) tip->period = cts->sync_period; #endif /* * Scale against driver configuration limits. */ if (tip->width > SYM_SETUP_MAX_WIDE) tip->width = SYM_SETUP_MAX_WIDE; if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset = SYM_SETUP_MAX_OFFS; if (tip->period < SYM_SETUP_MIN_SYNC) tip->period = SYM_SETUP_MIN_SYNC; /* * Scale against actual controller BUS width. */ if (tip->width > np->maxwide) tip->width = np->maxwide; #ifdef FreeBSD_New_Tran_Settings /* * Only accept DT if controller supports and SYNC/WIDE asked. */ if (!((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) || !(tip->width == BUS_16_BIT && tip->offset)) { tip->options &= ~PPR_OPT_DT; } #else /* * For now, only assume DT if period <= 9, BUS 16 and offset != 0. */ tip->options = 0; if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3) && tip->period <= 9 && tip->width == BUS_16_BIT && tip->offset) { tip->options |= PPR_OPT_DT; } #endif /* * Scale period factor and offset against controller limits. */ if (tip->options & PPR_OPT_DT) { if (tip->period < np->minsync_dt) tip->period = np->minsync_dt; if (tip->period > np->maxsync_dt) tip->period = np->maxsync_dt; if (tip->offset > np->maxoffs_dt) tip->offset = np->maxoffs_dt; } else { if (tip->period < np->minsync) tip->period = np->minsync; if (tip->period > np->maxsync) tip->period = np->maxsync; if (tip->offset > np->maxoffs) tip->offset = np->maxoffs; } } /* * Update flags for a device (logical unit). 
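 * The flags are SYM_DISC_ENABLED and SYM_TAGS_ENABLED, mapped
 * from the CAM disconnect and tag queuing bits when valid.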
*/ static void sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts) { #ifdef FreeBSD_New_Tran_Settings #define cts__scsi (&cts->proto_specific.scsi) #define cts__spi (&cts->xport_specific.spi) if ((cts__spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((cts__spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *flags |= SYM_DISC_ENABLED; else *flags &= ~SYM_DISC_ENABLED; } if ((cts__scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((cts__scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *flags |= SYM_TAGS_ENABLED; else *flags &= ~SYM_TAGS_ENABLED; } #undef cts__spi #undef cts__scsi #else if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) *flags |= SYM_DISC_ENABLED; else *flags &= ~SYM_DISC_ENABLED; } if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) *flags |= SYM_TAGS_ENABLED; else *flags &= ~SYM_TAGS_ENABLED; } #endif } /*============= DRIVER INITIALISATION ==================*/ #ifdef FreeBSD_Bus_Io_Abstraction static device_method_t sym_pci_methods[] = { DEVMETHOD(device_probe, sym_pci_probe), DEVMETHOD(device_attach, sym_pci_attach), { 0, 0 } }; static driver_t sym_pci_driver = { "sym", sym_pci_methods, sizeof(struct sym_hcb) }; static devclass_t sym_devclass; DRIVER_MODULE(sym, pci, sym_pci_driver, sym_devclass, 0, 0); #else /* Pre-FreeBSD_Bus_Io_Abstraction */ static u_long sym_unit; static struct pci_device sym_pci_driver = { "sym", sym_pci_probe, sym_pci_attach, &sym_unit, NULL }; #if __FreeBSD_version >= 400000 COMPAT_PCI_DRIVER (sym, sym_pci_driver); #else DATA_SET (pcidevice_set, sym_pci_driver); #endif #endif /* FreeBSD_Bus_Io_Abstraction */ static struct sym_pci_chip sym_pci_dev_table[] = { {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 64, FE_ERL} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, FE_BOF} , #else {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} , #endif {PCI_ID_SYM53C815, 0xff, "815", 4, 8, 4, 64, FE_BOF|FE_ERL} , {PCI_ID_SYM53C825, 0x0f, "825", 6, 8, 4, 64, FE_WIDE|FE_BOF|FE_ERL|FE_DIFF} , {PCI_ID_SYM53C825, 0xff, "825a", 6, 8, 4, 2, FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} , {PCI_ID_SYM53C860, 0xff, "860", 4, 8, 5, 1, FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} , {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2, FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_DIFF} , #ifdef SYM_DEBUG_GENERIC_SUPPORT {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS| FE_RAM|FE_LCKFRQ} , #else {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_LCKFRQ} , #endif {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} , {PCI_ID_LSI53C1010, 0x00, "1010-33", 6, 31, 
7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10} , {PCI_ID_LSI53C1010, 0xff, "1010-33", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| FE_C10|FE_U3EN} , {PCI_ID_LSI53C1010_2, 0xff, "1010-66", 6, 31, 7, 8, FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC| FE_C10|FE_U3EN} , {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4, FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| FE_RAM|FE_IO256|FE_LEDC} }; #define sym_pci_num_devs \ (sizeof(sym_pci_dev_table) / sizeof(sym_pci_dev_table[0])) /* * Look up the chip table. * * Return a pointer to the chip entry if found, * zero otherwise. */ static struct sym_pci_chip * #ifdef FreeBSD_Bus_Io_Abstraction sym_find_pci_chip(device_t dev) #else sym_find_pci_chip(pcici_t pci_tag) #endif { struct sym_pci_chip *chip; int i; u_short device_id; u_char revision; #ifdef FreeBSD_Bus_Io_Abstraction if (pci_get_vendor(dev) != PCI_VENDOR_NCR) return 0; device_id = pci_get_device(dev); revision = pci_get_revid(dev); #else if (pci_cfgread(pci_tag, PCIR_VENDOR, 2) != PCI_VENDOR_NCR) return 0; device_id = pci_cfgread(pci_tag, PCIR_DEVICE, 2); revision = pci_cfgread(pci_tag, PCIR_REVID, 1); #endif for (i = 0; i < sym_pci_num_devs; i++) { chip = &sym_pci_dev_table[i]; if (device_id != chip->device_id) continue; if (revision > chip->revision_id) continue; return chip; } return 0; } /* * Tell upper layer if the chip is supported. */ #ifdef FreeBSD_Bus_Io_Abstraction static int sym_pci_probe(device_t dev) { struct sym_pci_chip *chip; chip = sym_find_pci_chip(dev); if (chip && sym_find_firmware(chip)) { device_set_desc(dev, chip->name); return (chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)? -2000 : 0; } return ENXIO; } #else /* Pre-FreeBSD_Bus_Io_Abstraction */ static const char * sym_pci_probe(pcici_t pci_tag, pcidi_t type) { struct sym_pci_chip *chip; chip = sym_find_pci_chip(pci_tag); if (chip && sym_find_firmware(chip)) { #if NNCR > 0 /* Only claim chips we are allowed to take precedence over the ncr */ if (!(chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)) #else if (1) #endif return chip->name; } return 0; } #endif /* * Attach a sym53c8xx device. */ #ifdef FreeBSD_Bus_Io_Abstraction static int sym_pci_attach(device_t dev) #else static void sym_pci_attach(pcici_t pci_tag, int unit) { int err = sym_pci_attach2(pci_tag, unit); if (err) printf("sym: failed to attach unit %d - err=%d.\n", unit, err); } static int sym_pci_attach2(pcici_t pci_tag, int unit) #endif { struct sym_pci_chip *chip; u_short command; u_char cachelnsz; struct sym_hcb *np = 0; struct sym_nvram nvram; struct sym_fw *fw = 0; int i; #ifdef FreeBSD_Bus_Dma_Abstraction bus_dma_tag_t bus_dmat; /* * I expected to be told about a parent * DMA tag, but didn't find any. */ bus_dmat = NULL; #endif /* * Only probed devices should be attached. * We just enjoy being paranoid. :) */ #ifdef FreeBSD_Bus_Io_Abstraction chip = sym_find_pci_chip(dev); #else chip = sym_find_pci_chip(pci_tag); #endif if (chip == NULL || (fw = sym_find_firmware(chip)) == NULL) return (ENXIO); /* * Allocate immediately the host control block, * since we are only expecting to succeed. :) * We keep track in the HCB of all the resources that * are to be released on error. 
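* * (Aside on sym_find_pci_chip() above: the table entries for one device ID are ordered by ascending revision_id, and the first entry whose revision_id is not below the chip's revision wins; e.g. a hypothetical 825 chip at revision 0x10 skips the 0x0f "825" entry and matches the 0xff "825a" entry.)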
*/ #ifdef FreeBSD_Bus_Dma_Abstraction np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB"); if (np) np->bus_dmat = bus_dmat; else goto attach_failed; #else np = sym_calloc_dma(sizeof(*np), "HCB"); if (!np) goto attach_failed; #endif /* * Copy some useful infos to the HCB. */ np->hcb_ba = vtobus(np); np->verbose = bootverbose; #ifdef FreeBSD_Bus_Io_Abstraction np->device = dev; np->unit = device_get_unit(dev); np->device_id = pci_get_device(dev); np->revision_id = pci_get_revid(dev); #else np->pci_tag = pci_tag; np->unit = unit; np->device_id = pci_cfgread(pci_tag, PCIR_DEVICE, 2); np->revision_id = pci_cfgread(pci_tag, PCIR_REVID, 1); #endif np->features = chip->features; np->clock_divn = chip->nr_divisor; np->maxoffs = chip->offset_max; np->maxburst = chip->burst_max; np->scripta_sz = fw->a_size; np->scriptb_sz = fw->b_size; np->fw_setup = fw->setup; np->fw_patch = fw->patch; np->fw_name = fw->name; /* * Edit its name. */ snprintf(np->inst_name, sizeof(np->inst_name), "sym%d", np->unit); /* * Initialize the CCB free and busy queues. */ sym_que_init(&np->free_ccbq); sym_que_init(&np->busy_ccbq); sym_que_init(&np->comp_ccbq); sym_que_init(&np->cam_ccbq); /* * Allocate a tag for the DMA of user data. */ #ifdef FreeBSD_Bus_Dma_Abstraction if (bus_dma_tag_create(np->bus_dmat, 1, (1<<24), BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, SYM_CONF_MAX_SG, (1<<24), 0, &np->data_dmat)) { device_printf(dev, "failed to create DMA tag.\n"); goto attach_failed; } #endif /* * Read and apply some fix-ups to the PCI COMMAND * register. We want the chip to be enabled for: * - BUS mastering * - PCI parity checking (reporting would also be fine) * - Write And Invalidate. */ #ifdef FreeBSD_Bus_Io_Abstraction command = pci_read_config(dev, PCIR_COMMAND, 2); #else command = pci_cfgread(pci_tag, PCIR_COMMAND, 2); #endif command |= PCIM_CMD_BUSMASTEREN; command |= PCIM_CMD_PERRESPEN; command |= /* PCIM_CMD_MWIEN */ 0x0010; #ifdef FreeBSD_Bus_Io_Abstraction pci_write_config(dev, PCIR_COMMAND, command, 2); #else pci_cfgwrite(pci_tag, PCIR_COMMAND, command, 2); #endif /* * Let the device know about the cache line size, * if it doesn't yet. */ #ifdef FreeBSD_Bus_Io_Abstraction cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); #else cachelnsz = pci_cfgread(pci_tag, PCIR_CACHELNSZ, 1); #endif if (!cachelnsz) { cachelnsz = 8; #ifdef FreeBSD_Bus_Io_Abstraction pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1); #else pci_cfgwrite(pci_tag, PCIR_CACHELNSZ, cachelnsz, 1); #endif } /* * Alloc/get/map/retrieve everything that deals with MMIO. */ #ifdef FreeBSD_Bus_Io_Abstraction if ((command & PCIM_CMD_MEMEN) != 0) { int regs_id = SYM_PCI_MMIO; np->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &regs_id, 0, ~0, 1, RF_ACTIVE); } if (!np->mmio_res) { device_printf(dev, "failed to allocate MMIO resources\n"); goto attach_failed; } np->mmio_bsh = rman_get_bushandle(np->mmio_res); np->mmio_tag = rman_get_bustag(np->mmio_res); np->mmio_pa = rman_get_start(np->mmio_res); np->mmio_va = (vm_offset_t) rman_get_virtual(np->mmio_res); np->mmio_ba = np->mmio_pa; #else if ((command & PCIM_CMD_MEMEN) != 0) { vm_offset_t vaddr, paddr; if (!pci_map_mem(pci_tag, SYM_PCI_MMIO, &vaddr, &paddr)) { printf("%s: failed to map MMIO window\n", sym_name(np)); goto attach_failed; } np->mmio_va = vaddr; np->mmio_pa = paddr; np->mmio_ba = paddr; } #endif /* * Allocate the IRQ.
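* * (Aside on the data DMA tag created above: it requests a 16 MB (1<<24) boundary, so the busdma load code will split any S/G entry that would cross a 16 MB line; e.g. a buffer at bus address 0x00fff000 with length 0x3000 becomes two segments, 0x1000 bytes up to 0x01000000 and 0x2000 bytes after it.)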
*/ #ifdef FreeBSD_Bus_Io_Abstraction i = 0; np->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &i, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); if (!np->irq_res) { device_printf(dev, "failed to allocate IRQ resource\n"); goto attach_failed; } #endif #ifdef SYM_CONF_IOMAPPED /* * The user wants us to use normal IO with PCI. * Alloc/get/map/retrieve everything that deals with IO. */ #ifdef FreeBSD_Bus_Io_Abstraction if ((command & PCI_COMMAND_IO_ENABLE) != 0) { int regs_id = SYM_PCI_IO; np->io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &regs_id, 0, ~0, 1, RF_ACTIVE); } if (!np->io_res) { device_printf(dev, "failed to allocate IO resources\n"); goto attach_failed; } np->io_bsh = rman_get_bushandle(np->io_res); np->io_tag = rman_get_bustag(np->io_res); np->io_port = rman_get_start(np->io_res); #else if ((command & PCI_COMMAND_IO_ENABLE) != 0) { pci_port_t io_port; if (!pci_map_port (pci_tag, SYM_PCI_IO, &io_port)) { printf("%s: failed to map IO window\n", sym_name(np)); goto attach_failed; } np->io_port = io_port; } #endif #endif /* SYM_CONF_IOMAPPED */ /* * If the chip has RAM, * alloc/get/map/retrieve the corresponding resources. */ if ((np->features & (FE_RAM|FE_RAM8K)) && (command & PCIM_CMD_MEMEN) != 0) { #ifdef FreeBSD_Bus_Io_Abstraction int regs_id = SYM_PCI_RAM; if (np->features & FE_64BIT) regs_id = SYM_PCI_RAM64; np->ram_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &regs_id, 0, ~0, 1, RF_ACTIVE); if (!np->ram_res) { device_printf(dev,"failed to allocate RAM resources\n"); goto attach_failed; } np->ram_id = regs_id; np->ram_bsh = rman_get_bushandle(np->ram_res); np->ram_tag = rman_get_bustag(np->ram_res); np->ram_pa = rman_get_start(np->ram_res); np->ram_va = (vm_offset_t) rman_get_virtual(np->ram_res); np->ram_ba = np->ram_pa; #else vm_offset_t vaddr, paddr; int regs_id = SYM_PCI_RAM; if (np->features & FE_64BIT) regs_id = SYM_PCI_RAM64; if (!pci_map_mem(pci_tag, regs_id, &vaddr, &paddr)) { printf("%s: failed to map RAM window\n", sym_name(np)); goto attach_failed; } np->ram_va = vaddr; np->ram_pa = paddr; np->ram_ba = paddr; #endif } /* * Save setting of some IO registers, so we will * be able to probe specific implementations. */ sym_save_initial_setting (np); /* * Reset the chip now, since it has been reported * that SCSI clock calibration may not work properly * if the chip is currently active. */ sym_chip_reset (np); /* * Try to read the user set-up. */ (void) sym_read_nvram(np, &nvram); /* * Prepare controller and device settings, according * to chip features, user set-up and driver set-up. */ (void) sym_prepare_setting(np, &nvram); /* * Check the PCI clock frequency. * Must be performed after prepare_setting since it destroys * STEST1 that is used to probe for the clock doubler. */ i = sym_getpciclock(np); if (i > 37000) #ifdef FreeBSD_Bus_Io_Abstraction device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i); #else printf("%s: PCI BUS clock seems too high: %u KHz.\n", sym_name(np), i); #endif /* * Allocate the start queue. */ np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); if (!np->squeue) goto attach_failed; np->squeue_ba = vtobus(np->squeue); /* * Allocate the done queue. */ np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); if (!np->dqueue) goto attach_failed; np->dqueue_ba = vtobus(np->dqueue); /* * Allocate the target bus address array. */ np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL"); if (!np->targtbl) goto attach_failed; np->targtbl_ba = vtobus(np->targtbl); /* * Allocate SCRIPTS areas.
*/ np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); if (!np->scripta0 || !np->scriptb0) goto attach_failed; /* * Allocate some CCB. We need at least ONE. */ if (!sym_alloc_ccb(np)) goto attach_failed; /* * Calculate BUS addresses where we are going * to load the SCRIPTS. */ np->scripta_ba = vtobus(np->scripta0); np->scriptb_ba = vtobus(np->scriptb0); np->scriptb0_ba = np->scriptb_ba; if (np->ram_ba) { np->scripta_ba = np->ram_ba; if (np->features & FE_RAM8K) { np->ram_ws = 8192; np->scriptb_ba = np->scripta_ba + 4096; #if BITS_PER_LONG > 32 np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); #endif } else np->ram_ws = 4096; } /* * Copy scripts to controller instance. */ bcopy(fw->a_base, np->scripta0, np->scripta_sz); bcopy(fw->b_base, np->scriptb0, np->scriptb_sz); /* * Setup variable parts in scripts and compute * scripts bus addresses used from the C code. */ np->fw_setup(np, fw); /* * Bind SCRIPTS with physical addresses usable by the * SCRIPTS processor (as seen from the BUS = BUS addresses). */ sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); #ifdef SYM_CONF_IARB_SUPPORT /* * If user wants IARB to be set when we win arbitration * and have other jobs, compute the max number of consecutive * settings of IARB hints before we leave devices a chance to * arbitrate for reselection. */ #ifdef SYM_SETUP_IARB_MAX np->iarb_max = SYM_SETUP_IARB_MAX; #else np->iarb_max = 4; #endif #endif /* * Prepare the idle and invalid task actions. */ np->idletask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->idletask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->idletask_ba = vtobus(&np->idletask); np->notask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->notask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->notask_ba = vtobus(&np->notask); np->bad_itl.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); np->bad_itl_ba = vtobus(&np->bad_itl); np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA (np, idle)); np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA (np,bad_i_t_l_q)); np->bad_itlq_ba = vtobus(&np->bad_itlq); /* * Allocate and prepare the lun JUMP table that is used * for a target prior to the probing of devices (bad lun table). * A private table will be allocated for the target on the * first INQUIRY response received. */ np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); if (!np->badluntbl) goto attach_failed; np->badlun_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun)); for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); /* * Prepare the bus address array that contains the bus * address of each target control block. * For now, assume all logical units are wrong. :) */ for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); np->target[i].head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); np->target[i].head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); } /* * Now check the cache handling of the PCI chipset. */ if (sym_snooptest (np)) { #ifdef FreeBSD_Bus_Io_Abstraction device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n"); #else printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np)); #endif goto attach_failed; } /* * Now deal with CAM. * Hopefully, we will succeed with that one. :) */ if (!sym_cam_attach(np)) goto attach_failed; /* * Sigh! We are done. */ return 0; /* * We have failed.
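* * (Aside on the bad-lun table prepared above: badluntbl[] holds 64 copies of the bus address of badlun_sa, and badlun_sa itself holds the SCRIPTS address of resel_bad_lun, so a reselection on any LUN that has no LCB yet lands in the bad-lun handler.)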
* We will try to free all the resources we have * allocated, but if we are a boot device, this * will not help that much. ;) */ attach_failed: if (np) sym_pci_free(np); return ENXIO; } /* * Free everything that has been allocated for this device. */ static void sym_pci_free(hcb_p np) { SYM_QUEHEAD *qp; ccb_p cp; tcb_p tp; lcb_p lp; int target, lun; int s; /* * First free CAM resources. */ s = splcam(); sym_cam_free(np); splx(s); /* * Now everything should be quiet, so we can * free the other resources. */ #ifdef FreeBSD_Bus_Io_Abstraction if (np->ram_res) bus_release_resource(np->device, SYS_RES_MEMORY, np->ram_id, np->ram_res); if (np->mmio_res) bus_release_resource(np->device, SYS_RES_MEMORY, SYM_PCI_MMIO, np->mmio_res); if (np->io_res) bus_release_resource(np->device, SYS_RES_IOPORT, SYM_PCI_IO, np->io_res); if (np->irq_res) bus_release_resource(np->device, SYS_RES_IRQ, 0, np->irq_res); #else /* * YEAH!!! * It seems there is no means to free MMIO resources. */ #endif if (np->scriptb0) sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0"); if (np->scripta0) sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0"); if (np->squeue) sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); if (np->dqueue) sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); while ((qp = sym_remque_head(&np->free_ccbq)) != 0) { cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); #ifdef FreeBSD_Bus_Dma_Abstraction bus_dmamap_destroy(np->data_dmat, cp->dmamap); #endif sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF"); sym_mfree_dma(cp, sizeof(*cp), "CCB"); } if (np->badluntbl) sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL"); for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { tp = &np->target[target]; for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) { lp = sym_lp(np, tp, lun); if (!lp) continue; if (lp->itlq_tbl) sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); if (lp->cb_tags) sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK, "CB_TAGS"); sym_mfree_dma(lp, sizeof(*lp), "LCB"); } #if SYM_CONF_MAX_LUN > 1 if (tp->lunmp) sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p), "LUNMP"); #endif } if (np->targtbl) sym_mfree_dma(np->targtbl, 256, "TARGTBL"); #ifdef FreeBSD_Bus_Dma_Abstraction if (np->data_dmat) bus_dma_tag_destroy(np->data_dmat); #endif sym_mfree_dma(np, sizeof(*np), "HCB"); } /* * Allocate CAM resources and register a bus to CAM. */ static int sym_cam_attach(hcb_p np) { struct cam_devq *devq = 0; struct cam_sim *sim = 0; struct cam_path *path = 0; struct ccb_setasync csa; int err, s; s = splcam(); /* * Establish our interrupt handler. */ #ifdef FreeBSD_Bus_Io_Abstraction err = bus_setup_intr(np->device, np->irq_res, INTR_TYPE_CAM | INTR_ENTROPY, sym_intr, np, &np->intr); if (err) { device_printf(np->device, "bus_setup_intr() failed: %d\n", err); goto fail; } #else err = 0; if (!pci_map_int (np->pci_tag, sym_intr, np, &cam_imask)) { printf("%s: failed to map interrupt\n", sym_name(np)); goto fail; } #endif /* * Create the device queue for our sym SIM. */ devq = cam_simq_alloc(SYM_CONF_MAX_START); if (!devq) goto fail; /* * Construct our SIM entry. */ sim = cam_sim_alloc(sym_action, sym_poll, "sym", np, np->unit, 1, SYM_SETUP_MAX_TAG, devq); if (!sim) goto fail; devq = 0; if (xpt_bus_register(sim, 0) != CAM_SUCCESS) goto fail; np->sim = sim; sim = 0; if (xpt_create_path(&path, 0, cam_sim_path(np->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { goto fail; } np->path = path; /* * Hmmm... This should be useful, but I do not want to * know about it.
*/ #if __FreeBSD_version < 400000 #ifdef __alpha__ #ifdef FreeBSD_Bus_Io_Abstraction alpha_register_pci_scsi(pci_get_bus(np->device), pci_get_slot(np->device), np->sim); #else alpha_register_pci_scsi(pci_tag->bus, pci_tag->slot, np->sim); #endif #endif #endif /* * Establish our async notification handler. */ xpt_setup_ccb(&csa.ccb_h, np->path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = sym_async; csa.callback_arg = np->sim; xpt_action((union ccb *)&csa); /* * Start the chip now, without resetting the BUS, since * it seems that this must stay under control of CAM. * With LVD/SE capable chips and BUS in SE mode, we may * get a spurious SMBC interrupt. */ sym_init (np, 0); splx(s); return 1; fail: if (sim) cam_sim_free(sim, FALSE); if (devq) cam_simq_free(devq); sym_cam_free(np); splx(s); return 0; } /* * Free everything that deals with CAM. */ static void sym_cam_free(hcb_p np) { #ifdef FreeBSD_Bus_Io_Abstraction if (np->intr) bus_teardown_intr(np->device, np->irq_res, np->intr); #else /* pci_unmap_int(np->pci_tag); */ /* Does nothing */ #endif if (np->sim) { xpt_bus_deregister(cam_sim_path(np->sim)); cam_sim_free(np->sim, /*free_devq*/ TRUE); } if (np->path) xpt_free_path(np->path); } /*============ OPTIONAL NVRAM SUPPORT =================*/ /* * Get host setup from NVRAM. */ static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram) { #ifdef SYM_CONF_NVRAM_SUPPORT /* * Get parity checking, host ID, verbose mode * and miscellaneous host flags from NVRAM. */ switch(nvram->type) { case SYM_SYMBIOS_NVRAM: if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE)) np->rv_scntl0 &= ~0x0a; np->myaddr = nvram->data.Symbios.host_id & 0x0f; if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS) np->verbose += 1; if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO) np->usrflags |= SYM_SCAN_TARGETS_HILO; if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET) np->usrflags |= SYM_AVOID_BUS_RESET; break; case SYM_TEKRAM_NVRAM: np->myaddr = nvram->data.Tekram.host_id & 0x0f; break; default: break; } #endif } /* * Get target setup from NVRAM. */ #ifdef SYM_CONF_NVRAM_SUPPORT static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram); static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram); #endif static void sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp) { #ifdef SYM_CONF_NVRAM_SUPPORT switch(nvp->type) { case SYM_SYMBIOS_NVRAM: sym_Symbios_setup_target (np, target, &nvp->data.Symbios); break; case SYM_TEKRAM_NVRAM: sym_Tekram_setup_target (np, target, &nvp->data.Tekram); break; default: break; } #endif } #ifdef SYM_CONF_NVRAM_SUPPORT /* * Get target set-up from Symbios format NVRAM. */ static void sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram) { tcb_p tp = &np->target[target]; Symbios_target *tn = &nvram->target[target]; tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0; tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT; tp->usrtags = (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0; if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE)) tp->usrflags &= ~SYM_DISC_ENABLED; if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME)) tp->usrflags |= SYM_SCAN_BOOT_DISABLED; if (!(tn->flags & SYMBIOS_SCAN_LUNS)) tp->usrflags |= SYM_SCAN_LUNS_DISABLED; } /* * Get target set-up from Tekram format NVRAM.
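* * (Aside on the Symbios case above: the NVRAM appears to store four times the SCSI period factor, and (sync_period + 3) / 4 recovers the factor rounding upwards, e.g. 40 -> 10 and 41 -> 11, while 0 means asynchronous.)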
*/ static void sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram) { tcb_p tp = &np->target[target]; struct Tekram_target *tn = &nvram->target[target]; int i; if (tn->flags & TEKRAM_SYNC_NEGO) { i = tn->sync_index & 0xf; tp->tinfo.user.period = Tekram_sync[i]; } tp->tinfo.user.width = (tn->flags & TEKRAM_WIDE_NEGO) ? BUS_16_BIT : BUS_8_BIT; if (tn->flags & TEKRAM_TAGGED_COMMANDS) { tp->usrtags = 2 << nvram->max_tags_index; } if (tn->flags & TEKRAM_DISCONNECT_ENABLE) tp->usrflags |= SYM_DISC_ENABLED; /* If any device does not support parity, we will not use this option */ if (!(tn->flags & TEKRAM_PARITY_CHECK)) np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */ } #ifdef SYM_CONF_DEBUG_NVRAM /* * Dump Symbios format NVRAM for debugging purpose. */ static void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram) { int i; /* display Symbios nvram host data */ printf("%s: HOST ID=%d%s%s%s%s%s%s\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"", (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"", (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"", (nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET" :"", (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :""); /* display Symbios nvram drive data */ for (i = 0 ; i < 15 ; i++) { struct Symbios_target *tn = &nvram->target[i]; printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n", sym_name(np), i, (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "", (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "", (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "", tn->bus_width, tn->sync_period / 4, tn->timeout); } } /* * Dump TEKRAM format NVRAM for debugging purpose. */ static u_char Tekram_boot_delay[7] = {3, 5, 10, 20, 30, 60, 120}; static void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram) { int i, tags, boot_delay; char *rem; /* display Tekram nvram host data */ tags = 2 << nvram->max_tags_index; boot_delay = 0; if (nvram->boot_delay_index < 6) boot_delay = Tekram_boot_delay[nvram->boot_delay_index]; switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) { default: case 0: rem = ""; break; case 1: rem = " REMOVABLE=boot device"; break; case 2: rem = " REMOVABLE=all"; break; } printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n", sym_name(np), nvram->host_id & 0x0f, (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"", (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"", (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"", (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"", (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"", (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"", (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"", (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"", rem, boot_delay, tags); /* display Tekram nvram drive data */ for (i = 0; i <= 15; i++) { int sync, j; struct Tekram_target *tn = &nvram->target[i]; j = tn->sync_index & 0xf; sync = Tekram_sync[j]; printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n", sym_name(np), i, (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "", (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "", (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "", (tn->flags & TEKRAM_START_CMD) ? " START" : "", (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "", (tn->flags & TEKRAM_WIDE_NEGO) ? 
" WIDE" : "", sync); } } #endif /* SYM_CONF_DEBUG_NVRAM */ #endif /* SYM_CONF_NVRAM_SUPPORT */ /* * Try reading Symbios or Tekram NVRAM */ #ifdef SYM_CONF_NVRAM_SUPPORT static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram); static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram); #endif static int sym_read_nvram(hcb_p np, struct sym_nvram *nvp) { #ifdef SYM_CONF_NVRAM_SUPPORT /* * Try to read SYMBIOS nvram. * Try to read TEKRAM nvram if Symbios nvram not found. */ if (SYM_SETUP_SYMBIOS_NVRAM && !sym_read_Symbios_nvram (np, &nvp->data.Symbios)) { nvp->type = SYM_SYMBIOS_NVRAM; #ifdef SYM_CONF_DEBUG_NVRAM sym_display_Symbios_nvram(np, &nvp->data.Symbios); #endif } else if (SYM_SETUP_TEKRAM_NVRAM && !sym_read_Tekram_nvram (np, &nvp->data.Tekram)) { nvp->type = SYM_TEKRAM_NVRAM; #ifdef SYM_CONF_DEBUG_NVRAM sym_display_Tekram_nvram(np, &nvp->data.Tekram); #endif } else nvp->type = 0; #else nvp->type = 0; #endif return nvp->type; } #ifdef SYM_CONF_NVRAM_SUPPORT /* * 24C16 EEPROM reading. * * GPOI0 - data in/data out * GPIO1 - clock * Symbios NVRAM wiring now also used by Tekram. */ #define SET_BIT 0 #define CLR_BIT 1 #define SET_CLK 2 #define CLR_CLK 3 /* * Set/clear data/clock bit in GPIO0 */ static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg, int bit_mode) { UDELAY (5); switch (bit_mode){ case SET_BIT: *gpreg |= write_bit; break; case CLR_BIT: *gpreg &= 0xfe; break; case SET_CLK: *gpreg |= 0x02; break; case CLR_CLK: *gpreg &= 0xfd; break; } OUTB (nc_gpreg, *gpreg); UDELAY (5); } /* * Send START condition to NVRAM to wake it up. */ static void S24C16_start(hcb_p np, u_char *gpreg) { S24C16_set_bit(np, 1, gpreg, SET_BIT); S24C16_set_bit(np, 0, gpreg, SET_CLK); S24C16_set_bit(np, 0, gpreg, CLR_BIT); S24C16_set_bit(np, 0, gpreg, CLR_CLK); } /* * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!! 
*/ static void S24C16_stop(hcb_p np, u_char *gpreg) { S24C16_set_bit(np, 0, gpreg, SET_CLK); S24C16_set_bit(np, 1, gpreg, SET_BIT); } /* * Read or write a bit to the NVRAM, * read if GPIO0 input else write if GPIO0 output */ static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit, u_char *gpreg) { S24C16_set_bit(np, write_bit, gpreg, SET_BIT); S24C16_set_bit(np, 0, gpreg, SET_CLK); if (read_bit) *read_bit = INB (nc_gpreg); S24C16_set_bit(np, 0, gpreg, CLR_CLK); S24C16_set_bit(np, 0, gpreg, CLR_BIT); } /* * Output an ACK to the NVRAM after reading, * change GPIO0 to output and when done back to an input */ static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg, u_char *gpcntl) { OUTB (nc_gpcntl, *gpcntl & 0xfe); S24C16_do_bit(np, 0, write_bit, gpreg); OUTB (nc_gpcntl, *gpcntl); } /* * Input an ACK from NVRAM after writing, * change GPIO0 to input and when done back to an output */ static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg, u_char *gpcntl) { OUTB (nc_gpcntl, *gpcntl | 0x01); S24C16_do_bit(np, read_bit, 1, gpreg); OUTB (nc_gpcntl, *gpcntl); } /* * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK, * GPIO0 must already be set as an output */ static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data, u_char *gpreg, u_char *gpcntl) { int x; for (x = 0; x < 8; x++) S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg); S24C16_read_ack(np, ack_data, gpreg, gpcntl); } /* * READ a byte from the NVRAM and then send an ACK to say we have got it, * GPIO0 must already be set as an input */ static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data, u_char *gpreg, u_char *gpcntl) { int x; u_char read_bit; *read_data = 0; for (x = 0; x < 8; x++) { S24C16_do_bit(np, &read_bit, 1, gpreg); *read_data |= ((read_bit & 0x01) << (7 - x)); } S24C16_write_ack(np, ack_data, gpreg, gpcntl); } /* * Read 'len' bytes starting at 'offset'. 
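* * (The device-select byte written below is 0xa0 | ((offset >> 7) & 0x0e): a 24c16 holds 2 KB, so three page-select bits ride in the device address; for a hypothetical offset of 0x100 this yields 0xa2 followed by a low address byte of 0x00.)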
*/ static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; u_char ack_data; int retv = 1; int x; /* save current state of GPCNTL and GPREG */ old_gpreg = INB (nc_gpreg); old_gpcntl = INB (nc_gpcntl); gpcntl = old_gpcntl & 0x1c; /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 into a known state */ OUTB (nc_gpreg, old_gpreg); OUTB (nc_gpcntl, gpcntl); /* this is to set NVRAM into a known state with GPIO0/1 both low */ gpreg = old_gpreg; S24C16_set_bit(np, 0, &gpreg, CLR_CLK); S24C16_set_bit(np, 0, &gpreg, CLR_BIT); /* now set NVRAM inactive with GPIO0/1 both high */ S24C16_stop(np, &gpreg); /* activate NVRAM */ S24C16_start(np, &gpreg); /* write device code and random address MSB */ S24C16_write_byte(np, &ack_data, 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); if (ack_data & 0x01) goto out; /* write random address LSB */ S24C16_write_byte(np, &ack_data, offset & 0xff, &gpreg, &gpcntl); if (ack_data & 0x01) goto out; /* regenerate START state to set up for reading */ S24C16_start(np, &gpreg); /* rewrite device code and address MSB with read bit set (lsb = 0x01) */ S24C16_write_byte(np, &ack_data, 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); if (ack_data & 0x01) goto out; /* now set up GPIO0 for inputting data */ gpcntl |= 0x01; OUTB (nc_gpcntl, gpcntl); /* input all requested data - only part of total NVRAM */ for (x = 0; x < len; x++) S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl); /* finally put NVRAM back in inactive mode */ gpcntl &= 0xfe; OUTB (nc_gpcntl, gpcntl); S24C16_stop(np, &gpreg); retv = 0; out: /* return GPIO0/1 to original states after having accessed NVRAM */ OUTB (nc_gpcntl, old_gpcntl); OUTB (nc_gpreg, old_gpreg); return retv; } #undef SET_BIT /* 0 */ #undef CLR_BIT /* 1 */ #undef SET_CLK /* 2 */ #undef CLR_CLK /* 3 */ /* * Try reading Symbios NVRAM. * Return 0 if OK. */ static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram) { static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0}; u_char *data = (u_char *) nvram; int len = sizeof(*nvram); u_short csum; int x; /* probe the 24c16 and read the SYMBIOS 24c16 area */ if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len)) return 1; /* check valid NVRAM signature, verify byte count and checksum */ if (nvram->type != 0 || bcmp(nvram->trailer, Symbios_trailer, 6) || nvram->byte_count != len - 12) return 1; /* verify checksum */ for (x = 6, csum = 0; x < len - 6; x++) csum += data[x]; if (csum != nvram->checksum) return 1; return 0; } /* * 93C46 EEPROM reading. * * GPIO0 - data in * GPIO1 - data out * GPIO2 - clock * GPIO4 - chip select * * Used by Tekram. */ /* * Pulse the clock bit (GPIO2) */ static void T93C46_Clk(hcb_p np, u_char *gpreg) { OUTB (nc_gpreg, *gpreg | 0x04); UDELAY (2); OUTB (nc_gpreg, *gpreg); } /* * Read bit from NVRAM */ static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg) { UDELAY (2); T93C46_Clk(np, gpreg); *read_bit = INB (nc_gpreg); } /* * Write a bit out on GPIO1 */ static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg) { if (write_bit & 0x01) *gpreg |= 0x02; else *gpreg &= 0xfd; *gpreg |= 0x10; OUTB (nc_gpreg, *gpreg); UDELAY (2); T93C46_Clk(np, gpreg); } /* * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
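* * (Aside on the Symbios NVRAM validation above: the checksum is the 16-bit sum of data[6] through data[len - 7], i.e. everything between the leading six bytes and the 6-byte trailer, and it must equal the stored checksum field.)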
*/ static void T93C46_Stop(hcb_p np, u_char *gpreg) { *gpreg &= 0xef; OUTB (nc_gpreg, *gpreg); UDELAY (2); T93C46_Clk(np, gpreg); } /* * Send read command and address to NVRAM */ static void T93C46_Send_Command(hcb_p np, u_short write_data, u_char *read_bit, u_char *gpreg) { int x; /* send 9 bits, start bit (1), command (2), address (6) */ for (x = 0; x < 9; x++) T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg); *read_bit = INB (nc_gpreg); } /* * READ 2 bytes from the NVRAM */ static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg) { int x; u_char read_bit; *nvram_data = 0; for (x = 0; x < 16; x++) { T93C46_Read_Bit(np, &read_bit, gpreg); if (read_bit & 0x01) *nvram_data |= (0x01 << (15 - x)); else *nvram_data &= ~(0x01 << (15 - x)); } } /* * Read Tekram NvRAM data. */ static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg) { u_char read_bit; int x; for (x = 0; x < len; x++) { /* output read command and address */ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg); if (read_bit & 0x01) return 1; /* Bad */ T93C46_Read_Word(np, &data[x], gpreg); T93C46_Stop(np, gpreg); } return 0; } /* * Try reading 93C46 Tekram NVRAM. */ static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; int retv = 1; /* save current state of GPCNTL and GPREG */ old_gpreg = INB (nc_gpreg); old_gpcntl = INB (nc_gpcntl); /* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in, 1/2/4 out */ gpreg = old_gpreg & 0xe9; OUTB (nc_gpreg, gpreg); gpcntl = (old_gpcntl & 0xe9) | 0x09; OUTB (nc_gpcntl, gpcntl); /* input all of NVRAM, 64 words */ retv = T93C46_Read_Data(np, (u_short *) nvram, sizeof(*nvram) / sizeof(short), &gpreg); /* return GPIO0/1/2/4 to original states after having accessed NVRAM */ OUTB (nc_gpcntl, old_gpcntl); OUTB (nc_gpreg, old_gpreg); return retv; } /* * Try reading Tekram NVRAM. * Return 0 if OK. */ static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram) { u_char *data = (u_char *) nvram; int len = sizeof(*nvram); u_short csum; int x; switch (np->device_id) { case PCI_ID_SYM53C885: case PCI_ID_SYM53C895: case PCI_ID_SYM53C896: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); break; case PCI_ID_SYM53C875: x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS, data, len); if (!x) break; default: x = sym_read_T93C46_nvram(np, nvram); break; } if (x) return 1; /* verify checksum */ for (x = 0, csum = 0; x < len - 1; x += 2) csum += data[x] + (data[x+1] << 8); if (csum != 0x1234) return 1; return 0; } #endif /* SYM_CONF_NVRAM_SUPPORT */ Index: head/sys/i386/i386/busdma_machdep.c =================================================================== --- head/sys/i386/i386/busdma_machdep.c (revision 110231) +++ head/sys/i386/i386/busdma_machdep.c (revision 110232) @@ -1,899 +1,897 @@ /* * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#define MAX(a,b) (((a) > (b)) ? (a) : (b)) -#define MIN(a,b) (((a) < (b)) ? (a) : (b)) #define MAX_BPAGES 128 struct bus_dma_tag { bus_dma_tag_t parent; bus_size_t alignment; bus_size_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; int ref_count; int map_count; }; struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; int busdma_swi_pending; static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; static int free_bpages; static int reserved_bpages; static int active_bpages; static int total_bpages; static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; struct bus_dmamap { struct bp_list bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; void *buf; /* unmapped buffer pointer */ bus_size_t buflen; /* unmapped buffer length */ bus_dmamap_callback_t *callback; void *callback_arg; STAILQ_ENTRY(bus_dmamap) links; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; static struct bus_dmamap nobounce_dmamap; static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map); static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); /* * Return true if a match is made. * * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. * * If paddr is within the bounds of the dma tag then call the filter callback * to check for a match, if there is no filter callback then assume a match. */ static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) { int retval; retval = 0; do { if (paddr > dmat->lowaddr && paddr <= dmat->highaddr && (dmat->filter == NULL || (*dmat->filter)(dmat->filterarg, paddr) != 0)) retval = 1; dmat = dmat->parent; } while (retval == 0 && dmat != NULL); return (retval); } #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 /* * Allocate a device specific dma_tag. 
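* * (Worked example for run_filter() above, hypothetical values: for a tag with lowaddr = 0xffffff, highaddr = BUS_SPACE_MAXADDR and no filter callback, a page at physical address 0x1000000 matches and must be bounced, while one at 0xfff000 is usable in place; parent tags, if any, are consulted the same way.)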
*/ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error = 0; /* Return a NULL tag on failure */ *dmat = NULL; newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); if (newtag == NULL) return (ENOMEM); newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); newtag->filter = filter; newtag->filterarg = filterarg; newtag->maxsize = maxsize; newtag->nsegments = nsegments; newtag->maxsegsz = maxsegsz; newtag->flags = flags; newtag->ref_count = 1; /* Count ourself */ newtag->map_count = 0; /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); /* * XXX Not really correct??? Probably need to honor boundary * all the way up the inheritence chain. */ newtag->boundary = MAX(parent->boundary, newtag->boundary); if (newtag->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ newtag->filter = parent->filter; newtag->filterarg = parent->filterarg; newtag->parent = parent->parent; } if (newtag->parent != NULL) { parent->ref_count++; } } if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) { /* Must bounce */ if (lowaddr > bounce_lowaddr) { /* * Go through the pool and kill any pages * that don't reside below lowaddr. */ panic("bus_dma_tag_create: page reallocation " "not implemented"); } if (ptoa(total_bpages) < maxsize) { int pages; pages = atop(maxsize) - total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(newtag, pages) < pages) error = ENOMEM; } /* Performed initial allocation */ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; } if (error != 0) { free(newtag, M_DEVBUF); } else { *dmat = newtag; } return (error); } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { if (dmat != NULL) { if (dmat->map_count != 0) return (EBUSY); while (dmat != NULL) { bus_dma_tag_t parent; parent = dmat->parent; dmat->ref_count--; if (dmat->ref_count == 0) { free(dmat, M_DEVBUF); /* * Last reference count, so * release our reference * count on our parent. */ dmat = parent; } else dmat = NULL; } } return (0); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { int error; error = 0; if (dmat->lowaddr < ptoa(Maxmem)) { /* Must bounce */ int maxpages; *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO); if (*mapp == NULL) return (ENOMEM); /* Initialize the new map */ STAILQ_INIT(&((*mapp)->bpages)); /* * Attempt to add pages to our pool on a per-instance * basis up to a sane limit. */ maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr)); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || (dmat->map_count > 0 && total_bpages < maxpages)) { int pages; if (dmat->lowaddr > bounce_lowaddr) { /* * Go through the pool and kill any pages * that don't reside below lowaddr. 
*/ panic("bus_dmamap_create: page reallocation " "not implemented"); } pages = atop(dmat->maxsize); pages = MIN(maxpages - total_bpages, pages); error = alloc_bounce_pages(dmat, pages); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { if (error == 0) dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; } else { error = 0; } } } else { *mapp = NULL; } if (error == 0) dmat->map_count++; return (error); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { if (map != NULL) { if (STAILQ_FIRST(&map->bpages) != NULL) return (EBUSY); free(map, M_DEVBUF); } dmat->map_count--; return (0); } /* * Allocate a piece of memory that can be efficiently mapped into * bus device space based on the constraints lited in the dma tag. * A dmamap to for use with dmamap_load is also allocated. */ int bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp, bus_size_t size) { if (size > dmat->maxsize) return (ENOMEM); /* If we succeed, no mapping/bouncing will be required */ *mapp = NULL; if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) { *vaddr = malloc(size, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0); } else { /* * XXX Use Contigmalloc until it is merged into this facility * and handles multi-seg allocations. Nobody is doing * multi-seg allocations yet though. */ *vaddr = contigmalloc(size, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0, 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, dmat->boundary); } if (*vaddr == NULL) return (ENOMEM); return (0); } int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize)); } /* * Free a piece of memory and it's allociated dmamap, that was allocated * via bus_dmamem_alloc. Make the same choice for free/contigfree. */ void bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map, bus_size_t size) { /* * dmamem does not need to be bounced, so the map should be * NULL */ if (map != NULL) panic("bus_dmamem_free: Invalid map freed\n"); if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) free(vaddr, M_DEVBUF); else contigfree(vaddr, size, M_DEVBUF); } void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize); } #define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1) /* * Map the buffer buf into bus space using the dmamap map. */ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { vm_offset_t vaddr; vm_offset_t paddr; #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif bus_dma_segment_t *sg; int seg; int error; vm_offset_t nextpaddr; if (map == NULL) map = &nobounce_dmamap; error = 0; /* * If we are being called during a callback, pagesneeded will * be non-zero, so we can avoid doing the work twice. 
*/ if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) { vm_offset_t vendaddr; /* * Count the number of bounce pages * needed in order to complete this transfer. */ vaddr = trunc_page((vm_offset_t)buf); vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { paddr = pmap_kextract(vaddr); if (run_filter(dmat, paddr) != 0) { map->pagesneeded++; } vaddr += PAGE_SIZE; } } /* Reserve the necessary bounce pages */ if (map->pagesneeded != 0) { int s; s = splhigh(); if (reserve_bounce_pages(dmat, map) != 0) { /* Queue us for resources */ map->dmat = dmat; map->buf = buf; map->buflen = buflen; map->callback = callback; map->callback_arg = callback_arg; STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); splx(s); return (EINPROGRESS); } splx(s); } vaddr = (vm_offset_t)buf; sg = &dm_segments[0]; seg = 1; sg->ds_len = 0; nextpaddr = 0; do { bus_size_t size; paddr = pmap_kextract(vaddr); size = PAGE_SIZE - (paddr & PAGE_MASK); if (size > buflen) size = buflen; if (map->pagesneeded != 0 && run_filter(dmat, paddr)) { paddr = add_bounce_page(dmat, map, vaddr, size); } if (sg->ds_len == 0) { sg->ds_addr = paddr; sg->ds_len = size; } else if (paddr == nextpaddr) { sg->ds_len += size; } else { /* Go to the next segment */ sg++; seg++; if (seg > dmat->nsegments) break; sg->ds_addr = paddr; sg->ds_len = size; } vaddr += size; nextpaddr = paddr + size; buflen -= size; } while (buflen > 0); if (buflen != 0) { printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n", (u_long)buflen); error = EFBIG; } (*callback)(callback_arg, dm_segments, seg, error); return (0); } /* * Utility function to load a linear buffer. lastaddrp holds state * between invocations (for multiple-buffer loads). segp contains * the starting segment on entrance, and the ending segment on exit. * first indicates if this is the first invocation of this function. */ static int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[], void *buf, bus_size_t buflen, struct thread *td, int flags, vm_offset_t *lastaddrp, int *segp, int first) { bus_size_t sgsize; bus_addr_t curaddr, lastaddr, baddr, bmask; vm_offset_t vaddr = (vm_offset_t)buf; int seg; pmap_t pmap; if (td != NULL) pmap = vmspace_pmap(td->td_proc->p_vmspace); else pmap = NULL; lastaddr = *lastaddrp; bmask = ~(dmat->boundary - 1); for (seg = *segp; buflen > 0 ; ) { /* * Get the physical address for this segment. */ if (pmap) curaddr = pmap_extract(pmap, vaddr); else curaddr = pmap_kextract(vaddr); /* * Compute the segment size, and adjust counts. */ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); if (buflen < sgsize) sgsize = buflen; /* * Make sure we don't cross any boundaries. */ if (dmat->boundary > 0) { baddr = (curaddr + dmat->boundary) & bmask; if (sgsize > (baddr - curaddr)) sgsize = (baddr - curaddr); } /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ if (first) { segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; first = 0; } else { if (curaddr == lastaddr && (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && (dmat->boundary == 0 || (segs[seg].ds_addr & bmask) == (curaddr & bmask))) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->nsegments) break; segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } } lastaddr = curaddr + sgsize; vaddr += sgsize; buflen -= sgsize; } *segp = seg; *lastaddrp = lastaddr; /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } /* * Like _bus_dmamap_load(), but for mbufs.
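* * (Aside on the boundary handling in _bus_dmamap_load_buffer() above: with a hypothetical boundary of 0x10000 and curaddr = 0x1fc00, bmask = ~0xffff and baddr = (0x1fc00 + 0x10000) & bmask = 0x20000, so sgsize is clipped to 0x400 bytes and the segment stops exactly at the 64 KB line.)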
*/ int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif int nsegs, error; KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, ("bus_dmamap_load_mbuf: No support for bounce pages!")); KASSERT(m0->m_flags & M_PKTHDR, ("bus_dmamap_load_mbuf: no packet header")); nsegs = 0; error = 0; if (m0->m_pkthdr.len <= dmat->maxsize) { int first = 1; vm_offset_t lastaddr = 0; struct mbuf *m; for (m = m0; m != NULL && error == 0; m = m->m_next) { error = _bus_dmamap_load_buffer(dmat, dm_segments, m->m_data, m->m_len, NULL, flags, &lastaddr, &nsegs, first); first = 0; } } else { error = EINVAL; } if (error) { /* force "no valid mappings" in callback */ (*callback)(callback_arg, dm_segments, 0, 0, error); } else { (*callback)(callback_arg, dm_segments, nsegs+1, m0->m_pkthdr.len, error); } return (error); } /* * Like _bus_dmamap_load(), but for uios. */ int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { vm_offset_t lastaddr; #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif int nsegs, error, first, i; bus_size_t resid; struct iovec *iov; struct thread *td = NULL; KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, ("bus_dmamap_load_uio: No support for bounce pages!")); resid = uio->uio_resid; iov = uio->uio_iov; if (uio->uio_segflg == UIO_USERSPACE) { td = uio->uio_td; KASSERT(td != NULL, ("bus_dmamap_load_uio: USERSPACE but no proc")); } nsegs = 0; error = 0; first = 1; for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { /* * Now at the first iovec to load. Load each iovec * until we have exhausted the residual count. */ bus_size_t minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; caddr_t addr = (caddr_t) iov[i].iov_base; error = _bus_dmamap_load_buffer(dmat, dm_segments, addr, minlen, td, flags, &lastaddr, &nsegs, first); first = 0; resid -= minlen; } if (error) { /* force "no valid mappings" in callback */ (*callback)(callback_arg, dm_segments, 0, 0, error); } else { (*callback)(callback_arg, dm_segments, nsegs+1, uio->uio_resid, error); } return (error); } /* * Release the mapping held by map. */ void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { struct bounce_page *bpage; while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } } void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { /* * Handle data bouncing. 
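* PREWRITE copies the client's data into the bounce pages before the device reads them, and POSTREAD copies what the device left in the bounce pages back to the client; PREREAD and POSTWRITE need no copying.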
We might also * want to add support for invalidating * the caches on broken hardware */ switch (op) { case BUS_DMASYNC_PREWRITE: while (bpage != NULL) { bcopy((void *)bpage->datavaddr, (void *)bpage->vaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } break; case BUS_DMASYNC_POSTREAD: while (bpage != NULL) { bcopy((void *)bpage->vaddr, (void *)bpage->datavaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } break; case BUS_DMASYNC_PREREAD: case BUS_DMASYNC_POSTWRITE: /* No-ops */ break; } } } static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) { int count; count = 0; if (total_bpages == 0) { STAILQ_INIT(&bounce_page_list); STAILQ_INIT(&bounce_map_waitinglist); STAILQ_INIT(&bounce_map_callbacklist); } while (numpages > 0) { struct bounce_page *bpage; int s; bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, M_NOWAIT | M_ZERO); if (bpage == NULL) break; bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT, 0ul, dmat->lowaddr, PAGE_SIZE, 0); if (bpage->vaddr == 0) { free(bpage, M_DEVBUF); break; } bpage->busaddr = pmap_kextract(bpage->vaddr); s = splhigh(); STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); total_bpages++; free_bpages++; splx(s); count++; numpages--; } return (count); } static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map) { int pages; pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved); free_bpages -= pages; reserved_bpages += pages; map->pagesreserved += pages; pages = map->pagesneeded - map->pagesreserved; return (pages); } static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_size_t size) { int s; struct bounce_page *bpage; if (map->pagesneeded == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesneeded--; if (map->pagesreserved == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesreserved--; s = splhigh(); bpage = STAILQ_FIRST(&bounce_page_list); if (bpage == NULL) panic("add_bounce_page: free page list is empty"); STAILQ_REMOVE_HEAD(&bounce_page_list, links); reserved_bpages--; active_bpages++; splx(s); bpage->datavaddr = vaddr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); } static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) { int s; struct bus_dmamap *map; bpage->datavaddr = 0; bpage->datacount = 0; s = splhigh(); STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); free_bpages++; active_bpages--; if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { if (reserve_bounce_pages(map->dmat, map) == 0) { STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links); busdma_swi_pending = 1; swi_sched(vm_ih, 0); } } splx(s); } void busdma_swi(void) { int s; struct bus_dmamap *map; s = splhigh(); while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); splx(s); bus_dmamap_load(map->dmat, map, map->buf, map->buflen, map->callback, map->callback_arg, /*flags*/0); s = splhigh(); } splx(s); } Index: head/sys/i386/isa/gpib.c =================================================================== --- head/sys/i386/isa/gpib.c (revision 110231) +++ head/sys/i386/isa/gpib.c (revision 110232) @@ -1,1138 +1,1136 @@ /* * GPIB driver for FreeBSD. * Version 0.1 (No interrupts, no DMA) * Supports National Instruments AT-GPIB and AT-GPIB/TNT boards. 
* (AT-GPIB not tested, but it should work) * * Written by Fred Cawthorne (fcawth@delphi.umd.edu) * Some sections were based partly on the lpt driver. * (some remnants may remain) * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * The author grants any other persons or organizations permission to use * or modify this software as long as this message is kept with the software, * all derivative works or modified versions. * * $FreeBSD$ */ /* Please read the README file for usage information */ #include #include #include #include #include #include #include #include #include #include #ifndef COMPAT_OLDISA #error "The gpib device requires the old isa compatibility shims" #endif -#define MIN(a, b) (((a) < (b)) ? (a) : (b)) - #define GPIBPRI (PZERO + 8) | PCATCH #define SLEEP_MAX 1000 #define SLEEP_MIN 4 static int initgpib(void); static void closegpib(void); static int sendgpibfifo(unsigned char device, char *data, int count); static int sendrawgpibfifo(unsigned char device, char *data, int count); static int readgpibfifo(unsigned char device, char *data, int count); #if 0 static void showregs(void); #endif static void enableremote(unsigned char device); static void gotolocal(unsigned char device); static void menableremote(unsigned char *device); static void mgotolocal(unsigned char *device); static void mtrigger(unsigned char *device); static void trigger(unsigned char device); static char spoll(unsigned char device); static int gpprobe(struct isa_device *dvp); static int gpattach(struct isa_device *dvp); struct isa_driver gpdriver = { INTR_TYPE_TTY, gpprobe, gpattach, "gp" }; COMPAT_ISA_DRIVER(gp, gpdriver); static d_open_t gpopen; static d_close_t gpclose; static d_write_t gpwrite; static d_ioctl_t gpioctl; #define CDEV_MAJOR 44 static struct cdevsw gp_cdevsw = { /* open */ gpopen, /* close */ gpclose, /* read */ noread, /* write */ gpwrite, /* ioctl */ gpioctl, /* poll */ nopoll, /* mmap */ nommap, /* strategy */ nostrategy, /* name */ "gp", /* maj */ CDEV_MAJOR, /* dump */ nodump, /* psize */ nopsize, /* flags */ 0, }; #define BUFSIZE 1024 #define ATTACHED 0x08 #define OPEN 0x04 #define INIT 0x02 static struct gpib_softc { char *sc_cp; /* current data to send */ int sc_count; /* bytes queued in sc_inbuf */ int sc_type; /* Type of gpib controller */ u_char sc_flags; /* flags (open and internal) */ char sc_unit; /* gpib device number */ char *sc_inbuf; /* buffer for data */ } gpib_sc; /* only support one of these? */ static int oldcount; static char oldbytes[2]; /* * Probe routine * This needs to be changed to be a bit more robust */ static int gpprobe(struct isa_device *dvp) { int status; struct gpib_softc *sc = &gpib_sc; gpib_port = dvp->id_iobase; status = 1; sc->sc_type = 3; if ((inb(KSR) & 0xF7) == 0x34) sc->sc_type = 3; else if ((inb(KSR) & 0xF7) == 0x24) sc->sc_type = 2; else if ((inb(KSR) & 0xF7) == 0x14) sc->sc_type = 1; else status = 0; return (status); } /* * gpattach() * Attach device and print the type of card to the screen. 
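 * With a TNT chip, for example, the boot message reads:
 *
 *	gp0: type AT-GPIB/TNT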
*/ static int gpattach(isdp) struct isa_device *isdp; { struct gpib_softc *sc = &gpib_sc; sc->sc_unit = isdp->id_unit; if (sc->sc_type == 3) printf ("gp%d: type AT-GPIB/TNT\n", sc->sc_unit); if (sc->sc_type == 2) printf ("gp%d: type AT-GPIB chip NAT4882B\n", sc->sc_unit); if (sc->sc_type == 1) printf ("gp%d: type AT-GPIB chip NAT4882A\n", sc->sc_unit); sc->sc_flags |= ATTACHED; make_dev(&gp_cdevsw, 0, 0, 0, 0600, "gp"); return (1); } /* * gpopen() * New open on device. * * More than 1 open is not allowed on the entire device. * i.e. even if gpib5 is open, we can't open another minor device */ static int gpopen(dev, flags, fmt, td) dev_t dev; int flags; int fmt; struct thread *td; { struct gpib_softc *sc = &gpib_sc; u_char unit; int status; unit = minor(dev); /* minor number out of limits ? */ if (unit >= 32) return (ENXIO); /* Attached ? */ if (!(sc->sc_flags&ATTACHED)) /* not attached */ return (ENXIO); /* Already open */ if (sc->sc_flags&OPEN) /* too late .. */ return (EBUSY); /* Have memory for buffer? */ sc->sc_inbuf = malloc(BUFSIZE, M_DEVBUF, 0); if (sc->sc_inbuf == 0) return (ENOMEM); if (initgpib()) return (EBUSY); sc->sc_flags |= OPEN; sc->sc_count = 0; oldcount = 0; if (unit != 0) { /* Someone is trying to access an actual device */ /* So.. we'll address it to listen */ enableremote(unit); do { status = inb(ISR2); } while (!(status & 8) && tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1) == EWOULDBLOCK); outb(CDOR, (unit & 31) + 32); /* address device to listen */ do status = inb(ISR2); while (!(status & 8) && tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1) == EWOULDBLOCK); outb(CDOR, 64); /* Address controller (me) to talk */ do { status = inb(ISR2); } while (!(status & 8) && tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1) == EWOULDBLOCK); outb(AUXMR, gts); /* Set to Standby (Controller) */ do { status = inb(ISR1); } while (!(status & 2) && tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1) == EWOULDBLOCK); /* Set up the TURBO488 registers */ outb(IMR2, 0x30); /* we have to enable DMA (0x30) for turbo488 to work */ outb(CNT0, 0); /* NOTE this does not enable DMA to the host computer!! */ outb(CNT1, 0); outb(CNT2, 0); outb(CNT3, 0); outb(CMDR, 0x20); outb(CFG, 0x47); /* 16 bit, write, fifo B first, TMOE TIM */ outb(CMDR, 0x10); /* RESET fifos */ outb(CMDR, 0x04); /* Tell TURBO488 to GO */ } return (0); } /* * gpclose() * Close gpib device. 
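 *
 * The last byte held back by gpwrite() is transmitted from here with
 * EOI set.  The usual user-level sequence is therefore (illustrative
 * only; nodes for minors other than 0 must be created by hand):
 *
 *	fd = open("/dev/gp05", O_WRONLY);	/* GPIB address 5 */
 *	write(fd, "*IDN?\r\n", 7);
 *	close(fd);			/* final byte goes out with EOI */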
*/ static int gpclose(dev, flags, fmt, td) dev_t dev; int flags; int fmt; struct thread *td; { struct gpib_softc *sc = &gpib_sc; unsigned char unit; unsigned char status; unit = minor(dev); if (unit != 0) { /* Here we need to send the last character with EOS */ /* and unaddress the listening device */ status = EWOULDBLOCK; /* Wait for fifo to become empty */ do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while ((inb(ISR3) & 0x04) && status == EWOULDBLOCK); /* Fifo is not empty */ outb(CMDR, 0x08); /* Issue STOP to TURBO488 */ /* Wait for DONE and STOP */ if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR3) & 0x11) && status == EWOULDBLOCK); /* not done and stop */ /* Shut down TURBO488 */ outb(IMR2, 0x00); /* DISABLE DMA to turbo488 */ outb(CMDR, 0x20); /* soft reset turbo488 */ outb(CMDR, 0x10); /* reset fifos */ /* Send last byte with EOI set */ /* Send second to last byte if there are 2 bytes left */ if (status == EWOULDBLOCK) { do { if (!(inb(ISR1) & 2)) status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR1) & 2) && (status == EWOULDBLOCK)); if (oldcount == 2) { outb(CDOR, oldbytes[0]); /* Send second to last byte */ while (!(inb(ISR1) & 2) && (status == EWOULDBLOCK)); status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } outb(AUXMR, seoi); /* Set EOI for the last byte */ outb(AUXMR, 0x5E); /* Clear SYNC */ if (oldcount == 1) outb(CDOR, oldbytes[0]); else if (oldcount == 2) outb(CDOR, oldbytes[1]); else { outb(CDOR, 13); /* Send a CR.. we've got trouble */ printf("gpib: Warning: gpclose called with nothing left in buffer\n"); } } do { if (!(inb(ISR1) & 2)) status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR1) & 2) && (status == EWOULDBLOCK)); if (!(inb(ISR1) & 2) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR1) & 2) && status == EWOULDBLOCK); outb(AUXMR, tca); /* Regain full control of the bus */ do { status = inb(ISR2); } while (!(status & 8) && tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1) == EWOULDBLOCK); outb(CDOR, 63); /* unlisten */ do { status = inb(ISR2); } while (!(status & 8) && tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1) == EWOULDBLOCK); outb(AUXMR, 0x5E); /* Clear SYNC */ outb(CDOR, 95); /* untalk */ do { status = inb(ISR2); } while (!(status & 8) && tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1) == EWOULDBLOCK); #if 0 gotolocal(minor(dev)); #endif } closegpib(); sc->sc_flags = ATTACHED; free(sc->sc_inbuf, M_DEVBUF); sc->sc_inbuf = 0; /* Sanity */ return (0); } /* * gpwrite() * Copy from user's buffer, then write to GPIB device referenced * by minor(dev). 
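 *
 * Because the TURBO488 FIFO is fed 16 bits at a time and the final
 * byte must be kept back for EOI, up to two bytes of every write are
 * carried over in oldbytes[].  Worked example: a 4-byte write pushes
 * one 16-bit word (2 bytes) into the FIFO and holds the remaining
 * 2 bytes for the next write or for gpclose().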
*/ static int gpwrite(dev, uio, ioflag) dev_t dev; struct uio *uio; int ioflag; { int err, count; /* main loop */ while ((gpib_sc.sc_count = MIN(BUFSIZE-1, uio->uio_resid)) > 0) { /* If there were >1 bytes left over, send them */ if (oldcount == 2) sendrawgpibfifo(minor(dev), oldbytes, 2); /* If there was 1 character left, put it at the beginning of the new buffer */ if (oldcount == 1) { (gpib_sc.sc_inbuf)[0] = oldbytes[0]; gpib_sc.sc_cp = gpib_sc.sc_inbuf; /* get from user-space */ uiomove(gpib_sc.sc_inbuf + 1, gpib_sc.sc_count, uio); gpib_sc.sc_count++; } else { gpib_sc.sc_cp = gpib_sc.sc_inbuf; /* get from user-space */ uiomove(gpib_sc.sc_inbuf, gpib_sc.sc_count, uio); } /* * NOTE we always leave one byte in case this is the last write * so close can send EOI with the last byte There may be 2 bytes * since we are doing 16 bit transfers.(note the -1 in the count below) */ /* If count <= 2 we'll either pick it up on the next write or on close */ if (gpib_sc.sc_count>2) { count = sendrawgpibfifo(minor(dev), gpib_sc.sc_cp, gpib_sc.sc_count-1); err = !count; if (err) return (1); oldcount = gpib_sc.sc_count-count; /* Set # of remaining bytes */ gpib_sc.sc_count -= count; gpib_sc.sc_cp += count; /* point char pointer to remaining bytes */ } else oldcount = gpib_sc.sc_count; oldbytes[0] = gpib_sc.sc_cp[0]; if (oldcount == 2) oldbytes[1] = gpib_sc.sc_cp[1]; } return (0); } /* * Here is how you would usually access a GPIB device * An exception would be a plotter or printer that you can just * write to using a minor device = its GPIB address */ static int gpioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct gpibdata *gd = (struct gpibdata *)data; int error, result; error = 0; switch (cmd) { case GPIBWRITE: sendgpibfifo(gd->address, gd->data, *(gd->count)); error = 0; break; case GPIBREAD: result = readgpibfifo(gd->address, gd->data, *(gd->count)); *(gd->count) = result; error = 0; break; case GPIBINIT: initgpib(); error = 0; break; case GPIBTRIGGER: trigger(gd->address); error = 0; break; case GPIBREMOTE: enableremote(gd->address); error = 0; break; case GPIBLOCAL: gotolocal(gd->address); error = 0; break; case GPIBMTRIGGER: mtrigger(gd->data); error = 0; break; case GPIBMREMOTE: menableremote(gd->data); error = 0; break; case GPIBMLOCAL: mgotolocal(gd->data); error = 0; break; case GPIBSPOLL: *(gd->data) = spoll(gd->address); error = 0; break; default: error = ENODEV; } return (error); } #if 0 /* Just in case you want a dump of the registers... */ static void showregs() { printf ("NAT4882:\n"); printf ("ISR1=%X\t", inb(ISR1)); printf ("ISR2=%X\t", inb(ISR2)); printf ("SPSR=%X\t", inb(SPSR)); printf ("KSR =%X\t", inb(KSR)); printf ("ADSR=%X\t", inb(ADSR)); printf ("CPTR=%X\t", inb(CPTR)); printf ("SASR=%X\t", inb(SASR)); printf ("ADR0=%X\t", inb(ADR0)); printf ("ISR0=%X\t", inb(ISR0)); printf ("ADR1=%X\t", inb(ADR1)); printf ("BSR =%X\n", inb(BSR)); printf ("Turbo488\n"); printf ("STS1=%X ", inb(STS1)); printf ("STS2=%X ", inb(STS2)); printf ("ISR3=%X ", inb(ISR3)); printf ("CNT0=%X ", inb(CNT0)); printf ("CNT1=%X ", inb(CNT1)); printf ("CNT2=%X ", inb(CNT2)); printf ("CNT3=%X ", inb(CNT3)); printf ("IMR3=%X ", inb(IMR3)); printf ("TIMER=%X\n", inb(TIMER)); } #endif /* * Set up the NAT4882 and TURBO488 registers * This will be nonsense to you unless you have a data sheet from * National Instruments. 
They should give you one if you call them */ static int initgpib() { outb(CMDR, 0x20); outb(CFG, 0x16); outb(IMR3, 0); outb(CMDR, 0x10); outb(CNT0, 0); outb(CNT1, 0); outb(CNT2, 0); outb(CNT3, 0); outb(INTR, 0); /* Put interrupt line in tri-state mode?? */ outb(AUXMR, chip_reset); outb(IMR1, 0x10); /* send interrupt to TURBO488 when END received */ outb(IMR2, 0); outb(IMR0, 0x90); /* Do we want nba here too??? */ outb(ADMR, 1); outb(ADR, 0); outb(ADR, 128); outb(AUXMR, 0xE9); outb(AUXMR, 0x49); outb(AUXMR, 0x70); outb(AUXMR, 0xD0); outb(AUXMR, 0xA0); outb(EOSR, 10); /* set EOS message to newline */ /* should I make the default to interpret END as EOS? */ /* It isn't now. The following changes this */ outb(AUXMR, 0x80); /* No special EOS handling */ #if 0 outb(AUXMR, 0x88) /* Transmit END with EOS */ outb(AUXMR, 0x84) /* Set END on EOS received */ outb(AUXMR, 0x8C) /* Do both of the above */ #endif #if 0 /* Not currently supported */ outb(AUXMR, hldi); /* Perform RFD Holdoff for all data in */ #endif outb(AUXMR, pon); outb(AUXMR, sic_rsc); tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); outb(AUXMR, sic_rsc_off); return (0); } /* This is kind of Brute force.. But it works */ static void closegpib() { outb(AUXMR, chip_reset); } /* * GPIB ROUTINES: * These will also make little sense unless you have a data sheet. * Note that the routines with an "m" in the beginning are for * accessing multiple devices in one call */ /* * This is one thing I could not figure out how to do correctly. * I tried to use the auxilary command to enable remote, but it * never worked. Here, I bypass everything and write to the BSR * to enable the remote line. NOTE that these lines are effectively * "OR'ed" with the actual lines, so writing a 1 to the bit in the BSR * forces the GPIB line true, no matter what the fancy circuitry of the * NAT4882 wants to do with it */ static void enableremote(unsigned char device) { int status; status = EWOULDBLOCK; if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(BSR, 1); /* Set REN bit on GPIB */ if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, (device & 31) + 32); /* address device to listen */ if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, 63); /* Unaddress device */ if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ } /* * This does not release the REM line on the gpib port, because if it did, * all the remote devices would go to local mode. This only sends the * gotolocal message to one device. 
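 *
 * (For reference, the raw IEEE-488 command bytes written to CDOR in
 * this driver are: 0x20+addr MLA "listen", 0x40+addr MTA "talk",
 * 63 UNL "unlisten", 95 UNT "untalk", 1 GTL "go to local",
 * 8 GET "trigger", and 0x18/0x19 SPE/SPD for serial polls.)
 *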
Currently, REM is always held true * after enableremote is called, and is reset only on a close of the * gpib device */ static void gotolocal(unsigned char device) { int status; status = EWOULDBLOCK; if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, (device & 31) + 32); if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(AUXMR, 0x5E); /* Clear SYNC */ outb(CDOR, 1); if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(AUXMR, 0x5E); outb(CDOR, 63); /* unaddress device */ if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ } static void menableremote(unsigned char *device) { int status, counter = 0; status = EWOULDBLOCK; if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(BSR, 1); /* Set REN bit on GPIB */ do { if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, (device[counter] & 31) + 32); /* address device to listen */ counter++; } while (device[counter] < 32); if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, 63); /* Unaddress device */ if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ } static void mgotolocal(unsigned char *device) { int status; int counter = 0; status = EWOULDBLOCK; if (device[counter] < 32) do { if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, (device[counter] & 31) + 32); counter++; } while (device[counter] < 32); if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(AUXMR, 0x5E); /* Clear SYNC */ outb(CDOR, 1); if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(AUXMR, 0x5E); outb(CDOR, 63); /* unaddress device */ if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 2); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ } /* Trigger a device. What happens depends on how the device is configured. 
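 * User programs reach this through the GPIBTRIGGER ioctl, e.g.
 * (illustrative):
 *
 *	struct gpibdata gd;
 *
 *	gd.address = 5;			/* instrument's GPIB address */
 *	ioctl(fd, GPIBTRIGGER, &gd);	/* sends GET to device 5 */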
*/ static void trigger(unsigned char device) { int status; status = EWOULDBLOCK; if (device < 32) { if (!(inb(ISR2) & 0x08)) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, (device & 31) + 32); /* address device to listen */ if (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, 8); /* send GET */ if (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(AUXMR, 0x5E); outb(CDOR, 63); /* unaddress device */ if (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ } } /* * Trigger multiple devices by addressing them all to listen, and then * sending GET */ static void mtrigger(unsigned char *device) { int status = EWOULDBLOCK; int counter = 0; if (device[0] < 32) { do { if (device[counter] < 32) if (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, (device[counter] & 31) + 32); /* address device to listen */ counter++; } while (device[counter] < 32); if (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(CDOR, 8); /* send GET */ if (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ outb(AUXMR, 0x5E); outb(CDOR, 63); /* unaddress device */ if (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 0x08) && status == EWOULDBLOCK); /* Wait to send next cmd */ } } /* * This is not used now, but it should work with NI's 8 bit gpib board * since it does not use the TURBO488 registers at all */ /* * Send data through the TURBO488 FIFOS to a device that is already * addressed to listen. This is used by the write call when someone is * writing to a printer or plotter, etc... 
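 * In that case no ioctls are needed at all; assuming a node for the
 * plotter's address exists, plain shell redirection works:
 *
 *	cat plot.hpgl > /dev/gp05
 *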
* * The last byte of each write is held off until either the next * write or close, so it can be sent with EOI set */ static int sendrawgpibfifo(unsigned char device, char *data, int count) { int status; int counter; int fifopos; int sleeptime; sleeptime = SLEEP_MIN; counter = 0; fifopos = 0; status = EWOULDBLOCK; do { /* Wait for fifo to become not full if it is full */ sleeptime = SLEEP_MIN; if (!(inb(ISR3) & 0x08)) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", sleeptime); if (sleeptime < SLEEP_MAX) sleeptime = sleeptime * 2; } while (!(inb(ISR3) & 0x08) && (status == EWOULDBLOCK)); /* Fifo is full */ if ((count>1) && (inb(ISR3) & 0x08)) { outw(FIFOB, *(unsigned *)(data + counter)); #if 0 printf ("gpib: sent:%c, %c\n", data[counter], data[counter + 1]); #endif counter += 2; count -= 2; } } while ((count>1) && (status == EWOULDBLOCK)); /* * The write routine and close routine must check if there is 1 * byte left and handle it accordingly */ /* Return the number of bytes written to the device */ return (counter); } static int sendgpibfifo(unsigned char device, char *data, int count) { int status; int counter; int fifopos; int sleeptime; outb(IMR2, 0x30); /* we have to enable DMA (0x30) for turbo488 to work */ outb(CNT0, 0); outb(CNT1, 0); outb(CNT2, 0); outb(CNT3, 0); status = EWOULDBLOCK; if (!(inb(ISR2) & 8)) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, (device & 31) + 32); /* address device to listen */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, 64); /* Address controller (me) to talk */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(AUXMR, gts); /* Set to Standby (Controller) */ fifopos = 0; sleeptime = SLEEP_MIN; counter = 0; fifopos = 0; outb(CMDR, 0x20); outb(CFG, 0x47); /* 16 bit, write, fifo B first, TMOE TIM */ outb(CMDR, 0x10); /* RESET fifos */ outb(CCRG, seoi); /* program to send EOI at end */ outb(CMDR, 0x04); /* Tell TURBO488 to GO */ status = EWOULDBLOCK; do { /* Wait for fifo to become not full if it is full */ sleeptime = SLEEP_MIN; if (!(inb(ISR3) & 0x08)) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", sleeptime); if (sleeptime < SLEEP_MAX) sleeptime = sleeptime * 2; } while (!(inb(ISR3) & 0x08) && (status == EWOULDBLOCK)); /* Fifo is full */ if ((count>1) && (inb(ISR3) & 0x08)) { #if 0 if (count == 2) outb(CFG, 15 + 0x40); /* send eoi when done */ #endif outw(FIFOB, *(unsigned *)(data + counter)); counter += 2; count -= 2; } } while ((count>2) && (status == EWOULDBLOCK)); if (count == 2 && status == EWOULDBLOCK) { /* Wait for fifo to become not full */ if (status == EWOULDBLOCK && !(inb(ISR3) & 0x08)) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", SLEEP_MIN); } while (!(inb(ISR3) & 0x08) && status == EWOULDBLOCK); /* Fifo is full */ #if 0 outb(CFG, 0x40 + 15); /* send eoi when done */ #endif outb(FIFOB, data[counter]); counter++; count--; } #if 0 outb(CMDR, 0x04); #endif /* Wait for fifo to become empty */ if (status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while ((inb(ISR3) & 0x04) && status == EWOULDBLOCK); /* Fifo is not empty */ outb(CMDR, 0x08); /* Issue STOP to TURBO488 */ /* Wait for DONE and STOP */ if (status == EWOULDBLOCK) do { status = 
tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR3) & 0x11) && status == EWOULDBLOCK); /* not done and stop */ outb(IMR2, 0x00); /* we have to enable DMA (0x30) for turbo488 to work */ outb(CMDR, 0x20); /* soft reset turbo488 */ outb(CMDR, 0x10); /* reset fifos */ /* * Send last byte with EOI set * Here EOI is handled correctly since the string to be sent * is actually all sent during the ioctl. (See above) */ if (count == 1 && status == EWOULDBLOCK) { /* Count should always=1 here */ do { if (!(inb(ISR1) & 2)) status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR1) & 2) && (status == EWOULDBLOCK)); outb(AUXMR, seoi); /* Set EOI for the last byte */ outb(AUXMR, 0x5E); /* Clear SYNC */ outb(CDOR, data[counter]); counter++; count--; } do { if (!(inb(ISR1) & 2)) status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR1) & 2) && (status == EWOULDBLOCK)); if (!(inb(ISR1) & 2) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR1) & 2) && status == EWOULDBLOCK); outb(AUXMR, tca); /* Regain full control of the bus */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, 63); /* unlisten */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(AUXMR, 0x5E); /* Clear SYNC */ outb(CDOR, 95); /* untalk */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); return (counter); } static int readgpibfifo(unsigned char device, char *data, int count) { int status; int status2 = 0; int status1; int counter; int fifopos; unsigned inword; outb(IMR2, 0x30); /* we have to enable DMA (0x30) for turbo488 to work */ #if 0 outb(IMR3, 0x1F); outb(INTR, 1); #endif outb(CMDR, 0x20); outb(CFG, 14 + 0x60 + 1); /* Halt on int, read, fifo B first, CCEN TMOE TIM */ outb(CMDR, 0x10); /* RESET fifos */ outb(CCRG, tcs); /* program to tcs at end */ outb(CMDR, 0x08); /* STOP?? 
*/ status = EWOULDBLOCK; do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, 32); /* Address controller (me) to listen */ do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, (device & 31) + 64); /* address device to talk */ do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(AUXMR, gts); /* Set to Standby (Controller) */ counter = 0; fifopos = 0; outb(CMDR, 0x04); /* Tell TURBO488 to GO */ do { status1 = inb(ISR3); if (!(status1 & 0x01) && (status1 & 0x04)) { status2 = inb(STS2); inword = inw(FIFOB); *(unsigned *)(data + counter) = inword; #if 0 printf ("Read:%c, %c\n", data[counter], data[counter + 1]); #endif counter += 2; } else { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 4); } } while (!(status1 & 0x01) && status == EWOULDBLOCK); if (!(status2 & 0x04)) { /* Only 1 byte came in on last 16 bit transfer */ data[counter-1] = 0; counter--; } else data[counter] = 0; outb(CMDR, 0x08); /* send STOP */ do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR3) & 0x11) && status == EWOULDBLOCK); /* wait for DONE and STOP */ outb(AUXMR, 0x55); outb(IMR2, 0x00); /* we have to enable DMA (0x30) for turbo488 to work */ outb(CMDR, 0x20); /* soft reset turbo488 */ outb(CMDR, 0x10); /* reset fifos */ #if 0 do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR1) & 2)); #endif outb(AUXMR, tca); /* Regain full control of the bus */ do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, 63); /* unlisten */ do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(AUXMR, 0x5E); /* Clear SYNC */ outb(CDOR, 95); /* untalk */ do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); return (counter); } /* Return the status byte from device */ static char spoll(unsigned char device) { int status = EWOULDBLOCK; unsigned int statusbyte; if (!(inb(ISR2) & 8)) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, (device & 31) + 64); /* address device to talk */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, 32); /* Address controller (me) to listen */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(AUXMR, 0x5E); outb(CDOR, 0x18); /* Send SPE (serial poll enable) */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); /* wait for bus to be synced */ if (!(inb(ISR0) & 1) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR0) & 1) && status == EWOULDBLOCK); outb(AUXMR, gts); /* Set to Standby (Controller) */ if (!(inb(ISR1) & 1) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR1) & 1) && status == EWOULDBLOCK); outb(AUXMR, 0x5E); outb(AUXMR, tcs); /* Take control after next read */ 
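	/*
	 * The polled device now sources its status byte as ordinary
	 * bus data; the inb(DIR) below accepts that byte, and the tcs
	 * command issued above takes control back once the handshake
	 * completes.
	 */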
statusbyte = inb(DIR); if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, 0x19); /* SPD (serial poll disable) */ /* wait for bus to be synced */ if (!(inb(ISR0) & 1) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR0) & 1) && status == EWOULDBLOCK); if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(CDOR, 95); /* untalk */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); outb(AUXMR, 0x5E); outb(CDOR, 63); /* unlisten */ if (!(inb(ISR2) & 8) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR2) & 8) && status == EWOULDBLOCK); /* wait for bus to be synced */ if (!(inb(ISR0) & 1) && status == EWOULDBLOCK) do { status = tsleep((caddr_t)&gpib_sc, GPIBPRI, "gpibpoll", 1); } while (!(inb(ISR0) & 1) && status == EWOULDBLOCK); return (statusbyte); } Index: head/sys/i386/isa/gsc.c =================================================================== --- head/sys/i386/isa/gsc.c (revision 110231) +++ head/sys/i386/isa/gsc.c (revision 110232) @@ -1,836 +1,834 @@ /* gsc.c - device driver for handy scanners * * Current version supports: * * - Genius GS-4500 * * Copyright (c) 1995 Gunther Schadow. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Gunther Schadow. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ * */ #include "gsc.h" #include #include #include #include #include #include #include #include #include #include #include #ifndef COMPAT_OLDISA #error "The gsc device requires the old isa compatibility shims" #endif /*********************************************************************** * * CONSTANTS & DEFINES * ***********************************************************************/ #define PROBE_FAIL 0 #define PROBE_SUCCESS IO_GSCSIZE #define ATTACH_FAIL 0 #define ATTACH_SUCCESS 1 #define SUCCESS 0 #define FAIL -1 #define INVALID FAIL #define DMA1_READY 0x08 #ifdef GSCDEBUG #define lprintf(args) \ do { \ if (scu->flags & FLAG_DEBUG) \ printf args; \ } while (0) #else #define lprintf(args) #endif -#define MIN(a, b) (((a) < (b)) ? (a) : (b)) - #define TIMEOUT (hz*15) /* timeout while reading a buffer - default value */ #define LONG (hz/60) /* timesteps while reading a buffer */ #define GSCPRI PRIBIO /* priority while reading a buffer */ /*********************************************************************** * * LAYOUT OF THE MINOR NUMBER * ***********************************************************************/ #define UNIT_MASK 0xc0 /* unit gsc0 .. gsc3 */ #define UNIT(x) (x >> 6) #define DBUG_MASK 0x20 #define FRMT_MASK 0x18 /* output format */ #define FRMT_RAW 0x00 /* output bits as read from scanner */ #define FRMT_GRAY 0x10 /* output graymap (not implemented yet) */ #define FRMT_PBM 0x08 /* output pbm format */ #define FRMT_PGM 0x18 /*********************************************************************** * * THE GEMOMETRY TABLE * ***********************************************************************/ #define GEOMTAB_SIZE 7 static const struct gsc_geom { int dpi; /* dots per inch */ int dpl; /* dots per line */ int g_res; /* get resolution value (status flag) */ int s_res; /* set resolution value (control register) */ } geomtab[GEOMTAB_SIZE] = { { 100, 424, GSC_RES_100, GSC_CNT_424}, { 200, 840, GSC_RES_200, GSC_CNT_840}, { 300, 1264, GSC_RES_300, GSC_CNT_1264}, { 400, 1648, GSC_RES_400, GSC_CNT_1648}, { -1, 1696, -1, GSC_CNT_1696}, { -2, 2644, -2, GSC_CNT_2544}, { -3, 3648, -3, GSC_CNT_3648}, }; #define NEW_GEOM { INVALID, INVALID, INVALID, INVALID } /*********************************************************************** * * THE TABLE OF UNITS * ***********************************************************************/ struct _sbuf { size_t size; size_t poi; char *base; }; struct gsc_unit { int channel; /* DMA channel */ int data; /* - video port */ int stat; /* - status port */ int ctrl; /* - control port */ int clrp; /* - clear port */ int flags; #define ATTACHED 0x01 #define OPEN 0x02 #define READING 0x04 #define EOF 0x08 #define FLAG_DEBUG 0x10 #define PBM_MODE 0x20 int geometry; /* resolution as geomtab index */ int blen; /* length of buffer in lines */ int btime; /* timeout of buffer in seconds/hz */ struct _sbuf sbuf; char ctrl_byte; /* the byte actually written to ctrl port */ int height; /* height, for pnm modes */ size_t bcount; /* bytes to read, for pnm modes */ struct _sbuf hbuf; /* buffer for pnm header data */ }; static struct gsc_unit unittab[NGSC]; /* I could not find a reasonable buffer size limit other than by * experiments. MAXPHYS is obviously too much, while DEV_BSIZE and * PAGE_SIZE are really too small. There must be something wrong * with isa_dmastart/isa_dmarangecheck HELP!!! 
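 *
 * For scale (arithmetic from geomtab above): at 300dpi a line is
 * 1264 dots = 158 bytes, so the default 59-line buffer needs 9322
 * bytes and fits in MAX_BUFSIZE (0x3000 = 12288 bytes); at the
 * widest setting (3648 dots = 456 bytes per line) only 26 lines
 * fit, which is what the GSC_SBLEN ioctl check enforces.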
*/
#define MAX_BUFSIZE 0x3000
#define DEFAULT_BLEN 59

/***********************************************************************
 *
 * THE PER-DRIVER RECORD FOR ISA.C
 *
 ***********************************************************************/
static int gscprobe (struct isa_device *isdp);
static int gscattach(struct isa_device *isdp);

struct isa_driver gscdriver = {
	INTR_TYPE_TTY,
	gscprobe,
	gscattach,
	"gsc"
};
COMPAT_ISA_DRIVER(gsc, gscdriver);

static d_open_t gscopen;
static d_close_t gscclose;
static d_read_t gscread;
static d_ioctl_t gscioctl;

#define CDEV_MAJOR 47
static struct cdevsw gsc_cdevsw = {
	/* open */	gscopen,
	/* close */	gscclose,
	/* read */	gscread,
	/* write */	nowrite,
	/* ioctl */	gscioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"gsc",
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
};

/***********************************************************************
 *
 * LOCALLY USED SUBROUTINES
 *
 ***********************************************************************/

/***********************************************************************
 *
 * lookup_geometry -- lookup a record in the geometry table by pattern
 *
 * The caller supplies a geometry record pattern, where INVALID
 * matches anything.  Returns the index in the table or INVALID if
 * lookup fails.
 */
static int
lookup_geometry(struct gsc_geom geom, const struct gsc_unit *scu)
{
	struct gsc_geom tab;
	int i;

	for (i = 0; i < GEOMTAB_SIZE; i++) {
		tab = geomtab[i];
		if (((geom.dpi   == INVALID) || (geom.dpi   == tab.dpi)) &&
		    ((geom.dpl   == INVALID) || (geom.dpl   == tab.dpl)) &&
		    ((geom.g_res == INVALID) || (geom.g_res == tab.g_res)) &&
		    ((geom.s_res == INVALID) || (geom.s_res == tab.s_res))) {
			lprintf(("gsc.lookup_geometry: found entry %d\n", i));
			return i;
		}
	}
	return INVALID;
}

/***********************************************************************
 *
 * get_geometry -- read the current geometry from the status port
 */
static int
get_geometry(const struct gsc_unit *scu)
{
	struct gsc_geom geom = NEW_GEOM;

	lprintf(("gsc.get_geometry: status 0x%02x\n", inb(scu->stat)));

	if ( ( geom.g_res = inb(scu->stat) ) == FAIL )
		return INVALID;

	geom.g_res &= GSC_RES_MASK;

	return lookup_geometry(geom, scu);
}

/***********************************************************************
 *
 * buffer_allocate -- allocate/reallocate a buffer
 * Now just checks that the preallocated buffer is large enough.
*/ static int buffer_allocate(struct gsc_unit *scu) { size_t size; size = scu->blen * geomtab[scu->geometry].dpl / 8; lprintf(("gsc.buffer_allocate: need 0x%x bytes\n", size)); if ( size > MAX_BUFSIZE ) { lprintf(("gsc.buffer_allocate: 0x%x bytes are too much\n", size)); return ENOMEM; } scu->sbuf.size = size; scu->sbuf.poi = size; lprintf(("gsc.buffer_allocate: ok\n")); return SUCCESS; } /*********************************************************************** * * buffer_read -- scan a buffer */ static int buffer_read(struct gsc_unit *scu) { int stb; int res = SUCCESS; int chan_bit; char *p; int sps; int delay; lprintf(("gsc.buffer_read: begin\n")); if (scu->ctrl_byte == INVALID) { lprintf(("gsc.buffer_read: invalid ctrl_byte\n")); return EIO; } sps=splbio(); outb( scu->ctrl, scu->ctrl_byte | GSC_POWER_ON ); outb( scu->clrp, 0 ); stb = inb( scu->stat ); isa_dmastart(ISADMA_READ, scu->sbuf.base, scu->sbuf.size, scu->channel); chan_bit = 0x01 << scu->channel; for(delay=0; !(inb(DMA1_READY) & 0x01 << scu->channel); delay += LONG) { if(delay >= scu->btime) { splx(sps); lprintf(("gsc.buffer_read: timeout\n")); res = EWOULDBLOCK; break; } res = tsleep((caddr_t)scu, GSCPRI | PCATCH, "gscread", LONG); if ( ( res == 0 ) || ( res == EWOULDBLOCK ) ) res = SUCCESS; else break; } splx(sps); isa_dmadone(ISADMA_READ, scu->sbuf.base, scu->sbuf.size, scu->channel); outb( scu->clrp, 0 ); if(res != SUCCESS) { lprintf(("gsc.buffer_read: aborted with %d\n", res)); return res; } lprintf(("gsc.buffer_read: invert buffer\n")); for(p = scu->sbuf.base + scu->sbuf.size - 1; p >= scu->sbuf.base; p--) *p = ~*p; scu->sbuf.poi = 0; lprintf(("gsc.buffer_read: ok\n")); return SUCCESS; } /*********************************************************************** * * the main functions * ***********************************************************************/ /*********************************************************************** * * gscprobe * * read status port and check for proper configuration: * - if address group matches (status byte has reasonable value) * - if DMA channel matches (status byte has correct value) */ static int gscprobe (struct isa_device *isdp) { int unit = isdp->id_unit; struct gsc_unit *scu = unittab + unit; int stb; struct gsc_geom geom = NEW_GEOM; scu->flags = FLAG_DEBUG; lprintf(("gsc%d.probe " "on iobase 0x%03x, irq %d, drq %d, addr %p, size %d\n", unit, isdp->id_iobase, isdp->id_irq, isdp->id_drq, isdp->id_maddr, isdp->id_msize)); if ( isdp->id_iobase < 0 ) { lprintf(("gsc%d.probe: no iobase given\n", unit)); return PROBE_FAIL; } stb = inb( GSC_STAT(isdp->id_iobase) ); if (stb == FAIL) { lprintf(("gsc%d.probe: get status byte failed\n", unit)); return PROBE_FAIL; } scu->data = GSC_DATA(isdp->id_iobase); scu->stat = GSC_STAT(isdp->id_iobase); scu->ctrl = GSC_CTRL(isdp->id_iobase); scu->clrp = GSC_CLRP(isdp->id_iobase); outb(scu->clrp,stb); stb = inb(scu->stat); switch(stb & GSC_CNF_MASK) { case GSC_CNF_DMA1: lprintf(("gsc%d.probe: DMA 1\n", unit)); scu->channel = 1; break; case GSC_CNF_DMA3: lprintf(("gsc%d.probe: DMA 3\n", unit)); scu->channel = 3; break; case GSC_CNF_IRQ3: lprintf(("gsc%d.probe: IRQ 3\n", unit)); goto probe_noirq; case GSC_CNF_IRQ5: lprintf(("gsc%d.probe: IRQ 5\n", unit)); probe_noirq: lprintf(("gsc%d.probe: sorry, can't use IRQ yet\n", unit)); return PROBE_FAIL; default: lprintf(("gsc%d.probe: invalid status byte 0x%02x\n", unit, (u_char) stb)); return PROBE_FAIL; } if (isdp->id_drq < 0) isdp->id_drq = scu->channel; if (scu->channel != isdp->id_drq) { lprintf(("gsc%d.probe: drq 
mismatch: config: %d; hardware: %d\n", unit, isdp->id_drq, scu->channel)); return PROBE_FAIL; } geom.g_res = stb & GSC_RES_MASK; scu->geometry = lookup_geometry(geom, scu); if (scu->geometry == INVALID) { lprintf(("gsc%d.probe: geometry lookup failed\n", unit)); return PROBE_FAIL; } else { scu->ctrl_byte = geomtab[scu->geometry].s_res; outb(scu->ctrl, scu->ctrl_byte | GSC_POWER_ON); lprintf(("gsc%d.probe: status 0x%02x, %ddpi\n", unit, stb, geomtab[scu->geometry].dpi)); outb(scu->ctrl, scu->ctrl_byte & ~GSC_POWER_ON); } lprintf(("gsc%d.probe: ok\n", unit)); scu->flags &= ~FLAG_DEBUG; return PROBE_SUCCESS; } /*********************************************************************** * * gscattach * * finish initialization of unit structure * get geometry value */ static int gscattach(struct isa_device *isdp) { int unit = isdp->id_unit; struct gsc_unit *scu = unittab + unit; scu->flags |= FLAG_DEBUG; lprintf(("gsc%d.attach: " "iobase 0x%03x, irq %d, drq %d, addr %p, size %d\n", unit, isdp->id_iobase, isdp->id_irq, isdp->id_drq, isdp->id_maddr, isdp->id_msize)); printf("gsc%d: GeniScan GS-4500 at %ddpi\n", unit, geomtab[scu->geometry].dpi); /* * Initialize buffer structure. * XXX this must be done early to give a good chance of getting a * contiguous buffer. This wastes memory. */ scu->sbuf.base = contigmalloc((unsigned long)MAX_BUFSIZE, M_DEVBUF, M_NOWAIT, 0ul, 0xfffffful, 1ul, 0x10000ul); if ( scu->sbuf.base == NULL ) { lprintf(("gsc%d.attach: buffer allocation failed\n", unit)); return ATTACH_FAIL; /* XXX attach must not fail */ } scu->sbuf.size = INVALID; scu->sbuf.poi = INVALID; scu->blen = DEFAULT_BLEN; scu->btime = TIMEOUT; scu->flags |= ATTACHED; lprintf(("gsc%d.attach: ok\n", unit)); scu->flags &= ~FLAG_DEBUG; #define GSC_UID 0 #define GSC_GID 13 make_dev(&gsc_cdevsw, unit<<6, GSC_UID, GSC_GID, 0666, "gsc%d", unit); make_dev(&gsc_cdevsw, ((unit<<6) + FRMT_PBM), GSC_UID, GSC_GID, 0666, "gsc%dp", unit); make_dev(&gsc_cdevsw, ((unit<<6) + DBUG_MASK), GSC_UID, GSC_GID, 0666, "gsc%dd", unit); make_dev(&gsc_cdevsw, ((unit<<6) + DBUG_MASK+FRMT_PBM), GSC_UID, GSC_GID, 0666, "gsc%dpd", unit); return ATTACH_SUCCESS; } /*********************************************************************** * * gscopen * * set open flag * set modes according to minor number * don't switch scanner on, wait until first read ioctls go before */ static int gscopen (dev_t dev, int flags, int fmt, struct thread *td) { struct gsc_unit *scu; int unit; unit = UNIT(minor(dev)) & UNIT_MASK; if ( unit >= NGSC ) { #ifdef GSCDEBUG /* XXX lprintf isn't valid here since there is no scu. 
*/ printf("gsc%d.open: unconfigured unit number (max %d)\n", unit, NGSC); #endif return ENXIO; } scu = unittab + unit; if ( !( scu->flags & ATTACHED ) ) { lprintf(("gsc%d.open: unit was not attached successfully 0x%04x\n", unit, scu->flags)); return ENXIO; } if ( minor(dev) & DBUG_MASK ) scu->flags |= FLAG_DEBUG; else scu->flags &= ~FLAG_DEBUG; switch(minor(dev) & FRMT_MASK) { case FRMT_PBM: scu->flags |= PBM_MODE; lprintf(("gsc%d.open: pbm mode\n", unit)); break; case FRMT_RAW: lprintf(("gsc%d.open: raw mode\n", unit)); scu->flags &= ~PBM_MODE; break; default: lprintf(("gsc%d.open: gray maps are not yet supported", unit)); return ENXIO; } lprintf(("gsc%d.open: minor %d\n", unit, minor(dev))); if ( scu->flags & OPEN ) { lprintf(("gsc%d.open: already open", unit)); return EBUSY; } if (isa_dma_acquire(scu->channel)) return(EBUSY); scu->flags |= OPEN; return SUCCESS; } /*********************************************************************** * * gscclose * * turn off scanner * release the buffer */ static int gscclose (dev_t dev, int flags, int fmt, struct thread *td) { int unit = UNIT(minor(dev)); struct gsc_unit *scu = unittab + unit; lprintf(("gsc%d.close: minor %d\n", unit, minor(dev))); if ( unit >= NGSC || !( scu->flags & ATTACHED ) ) { lprintf(("gsc%d.read: unit was not attached successfully 0x%04x\n", unit, scu->flags)); return ENXIO; } outb(scu->ctrl, scu->ctrl_byte & ~GSC_POWER_ON); scu->sbuf.size = INVALID; scu->sbuf.poi = INVALID; isa_dma_release(scu->channel); scu->flags &= ~(FLAG_DEBUG | OPEN | READING); return SUCCESS; } /*********************************************************************** * * gscread */ static int gscread (dev_t dev, struct uio *uio, int ioflag) { int unit = UNIT(minor(dev)); struct gsc_unit *scu = unittab + unit; size_t nbytes; int res; lprintf(("gsc%d.read: minor %d\n", unit, minor(dev))); if ( unit >= NGSC || !( scu->flags & ATTACHED ) ) { lprintf(("gsc%d.read: unit was not attached successfully 0x%04x\n", unit, scu->flags)); return ENXIO; } if ( !(scu->flags & READING) ) { res = buffer_allocate(scu); if ( res == SUCCESS ) scu->flags |= READING; else return res; scu->ctrl_byte = geomtab[scu->geometry].s_res; /* initialize for pbm mode */ if ( scu->flags & PBM_MODE ) { char *p; int width = geomtab[scu->geometry].dpl; sprintf(scu->sbuf.base,"P4 %d %d\n", width, scu->height); scu->bcount = scu->height * width / 8; lprintf(("gsc%d.read: initializing pbm mode: `%s', bcount: 0x%x\n", unit, scu->sbuf.base, scu->bcount)); /* move header to end of sbuf */ for(p=scu->sbuf.base; *p; p++); while(--p >= scu->sbuf.base) { *(char *)(scu->sbuf.base + --scu->sbuf.poi) = *p; scu->bcount++; } } } lprintf(("gsc%d.read(before buffer_read): " "size 0x%x, pointer 0x%x, bcount 0x%x, ok\n", unit, scu->sbuf.size, scu->sbuf.poi, scu->bcount)); if ( scu->sbuf.poi == scu->sbuf.size ) if ( (res = buffer_read(scu)) != SUCCESS ) return res; lprintf(("gsc%d.read(after buffer_read): " "size 0x%x, pointer 0x%x, bcount 0x%x, ok\n", unit, scu->sbuf.size, scu->sbuf.poi, scu->bcount)); nbytes = MIN( uio->uio_resid, scu->sbuf.size - scu->sbuf.poi ); if ( (scu->flags & PBM_MODE) ) nbytes = MIN( nbytes, scu->bcount ); lprintf(("gsc%d.read: transferring 0x%x bytes", unit, nbytes)); res = uiomove(scu->sbuf.base + scu->sbuf.poi, nbytes, uio); if ( res != SUCCESS ) { lprintf(("gsc%d.read: uiomove failed %d", unit, res)); return res; } scu->sbuf.poi += nbytes; if ( scu->flags & PBM_MODE ) scu->bcount -= nbytes; lprintf(("gsc%d.read: size 0x%x, pointer 0x%x, bcount 0x%x, ok\n", unit, scu->sbuf.size, 
scu->sbuf.poi, scu->bcount)); return SUCCESS; } /*********************************************************************** * * gscioctl * */ static int gscioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) { int unit = UNIT(minor(dev)); struct gsc_unit *scu = unittab + unit; lprintf(("gsc%d.ioctl: minor %d\n", unit, minor(dev))); if ( unit >= NGSC || !( scu->flags & ATTACHED ) ) { lprintf(("gsc%d.ioctl: unit was not attached successfully 0x%04x\n", unit, scu->flags)); return ENXIO; } switch(cmd) { case GSC_SRESSW: lprintf(("gsc%d.ioctl:GSC_SRESSW\n", unit)); if ( scu->flags & READING ) { lprintf(("gsc%d:ioctl on already reading unit\n", unit)); return EBUSY; } scu->geometry = get_geometry(scu); return SUCCESS; case GSC_GRES: *(int *)data=geomtab[scu->geometry].dpi; lprintf(("gsc%d.ioctl:GSC_GRES %ddpi\n", unit, *(int *)data)); return SUCCESS; case GSC_GWIDTH: *(int *)data=geomtab[scu->geometry].dpl; lprintf(("gsc%d.ioctl:GSC_GWIDTH %d\n", unit, *(int *)data)); return SUCCESS; case GSC_SRES: case GSC_SWIDTH: lprintf(("gsc%d.ioctl:GSC_SRES or GSC_SWIDTH %d\n", unit, *(int *)data)); { int g; struct gsc_geom geom = NEW_GEOM; if ( cmd == GSC_SRES ) geom.dpi = *(int *)data; else geom.dpl = *(int *)data; if ( ( g = lookup_geometry(geom, scu) ) == INVALID ) return EINVAL; scu->geometry = g; return SUCCESS; } case GSC_GHEIGHT: *(int *)data=scu->height; lprintf(("gsc%d.ioctl:GSC_GHEIGHT %d\n", unit, *(int *)data)); return SUCCESS; case GSC_SHEIGHT: lprintf(("gsc%d.ioctl:GSC_SHEIGHT %d\n", unit, *(int *)data)); if ( scu->flags & READING ) { lprintf(("gsc%d:ioctl on already reading unit\n", unit)); return EBUSY; } scu->height=*(int *)data; return SUCCESS; case GSC_GBLEN: *(int *)data=scu->blen; lprintf(("gsc%d.ioctl:GSC_GBLEN %d\n", unit, *(int *)data)); return SUCCESS; case GSC_SBLEN: lprintf(("gsc%d.ioctl:GSC_SBLEN %d\n", unit, *(int *)data)); if (*(int *)data * geomtab[scu->geometry].dpl / 8 > MAX_BUFSIZE) { lprintf(("gsc%d:ioctl buffer size too high\n", unit)); return ENOMEM; } scu->blen=*(int *)data; return SUCCESS; case GSC_GBTIME: *(int *)data = scu->btime / hz; lprintf(("gsc%d.ioctl:GSC_GBTIME %d\n", unit, *(int *)data)); return SUCCESS; case GSC_SBTIME: scu->btime = *(int *)data * hz; lprintf(("gsc%d.ioctl:GSC_SBTIME %d\n", unit, *(int *)data)); return SUCCESS; default: return ENOTTY; } } Index: head/sys/ia64/ia64/busdma_machdep.c =================================================================== --- head/sys/ia64/ia64/busdma_machdep.c (revision 110231) +++ head/sys/ia64/ia64/busdma_machdep.c (revision 110232) @@ -1,920 +1,918 @@ /* * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#define MAX(a,b) (((a) > (b)) ? (a) : (b)) -#define MIN(a,b) (((a) < (b)) ? (a) : (b)) #define MAX_BPAGES 128 struct bus_dma_tag { bus_dma_tag_t parent; bus_size_t alignment; bus_size_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; int ref_count; int map_count; }; struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; int busdma_swi_pending; static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; static int free_bpages; static int reserved_bpages; static int active_bpages; static int total_bpages; static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; struct bus_dmamap { struct bp_list bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; void *buf; /* unmapped buffer pointer */ bus_size_t buflen; /* unmapped buffer length */ vm_offset_t busaddress; /* address in bus space */ bus_dmamap_callback_t *callback; void *callback_arg; void *sgmaphandle; /* handle into sgmap */ STAILQ_ENTRY(bus_dmamap) links; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; static struct bus_dmamap nobounce_dmamap; static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map); static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) { int retval; retval = 0; do { if (paddr > dmat->lowaddr && paddr <= dmat->highaddr && (dmat->filter == NULL || (*dmat->filter)(dmat->filterarg, paddr) != 0)) retval = 1; dmat = dmat->parent; } while (retval == 0 && dmat != NULL); return (retval); } #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 /* * Allocate a device specific dma_tag. 
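 * A typical caller sketch (hypothetical driver limited to 32-bit
 * DMA addresses; field names are illustrative):
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, MAXBSIZE, 1, BUS_SPACE_MAXSIZE_32BIT,
 *	    0, &sc->dmat);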
*/ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error = 0; /* Return a NULL tag on failure */ *dmat = NULL; newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT); if (newtag == NULL) return (ENOMEM); newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1); newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1); newtag->filter = filter; newtag->filterarg = filterarg; newtag->maxsize = maxsize; newtag->nsegments = nsegments; newtag->maxsegsz = maxsegsz; newtag->flags = flags; newtag->ref_count = 1; /* Count ourself */ newtag->map_count = 0; /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); /* * XXX Not really correct??? Probably need to honor boundary * all the way up the inheritence chain. */ newtag->boundary = MAX(parent->boundary, newtag->boundary); if (newtag->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ newtag->filter = parent->filter; newtag->filterarg = parent->filterarg; newtag->parent = parent->parent; } if (newtag->parent != NULL) { parent->ref_count++; } } if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) { /* Must bounce */ if (lowaddr > bounce_lowaddr) { /* * Go through the pool and kill any pages * that don't reside below lowaddr. */ panic("bus_dma_tag_create: page reallocation " "not implemented"); } if (ptoa(total_bpages) < maxsize) { int pages; pages = atop(maxsize) - total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(newtag, pages) < pages) error = ENOMEM; } /* Performed initial allocation */ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; } if (error != 0) { free(newtag, M_DEVBUF); } else { *dmat = newtag; } return (error); } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { if (dmat != NULL) { if (dmat->map_count != 0) return (EBUSY); while (dmat != NULL) { bus_dma_tag_t parent; parent = dmat->parent; dmat->ref_count--; if (dmat->ref_count == 0) { free(dmat, M_DEVBUF); } dmat = parent; } } return (0); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { int error; error = 0; if (dmat->flags & BUS_DMA_ISA) { bus_dmamap_t map; map = (bus_dmamap_t)malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO); if (map == NULL) return (ENOMEM); #if 0 map->busaddress = sgmap_alloc_region(chipset.sgmap, dmat->maxsize, dmat->boundary, &map->sgmaphandle); #endif dmat->map_count++; *mapp = map; return (0); } if (dmat->lowaddr < ptoa(Maxmem)) { /* Must bounce */ int maxpages; *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO); if (*mapp == NULL) return (ENOMEM); /* Initialize the new map */ STAILQ_INIT(&((*mapp)->bpages)); /* * Attempt to add pages to our pool on a per-instance * basis up to a sane limit. 
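 * ("Sane" means at most MAX_BPAGES (128) pages in the pool, i.e.
 * 1MB of bounce space with the usual 8K ia64 page size.)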
*/ maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr)); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || (dmat->map_count > 0 && total_bpages < maxpages)) { int pages; if (dmat->lowaddr > bounce_lowaddr) { /* * Go through the pool and kill any pages * that don't reside below lowaddr. */ panic("bus_dmamap_create: page reallocation " "not implemented"); } pages = atop(dmat->maxsize); pages = MIN(maxpages - total_bpages, pages); error = alloc_bounce_pages(dmat, pages); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { if (error == 0) dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; } else { error = 0; } } } else { *mapp = &nobounce_dmamap; } if (error == 0) dmat->map_count++; return (error); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { if (dmat->flags & BUS_DMA_ISA) { #if 0 sgmap_free_region(chipset.sgmap, map->sgmaphandle); #endif } if (map != NULL && map != &nobounce_dmamap) { if (STAILQ_FIRST(&map->bpages) != NULL) return (EBUSY); free(map, M_DEVBUF); } dmat->map_count--; return (0); } /* * Allocate a piece of memory that can be efficiently mapped into * bus device space based on the constraints listed in the dma tag. * A dmamap for use with bus_dmamap_load is also allocated. */ int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { /* If we succeed, no mapping/bouncing will be required */ *mapp = &nobounce_dmamap; if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) { *vaddr = malloc(dmat->maxsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0); } else { /* * XXX Use Contigmalloc until it is merged into this facility * and handles multi-seg allocations. Nobody is doing * multi-seg allocations yet though. */ *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0, 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul, dmat->boundary); } if (*vaddr == NULL) return (ENOMEM); return (0); } /* * Free a piece of memory and its associated dmamap that was allocated * via bus_dmamem_alloc. */ void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { /* * dmamem does not need to be bounced, so the map should be * NULL */ if (map != &nobounce_dmamap) panic("bus_dmamem_free: Invalid map freed\n"); free(vaddr, M_DEVBUF); } #define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1) /* * Map the buffer buf into bus space using the dmamap map. */ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { vm_offset_t vaddr; vm_offset_t paddr; #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif bus_dma_segment_t *sg; int seg; int error; vm_offset_t nextpaddr; error = 0; if (dmat->flags & BUS_DMA_ISA) { /* * For ISA dma, we use the chipset's scatter-gather * map to map the transfer into the ISA reachable range * of the bus address space. */ vaddr = trunc_page((vm_offset_t) buf); dm_segments[0].ds_addr = map->busaddress + (vm_offset_t) buf - vaddr; dm_segments[0].ds_len = buflen; buflen = round_page((vm_offset_t) buf + buflen) - vaddr; #if 0 sgmap_load_region(chipset.sgmap, map->busaddress, vaddr, buflen); #endif map->buflen = buflen; (*callback)(callback_arg, dm_segments, 1, error); return (0); } /* * If we are being called during a callback, pagesneeded will * be non-zero, so we can avoid doing the work twice.
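
bus_dmamem_alloc() above picks plain malloc() when the request fits in a page and the tag can address all of physical memory, and falls back to contigmalloc() otherwise. A sketch of the usual consumer pattern follows; everything outside the bus_dma* calls (the softc fields, the callback name, the use of MAXBSIZE as the buffer length) is hypothetical.

struct example_softc {
	bus_dma_tag_t  dmat;
	bus_dmamap_t   ring_map;
	void          *ring;
	bus_addr_t     ring_busaddr;
};

static void
example_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	/* Single-segment tag: record the one bus address for the device. */
	if (error == 0 && nseg == 1)
		*busaddrp = segs[0].ds_addr;
}

static int
example_alloc_ring(struct example_softc *sc)
{
	int error;

	error = bus_dmamem_alloc(sc->dmat, &sc->ring, BUS_DMA_NOWAIT,
	    &sc->ring_map);
	if (error != 0)
		return (error);
	return (bus_dmamap_load(sc->dmat, sc->ring_map, sc->ring,
	    MAXBSIZE, example_map_cb, &sc->ring_busaddr, 0));
}
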
*/ if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) { vm_offset_t vendaddr; /* * Count the number of bounce pages * needed in order to complete this transfer */ vaddr = trunc_page(buf); vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { paddr = pmap_kextract(vaddr); if (run_filter(dmat, paddr) != 0) { map->pagesneeded++; } vaddr += PAGE_SIZE; } } /* Reserve Necessary Bounce Pages */ if (map->pagesneeded != 0) { int s; s = splhigh(); if (reserve_bounce_pages(dmat, map) != 0) { /* Queue us for resources */ map->dmat = dmat; map->buf = buf; map->buflen = buflen; map->callback = callback; map->callback_arg = callback_arg; STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); splx(s); return (EINPROGRESS); } splx(s); } vaddr = (vm_offset_t)buf; sg = &dm_segments[0]; seg = 1; sg->ds_len = 0; nextpaddr = 0; do { bus_size_t size; paddr = pmap_kextract(vaddr); size = PAGE_SIZE - (paddr & PAGE_MASK); if (size > buflen) size = buflen; if (map->pagesneeded != 0 && run_filter(dmat, paddr)) { paddr = add_bounce_page(dmat, map, vaddr, size); } if (sg->ds_len == 0) { sg->ds_addr = paddr; sg->ds_len = size; } else if (paddr == nextpaddr) { sg->ds_len += size; } else { /* Go to the next segment */ sg++; seg++; if (seg > dmat->nsegments) break; sg->ds_addr = paddr; sg->ds_len = size; } vaddr += size; nextpaddr = paddr + size; buflen -= size; } while (buflen > 0); if (buflen != 0) { printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n", buflen); error = EFBIG; } (*callback)(callback_arg, dm_segments, seg, error); return (0); } /* * Utility function to load a linear buffer. lastaddrp holds state * between invocations (for multiple-buffer loads). segp contains * the starting segment on entrance, and the ending segment on exit. * first indicates if this is the first invocation of this function. */ static int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[], void *buf, bus_size_t buflen, struct thread *td, int flags, vm_offset_t *lastaddrp, int *segp, int first) { bus_size_t sgsize; bus_addr_t curaddr, lastaddr, baddr, bmask; vm_offset_t vaddr = (vm_offset_t)buf; int seg; pmap_t pmap; if (td != NULL) pmap = vmspace_pmap(td->td_proc->p_vmspace); else pmap = NULL; lastaddr = *lastaddrp; bmask = ~(dmat->boundary - 1); for (seg = *segp; buflen > 0 ; ) { /* * Get the physical address for this segment. */ if (pmap) curaddr = pmap_extract(pmap, vaddr); else curaddr = pmap_kextract(vaddr); /* * Compute the segment size, and adjust counts. */ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK); if (buflen < sgsize) sgsize = buflen; /* * Make sure we don't cross any boundaries. */ if (dmat->boundary > 0) { baddr = (curaddr + dmat->boundary) & bmask; if (sgsize > (baddr - curaddr)) sgsize = (baddr - curaddr); } /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ if (first) { segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; first = 0; } else { if (curaddr == lastaddr && (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && (dmat->boundary == 0 || (segs[seg].ds_addr & bmask) == (curaddr & bmask))) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->nsegments) break; segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } } lastaddr = curaddr + sgsize; vaddr += sgsize; buflen -= sgsize; } *segp = seg; *lastaddrp = lastaddr; /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } /* * Like _bus_dmamap_load(), but for mbufs.
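
The boundary clamp in _bus_dmamap_load_buffer() above is compact; the following standalone snippet (made-up values, userland printf) walks one iteration of the arithmetic to show how a segment gets cut at a 64KB line.

#include <stdio.h>

int
main(void)
{
	unsigned long boundary = 0x10000;	/* 64KB, must be a power of 2 */
	unsigned long bmask = ~(boundary - 1);
	unsigned long curaddr = 0x1fc00;	/* 1KB below the boundary */
	unsigned long sgsize = 0x2000;		/* 8KB wanted */
	unsigned long baddr = (curaddr + boundary) & bmask;

	/* Clamp the segment so it ends exactly at the next 64KB line. */
	if (sgsize > baddr - curaddr)
		sgsize = baddr - curaddr;
	printf("baddr=0x%lx sgsize=0x%lx\n", baddr, sgsize);
	/* prints: baddr=0x20000 sgsize=0x400 */
	return (0);
}
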
*/ int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif int nsegs, error; KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, ("bus_dmamap_load_mbuf: No support for bounce pages!")); KASSERT(m0->m_flags & M_PKTHDR, ("bus_dmamap_load_mbuf: no packet header")); nsegs = 0; error = 0; if (m0->m_pkthdr.len <= dmat->maxsize) { int first = 1; vm_offset_t lastaddr = 0; struct mbuf *m; for (m = m0; m != NULL && error == 0; m = m->m_next) { error = _bus_dmamap_load_buffer(dmat, dm_segments, m->m_data, m->m_len, NULL, flags, &lastaddr, &nsegs, first); first = 0; } } else { error = EINVAL; } if (error) { /* force "no valid mappings" in callback */ (*callback)(callback_arg, dm_segments, 0, 0, error); } else { (*callback)(callback_arg, dm_segments, nsegs+1, m0->m_pkthdr.len, error); } return (error); } /* * Like _bus_dmamap_load(), but for uios. */ int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { vm_offset_t lastaddr; #ifdef __GNUC__ bus_dma_segment_t dm_segments[dmat->nsegments]; #else bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS]; #endif int nsegs, error, first, i; bus_size_t resid; struct iovec *iov; struct thread *td = NULL; KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL, ("bus_dmamap_load_uio: No support for bounce pages!")); resid = uio->uio_resid; iov = uio->uio_iov; if (uio->uio_segflg == UIO_USERSPACE) { td = uio->uio_td; KASSERT(td != NULL, ("bus_dmamap_load_uio: USERSPACE but no proc")); } nsegs = 0; error = 0; first = 1; for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { /* * Now at the first iovec to load. Load each iovec * until we have exhausted the residual count. */ bus_size_t minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; caddr_t addr = (caddr_t) iov[i].iov_base; error = _bus_dmamap_load_buffer(dmat, dm_segments, addr, minlen, td, flags, &lastaddr, &nsegs, first); first = 0; resid -= minlen; } if (error) { /* force "no valid mappings" in callback */ (*callback)(callback_arg, dm_segments, 0, 0, error); } else { (*callback)(callback_arg, dm_segments, nsegs+1, uio->uio_resid, error); } return (error); } /* * Release the mapping held by map. */ void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { struct bounce_page *bpage; if (dmat->flags & BUS_DMA_ISA) { #if 0 sgmap_unload_region(chipset.sgmap, map->busaddress, map->buflen); #endif return; } while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } } void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { /* * Handle data bouncing. 
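
The switch that follows copies client data into the bounce pages before the device reads them (BUS_DMASYNC_PREWRITE) and back out after the device has written them (BUS_DMASYNC_POSTREAD), so a driver brackets every transfer with the matching pair. A hedged sketch of that discipline for a device-to-host transfer; sc, its fields, and example_hw_kick() are hypothetical stand-ins.

static void
example_start_read(struct example_softc *sc)
{
	/* Before the device writes: PREREAD copies nothing in this
	 * implementation, but the call keeps the driver portable to
	 * hardware where caches must be invalidated first. */
	bus_dmamap_sync(sc->dmat, sc->buf_map, BUS_DMASYNC_PREREAD);
	example_hw_kick(sc);		/* hypothetical doorbell write */
}

static void
example_read_done(struct example_softc *sc)
{
	/* After the device finishes: POSTREAD copies any bounced
	 * pages back into the client buffer before the CPU reads it. */
	bus_dmamap_sync(sc->dmat, sc->buf_map, BUS_DMASYNC_POSTREAD);
}
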
We might also * want to add support for invalidating * the caches on broken hardware */ switch (op) { case BUS_DMASYNC_PREWRITE: while (bpage != NULL) { bcopy((void *)bpage->datavaddr, (void *)bpage->vaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } break; case BUS_DMASYNC_POSTREAD: while (bpage != NULL) { bcopy((void *)bpage->vaddr, (void *)bpage->datavaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } break; case BUS_DMASYNC_PREREAD: case BUS_DMASYNC_POSTWRITE: /* No-ops */ break; } } } static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) { int count; count = 0; if (total_bpages == 0) { STAILQ_INIT(&bounce_page_list); STAILQ_INIT(&bounce_map_waitinglist); STAILQ_INIT(&bounce_map_callbacklist); } while (numpages > 0) { struct bounce_page *bpage; int s; bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, M_NOWAIT | M_ZERO); if (bpage == NULL) break; bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT, 0ul, dmat->lowaddr, PAGE_SIZE, 0); if (bpage->vaddr == NULL) { free(bpage, M_DEVBUF); break; } bpage->busaddr = pmap_kextract(bpage->vaddr); s = splhigh(); STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); total_bpages++; free_bpages++; splx(s); count++; numpages--; } return (count); } static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map) { int pages; pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved); free_bpages -= pages; reserved_bpages += pages; map->pagesreserved += pages; pages = map->pagesneeded - map->pagesreserved; return (pages); } static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_size_t size) { int s; struct bounce_page *bpage; if (map->pagesneeded == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesneeded--; if (map->pagesreserved == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesreserved--; s = splhigh(); bpage = STAILQ_FIRST(&bounce_page_list); if (bpage == NULL) panic("add_bounce_page: free page list is empty"); STAILQ_REMOVE_HEAD(&bounce_page_list, links); reserved_bpages--; active_bpages++; splx(s); bpage->datavaddr = vaddr; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); } static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) { int s; struct bus_dmamap *map; bpage->datavaddr = 0; bpage->datacount = 0; s = splhigh(); STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); free_bpages++; active_bpages--; if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { if (reserve_bounce_pages(map->dmat, map) == 0) { STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links); busdma_swi_pending = 1; swi_sched(vm_ih, 0); } } splx(s); } void busdma_swi(void) { int s; struct bus_dmamap *map; s = splhigh(); while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); splx(s); bus_dmamap_load(map->dmat, map, map->buf, map->buflen, map->callback, map->callback_arg, /*flags*/0); s = splhigh(); } splx(s); } Index: head/sys/kern/uipc_syscalls.c =================================================================== --- head/sys/kern/uipc_syscalls.c (revision 110231) +++ head/sys/kern/uipc_syscalls.c (revision 110232) @@ -1,2042 +1,2039 @@ /* * Copyright (c) 1982, 1986, 1989, 1990, 1993 * The Regents of the University of California. All rights reserved. * * sendfile(2) and related extensions: * Copyright (c) 1998, David Greenman. 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94 * $FreeBSD$ */ #include "opt_compat.h" #include "opt_ktrace.h" #include "opt_mac.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #include #include #include static void sf_buf_init(void *arg); SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL) static int sendit(struct thread *td, int s, struct msghdr *mp, int flags); static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp); static int accept1(struct thread *td, struct accept_args *uap, int compat); static int do_sendfile(struct thread *td, struct sendfile_args *uap, int compat); static int getsockname1(struct thread *td, struct getsockname_args *uap, int compat); static int getpeername1(struct thread *td, struct getpeername_args *uap, int compat); /* * Expanded sf_freelist head. Really an SLIST_HEAD() in disguise, with the * sf_freelist head with the sf_lock mutex. */ static struct { SLIST_HEAD(, sf_buf) sf_head; struct mtx sf_lock; } sf_freelist; vm_offset_t sf_base; struct sf_buf *sf_bufs; u_int sf_buf_alloc_want; /* * System call interface to the socket abstraction. 
*/ #if defined(COMPAT_43) || defined(COMPAT_SUNOS) #define COMPAT_OLDSOCK #endif /* * MPSAFE */ int socket(td, uap) struct thread *td; register struct socket_args /* { int domain; int type; int protocol; } */ *uap; { struct filedesc *fdp; struct socket *so; struct file *fp; int fd, error; mtx_lock(&Giant); fdp = td->td_proc->p_fd; error = falloc(td, &fp, &fd); if (error) goto done2; fhold(fp); error = socreate(uap->domain, &so, uap->type, uap->protocol, td->td_ucred, td); FILEDESC_LOCK(fdp); if (error) { if (fdp->fd_ofiles[fd] == fp) { fdp->fd_ofiles[fd] = NULL; FILEDESC_UNLOCK(fdp); fdrop(fp, td); } else FILEDESC_UNLOCK(fdp); } else { fp->f_data = so; /* already has ref count */ fp->f_flag = FREAD|FWRITE; fp->f_ops = &socketops; fp->f_type = DTYPE_SOCKET; FILEDESC_UNLOCK(fdp); td->td_retval[0] = fd; } fdrop(fp, td); done2: mtx_unlock(&Giant); return (error); } /* * MPSAFE */ /* ARGSUSED */ int bind(td, uap) struct thread *td; register struct bind_args /* { int s; caddr_t name; int namelen; } */ *uap; { struct socket *so; struct sockaddr *sa; int error; mtx_lock(&Giant); if ((error = fgetsock(td, uap->s, &so, NULL)) != 0) goto done2; if ((error = getsockaddr(&sa, uap->name, uap->namelen)) != 0) goto done1; #ifdef MAC error = mac_check_socket_bind(td->td_ucred, so, sa); if (error) { FREE(sa, M_SONAME); goto done1; } #endif error = sobind(so, sa, td); FREE(sa, M_SONAME); done1: fputsock(so); done2: mtx_unlock(&Giant); return (error); } /* * MPSAFE */ /* ARGSUSED */ int listen(td, uap) struct thread *td; register struct listen_args /* { int s; int backlog; } */ *uap; { struct socket *so; int error; mtx_lock(&Giant); if ((error = fgetsock(td, uap->s, &so, NULL)) == 0) { #ifdef MAC error = mac_check_socket_listen(td->td_ucred, so); if (error) goto done; #endif error = solisten(so, uap->backlog, td); #ifdef MAC done: #endif fputsock(so); } mtx_unlock(&Giant); return(error); } /* * accept1() * MPSAFE */ static int accept1(td, uap, compat) struct thread *td; register struct accept_args /* { int s; caddr_t name; int *anamelen; } */ *uap; int compat; { struct filedesc *fdp; struct file *nfp = NULL; struct sockaddr *sa; int namelen, error, s; struct socket *head, *so; int fd; u_int fflag; pid_t pgid; mtx_lock(&Giant); fdp = td->td_proc->p_fd; if (uap->name) { error = copyin(uap->anamelen, &namelen, sizeof (namelen)); if(error) goto done2; if (namelen < 0) { error = EINVAL; goto done2; } } error = fgetsock(td, uap->s, &head, &fflag); if (error) goto done2; s = splnet(); if ((head->so_options & SO_ACCEPTCONN) == 0) { splx(s); error = EINVAL; goto done; } while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) { if (head->so_state & SS_CANTRCVMORE) { head->so_error = ECONNABORTED; break; } if ((head->so_state & SS_NBIO) != 0) { head->so_error = EWOULDBLOCK; break; } error = tsleep(&head->so_timeo, PSOCK | PCATCH, "accept", 0); if (error) { splx(s); goto done; } } if (head->so_error) { error = head->so_error; head->so_error = 0; splx(s); goto done; } /* * At this point we know that there is at least one connection * ready to be accepted. Remove it from the queue prior to * allocating the file descriptor for it since falloc() may * block allowing another process to accept the connection * instead. */ so = TAILQ_FIRST(&head->so_comp); TAILQ_REMOVE(&head->so_comp, so, so_list); head->so_qlen--; error = falloc(td, &nfp, &fd); if (error) { /* * Probably ran out of file descriptors. Put the * unaccepted connection back onto the queue and * do another wakeup so some other process might * have a chance at it. 
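
Note how accept1() here maps an aborted connection to ECONNABORTED and a drained non-blocking queue to EWOULDBLOCK, so userland should treat both cases as transient. A small illustrative helper (a sketch, not from this commit) showing the expected handling:

#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

int
example_accept_retry(int lfd)
{
	int fd;

	for (;;) {
		fd = accept(lfd, NULL, NULL);
		if (fd >= 0)
			return (fd);
		if (errno == ECONNABORTED || errno == EINTR)
			continue;	/* peer gave up or signal: retry */
		return (-1);		/* EWOULDBLOCK etc.: caller polls */
	}
}
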
*/ TAILQ_INSERT_HEAD(&head->so_comp, so, so_list); head->so_qlen++; wakeup_one(&head->so_timeo); splx(s); goto done; } fhold(nfp); td->td_retval[0] = fd; /* connection has been removed from the listen queue */ KNOTE(&head->so_rcv.sb_sel.si_note, 0); so->so_state &= ~SS_COMP; so->so_head = NULL; pgid = fgetown(&head->so_sigio); if (pgid != 0) fsetown(pgid, &so->so_sigio); FILE_LOCK(nfp); soref(so); /* file descriptor reference */ nfp->f_data = so; /* nfp has ref count from falloc */ nfp->f_flag = fflag; nfp->f_ops = &socketops; nfp->f_type = DTYPE_SOCKET; FILE_UNLOCK(nfp); sa = 0; error = soaccept(so, &sa); if (error) { /* * return a namelen of zero for older code which might * ignore the return value from accept. */ if (uap->name != NULL) { namelen = 0; (void) copyout(&namelen, uap->anamelen, sizeof(*uap->anamelen)); } goto noconnection; } if (sa == NULL) { namelen = 0; if (uap->name) goto gotnoname; splx(s); error = 0; goto done; } if (uap->name) { /* check sa_len before it is destroyed */ if (namelen > sa->sa_len) namelen = sa->sa_len; #ifdef COMPAT_OLDSOCK if (compat) ((struct osockaddr *)sa)->sa_family = sa->sa_family; #endif error = copyout(sa, uap->name, (u_int)namelen); if (!error) gotnoname: error = copyout(&namelen, uap->anamelen, sizeof (*uap->anamelen)); } noconnection: if (sa) FREE(sa, M_SONAME); /* * close the new descriptor, assuming someone hasn't ripped it * out from under us. */ if (error) { FILEDESC_LOCK(fdp); if (fdp->fd_ofiles[fd] == nfp) { fdp->fd_ofiles[fd] = NULL; FILEDESC_UNLOCK(fdp); fdrop(nfp, td); } else { FILEDESC_UNLOCK(fdp); } } splx(s); /* * Release explicitly held references before returning. */ done: if (nfp != NULL) fdrop(nfp, td); fputsock(head); done2: mtx_unlock(&Giant); return (error); } /* * MPSAFE (accept1() is MPSAFE) */ int accept(td, uap) struct thread *td; struct accept_args *uap; { return (accept1(td, uap, 0)); } #ifdef COMPAT_OLDSOCK /* * MPSAFE (accept1() is MPSAFE) */ int oaccept(td, uap) struct thread *td; struct accept_args *uap; { return (accept1(td, uap, 1)); } #endif /* COMPAT_OLDSOCK */ /* * MPSAFE */ /* ARGSUSED */ int connect(td, uap) struct thread *td; register struct connect_args /* { int s; caddr_t name; int namelen; } */ *uap; { struct socket *so; struct sockaddr *sa; int error, s; mtx_lock(&Giant); if ((error = fgetsock(td, uap->s, &so, NULL)) != 0) goto done2; if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) { error = EALREADY; goto done1; } error = getsockaddr(&sa, uap->name, uap->namelen); if (error) goto done1; #ifdef MAC error = mac_check_socket_connect(td->td_ucred, so, sa); if (error) goto bad; #endif error = soconnect(so, sa, td); if (error) goto bad; if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) { FREE(sa, M_SONAME); error = EINPROGRESS; goto done1; } s = splnet(); while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { error = tsleep(&so->so_timeo, PSOCK | PCATCH, "connec", 0); if (error) break; } if (error == 0) { error = so->so_error; so->so_error = 0; } splx(s); bad: so->so_state &= ~SS_ISCONNECTING; FREE(sa, M_SONAME); if (error == ERESTART) error = EINTR; done1: fputsock(so); done2: mtx_unlock(&Giant); return (error); } /* * MPSAFE */ int socketpair(td, uap) struct thread *td; register struct socketpair_args /* { int domain; int type; int protocol; int *rsv; } */ *uap; { register struct filedesc *fdp = td->td_proc->p_fd; struct file *fp1, *fp2; struct socket *so1, *so2; int fd, error, sv[2]; mtx_lock(&Giant); error = socreate(uap->domain, &so1, uap->type, uap->protocol, 
td->td_ucred, td); if (error) goto done2; error = socreate(uap->domain, &so2, uap->type, uap->protocol, td->td_ucred, td); if (error) goto free1; error = falloc(td, &fp1, &fd); if (error) goto free2; fhold(fp1); sv[0] = fd; fp1->f_data = so1; /* so1 already has ref count */ error = falloc(td, &fp2, &fd); if (error) goto free3; fhold(fp2); fp2->f_data = so2; /* so2 already has ref count */ sv[1] = fd; error = soconnect2(so1, so2); if (error) goto free4; if (uap->type == SOCK_DGRAM) { /* * Datagram socket connection is asymmetric. */ error = soconnect2(so2, so1); if (error) goto free4; } FILE_LOCK(fp1); fp1->f_flag = FREAD|FWRITE; fp1->f_ops = &socketops; fp1->f_type = DTYPE_SOCKET; FILE_UNLOCK(fp1); FILE_LOCK(fp2); fp2->f_flag = FREAD|FWRITE; fp2->f_ops = &socketops; fp2->f_type = DTYPE_SOCKET; FILE_UNLOCK(fp2); error = copyout(sv, uap->rsv, 2 * sizeof (int)); fdrop(fp1, td); fdrop(fp2, td); goto done2; free4: FILEDESC_LOCK(fdp); if (fdp->fd_ofiles[sv[1]] == fp2) { fdp->fd_ofiles[sv[1]] = NULL; FILEDESC_UNLOCK(fdp); fdrop(fp2, td); } else FILEDESC_UNLOCK(fdp); fdrop(fp2, td); free3: FILEDESC_LOCK(fdp); if (fdp->fd_ofiles[sv[0]] == fp1) { fdp->fd_ofiles[sv[0]] = NULL; FILEDESC_UNLOCK(fdp); fdrop(fp1, td); } else FILEDESC_UNLOCK(fdp); fdrop(fp1, td); free2: (void)soclose(so2); free1: (void)soclose(so1); done2: mtx_unlock(&Giant); return (error); } static int sendit(td, s, mp, flags) register struct thread *td; int s; register struct msghdr *mp; int flags; { struct uio auio; register struct iovec *iov; register int i; struct mbuf *control; struct sockaddr *to = NULL; int len, error; struct socket *so; #ifdef KTRACE struct iovec *ktriov = NULL; struct uio ktruio; int iovlen; #endif if ((error = fgetsock(td, s, &so, NULL)) != 0) return (error); #ifdef MAC error = mac_check_socket_send(td->td_ucred, so); if (error) goto bad; #endif auio.uio_iov = mp->msg_iov; auio.uio_iovcnt = mp->msg_iovlen; auio.uio_segflg = UIO_USERSPACE; auio.uio_rw = UIO_WRITE; auio.uio_td = td; auio.uio_offset = 0; /* XXX */ auio.uio_resid = 0; iov = mp->msg_iov; for (i = 0; i < mp->msg_iovlen; i++, iov++) { if ((auio.uio_resid += iov->iov_len) < 0) { error = EINVAL; goto bad; } } if (mp->msg_name) { error = getsockaddr(&to, mp->msg_name, mp->msg_namelen); if (error) goto bad; } if (mp->msg_control) { if (mp->msg_controllen < sizeof(struct cmsghdr) #ifdef COMPAT_OLDSOCK && mp->msg_flags != MSG_COMPAT #endif ) { error = EINVAL; goto bad; } error = sockargs(&control, mp->msg_control, mp->msg_controllen, MT_CONTROL); if (error) goto bad; #ifdef COMPAT_OLDSOCK if (mp->msg_flags == MSG_COMPAT) { register struct cmsghdr *cm; M_PREPEND(control, sizeof(*cm), 0); if (control == 0) { error = ENOBUFS; goto bad; } else { cm = mtod(control, struct cmsghdr *); cm->cmsg_len = control->m_len; cm->cmsg_level = SOL_SOCKET; cm->cmsg_type = SCM_RIGHTS; } } #endif } else { control = 0; } #ifdef KTRACE if (KTRPOINT(td, KTR_GENIO)) { iovlen = auio.uio_iovcnt * sizeof (struct iovec); MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, 0); bcopy(auio.uio_iov, ktriov, iovlen); ktruio = auio; } #endif len = auio.uio_resid; error = so->so_proto->pr_usrreqs->pru_sosend(so, to, &auio, 0, control, flags, td); if (error) { if (auio.uio_resid != len && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) error = 0; /* Generation of SIGPIPE can be controlled per socket */ if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE)) { PROC_LOCK(td->td_proc); psignal(td->td_proc, SIGPIPE); PROC_UNLOCK(td->td_proc); } } if (error == 0) td->td_retval[0] = len - 
auio.uio_resid; #ifdef KTRACE if (ktriov != NULL) { if (error == 0) { ktruio.uio_iov = ktriov; ktruio.uio_resid = td->td_retval[0]; ktrgenio(s, UIO_WRITE, &ktruio, error); } FREE(ktriov, M_TEMP); } #endif bad: fputsock(so); if (to) FREE(to, M_SONAME); return (error); } /* * MPSAFE */ int sendto(td, uap) struct thread *td; register struct sendto_args /* { int s; caddr_t buf; size_t len; int flags; caddr_t to; int tolen; } */ *uap; { struct msghdr msg; struct iovec aiov; int error; msg.msg_name = uap->to; msg.msg_namelen = uap->tolen; msg.msg_iov = &aiov; msg.msg_iovlen = 1; msg.msg_control = 0; #ifdef COMPAT_OLDSOCK msg.msg_flags = 0; #endif aiov.iov_base = uap->buf; aiov.iov_len = uap->len; mtx_lock(&Giant); error = sendit(td, uap->s, &msg, uap->flags); mtx_unlock(&Giant); return (error); } #ifdef COMPAT_OLDSOCK /* * MPSAFE */ int osend(td, uap) struct thread *td; register struct osend_args /* { int s; caddr_t buf; int len; int flags; } */ *uap; { struct msghdr msg; struct iovec aiov; int error; msg.msg_name = 0; msg.msg_namelen = 0; msg.msg_iov = &aiov; msg.msg_iovlen = 1; aiov.iov_base = uap->buf; aiov.iov_len = uap->len; msg.msg_control = 0; msg.msg_flags = 0; mtx_lock(&Giant); error = sendit(td, uap->s, &msg, uap->flags); mtx_unlock(&Giant); return (error); } /* * MPSAFE */ int osendmsg(td, uap) struct thread *td; register struct osendmsg_args /* { int s; caddr_t msg; int flags; } */ *uap; { struct msghdr msg; struct iovec aiov[UIO_SMALLIOV], *iov; int error; mtx_lock(&Giant); error = copyin(uap->msg, &msg, sizeof (struct omsghdr)); if (error) goto done2; if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) { if ((u_int)msg.msg_iovlen >= UIO_MAXIOV) { error = EMSGSIZE; goto done2; } MALLOC(iov, struct iovec *, sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV, 0); } else { iov = aiov; } error = copyin(msg.msg_iov, iov, (unsigned)(msg.msg_iovlen * sizeof (struct iovec))); if (error) goto done; msg.msg_flags = MSG_COMPAT; msg.msg_iov = iov; error = sendit(td, uap->s, &msg, uap->flags); done: if (iov != aiov) FREE(iov, M_IOV); done2: mtx_unlock(&Giant); return (error); } #endif /* * MPSAFE */ int sendmsg(td, uap) struct thread *td; register struct sendmsg_args /* { int s; caddr_t msg; int flags; } */ *uap; { struct msghdr msg; struct iovec aiov[UIO_SMALLIOV], *iov; int error; mtx_lock(&Giant); error = copyin(uap->msg, &msg, sizeof (msg)); if (error) goto done2; if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) { if ((u_int)msg.msg_iovlen >= UIO_MAXIOV) { error = EMSGSIZE; goto done2; } MALLOC(iov, struct iovec *, sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV, 0); } else { iov = aiov; } if (msg.msg_iovlen && (error = copyin(msg.msg_iov, iov, (unsigned)(msg.msg_iovlen * sizeof (struct iovec))))) goto done; msg.msg_iov = iov; #ifdef COMPAT_OLDSOCK msg.msg_flags = 0; #endif error = sendit(td, uap->s, &msg, uap->flags); done: if (iov != aiov) FREE(iov, M_IOV); done2: mtx_unlock(&Giant); return (error); } static int recvit(td, s, mp, namelenp) register struct thread *td; int s; register struct msghdr *mp; void *namelenp; { struct uio auio; register struct iovec *iov; register int i; int len, error; struct mbuf *m, *control = 0; caddr_t ctlbuf; struct socket *so; struct sockaddr *fromsa = 0; #ifdef KTRACE struct iovec *ktriov = NULL; struct uio ktruio; int iovlen; #endif if ((error = fgetsock(td, s, &so, NULL)) != 0) return (error); #ifdef MAC error = mac_check_socket_receive(td->td_ucred, so); if (error) { fputsock(so); return (error); } #endif auio.uio_iov = mp->msg_iov; auio.uio_iovcnt = 
mp->msg_iovlen; auio.uio_segflg = UIO_USERSPACE; auio.uio_rw = UIO_READ; auio.uio_td = td; auio.uio_offset = 0; /* XXX */ auio.uio_resid = 0; iov = mp->msg_iov; for (i = 0; i < mp->msg_iovlen; i++, iov++) { if ((auio.uio_resid += iov->iov_len) < 0) { fputsock(so); return (EINVAL); } } #ifdef KTRACE if (KTRPOINT(td, KTR_GENIO)) { iovlen = auio.uio_iovcnt * sizeof (struct iovec); MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, 0); bcopy(auio.uio_iov, ktriov, iovlen); ktruio = auio; } #endif len = auio.uio_resid; error = so->so_proto->pr_usrreqs->pru_soreceive(so, &fromsa, &auio, (struct mbuf **)0, mp->msg_control ? &control : (struct mbuf **)0, &mp->msg_flags); if (error) { if (auio.uio_resid != len && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) error = 0; } #ifdef KTRACE if (ktriov != NULL) { if (error == 0) { ktruio.uio_iov = ktriov; ktruio.uio_resid = len - auio.uio_resid; ktrgenio(s, UIO_READ, &ktruio, error); } FREE(ktriov, M_TEMP); } #endif if (error) goto out; td->td_retval[0] = len - auio.uio_resid; if (mp->msg_name) { len = mp->msg_namelen; if (len <= 0 || fromsa == 0) len = 0; else { -#ifndef MIN -#define MIN(a,b) ((a)>(b)?(b):(a)) -#endif /* save sa_len before it is destroyed by MSG_COMPAT */ len = MIN(len, fromsa->sa_len); #ifdef COMPAT_OLDSOCK if (mp->msg_flags & MSG_COMPAT) ((struct osockaddr *)fromsa)->sa_family = fromsa->sa_family; #endif error = copyout(fromsa, mp->msg_name, (unsigned)len); if (error) goto out; } mp->msg_namelen = len; if (namelenp && (error = copyout(&len, namelenp, sizeof (int)))) { #ifdef COMPAT_OLDSOCK if (mp->msg_flags & MSG_COMPAT) error = 0; /* old recvfrom didn't check */ else #endif goto out; } } if (mp->msg_control) { #ifdef COMPAT_OLDSOCK /* * We assume that old recvmsg calls won't receive access * rights and other control info, esp. as control info * is always optional and those options didn't exist in 4.3. * If we receive rights, trim the cmsghdr; anything else * is tossed. 
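
The #ifndef MIN fallback deleted earlier in this hunk, like the MAX/MIN pair removed at the top of busdma_machdep.c, is the point of this commit: the callers now rely on a single system-wide definition, presumably the classic one from <sys/param.h>, reproduced here for reference only.

/* For reference: the definitions the callers now pick up from
 * <sys/param.h> instead of defining locally. */
#define	MIN(a,b) (((a)<(b))?(a):(b))
#define	MAX(a,b) (((a)>(b))?(a):(b))
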
*/ if (control && mp->msg_flags & MSG_COMPAT) { if (mtod(control, struct cmsghdr *)->cmsg_level != SOL_SOCKET || mtod(control, struct cmsghdr *)->cmsg_type != SCM_RIGHTS) { mp->msg_controllen = 0; goto out; } control->m_len -= sizeof (struct cmsghdr); control->m_data += sizeof (struct cmsghdr); } #endif len = mp->msg_controllen; m = control; mp->msg_controllen = 0; ctlbuf = mp->msg_control; while (m && len > 0) { unsigned int tocopy; if (len >= m->m_len) tocopy = m->m_len; else { mp->msg_flags |= MSG_CTRUNC; tocopy = len; } if ((error = copyout(mtod(m, caddr_t), ctlbuf, tocopy)) != 0) goto out; ctlbuf += tocopy; len -= tocopy; m = m->m_next; } mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control; } out: fputsock(so); if (fromsa) FREE(fromsa, M_SONAME); if (control) m_freem(control); return (error); } /* * MPSAFE */ int recvfrom(td, uap) struct thread *td; register struct recvfrom_args /* { int s; caddr_t buf; size_t len; int flags; caddr_t from; int *fromlenaddr; } */ *uap; { struct msghdr msg; struct iovec aiov; int error; mtx_lock(&Giant); if (uap->fromlenaddr) { error = copyin(uap->fromlenaddr, &msg.msg_namelen, sizeof (msg.msg_namelen)); if (error) goto done2; } else { msg.msg_namelen = 0; } msg.msg_name = uap->from; msg.msg_iov = &aiov; msg.msg_iovlen = 1; aiov.iov_base = uap->buf; aiov.iov_len = uap->len; msg.msg_control = 0; msg.msg_flags = uap->flags; error = recvit(td, uap->s, &msg, uap->fromlenaddr); done2: mtx_unlock(&Giant); return(error); } #ifdef COMPAT_OLDSOCK /* * MPSAFE */ int orecvfrom(td, uap) struct thread *td; struct recvfrom_args *uap; { uap->flags |= MSG_COMPAT; return (recvfrom(td, uap)); } #endif #ifdef COMPAT_OLDSOCK /* * MPSAFE */ int orecv(td, uap) struct thread *td; register struct orecv_args /* { int s; caddr_t buf; int len; int flags; } */ *uap; { struct msghdr msg; struct iovec aiov; int error; mtx_lock(&Giant); msg.msg_name = 0; msg.msg_namelen = 0; msg.msg_iov = &aiov; msg.msg_iovlen = 1; aiov.iov_base = uap->buf; aiov.iov_len = uap->len; msg.msg_control = 0; msg.msg_flags = uap->flags; error = recvit(td, uap->s, &msg, NULL); mtx_unlock(&Giant); return (error); } /* * Old recvmsg. This code takes advantage of the fact that the old msghdr * overlays the new one, missing only the flags, and with the (old) access * rights where the control fields are now. 
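
orecvmsg() relies on the overlay just described. An illustrative side-by-side of the two layouts, with field names from the historical 4.3BSD omsghdr; treat the exact types as approximate, not the live definitions.

/*
 *   struct omsghdr {                 struct msghdr {
 *       caddr_t msg_name;                caddr_t msg_name;
 *       int     msg_namelen;             u_int   msg_namelen;
 *       struct iovec *msg_iov;           struct iovec *msg_iov;
 *       int     msg_iovlen;              u_int   msg_iovlen;
 *       caddr_t msg_accrights;           caddr_t msg_control;
 *       int     msg_accrightslen;        u_int   msg_controllen;
 *   };                                   int     msg_flags;
 *                                    };
 *
 * A copyin() of an omsghdr therefore fills everything except
 * msg_flags, which orecvmsg() then seeds with MSG_COMPAT.
 */
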
* * MPSAFE */ int orecvmsg(td, uap) struct thread *td; register struct orecvmsg_args /* { int s; struct omsghdr *msg; int flags; } */ *uap; { struct msghdr msg; struct iovec aiov[UIO_SMALLIOV], *iov; int error; error = copyin(uap->msg, &msg, sizeof (struct omsghdr)); if (error) return (error); mtx_lock(&Giant); if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) { if ((u_int)msg.msg_iovlen >= UIO_MAXIOV) { error = EMSGSIZE; goto done2; } MALLOC(iov, struct iovec *, sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV, 0); } else { iov = aiov; } msg.msg_flags = uap->flags | MSG_COMPAT; error = copyin(msg.msg_iov, iov, (unsigned)(msg.msg_iovlen * sizeof (struct iovec))); if (error) goto done; msg.msg_iov = iov; error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen); if (msg.msg_controllen && error == 0) error = copyout(&msg.msg_controllen, &uap->msg->msg_accrightslen, sizeof (int)); done: if (iov != aiov) FREE(iov, M_IOV); done2: mtx_unlock(&Giant); return (error); } #endif /* * MPSAFE */ int recvmsg(td, uap) struct thread *td; register struct recvmsg_args /* { int s; struct msghdr *msg; int flags; } */ *uap; { struct msghdr msg; struct iovec aiov[UIO_SMALLIOV], *uiov, *iov; register int error; mtx_lock(&Giant); error = copyin(uap->msg, &msg, sizeof (msg)); if (error) goto done2; if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) { if ((u_int)msg.msg_iovlen >= UIO_MAXIOV) { error = EMSGSIZE; goto done2; } MALLOC(iov, struct iovec *, sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV, 0); } else { iov = aiov; } #ifdef COMPAT_OLDSOCK msg.msg_flags = uap->flags &~ MSG_COMPAT; #else msg.msg_flags = uap->flags; #endif uiov = msg.msg_iov; msg.msg_iov = iov; error = copyin(uiov, iov, (unsigned)(msg.msg_iovlen * sizeof (struct iovec))); if (error) goto done; error = recvit(td, uap->s, &msg, NULL); if (!error) { msg.msg_iov = uiov; error = copyout(&msg, uap->msg, sizeof(msg)); } done: if (iov != aiov) FREE(iov, M_IOV); done2: mtx_unlock(&Giant); return (error); } /* * MPSAFE */ /* ARGSUSED */ int shutdown(td, uap) struct thread *td; register struct shutdown_args /* { int s; int how; } */ *uap; { struct socket *so; int error; mtx_lock(&Giant); if ((error = fgetsock(td, uap->s, &so, NULL)) == 0) { error = soshutdown(so, uap->how); fputsock(so); } mtx_unlock(&Giant); return(error); } /* * MPSAFE */ /* ARGSUSED */ int setsockopt(td, uap) struct thread *td; register struct setsockopt_args /* { int s; int level; int name; caddr_t val; int valsize; } */ *uap; { struct socket *so; struct sockopt sopt; int error; if (uap->val == 0 && uap->valsize != 0) return (EFAULT); if (uap->valsize < 0) return (EINVAL); mtx_lock(&Giant); if ((error = fgetsock(td, uap->s, &so, NULL)) == 0) { sopt.sopt_dir = SOPT_SET; sopt.sopt_level = uap->level; sopt.sopt_name = uap->name; sopt.sopt_val = uap->val; sopt.sopt_valsize = uap->valsize; sopt.sopt_td = td; error = sosetopt(so, &sopt); fputsock(so); } mtx_unlock(&Giant); return(error); } /* * MPSAFE */ /* ARGSUSED */ int getsockopt(td, uap) struct thread *td; register struct getsockopt_args /* { int s; int level; int name; caddr_t val; int *avalsize; } */ *uap; { int valsize, error; struct socket *so; struct sockopt sopt; mtx_lock(&Giant); if ((error = fgetsock(td, uap->s, &so, NULL)) != 0) goto done2; if (uap->val) { error = copyin(uap->avalsize, &valsize, sizeof (valsize)); if (error) goto done1; if (valsize < 0) { error = EINVAL; goto done1; } } else { valsize = 0; } sopt.sopt_dir = SOPT_GET; sopt.sopt_level = uap->level; sopt.sopt_name = uap->name; sopt.sopt_val = uap->val; 
sopt.sopt_valsize = (size_t)valsize; /* checked non-negative above */ sopt.sopt_td = td; error = sogetopt(so, &sopt); if (error == 0) { valsize = sopt.sopt_valsize; error = copyout(&valsize, uap->avalsize, sizeof (valsize)); } done1: fputsock(so); done2: mtx_unlock(&Giant); return (error); } /* * getsockname1() - Get socket name. * * MPSAFE */ /* ARGSUSED */ static int getsockname1(td, uap, compat) struct thread *td; register struct getsockname_args /* { int fdes; caddr_t asa; int *alen; } */ *uap; int compat; { struct socket *so; struct sockaddr *sa; int len, error; mtx_lock(&Giant); if ((error = fgetsock(td, uap->fdes, &so, NULL)) != 0) goto done2; error = copyin(uap->alen, &len, sizeof (len)); if (error) goto done1; if (len < 0) { error = EINVAL; goto done1; } sa = 0; error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &sa); if (error) goto bad; if (sa == 0) { len = 0; goto gotnothing; } len = MIN(len, sa->sa_len); #ifdef COMPAT_OLDSOCK if (compat) ((struct osockaddr *)sa)->sa_family = sa->sa_family; #endif error = copyout(sa, uap->asa, (u_int)len); if (error == 0) gotnothing: error = copyout(&len, uap->alen, sizeof (len)); bad: if (sa) FREE(sa, M_SONAME); done1: fputsock(so); done2: mtx_unlock(&Giant); return (error); } /* * MPSAFE */ int getsockname(td, uap) struct thread *td; struct getsockname_args *uap; { return (getsockname1(td, uap, 0)); } #ifdef COMPAT_OLDSOCK /* * MPSAFE */ int ogetsockname(td, uap) struct thread *td; struct getsockname_args *uap; { return (getsockname1(td, uap, 1)); } #endif /* COMPAT_OLDSOCK */ /* * getpeername1() - Get name of peer for connected socket. * * MPSAFE */ /* ARGSUSED */ static int getpeername1(td, uap, compat) struct thread *td; register struct getpeername_args /* { int fdes; caddr_t asa; int *alen; } */ *uap; int compat; { struct socket *so; struct sockaddr *sa; int len, error; mtx_lock(&Giant); if ((error = fgetsock(td, uap->fdes, &so, NULL)) != 0) goto done2; if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) { error = ENOTCONN; goto done1; } error = copyin(uap->alen, &len, sizeof (len)); if (error) goto done1; if (len < 0) { error = EINVAL; goto done1; } sa = 0; error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, &sa); if (error) goto bad; if (sa == 0) { len = 0; goto gotnothing; } len = MIN(len, sa->sa_len); #ifdef COMPAT_OLDSOCK if (compat) ((struct osockaddr *)sa)->sa_family = sa->sa_family; #endif error = copyout(sa, uap->asa, (u_int)len); if (error) goto bad; gotnothing: error = copyout(&len, uap->alen, sizeof (len)); bad: if (sa) FREE(sa, M_SONAME); done1: fputsock(so); done2: mtx_unlock(&Giant); return (error); } /* * MPSAFE */ int getpeername(td, uap) struct thread *td; struct getpeername_args *uap; { return (getpeername1(td, uap, 0)); } #ifdef COMPAT_OLDSOCK /* * MPSAFE */ int ogetpeername(td, uap) struct thread *td; struct ogetpeername_args *uap; { /* XXX uap should have type `getpeername_args *' to begin with. */ return (getpeername1(td, (struct getpeername_args *)uap, 1)); } #endif /* COMPAT_OLDSOCK */ int sockargs(mp, buf, buflen, type) struct mbuf **mp; caddr_t buf; int buflen, type; { register struct sockaddr *sa; register struct mbuf *m; int error; if ((u_int)buflen > MLEN) { #ifdef COMPAT_OLDSOCK if (type == MT_SONAME && (u_int)buflen <= 112) buflen = MLEN; /* unix domain compat. 
hack */ else #endif return (EINVAL); } m = m_get(0, type); if (m == NULL) return (ENOBUFS); m->m_len = buflen; error = copyin(buf, mtod(m, caddr_t), (u_int)buflen); if (error) (void) m_free(m); else { *mp = m; if (type == MT_SONAME) { sa = mtod(m, struct sockaddr *); #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN if (sa->sa_family == 0 && sa->sa_len < AF_MAX) sa->sa_family = sa->sa_len; #endif sa->sa_len = buflen; } } return (error); } int getsockaddr(namp, uaddr, len) struct sockaddr **namp; caddr_t uaddr; size_t len; { struct sockaddr *sa; int error; if (len > SOCK_MAXADDRLEN) return ENAMETOOLONG; MALLOC(sa, struct sockaddr *, len, M_SONAME, 0); error = copyin(uaddr, sa, len); if (error) { FREE(sa, M_SONAME); } else { #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN if (sa->sa_family == 0 && sa->sa_len < AF_MAX) sa->sa_family = sa->sa_len; #endif sa->sa_len = len; *namp = sa; } return error; } /* * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-)) */ static void sf_buf_init(void *arg) { int i; mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", NULL, MTX_DEF); mtx_lock(&sf_freelist.sf_lock); SLIST_INIT(&sf_freelist.sf_head); sf_base = kmem_alloc_pageable(kernel_map, nsfbufs * PAGE_SIZE); sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_NOWAIT | M_ZERO); for (i = 0; i < nsfbufs; i++) { sf_bufs[i].kva = sf_base + i * PAGE_SIZE; SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list); } sf_buf_alloc_want = 0; mtx_unlock(&sf_freelist.sf_lock); } /* * Get an sf_buf from the freelist. Will block if none are available. */ struct sf_buf * sf_buf_alloc() { struct sf_buf *sf; int error; mtx_lock(&sf_freelist.sf_lock); while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) { sf_buf_alloc_want++; error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH, "sfbufa", 0); sf_buf_alloc_want--; /* * If we got a signal, don't risk going back to sleep. */ if (error) break; } if (sf != NULL) SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list); mtx_unlock(&sf_freelist.sf_lock); return (sf); } #define dtosf(x) (&sf_bufs[((uintptr_t)(x) - (uintptr_t)sf_base) >> PAGE_SHIFT]) /* * Detach mapped page and release resources back to the system. */ void sf_buf_free(void *addr, void *args) { struct sf_buf *sf; struct vm_page *m; GIANT_REQUIRED; sf = dtosf(addr); pmap_qremove((vm_offset_t)addr, 1); m = sf->m; vm_page_lock_queues(); vm_page_unwire(m, 0); /* * Check for the object going away on us. This can * happen since we don't hold a reference to it. * If so, we're responsible for freeing the page. */ if (m->wire_count == 0 && m->object == NULL) vm_page_free(m); vm_page_unlock_queues(); sf->m = NULL; mtx_lock(&sf_freelist.sf_lock); SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list); if (sf_buf_alloc_want > 0) wakeup_one(&sf_freelist); mtx_unlock(&sf_freelist.sf_lock); } /* * sendfile(2) * * MPSAFE * * int sendfile(int fd, int s, off_t offset, size_t nbytes, * struct sf_hdtr *hdtr, off_t *sbytes, int flags) * * Send a file specified by 'fd' and starting at 'offset' to a socket * specified by 's'. Send only 'nbytes' of the file or until EOF if * nbytes == 0. Optionally add a header and/or trailer to the socket * output. If specified, write the total number of bytes sent into *sbytes.
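
sf_buf_alloc() and sf_buf_free() above implement a classic blocking freelist: sleep while the list is empty, record interest in sf_buf_alloc_want, and wake exactly one sleeper on free. The same shape reduced to a generic kernel-style pool; every name here is invented, and this is a sketch of the pattern rather than a real API.

struct pool_item {
	SLIST_ENTRY(pool_item) link;
};

struct pool {
	SLIST_HEAD(, pool_item) head;
	struct mtx              lock;
	u_int                   want;	/* sleepers waiting for an item */
};

static struct pool_item *
pool_get(struct pool *p)
{
	struct pool_item *it;
	int error;

	mtx_lock(&p->lock);
	while ((it = SLIST_FIRST(&p->head)) == NULL) {
		p->want++;
		error = msleep(p, &p->lock, PVM | PCATCH, "poolwt", 0);
		p->want--;
		if (error)		/* signal: don't sleep again */
			break;
	}
	if (it != NULL)
		SLIST_REMOVE_HEAD(&p->head, link);
	mtx_unlock(&p->lock);
	return (it);			/* may be NULL if interrupted */
}

static void
pool_put(struct pool *p, struct pool_item *it)
{
	mtx_lock(&p->lock);
	SLIST_INSERT_HEAD(&p->head, it, link);
	if (p->want > 0)
		wakeup_one(p);		/* hand the item to one sleeper */
	mtx_unlock(&p->lock);
}
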
* */ int sendfile(struct thread *td, struct sendfile_args *uap) { return (do_sendfile(td, uap, 0)); } #ifdef COMPAT_FREEBSD4 int freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap) { struct sendfile_args args; args.fd = uap->fd; args.s = uap->s; args.offset = uap->offset; args.nbytes = uap->nbytes; args.hdtr = uap->hdtr; args.sbytes = uap->sbytes; args.flags = uap->flags; return (do_sendfile(td, &args, 1)); } #endif /* COMPAT_FREEBSD4 */ static int do_sendfile(struct thread *td, struct sendfile_args *uap, int compat) { struct vnode *vp; struct vm_object *obj; struct socket *so = NULL; struct mbuf *m; struct sf_buf *sf; struct vm_page *pg; struct writev_args nuap; struct sf_hdtr hdtr; off_t off, xfsize, hdtr_size, sbytes = 0; int error, s; mtx_lock(&Giant); hdtr_size = 0; /* * The descriptor must be a regular file and have a backing VM object. */ if ((error = fgetvp_read(td, uap->fd, &vp)) != 0) goto done; if (vp->v_type != VREG || VOP_GETVOBJECT(vp, &obj) != 0) { error = EINVAL; goto done; } if ((error = fgetsock(td, uap->s, &so, NULL)) != 0) goto done; if (so->so_type != SOCK_STREAM) { error = EINVAL; goto done; } if ((so->so_state & SS_ISCONNECTED) == 0) { error = ENOTCONN; goto done; } if (uap->offset < 0) { error = EINVAL; goto done; } #ifdef MAC error = mac_check_socket_send(td->td_ucred, so); if (error) goto done; #endif /* * If specified, get the pointer to the sf_hdtr struct for * any headers/trailers. */ if (uap->hdtr != NULL) { error = copyin(uap->hdtr, &hdtr, sizeof(hdtr)); if (error) goto done; /* * Send any headers. Wimp out and use writev(2). */ if (hdtr.headers != NULL) { nuap.fd = uap->s; nuap.iovp = hdtr.headers; nuap.iovcnt = hdtr.hdr_cnt; error = writev(td, &nuap); if (error) goto done; if (compat) sbytes += td->td_retval[0]; else hdtr_size += td->td_retval[0]; } } /* * Protect against multiple writers to the socket. */ (void) sblock(&so->so_snd, 0); /* * Loop through the pages in the file, starting with the requested * offset. Get a file page (do I/O if necessary), map the file page * into an sf_buf, attach an mbuf header to the sf_buf, and queue * it on the socket. */ for (off = uap->offset; ; off += xfsize, sbytes += xfsize) { vm_pindex_t pindex; vm_offset_t pgoff; pindex = OFF_TO_IDX(off); retry_lookup: /* * Calculate the amount to transfer. Not to exceed a page, * the EOF, or the passed in nbytes. */ xfsize = obj->un_pager.vnp.vnp_size - off; if (xfsize > PAGE_SIZE) xfsize = PAGE_SIZE; pgoff = (vm_offset_t)(off & PAGE_MASK); if (PAGE_SIZE - pgoff < xfsize) xfsize = PAGE_SIZE - pgoff; if (uap->nbytes && xfsize > (uap->nbytes - sbytes)) xfsize = uap->nbytes - sbytes; if (xfsize <= 0) break; /* * Optimize the non-blocking case by looking at the socket space * before going to the extra work of constituting the sf_buf. */ if ((so->so_state & SS_NBIO) && sbspace(&so->so_snd) <= 0) { if (so->so_state & SS_CANTSENDMORE) error = EPIPE; else error = EAGAIN; sbunlock(&so->so_snd); goto done; } /* * Attempt to look up the page. * * Allocate if not found * * Wait and loop if busy. */ pg = vm_page_lookup(obj, pindex); if (pg == NULL) { pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED); if (pg == NULL) { VM_WAIT; goto retry_lookup; } vm_page_lock_queues(); vm_page_wakeup(pg); } else { vm_page_lock_queues(); if (vm_page_sleep_if_busy(pg, TRUE, "sfpbsy")) goto retry_lookup; /* * Wire the page so it does not get ripped out from * under us. 
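
The clamping at the top of the transfer loop above bounds each pass by EOF, by the containing page, and by the caller's nbytes. The arithmetic, checked standalone with made-up numbers:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (PAGE_SIZE - 1)

int
main(void)
{
	unsigned long vnp_size = 10000;	/* file size */
	unsigned long off = 9000;	/* current file offset */
	unsigned long nbytes = 0;	/* 0 means: send to EOF */
	unsigned long sbytes = 0;	/* bytes sent so far */
	unsigned long xfsize, pgoff;

	xfsize = vnp_size - off;		/* don't run past EOF */
	if (xfsize > PAGE_SIZE)
		xfsize = PAGE_SIZE;
	pgoff = off & PAGE_MASK;		/* offset within the page */
	if (PAGE_SIZE - pgoff < xfsize)
		xfsize = PAGE_SIZE - pgoff;	/* don't cross the page */
	if (nbytes && xfsize > nbytes - sbytes)
		xfsize = nbytes - sbytes;	/* honor the caller's cap */
	printf("pgoff=%lu xfsize=%lu\n", pgoff, xfsize);
	/* prints: pgoff=808 xfsize=1000 */
	return (0);
}
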
*/ vm_page_wire(pg); } /* * If page is not valid for what we need, initiate I/O */ if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) { int bsize, resid; /* * Ensure that our page is still around when the I/O * completes. */ vm_page_io_start(pg); vm_page_unlock_queues(); /* * Get the page from backing store. */ bsize = vp->v_mount->mnt_stat.f_iosize; vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td); /* * XXXMAC: Because we don't have fp->f_cred here, * we pass in NOCRED. This is probably wrong, but * is consistent with our original implementation. */ error = vn_rdwr(UIO_READ, vp, NULL, MAXBSIZE, trunc_page(off), UIO_NOCOPY, IO_NODELOCKED | IO_VMIO | ((MAXBSIZE / bsize) << 16), td->td_ucred, NOCRED, &resid, td); VOP_UNLOCK(vp, 0, td); vm_page_lock_queues(); vm_page_flag_clear(pg, PG_ZERO); vm_page_io_finish(pg); if (error) { vm_page_unwire(pg, 0); /* * See if anyone else might know about this page. * If not and it is not valid, then free it. */ if (pg->wire_count == 0 && pg->valid == 0 && pg->busy == 0 && !(pg->flags & PG_BUSY) && pg->hold_count == 0) { vm_page_busy(pg); vm_page_free(pg); } vm_page_unlock_queues(); sbunlock(&so->so_snd); goto done; } } vm_page_unlock_queues(); /* * Get a sendfile buf. We usually wait as long as necessary, * but this wait can be interrupted. */ if ((sf = sf_buf_alloc()) == NULL) { vm_page_lock_queues(); vm_page_unwire(pg, 0); if (pg->wire_count == 0 && pg->object == NULL) vm_page_free(pg); vm_page_unlock_queues(); sbunlock(&so->so_snd); error = EINTR; goto done; } /* * Allocate a kernel virtual page and insert the physical page * into it. */ sf->m = pg; pmap_qenter(sf->kva, &pg, 1); /* * Get an mbuf header and set it up as having external storage. */ MGETHDR(m, 0, MT_DATA); if (m == NULL) { error = ENOBUFS; sf_buf_free((void *)sf->kva, NULL); sbunlock(&so->so_snd); goto done; } /* * Setup external storage for mbuf. */ MEXTADD(m, sf->kva, PAGE_SIZE, sf_buf_free, NULL, M_RDONLY, EXT_SFBUF); m->m_data = (char *) sf->kva + pgoff; m->m_pkthdr.len = m->m_len = xfsize; /* * Add the buffer to the socket buffer chain. */ s = splnet(); retry_space: /* * Make sure that the socket is still able to take more data. * CANTSENDMORE being true usually means that the connection * was closed. so_error is true when an error was sensed after * a previous send. * The state is checked after the page mapping and buffer * allocation above since those operations may block and make * any socket checks stale. From this point forward, nothing * blocks before the pru_send (or more accurately, any blocking * results in a loop back to here to re-check). */ if ((so->so_state & SS_CANTSENDMORE) || so->so_error) { if (so->so_state & SS_CANTSENDMORE) { error = EPIPE; } else { error = so->so_error; so->so_error = 0; } m_freem(m); sbunlock(&so->so_snd); splx(s); goto done; } /* * Wait for socket space to become available. We do this just * after checking the connection state above in order to avoid * a race condition with sbwait(). */ if (sbspace(&so->so_snd) < so->so_snd.sb_lowat) { if (so->so_state & SS_NBIO) { m_freem(m); sbunlock(&so->so_snd); splx(s); error = EAGAIN; goto done; } error = sbwait(&so->so_snd); /* * An error from sbwait usually indicates that we've * been interrupted by a signal. If we've sent anything * then return bytes sent, otherwise return the error. 
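
The MGETHDR/MEXTADD pairing above is what makes sendfile zero-copy: the wired page is lent to the mbuf as external storage and is only released (through sf_buf_free) once the protocol layer is done with it. The shape of that attach as a hedged sketch; the arguments are placeholders and the literal 0 mirrors the call above.

static struct mbuf *
example_ext_mbuf(caddr_t kva, int len, void (*freef)(void *, void *))
{
	struct mbuf *m;

	MGETHDR(m, 0, MT_DATA);
	if (m == NULL)
		return (NULL);
	/* Attach the page as read-only external storage; freef runs
	 * when the last reference to the data goes away. */
	MEXTADD(m, kva, PAGE_SIZE, freef, NULL, M_RDONLY, EXT_SFBUF);
	m->m_data = kva;
	m->m_pkthdr.len = m->m_len = len;
	return (m);
}
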
*/ if (error) { m_freem(m); sbunlock(&so->so_snd); splx(s); goto done; } goto retry_space; } error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, 0, 0, td); splx(s); if (error) { sbunlock(&so->so_snd); goto done; } } sbunlock(&so->so_snd); /* * Send trailers. Wimp out and use writev(2). */ if (uap->hdtr != NULL && hdtr.trailers != NULL) { nuap.fd = uap->s; nuap.iovp = hdtr.trailers; nuap.iovcnt = hdtr.trl_cnt; error = writev(td, &nuap); if (error) goto done; if (compat) sbytes += td->td_retval[0]; else hdtr_size += td->td_retval[0]; } done: /* * If there was no error we have to clear td->td_retval[0] * because it may have been set by writev. */ if (error == 0) { td->td_retval[0] = 0; } if (uap->sbytes != NULL) { if (!compat) sbytes += hdtr_size; copyout(&sbytes, uap->sbytes, sizeof(off_t)); } if (vp) vrele(vp); if (so) fputsock(so); mtx_unlock(&Giant); return (error); } Index: head/sys/net/zlib.c =================================================================== --- head/sys/net/zlib.c (revision 110231) +++ head/sys/net/zlib.c (revision 110232) @@ -1,5383 +1,5381 @@ /* * This file is derived from various .h and .c files from the zlib-1.0.4 * distribution by Jean-loup Gailly and Mark Adler, with some additions * by Paul Mackerras to aid in implementing Deflate compression and * decompression for PPP packets. See zlib.h for conditions of * distribution and use. * * Changes that have been made include: * - added Z_PACKET_FLUSH (see zlib.h for details) * - added inflateIncomp and deflateOutputPending * - allow strm->next_out to be NULL, meaning discard the output * * $FreeBSD$ */ /* * ==FILEVERSION 971210== * * This marker is used by the Linux installation script to determine * whether an up-to-date version of this file is already installed. */ #define NO_DUMMY_DECL #define NO_ZCFUNCS #define MY_ZCALLOC #if defined(__FreeBSD__) && defined(_KERNEL) #define inflate inflate_ppp /* FreeBSD already has an inflate :-( */ #endif /* +++ zutil.h */ /* zutil.h -- internal interface and configuration of the compression library * Copyright (C) 1995-1996 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. 
*/ /* From: zutil.h,v 1.16 1996/07/24 13:41:13 me Exp $ */ #ifndef _Z_UTIL_H #define _Z_UTIL_H #ifdef _KERNEL #include #else #include "zlib.h" #endif #ifdef _KERNEL /* Assume this is a *BSD or SVR4 kernel */ #include #include #include # define HAVE_MEMCPY #else #if defined(__KERNEL__) /* Assume this is a Linux kernel */ #include #define HAVE_MEMCPY #else /* not kernel */ #if defined(MSDOS)||defined(VMS)||defined(CRAY)||defined(WIN32)||defined(RISCOS) # include # include #else extern int errno; #endif #ifdef STDC # include # include #endif #endif /* __KERNEL__ */ #endif /* _KERNEL */ #ifndef local # define local static #endif /* compile with -Dlocal if your debugger can't find static symbols */ typedef unsigned char uch; typedef uch FAR uchf; typedef unsigned short ush; typedef ush FAR ushf; typedef unsigned long ulg; extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */ /* (size given to avoid silly warnings with Visual C++) */ #define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)] #define ERR_RETURN(strm,err) \ return (strm->msg = (const char*)ERR_MSG(err), (err)) /* To be used only when the state is known to be valid */ /* common constants */ #ifndef DEF_WBITS # define DEF_WBITS MAX_WBITS #endif /* default windowBits for decompression. MAX_WBITS is for compression only */ #if MAX_MEM_LEVEL >= 8 # define DEF_MEM_LEVEL 8 #else # define DEF_MEM_LEVEL MAX_MEM_LEVEL #endif /* default memLevel */ #define STORED_BLOCK 0 #define STATIC_TREES 1 #define DYN_TREES 2 /* The three kinds of block type */ #define MIN_MATCH 3 #define MAX_MATCH 258 /* The minimum and maximum match lengths */ #define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ /* target dependencies */ #ifdef MSDOS # define OS_CODE 0x00 # ifdef __TURBOC__ # include # else /* MSC or DJGPP */ # include # endif #endif #ifdef OS2 # define OS_CODE 0x06 #endif #ifdef WIN32 /* Window 95 & Windows NT */ # define OS_CODE 0x0b #endif #if defined(VAXC) || defined(VMS) # define OS_CODE 0x02 # define FOPEN(name, mode) \ fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512") #endif #ifdef AMIGA # define OS_CODE 0x01 #endif #if defined(ATARI) || defined(atarist) # define OS_CODE 0x05 #endif #ifdef MACOS # define OS_CODE 0x07 #endif #ifdef __50SERIES /* Prime/PRIMOS */ # define OS_CODE 0x0F #endif #ifdef TOPS20 # define OS_CODE 0x0a #endif #if defined(_BEOS_) || defined(RISCOS) # define fdopen(fd,mode) NULL /* No fdopen() */ #endif /* Common defaults */ #ifndef OS_CODE # define OS_CODE 0x03 /* assume Unix */ #endif #ifndef FOPEN # define FOPEN(name, mode) fopen((name), (mode)) #endif /* functions */ #ifdef HAVE_STRERROR extern char *strerror OF((int)); # define zstrerror(errnum) strerror(errnum) #else # define zstrerror(errnum) "" #endif #if defined(pyr) # define NO_MEMCPY #endif #if (defined(M_I86SM) || defined(M_I86MM)) && !defined(_MSC_VER) /* Use our own functions for small and medium model with MSC <= 5.0. * You may have to use the same strategy for Borland C (untested). 
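
The ERR_MSG() macro defined above turns a zlib return code into a table index by computing Z_NEED_DICT-(err). With zlib's public codes running from Z_NEED_DICT (2) down to Z_VERSION_ERROR (-6), that covers slots 0 through 8 of the ten-entry z_errmsg array. A standalone check of the mapping:

#include <stdio.h>

#define Z_NEED_DICT       2
#define Z_VERSION_ERROR (-6)

int
main(void)
{
	int err;

	for (err = Z_NEED_DICT; err >= Z_VERSION_ERROR; err--)
		printf("err=%2d -> z_errmsg[%d]\n", err, Z_NEED_DICT - err);
	/* indices 0..8; slot 9 is a spare */
	return (0);
}
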
*/ # define NO_MEMCPY #endif #if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY) # define HAVE_MEMCPY #endif #ifdef HAVE_MEMCPY # ifdef SMALL_MEDIUM /* MSDOS small or medium model */ # define zmemcpy _fmemcpy # define zmemcmp _fmemcmp # define zmemzero(dest, len) _fmemset(dest, 0, len) # else # define zmemcpy memcpy # define zmemcmp memcmp # define zmemzero(dest, len) memset(dest, 0, len) # endif #else extern void zmemcpy OF((Bytef* dest, Bytef* source, uInt len)); extern int zmemcmp OF((Bytef* s1, Bytef* s2, uInt len)); extern void zmemzero OF((Bytef* dest, uInt len)); #endif /* Diagnostic functions */ #ifdef DEBUG_ZLIB # include <stdio.h> # ifndef verbose # define verbose 0 # endif extern void z_error OF((char *m)); # define Assert(cond,msg) {if(!(cond)) z_error(msg);} # define Trace(x) fprintf x # define Tracev(x) {if (verbose) fprintf x ;} # define Tracevv(x) {if (verbose>1) fprintf x ;} # define Tracec(c,x) {if (verbose && (c)) fprintf x ;} # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} #else # define Assert(cond,msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) # define Tracec(c,x) # define Tracecv(c,x) #endif typedef uLong (*check_func) OF((uLong check, const Bytef *buf, uInt len)); voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); void zcfree OF((voidpf opaque, voidpf ptr)); #define ZALLOC(strm, items, size) \ (*((strm)->zalloc))((strm)->opaque, (items), (size)) #define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) #define TRY_FREE(s, p) {if (p) ZFREE(s, p);} #endif /* _Z_UTIL_H */ /* --- zutil.h */ /* +++ deflate.h */ /* deflate.h -- internal compression state * Copyright (C) 1995-1996 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* From: deflate.h,v 1.10 1996/07/02 12:41:00 me Exp $ */ #ifndef _DEFLATE_H #define _DEFLATE_H /* #include "zutil.h" */ /* =========================================================================== * Internal compression state. */ #define LENGTH_CODES 29 /* number of length codes, not counting the special END_BLOCK code */ #define LITERALS 256 /* number of literal bytes 0..255 */ #define L_CODES (LITERALS+1+LENGTH_CODES) /* number of Literal or Length codes, including the END_BLOCK code */ #define D_CODES 30 /* number of distance codes */ #define BL_CODES 19 /* number of codes used to transfer the bit lengths */ #define HEAP_SIZE (2*L_CODES+1) /* maximum heap size */ #define MAX_BITS 15 /* All codes must not exceed MAX_BITS bits */ #define INIT_STATE 42 #define BUSY_STATE 113 #define FINISH_STATE 666 /* Stream status */ /* Data structure describing a single value and its code string. */ typedef struct ct_data_s { union { ush freq; /* frequency count */ ush code; /* bit string */ } fc; union { ush dad; /* father node in Huffman tree */ ush len; /* length of bit string */ } dl; } FAR ct_data; #define Freq fc.freq #define Code fc.code #define Dad dl.dad #define Len dl.len typedef struct static_tree_desc_s static_tree_desc; typedef struct tree_desc_s { ct_data *dyn_tree; /* the dynamic tree */ int max_code; /* largest code with non zero frequency */ static_tree_desc *stat_desc; /* the corresponding static tree */ } FAR tree_desc; typedef ush Pos; typedef Pos FAR Posf; typedef unsigned IPos; /* A Pos is an index in the character window.
We use short instead of int to * save space in the various tables. IPos is used only for parameter passing. */ typedef struct deflate_state { z_streamp strm; /* pointer back to this zlib stream */ int status; /* as the name implies */ Bytef *pending_buf; /* output still pending */ ulg pending_buf_size; /* size of pending_buf */ Bytef *pending_out; /* next pending byte to output to the stream */ int pending; /* nb of bytes in the pending buffer */ int noheader; /* suppress zlib header and adler32 */ Byte data_type; /* UNKNOWN, BINARY or ASCII */ Byte method; /* STORED (for zip only) or DEFLATED */ int last_flush; /* value of flush param for previous deflate call */ /* used by deflate.c: */ uInt w_size; /* LZ77 window size (32K by default) */ uInt w_bits; /* log2(w_size) (8..16) */ uInt w_mask; /* w_size - 1 */ Bytef *window; /* Sliding window. Input bytes are read into the second half of the window, * and move to the first half later to keep a dictionary of at least wSize * bytes. With this organization, matches are limited to a distance of * wSize-MAX_MATCH bytes, but this ensures that IO is always * performed with a length multiple of the block size. Also, it limits * the window size to 64K, which is quite useful on MSDOS. * To do: use the user input buffer as sliding window. */ ulg window_size; /* Actual size of window: 2*wSize, except when the user input buffer * is directly used as sliding window. */ Posf *prev; /* Link to older string with same hash index. To limit the size of this * array to 64K, this link is maintained only for the last 32K strings. * An index in this array is thus a window index modulo 32K. */ Posf *head; /* Heads of the hash chains or NIL. */ uInt ins_h; /* hash index of string to be inserted */ uInt hash_size; /* number of elements in hash table */ uInt hash_bits; /* log2(hash_size) */ uInt hash_mask; /* hash_size-1 */ uInt hash_shift; /* Number of bits by which ins_h must be shifted at each input * step. It must be such that after MIN_MATCH steps, the oldest * byte no longer takes part in the hash key, that is: * hash_shift * MIN_MATCH >= hash_bits */ long block_start; /* Window position at the beginning of the current output block. Gets * negative when the window is moved backwards. */ uInt match_length; /* length of best match */ IPos prev_match; /* previous match */ int match_available; /* set if previous match exists */ uInt strstart; /* start of string to insert */ uInt match_start; /* start of matching string */ uInt lookahead; /* number of valid bytes ahead in window */ uInt prev_length; /* Length of the best match at previous step. Matches not greater than this * are discarded. This is used in the lazy match evaluation. */ uInt max_chain_length; /* To speed up deflation, hash chains are never searched beyond this * length. A higher limit improves compression ratio but degrades the * speed. */ uInt max_lazy_match; /* Attempt to find a better match only when the current match is strictly * smaller than this value. This mechanism is used only for compression * levels >= 4. */ # define max_insert_length max_lazy_match /* Insert new strings in the hash table only if the match length is not * greater than this length. This saves time but degrades compression. * max_insert_length is used only for compression levels <= 3. 
*/ int level; /* compression level (1..9) */ int strategy; /* favor or force Huffman coding*/ uInt good_match; /* Use a faster search when the previous match is longer than this */ int nice_match; /* Stop searching when current match exceeds this */ /* used by trees.c: */ /* Didn't use ct_data typedef below to suppress compiler warning */ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ struct tree_desc_s l_desc; /* desc. for literal tree */ struct tree_desc_s d_desc; /* desc. for distance tree */ struct tree_desc_s bl_desc; /* desc. for bit length tree */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ int heap_len; /* number of elements in the heap */ int heap_max; /* element of largest frequency */ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. * The same heap array is used to build all trees. */ uch depth[2*L_CODES+1]; /* Depth of each subtree used as tie breaker for trees of equal frequency */ uchf *l_buf; /* buffer for literals or lengths */ uInt lit_bufsize; /* Size of match buffer for literals/lengths. There are 4 reasons for * limiting lit_bufsize to 64K: * - frequencies can be kept in 16 bit counters * - if compression is not successful for the first block, all input * data is still in the window so we can still emit a stored block even * when input comes from standard input. (This can also be done for * all blocks if lit_bufsize is not greater than 32K.) * - if compression is not successful for a file smaller than 64K, we can * even emit a stored file instead of a stored block (saving 5 bytes). * This is applicable only for zip (not gzip or zlib). * - creating new Huffman trees less frequently may not provide fast * adaptation to changes in the input data statistics. (Take for * example a binary file with poorly compressible code followed by * a highly compressible string table.) Smaller buffer sizes give * fast adaptation but have of course the overhead of transmitting * trees more frequently. * - I can't count above 4 */ uInt last_lit; /* running index in l_buf */ ushf *d_buf; /* Buffer for distances. To simplify the code, d_buf and l_buf have * the same number of elements. To use different lengths, an extra flag * array would be necessary. */ ulg opt_len; /* bit length of current block with optimal trees */ ulg static_len; /* bit length of current block with static trees */ ulg compressed_len; /* total bit length of compressed file */ uInt matches; /* number of string matches in current block */ int last_eob_len; /* bit length of EOB code for last block */ #ifdef DEBUG_ZLIB ulg bits_sent; /* bit length of the compressed data */ #endif ush bi_buf; /* Output buffer. bits are inserted starting at the bottom (least * significant bits). */ int bi_valid; /* Number of valid bits in bi_buf. All bits above the last valid bit * are always zero. */ } FAR deflate_state; /* Output a byte on the stream. * IN assertion: there is enough room in pending_buf. */ #define put_byte(s, c) {s->pending_buf[s->pending++] = (c);} #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) /* Minimum amount of lookahead, except at the end of the input file. * See deflate.c for comments about the MIN_MATCH+1.
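*/

/*
 * [Editor's example -- not part of this change] Worked numbers for the
 * lookahead macros, assuming the default 32K window (w_size = 32768):
 * MIN_LOOKAHEAD = 258 + 3 + 1 = 262, and MAX_DIST (defined just below)
 * = 32768 - 262 = 32506, the farthest back a match may start.
 */
#if 0	/* illustrative only */
#include <stdio.h>
int main(void)
{
	unsigned w_size = 32768;		/* default LZ77 window */
	unsigned min_lookahead = 258 + 3 + 1;	/* MAX_MATCH+MIN_MATCH+1 */
	printf("MIN_LOOKAHEAD = %u\n", min_lookahead);		/* 262 */
	printf("MAX_DIST      = %u\n", w_size - min_lookahead);	/* 32506 */
	return 0;
}
#endif
/*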
*/ #define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) /* In order to simplify the code, particularly on 16 bit machines, match * distances are limited to MAX_DIST instead of WSIZE. */ /* in trees.c */ void _tr_init OF((deflate_state *s)); int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); ulg _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len, int eof)); void _tr_align OF((deflate_state *s)); void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, int eof)); void _tr_stored_type_only OF((deflate_state *)); #endif /* --- deflate.h */ /* +++ deflate.c */ /* deflate.c -- compress data using the deflation algorithm * Copyright (C) 1995-1996 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ /* * ALGORITHM * * The "deflation" process depends on being able to identify portions * of the input text which are identical to earlier input (within a * sliding window trailing behind the input currently being processed). * * The most straightforward technique turns out to be the fastest for * most input files: try all possible matches and select the longest. * The key feature of this algorithm is that insertions into the string * dictionary are very simple and thus fast, and deletions are avoided * completely. Insertions are performed at each input character, whereas * string matches are performed only when the previous match ends. So it * is preferable to spend more time in matches to allow very fast string * insertions and avoid deletions. The matching algorithm for small * strings is inspired from that of Rabin & Karp. A brute force approach * is used to find longer strings when a small match has been found. * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze * (by Leonid Broukhis). * A previous version of this file used a more sophisticated algorithm * (by Fiala and Greene) which is guaranteed to run in linear amortized * time, but has a larger average cost, uses more memory and is patented. * However the F&G algorithm may be faster for some highly redundant * files if the parameter max_chain_length (described below) is too large. * * ACKNOWLEDGEMENTS * * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and * I found it in 'freeze' written by Leonid Broukhis. * Thanks to many people for bug reports and testing. * * REFERENCES * * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". * Available in ftp://ds.internic.net/rfc/rfc1951.txt * * A description of the Rabin and Karp algorithm is given in the book * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. * * Fiala,E.R., and Greene,D.H. * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 * */ /* From: deflate.c,v 1.15 1996/07/24 13:40:58 me Exp $ */ /* #include "deflate.h" */ char deflate_copyright[] = " deflate 1.0.4 Copyright 1995-1996 Jean-loup Gailly "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot include such an acknowledgment, I would appreciate that you keep this copyright string in the executable of your product. */ /* =========================================================================== * Function prototypes. 
*/ typedef enum { need_more, /* block not completed, need more input or more output */ block_done, /* block flush performed */ finish_started, /* finish started, need only more output at next deflate */ finish_done /* finish done, accept no more input or output */ } block_state; typedef block_state (*compress_func) OF((deflate_state *s, int flush)); /* Compression function. Returns the block state after the call. */ local void fill_window OF((deflate_state *s)); local block_state deflate_stored OF((deflate_state *s, int flush)); local block_state deflate_fast OF((deflate_state *s, int flush)); local block_state deflate_slow OF((deflate_state *s, int flush)); local void lm_init OF((deflate_state *s)); local void putShortMSB OF((deflate_state *s, uInt b)); local void flush_pending OF((z_streamp strm)); local int read_buf OF((z_streamp strm, charf *buf, unsigned size)); #ifdef ASMV void match_init OF((void)); /* asm code initialization */ uInt longest_match OF((deflate_state *s, IPos cur_match)); #else local uInt longest_match OF((deflate_state *s, IPos cur_match)); #endif #ifdef DEBUG_ZLIB local void check_match OF((deflate_state *s, IPos start, IPos match, int length)); #endif /* =========================================================================== * Local data */ #define NIL 0 /* Tail of hash chains */ #ifndef TOO_FAR # define TOO_FAR 4096 #endif /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) /* Minimum amount of lookahead, except at the end of the input file. * See deflate.c for comments about the MIN_MATCH+1. */ /* Values for max_lazy_match, good_match and max_chain_length, depending on * the desired pack level (0..9). The values given below have been tuned to * exclude worst case performance for pathological files. Better values may be * found for specific files. */ typedef struct config_s { ush good_length; /* reduce lazy search above this match length */ ush max_lazy; /* do not perform lazy search above this match length */ ush nice_length; /* quit search above this match length */ ush max_chain; compress_func func; } config; local config configuration_table[10] = { /* good lazy nice chain */ /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ /* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */ /* 2 */ {4, 5, 16, 8, deflate_fast}, /* 3 */ {4, 6, 32, 32, deflate_fast}, /* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ /* 5 */ {8, 16, 32, 32, deflate_slow}, /* 6 */ {8, 16, 128, 128, deflate_slow}, /* 7 */ {8, 32, 128, 256, deflate_slow}, /* 8 */ {32, 128, 258, 1024, deflate_slow}, /* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */ /* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different * meaning. */ #define EQUAL 0 /* result of memcmp for equal strings */ #ifndef NO_DUMMY_DECL struct static_tree_desc_s {int dummy;}; /* for buggy compilers */ #endif /* =========================================================================== * Update a hash value with the given input byte * IN assertion: all calls to UPDATE_HASH are made with consecutive * input characters, so that a running hash key can be computed from the * previous key instead of complete recalculation each time.
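*/

/*
 * [Editor's example -- not part of this change] A standalone sketch of the
 * running hash implemented by UPDATE_HASH() below, with hash_bits = 15 and
 * hash_shift = (15+3-1)/3 = 5 hard-coded.  After MIN_MATCH = 3 updates the
 * oldest byte has been shifted left 3*5 = 15 >= hash_bits places, so it no
 * longer influences the key -- exactly the property the comment above states.
 */
#if 0	/* illustrative only */
#include <stdio.h>
static unsigned update_hash(unsigned h, unsigned char c)
{
	return ((h << 5) ^ c) & 0x7fff;		/* mask = (1<<15)-1 */
}
int main(void)
{
	unsigned h1 = 0, h2 = 0x5ead;	/* two different hash histories */
	const char *s = "abc";
	int i;
	for (i = 0; i < 3; i++) {
		h1 = update_hash(h1, (unsigned char)s[i]);
		h2 = update_hash(h2, (unsigned char)s[i]);
	}
	printf("%u %u\n", h1, h2);	/* equal: old history has dropped out */
	return 0;
}
#endif
/*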
*/ #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask) /* =========================================================================== * Insert string str in the dictionary and set match_head to the previous head * of the hash chain (the most recent string with same hash key). Return * the previous length of the hash chain. * IN assertion: all calls to INSERT_STRING are made with consecutive * input characters and the first MIN_MATCH bytes of str are valid * (except for the last MIN_MATCH-1 bytes of the input file). */ #define INSERT_STRING(s, str, match_head) \ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \ s->head[s->ins_h] = (Pos)(str)) /* =========================================================================== * Initialize the hash table (avoiding 64K overflow for 16 bit systems). * prev[] will be initialized on the fly. */ #define CLEAR_HASH(s) \ s->head[s->hash_size-1] = NIL; \ zmemzero((charf *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); /* ========================================================================= */ int deflateInit_(strm, level, version, stream_size) z_streamp strm; int level; const char *version; int stream_size; { return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, version, stream_size); /* To do: ignore strm->next_in if we use it as window */ } /* ========================================================================= */ int deflateInit2_(strm, level, method, windowBits, memLevel, strategy, version, stream_size) z_streamp strm; int level; int method; int windowBits; int memLevel; int strategy; const char *version; int stream_size; { deflate_state *s; int noheader = 0; static char* my_version = ZLIB_VERSION; ushf *overlay; /* We overlay pending_buf and d_buf+l_buf. This works since the average * output size for (length,distance) codes is <= 24 bits.
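*/
/*
 * [Editor's note -- not part of this change] Worked example of that overlay,
 * assuming memLevel = 8: lit_bufsize = 1 << (8+6) = 16384 symbols, so the
 * ZALLOC below grabs 16384*(sizeof(ush)+2) = 64K bytes.  d_buf begins at
 * byte offset 16384 (two distance bytes per symbol, through offset 49151)
 * and l_buf at offset 3*16384 = 49152 (one literal/length byte per symbol):
 * 3 bytes = 24 bits per buffered (length,distance) pair.
 */
/*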
*/ if (version == Z_NULL || version[0] != my_version[0] || stream_size != sizeof(z_stream)) { return Z_VERSION_ERROR; } if (strm == Z_NULL) return Z_STREAM_ERROR; strm->msg = Z_NULL; #ifndef NO_ZCFUNCS if (strm->zalloc == Z_NULL) { strm->zalloc = zcalloc; strm->opaque = (voidpf)0; } if (strm->zfree == Z_NULL) strm->zfree = zcfree; #endif if (level == Z_DEFAULT_COMPRESSION) level = 6; if (windowBits < 0) { /* undocumented feature: suppress zlib header */ noheader = 1; windowBits = -windowBits; } if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || windowBits < 9 || windowBits > 15 || level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) { return Z_STREAM_ERROR; } s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); if (s == Z_NULL) return Z_MEM_ERROR; strm->state = (struct internal_state FAR *)s; s->strm = strm; s->noheader = noheader; s->w_bits = windowBits; s->w_size = 1 << s->w_bits; s->w_mask = s->w_size - 1; s->hash_bits = memLevel + 7; s->hash_size = 1 << s->hash_bits; s->hash_mask = s->hash_size - 1; s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); s->pending_buf = (uchf *) overlay; s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || s->pending_buf == Z_NULL) { strm->msg = (const char*)ERR_MSG(Z_MEM_ERROR); deflateEnd (strm); return Z_MEM_ERROR; } s->d_buf = overlay + s->lit_bufsize/sizeof(ush); s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; s->level = level; s->strategy = strategy; s->method = (Byte)method; return deflateReset(strm); } /* ========================================================================= */ int deflateSetDictionary (strm, dictionary, dictLength) z_streamp strm; const Bytef *dictionary; uInt dictLength; { deflate_state *s; uInt length = dictLength; uInt n; IPos hash_head = 0; if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL) return Z_STREAM_ERROR; s = (deflate_state *) strm->state; if (s->status != INIT_STATE) return Z_STREAM_ERROR; strm->adler = adler32(strm->adler, dictionary, dictLength); if (length < MIN_MATCH) return Z_OK; if (length > MAX_DIST(s)) { length = MAX_DIST(s); #ifndef USE_DICT_HEAD dictionary += dictLength - length; /* use the tail of the dictionary */ #endif } zmemcpy((charf *)s->window, dictionary, length); s->strstart = length; s->block_start = (long)length; /* Insert all strings in the hash table (except for the last two bytes). * s->lookahead stays null, so s->ins_h will be recomputed at the next * call of fill_window. 
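*/
/*
 * [Editor's note -- not part of this change] UPDATE_HASH() folds one byte
 * into the key per call, so the two statements below prime ins_h with
 * window[0] and window[1]; each INSERT_STRING() in the loop then supplies
 * the third byte, window[n + MIN_MATCH - 1], completing a 3-byte key.
 */
/*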
*/ s->ins_h = s->window[0]; UPDATE_HASH(s, s->ins_h, s->window[1]); for (n = 0; n <= length - MIN_MATCH; n++) { INSERT_STRING(s, n, hash_head); } if (hash_head) hash_head = 0; /* to make compiler happy */ return Z_OK; } /* ========================================================================= */ int deflateReset (strm) z_streamp strm; { deflate_state *s; if (strm == Z_NULL || strm->state == Z_NULL || strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR; strm->total_in = strm->total_out = 0; strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ strm->data_type = Z_UNKNOWN; s = (deflate_state *)strm->state; s->pending = 0; s->pending_out = s->pending_buf; if (s->noheader < 0) { s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */ } s->status = s->noheader ? BUSY_STATE : INIT_STATE; strm->adler = 1; s->last_flush = Z_NO_FLUSH; _tr_init(s); lm_init(s); return Z_OK; } /* ========================================================================= */ int deflateParams(strm, level, strategy) z_streamp strm; int level; int strategy; { deflate_state *s; compress_func func; int err = Z_OK; if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; s = (deflate_state *) strm->state; if (level == Z_DEFAULT_COMPRESSION) { level = 6; } if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) { return Z_STREAM_ERROR; } func = configuration_table[s->level].func; if (func != configuration_table[level].func && strm->total_in != 0) { /* Flush the last buffer: */ err = deflate(strm, Z_PARTIAL_FLUSH); } if (s->level != level) { s->level = level; s->max_lazy_match = configuration_table[level].max_lazy; s->good_match = configuration_table[level].good_length; s->nice_match = configuration_table[level].nice_length; s->max_chain_length = configuration_table[level].max_chain; } s->strategy = strategy; return err; } /* ========================================================================= * Put a short in the pending buffer. The 16-bit value is put in MSB order. * IN assertion: the stream state is correct and there is enough room in * pending_buf. */ local void putShortMSB (s, b) deflate_state *s; uInt b; { put_byte(s, (Byte)(b >> 8)); put_byte(s, (Byte)(b & 0xff)); } /* ========================================================================= * Flush as much pending output as possible. All deflate() output goes * through this function so some applications may wish to modify it * to avoid allocating a large strm->next_out buffer and copying into it. * (See also read_buf()). 
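*/

/*
 * [Editor's example -- not part of this change] The 2-byte zlib header
 * written by deflate() below, recomputed standalone for the common case
 * (level 6, 32K window, no preset dictionary).  The check-bits adjustment
 * makes the 16-bit header a multiple of 31, yielding the familiar
 * 0x78 0x9c prefix of zlib streams.
 */
#if 0	/* illustrative only */
#include <stdio.h>
int main(void)
{
	unsigned w_bits = 15, level = 6;
	unsigned header = (8 + ((w_bits - 8) << 4)) << 8; /* CM=8, CINFO=7 */
	unsigned level_flags = (level - 1) >> 1;	  /* FLEVEL */
	if (level_flags > 3) level_flags = 3;
	header |= (level_flags << 6);
	header += 31 - (header % 31);	/* force header % 31 == 0 */
	printf("%02x %02x\n", header >> 8, header & 0xff);	/* 78 9c */
	return 0;
}
#endif
/*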
*/ local void flush_pending(strm) z_streamp strm; { deflate_state *s = (deflate_state *) strm->state; unsigned len = s->pending; if (len > strm->avail_out) len = strm->avail_out; if (len == 0) return; if (strm->next_out != Z_NULL) { zmemcpy(strm->next_out, s->pending_out, len); strm->next_out += len; } s->pending_out += len; strm->total_out += len; strm->avail_out -= len; s->pending -= len; if (s->pending == 0) { s->pending_out = s->pending_buf; } } /* ========================================================================= */ int deflate (strm, flush) z_streamp strm; int flush; { int old_flush; /* value of flush param for previous deflate call */ deflate_state *s; if (strm == Z_NULL || strm->state == Z_NULL || flush > Z_FINISH || flush < 0) { return Z_STREAM_ERROR; } s = (deflate_state *) strm->state; if ((strm->next_in == Z_NULL && strm->avail_in != 0) || (s->status == FINISH_STATE && flush != Z_FINISH)) { ERR_RETURN(strm, Z_STREAM_ERROR); } if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); s->strm = strm; /* just in case */ old_flush = s->last_flush; s->last_flush = flush; /* Write the zlib header */ if (s->status == INIT_STATE) { uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; uInt level_flags = (s->level-1) >> 1; if (level_flags > 3) level_flags = 3; header |= (level_flags << 6); if (s->strstart != 0) header |= PRESET_DICT; header += 31 - (header % 31); s->status = BUSY_STATE; putShortMSB(s, header); /* Save the adler32 of the preset dictionary: */ if (s->strstart != 0) { putShortMSB(s, (uInt)(strm->adler >> 16)); putShortMSB(s, (uInt)(strm->adler & 0xffff)); } strm->adler = 1L; } /* Flush as much pending output as possible */ if (s->pending != 0) { flush_pending(strm); if (strm->avail_out == 0) { /* Since avail_out is 0, deflate will be called again with * more output space, but possibly with both pending and * avail_in equal to zero. There won't be anything to do, * but this is not an error situation so make sure we * return OK instead of BUF_ERROR at next call of deflate: */ s->last_flush = -1; return Z_OK; } /* Make sure there is something to do and avoid duplicate consecutive * flushes. For repeated and useless calls with Z_FINISH, we keep * returning Z_STREAM_END instead of Z_BUF_ERROR. */ } else if (strm->avail_in == 0 && flush <= old_flush && flush != Z_FINISH) { ERR_RETURN(strm, Z_BUF_ERROR); } /* User must not provide more input after the first FINISH: */ if (s->status == FINISH_STATE && strm->avail_in != 0) { ERR_RETURN(strm, Z_BUF_ERROR); } /* Start a new block or continue the current one. */ if (strm->avail_in != 0 || s->lookahead != 0 || (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { block_state bstate; bstate = (*(configuration_table[s->level].func))(s, flush); if (bstate == finish_started || bstate == finish_done) { s->status = FINISH_STATE; } if (bstate == need_more || bstate == finish_started) { if (strm->avail_out == 0) { s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ } return Z_OK; /* If flush != Z_NO_FLUSH && avail_out == 0, the next call * of deflate should use the same flush parameter to make sure * that the flush is complete. So we don't have to output an * empty block here, this will be done at next call. This also * ensures that for a very small output buffer, we emit at most * one empty block. */ } if (bstate == block_done) { if (flush == Z_PARTIAL_FLUSH) { _tr_align(s); } else if (flush == Z_PACKET_FLUSH) { /* Output just the 3-bit `stored' block type value, but not a zero length.
*/ _tr_stored_type_only(s); } else { /* FULL_FLUSH or SYNC_FLUSH */ _tr_stored_block(s, (char*)0, 0L, 0); /* For a full flush, this empty block will be recognized * as a special marker by inflate_sync(). */ if (flush == Z_FULL_FLUSH) { CLEAR_HASH(s); /* forget history */ } } flush_pending(strm); if (strm->avail_out == 0) { s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ return Z_OK; } } } Assert(strm->avail_out > 0, "bug2"); if (flush != Z_FINISH) return Z_OK; if (s->noheader) return Z_STREAM_END; /* Write the zlib trailer (adler32) */ putShortMSB(s, (uInt)(strm->adler >> 16)); putShortMSB(s, (uInt)(strm->adler & 0xffff)); flush_pending(strm); /* If avail_out is zero, the application will call deflate again * to flush the rest. */ s->noheader = -1; /* write the trailer only once! */ return s->pending != 0 ? Z_OK : Z_STREAM_END; } /* ========================================================================= */ int deflateEnd (strm) z_streamp strm; { int status; deflate_state *s; if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; s = (deflate_state *) strm->state; status = s->status; if (status != INIT_STATE && status != BUSY_STATE && status != FINISH_STATE) { return Z_STREAM_ERROR; } /* Deallocate in reverse order of allocations: */ TRY_FREE(strm, s->pending_buf); TRY_FREE(strm, s->head); TRY_FREE(strm, s->prev); TRY_FREE(strm, s->window); ZFREE(strm, s); strm->state = Z_NULL; return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; } /* ========================================================================= * Copy the source state to the destination state. */ int deflateCopy (dest, source) z_streamp dest; z_streamp source; { deflate_state *ds; deflate_state *ss; ushf *overlay; if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) return Z_STREAM_ERROR; ss = (deflate_state *) source->state; zmemcpy(dest, source, sizeof(*dest)); ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); if (ds == Z_NULL) return Z_MEM_ERROR; dest->state = (struct internal_state FAR *) ds; zmemcpy(ds, ss, sizeof(*ds)); ds->strm = dest; ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); ds->pending_buf = (uchf *) overlay; if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || ds->pending_buf == Z_NULL) { deflateEnd (dest); return Z_MEM_ERROR; } /* ??? following zmemcpy doesn't work for 16-bit MSDOS */ zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; ds->l_desc.dyn_tree = ds->dyn_ltree; ds->d_desc.dyn_tree = ds->dyn_dtree; ds->bl_desc.dyn_tree = ds->bl_tree; return Z_OK; } /* =========================================================================== * Return the number of bytes of output which are immediately available * for output from the compressor.
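*/

/*
 * [Editor's example -- not part of this change] Sketch of how a caller such
 * as the PPP compressor can use deflateOutputPending() (defined just below)
 * to size its next output buffer; the z_stream setup is assumed to have
 * been done elsewhere.
 */
#if 0	/* illustrative only */
static int output_backlog(z_streamp strm)
{
	/* bytes already produced but still sitting in pending_buf; a
	 * subsequent deflate() call with fresh avail_out will emit them
	 * via flush_pending() */
	return deflateOutputPending(strm);
}
#endif
/*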
*/ int deflateOutputPending (strm) z_streamp strm; { if (strm == Z_NULL || strm->state == Z_NULL) return 0; return ((deflate_state *)(strm->state))->pending; } /* =========================================================================== * Read a new buffer from the current input stream, update the adler32 * and total number of bytes read. All deflate() input goes through * this function so some applications may wish to modify it to avoid * allocating a large strm->next_in buffer and copying from it. * (See also flush_pending()). */ local int read_buf(strm, buf, size) z_streamp strm; charf *buf; unsigned size; { unsigned len = strm->avail_in; if (len > size) len = size; if (len == 0) return 0; strm->avail_in -= len; if (!((deflate_state *)(strm->state))->noheader) { strm->adler = adler32(strm->adler, strm->next_in, len); } zmemcpy(buf, strm->next_in, len); strm->next_in += len; strm->total_in += len; return (int)len; } /* =========================================================================== * Initialize the "longest match" routines for a new zlib stream */ local void lm_init (s) deflate_state *s; { s->window_size = (ulg)2L*s->w_size; CLEAR_HASH(s); /* Set the default configuration parameters: */ s->max_lazy_match = configuration_table[s->level].max_lazy; s->good_match = configuration_table[s->level].good_length; s->nice_match = configuration_table[s->level].nice_length; s->max_chain_length = configuration_table[s->level].max_chain; s->strstart = 0; s->block_start = 0L; s->lookahead = 0; s->match_length = s->prev_length = MIN_MATCH-1; s->match_available = 0; s->ins_h = 0; #ifdef ASMV match_init(); /* initialize the asm code */ #endif } /* =========================================================================== * Set match_start to the longest match starting at the given string and * return its length. Matches shorter or equal to prev_length are discarded, * in which case the result is equal to prev_length and match_start is * garbage. * IN assertions: cur_match is the head of the hash chain for the current * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 * OUT assertion: the match length is not greater than s->lookahead. */ #ifndef ASMV /* For 80x86 and 680x0, an optimized version will be provided in match.asm or * match.S. The code will be functionally equivalent. */ local uInt longest_match(s, cur_match) deflate_state *s; IPos cur_match; /* current match */ { unsigned chain_length = s->max_chain_length;/* max hash chain length */ register Bytef *scan = s->window + s->strstart; /* current string */ register Bytef *match; /* matched string */ register int len; /* length of current match */ int best_len = s->prev_length; /* best match length so far */ int nice_match = s->nice_match; /* stop if match long enough */ IPos limit = s->strstart > (IPos)MAX_DIST(s) ? s->strstart - (IPos)MAX_DIST(s) : NIL; /* Stop when cur_match becomes <= limit. To simplify the code, * we prevent matches with the string of window index 0. */ Posf *prev = s->prev; uInt wmask = s->w_mask; #ifdef UNALIGNED_OK /* Compare two bytes at a time. Note: this is not always beneficial. * Try with and without -DUNALIGNED_OK to check. 
*/ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; register ush scan_start = *(ushf*)scan; register ush scan_end = *(ushf*)(scan+best_len-1); #else register Bytef *strend = s->window + s->strstart + MAX_MATCH; register Byte scan_end1 = scan[best_len-1]; register Byte scan_end = scan[best_len]; #endif /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. * It is easy to get rid of this optimization if necessary. */ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); /* Do not waste too much time if we already have a good match: */ if (s->prev_length >= s->good_match) { chain_length >>= 2; } /* Do not look for matches beyond the end of the input. This is necessary * to make deflate deterministic. */ if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); do { Assert(cur_match < s->strstart, "no future"); match = s->window + cur_match; /* Skip to next match if the match length cannot increase * or if the match length is less than 2: */ #if (defined(UNALIGNED_OK) && MAX_MATCH == 258) /* This code assumes sizeof(unsigned short) == 2. Do not use * UNALIGNED_OK if your compiler uses a different size. */ if (*(ushf*)(match+best_len-1) != scan_end || *(ushf*)match != scan_start) continue; /* It is not necessary to compare scan[2] and match[2] since they are * always equal when the other bytes match, given that the hash keys * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at * strstart+3, +5, ... up to strstart+257. We check for insufficient * lookahead only every 4th comparison; the 128th check will be made * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is * necessary to put more guard bytes at the end of the window, or * to check more often for insufficient lookahead. */ Assert(scan[2] == match[2], "scan[2]?"); scan++, match++; do { } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && *(ushf*)(scan+=2) == *(ushf*)(match+=2) && *(ushf*)(scan+=2) == *(ushf*)(match+=2) && *(ushf*)(scan+=2) == *(ushf*)(match+=2) && scan < strend); /* The funny "do {}" generates better code on most compilers */ /* Here, scan <= window+strstart+257 */ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); if (*scan == *match) scan++; len = (MAX_MATCH - 1) - (int)(strend-scan); scan = strend - (MAX_MATCH-1); #else /* UNALIGNED_OK */ if (match[best_len] != scan_end || match[best_len-1] != scan_end1 || *match != *scan || *++match != scan[1]) continue; /* The check at best_len-1 can be removed because it will be made * again later. (This heuristic is not always a win.) * It is not necessary to compare scan[2] and match[2] since they * are always equal when the other bytes match, given that * the hash keys are equal and that HASH_BITS >= 8. */ scan += 2, match++; Assert(*scan == *match, "match[2]?"); /* We check for insufficient lookahead only every 8th comparison; * the 256th check will be made at strstart+258. 
*/ do { } while (*++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && scan < strend); Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); len = MAX_MATCH - (int)(strend - scan); scan = strend - MAX_MATCH; #endif /* UNALIGNED_OK */ if (len > best_len) { s->match_start = cur_match; best_len = len; if (len >= nice_match) break; #ifdef UNALIGNED_OK scan_end = *(ushf*)(scan+best_len-1); #else scan_end1 = scan[best_len-1]; scan_end = scan[best_len]; #endif } } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length != 0); if ((uInt)best_len <= s->lookahead) return best_len; return s->lookahead; } #endif /* ASMV */ #ifdef DEBUG_ZLIB /* =========================================================================== * Check that the match at match_start is indeed a match. */ local void check_match(s, start, match, length) deflate_state *s; IPos start, match; int length; { /* check that the match is indeed a match */ if (zmemcmp((charf *)s->window + match, (charf *)s->window + start, length) != EQUAL) { fprintf(stderr, " start %u, match %u, length %d\n", start, match, length); do { fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); } while (--length != 0); z_error("invalid match"); } if (z_verbose > 1) { fprintf(stderr,"\\[%d,%d]", start-match, length); do { putc(s->window[start++], stderr); } while (--length != 0); } } #else # define check_match(s, start, match, length) #endif /* =========================================================================== * Fill the window when the lookahead becomes insufficient. * Updates strstart and lookahead. * * IN assertion: lookahead < MIN_LOOKAHEAD * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD * At least one byte has been read, or avail_in == 0; reads are * performed for at least two bytes (required for the zip translate_eol * option -- not supported here). */ local void fill_window(s) deflate_state *s; { register unsigned n, m; register Posf *p; unsigned more; /* Amount of free space at the end of the window. */ uInt wsize = s->w_size; do { more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); /* Deal with !@#$% 64K limit: */ if (more == 0 && s->strstart == 0 && s->lookahead == 0) { more = wsize; } else if (more == (unsigned)(-1)) { /* Very unlikely, but possible on 16 bit machine if strstart == 0 * and lookahead == 1 (input done one byte at time) */ more--; /* If the window is almost full and there is insufficient lookahead, * move the upper half to the lower one to make room in the upper half. */ } else if (s->strstart >= wsize+MAX_DIST(s)) { zmemcpy((charf *)s->window, (charf *)s->window+wsize, (unsigned)wsize); s->match_start -= wsize; s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ s->block_start -= (long) wsize; /* Slide the hash table (could be avoided with 32 bit values at the expense of memory usage). We slide even when level == 0 to keep the hash table consistent if we switch back to level > 0 later. (Using level 0 permanently is not an optimal usage of zlib, so we don't care about this pathological case.) */ n = s->hash_size; p = &s->head[n]; do { m = *--p; *p = (Pos)(m >= wsize ? m-wsize : NIL); } while (--n); n = wsize; p = &s->prev[n]; do { m = *--p; *p = (Pos)(m >= wsize ? m-wsize : NIL); /* If n is not on any hash chain, prev[n] is garbage but * its value will never be used. 
*/ } while (--n); more += wsize; } if (s->strm->avail_in == 0) return; /* If there was no sliding: * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && * more == window_size - lookahead - strstart * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) * => more >= window_size - 2*WSIZE + 2 * In the BIG_MEM or MMAP case (not yet supported), * window_size == input_size + MIN_LOOKAHEAD && * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. * Otherwise, window_size == 2*WSIZE so more >= 2. * If there was sliding, more >= WSIZE. So in all cases, more >= 2. */ Assert(more >= 2, "more < 2"); n = read_buf(s->strm, (charf *)s->window + s->strstart + s->lookahead, more); s->lookahead += n; /* Initialize the hash value now that we have some input: */ if (s->lookahead >= MIN_MATCH) { s->ins_h = s->window[s->strstart]; UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); #if MIN_MATCH != 3 Call UPDATE_HASH() MIN_MATCH-3 more times #endif } /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, * but this is not important since only literal bytes will be emitted. */ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); } /* =========================================================================== * Flush the current block, with given end-of-file flag. * IN assertion: strstart is set to the end of the current match. */ #define FLUSH_BLOCK_ONLY(s, eof) { \ _tr_flush_block(s, (s->block_start >= 0L ? \ (charf *)&s->window[(unsigned)s->block_start] : \ (charf *)Z_NULL), \ (ulg)((long)s->strstart - s->block_start), \ (eof)); \ s->block_start = s->strstart; \ flush_pending(s->strm); \ Tracev((stderr,"[FLUSH]")); \ } /* Same but force premature exit if necessary. */ #define FLUSH_BLOCK(s, eof) { \ FLUSH_BLOCK_ONLY(s, eof); \ if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \ } /* =========================================================================== * Copy without compression as much as possible from the input stream, return * the current block state. * This function does not insert new strings in the dictionary since * uncompressible data is probably not useful. This function is used * only for the level=0 compression option. * NOTE: this function should be optimized to avoid extra copying from * window to pending_buf. 
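*/

/*
 * [Editor's example -- not part of this change] Standalone sketch of the
 * hash-chain rebasing that fill_window() performs above when the window
 * slides by wsize: indices in the upper half are rebased, anything older
 * collapses to NIL.  Toy sizes, not zlib's real tables.
 */
#if 0	/* illustrative only */
#include <stdio.h>
int main(void)
{
	unsigned short head[8] = { 0, 3, 9, 12, 5, 15, 2, 8 };
	unsigned wsize = 8;	/* toy window size */
	int n;
	for (n = 0; n < 8; n++)
		head[n] = (unsigned short)
		    (head[n] >= wsize ? head[n] - wsize : 0);
	for (n = 0; n < 8; n++)
		printf("%u ", head[n]);	/* prints: 0 0 1 4 0 7 0 0 */
	printf("\n");
	return 0;
}
#endif
/*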
*/ local block_state deflate_stored(s, flush) deflate_state *s; int flush; { /* Stored blocks are limited to 0xffff bytes, pending_buf is limited * to pending_buf_size, and each stored block has a 5 byte header: */ ulg max_block_size = 0xffff; ulg max_start; if (max_block_size > s->pending_buf_size - 5) { max_block_size = s->pending_buf_size - 5; } /* Copy as much as possible from input to output: */ for (;;) { /* Fill the window as much as possible: */ if (s->lookahead <= 1) { Assert(s->strstart < s->w_size+MAX_DIST(s) || s->block_start >= (long)s->w_size, "slide too late"); fill_window(s); if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more; if (s->lookahead == 0) break; /* flush the current block */ } Assert(s->block_start >= 0L, "block gone"); s->strstart += s->lookahead; s->lookahead = 0; /* Emit a stored block if pending_buf will be full: */ max_start = s->block_start + max_block_size; if (s->strstart == 0 || (ulg)s->strstart >= max_start) { /* strstart == 0 is possible when wraparound on 16-bit machine */ s->lookahead = (uInt)(s->strstart - max_start); s->strstart = (uInt)max_start; FLUSH_BLOCK(s, 0); } /* Flush if we may have to slide, otherwise block_start may become * negative and the data will be gone: */ if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { FLUSH_BLOCK(s, 0); } } FLUSH_BLOCK(s, flush == Z_FINISH); return flush == Z_FINISH ? finish_done : block_done; } /* =========================================================================== * Compress as much as possible from the input stream, return the current * block state. * This function does not perform lazy evaluation of matches and inserts * new strings in the dictionary only for unmatched strings or for short * matches. It is used only for the fast compression options. */ local block_state deflate_fast(s, flush) deflate_state *s; int flush; { IPos hash_head = NIL; /* head of the hash chain */ int bflush; /* set if current block must be flushed */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. We need MAX_MATCH bytes * for the next match, plus MIN_MATCH bytes to insert the * string following the next match. */ if (s->lookahead < MIN_LOOKAHEAD) { fill_window(s); if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { return need_more; } if (s->lookahead == 0) break; /* flush the current block */ } /* Insert the string window[strstart .. strstart+2] in the * dictionary, and set hash_head to the head of the hash chain: */ if (s->lookahead >= MIN_MATCH) { INSERT_STRING(s, s->strstart, hash_head); } /* Find the longest match, discarding those <= prev_length. * At this point we have always match_length < MIN_MATCH */ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { /* To simplify the code, we prevent matches with the string * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ if (s->strategy != Z_HUFFMAN_ONLY) { s->match_length = longest_match (s, hash_head); } /* longest_match() sets match_start */ } if (s->match_length >= MIN_MATCH) { check_match(s, s->strstart, s->match_start, s->match_length); bflush = _tr_tally(s, s->strstart - s->match_start, s->match_length - MIN_MATCH); s->lookahead -= s->match_length; /* Insert new strings in the hash table only if the match length * is not too large. This saves time but degrades compression. 
*/ if (s->match_length <= s->max_insert_length && s->lookahead >= MIN_MATCH) { s->match_length--; /* string at strstart already in hash table */ do { s->strstart++; INSERT_STRING(s, s->strstart, hash_head); /* strstart never exceeds WSIZE-MAX_MATCH, so there are * always MIN_MATCH bytes ahead. */ } while (--s->match_length != 0); s->strstart++; } else { s->strstart += s->match_length; s->match_length = 0; s->ins_h = s->window[s->strstart]; UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); #if MIN_MATCH != 3 Call UPDATE_HASH() MIN_MATCH-3 more times #endif /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not * matter since it will be recomputed at next deflate call. */ } } else { /* No match, output a literal byte */ Tracevv((stderr,"%c", s->window[s->strstart])); bflush = _tr_tally (s, 0, s->window[s->strstart]); s->lookahead--; s->strstart++; } if (bflush) FLUSH_BLOCK(s, 0); } FLUSH_BLOCK(s, flush == Z_FINISH); return flush == Z_FINISH ? finish_done : block_done; } /* =========================================================================== * Same as above, but achieves better compression. We use a lazy * evaluation for matches: a match is finally adopted only if there is * no better match at the next window position. */ local block_state deflate_slow(s, flush) deflate_state *s; int flush; { IPos hash_head = NIL; /* head of hash chain */ int bflush; /* set if current block must be flushed */ /* Process the input block. */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. We need MAX_MATCH bytes * for the next match, plus MIN_MATCH bytes to insert the * string following the next match. */ if (s->lookahead < MIN_LOOKAHEAD) { fill_window(s); if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { return need_more; } if (s->lookahead == 0) break; /* flush the current block */ } /* Insert the string window[strstart .. strstart+2] in the * dictionary, and set hash_head to the head of the hash chain: */ if (s->lookahead >= MIN_MATCH) { INSERT_STRING(s, s->strstart, hash_head); } /* Find the longest match, discarding those <= prev_length. */ s->prev_length = s->match_length, s->prev_match = s->match_start; s->match_length = MIN_MATCH-1; if (hash_head != NIL && s->prev_length < s->max_lazy_match && s->strstart - hash_head <= MAX_DIST(s)) { /* To simplify the code, we prevent matches with the string * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ if (s->strategy != Z_HUFFMAN_ONLY) { s->match_length = longest_match (s, hash_head); } /* longest_match() sets match_start */ if (s->match_length <= 5 && (s->strategy == Z_FILTERED || (s->match_length == MIN_MATCH && s->strstart - s->match_start > TOO_FAR))) { /* If prev_match is also MIN_MATCH, match_start is garbage * but we will ignore the current match anyway. */ s->match_length = MIN_MATCH-1; } } /* If there was a match at the previous step and the current * match is not better, output the previous match: */ if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; /* Do not insert strings in hash table beyond this. */ check_match(s, s->strstart-1, s->prev_match, s->prev_length); bflush = _tr_tally(s, s->strstart -1 - s->prev_match, s->prev_length - MIN_MATCH); /* Insert in hash table all strings up to the end of the match. * strstart-1 and strstart are already inserted. 
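*/
/*
 * [Editor's note -- not part of this change] Concrete trace of the lazy
 * choice made above: suppose the scan at strstart-1 found a length-4 match
 * and the scan at strstart finds only length 3; prev_length >= match_length,
 * so the length-4 match is emitted here.  Had strstart found a length-5
 * match instead, control would fall through to the match_available branch
 * below and the previous match would be truncated to a single literal.
 */
/*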
If there is not * enough lookahead, the last two strings are not inserted in * the hash table. */ s->lookahead -= s->prev_length-1; s->prev_length -= 2; do { if (++s->strstart <= max_insert) { INSERT_STRING(s, s->strstart, hash_head); } } while (--s->prev_length != 0); s->match_available = 0; s->match_length = MIN_MATCH-1; s->strstart++; if (bflush) FLUSH_BLOCK(s, 0); } else if (s->match_available) { /* If there was no match at the previous position, output a * single literal. If there was a match but the current match * is longer, truncate the previous match to a single literal. */ Tracevv((stderr,"%c", s->window[s->strstart-1])); if (_tr_tally (s, 0, s->window[s->strstart-1])) { FLUSH_BLOCK_ONLY(s, 0); } s->strstart++; s->lookahead--; if (s->strm->avail_out == 0) return need_more; } else { /* There is no previous match to compare with, wait for * the next step to decide. */ s->match_available = 1; s->strstart++; s->lookahead--; } } Assert (flush != Z_NO_FLUSH, "no flush?"); if (s->match_available) { Tracevv((stderr,"%c", s->window[s->strstart-1])); _tr_tally (s, 0, s->window[s->strstart-1]); s->match_available = 0; } FLUSH_BLOCK(s, flush == Z_FINISH); return flush == Z_FINISH ? finish_done : block_done; } /* --- deflate.c */ /* +++ trees.c */ /* trees.c -- output deflated data using Huffman coding * Copyright (C) 1995-1996 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* * ALGORITHM * * The "deflation" process uses several Huffman trees. The more * common source values are represented by shorter bit sequences. * * Each code tree is stored in a compressed form which is itself * a Huffman encoding of the lengths of all the code strings (in * ascending order by source values). The actual code strings are * reconstructed from the lengths in the inflate process, as described * in the deflate specification. * * REFERENCES * * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc * * Storer, James A. * Data Compression: Methods and Theory, pp. 49-50. * Computer Science Press, 1988. ISBN 0-7167-8156-5. * * Sedgewick, R. * Algorithms, p290. * Addison-Wesley, 1983. ISBN 0-201-06672-6. */ /* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */ /* #include "deflate.h" */ #ifdef DEBUG_ZLIB # include <ctype.h> #endif /* =========================================================================== * Constants */ #define MAX_BL_BITS 7 /* Bit length codes must not exceed MAX_BL_BITS bits */ #define END_BLOCK 256 /* end of block literal code */ #define REP_3_6 16 /* repeat previous bit length 3-6 times (2 bits of repeat count) */ #define REPZ_3_10 17 /* repeat a zero length 3-10 times (3 bits of repeat count) */ #define REPZ_11_138 18 /* repeat a zero length 11-138 times (7 bits of repeat count) */ local int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; local int extra_dbits[D_CODES] /* extra bits for each distance code */ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; local int extra_blbits[BL_CODES]/* extra bits for each bit length code */ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; local uch bl_order[BL_CODES] = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; /* The lengths of the bit length codes are sent in order of decreasing * probability, to avoid transmitting the lengths for unused bit length codes. */ #define Buf_size (8 * 2*sizeof(char)) /* Number of bits used within bi_buf.
(bi_buf might be implemented on * more than 16 bits on some systems.) */ /* =========================================================================== * Local data. These are initialized only once. */ local ct_data static_ltree[L_CODES+2]; /* The static literal tree. Since the bit lengths are imposed, there is no * need for the L_CODES extra codes used during heap construction. However * The codes 286 and 287 are needed to build a canonical tree (see _tr_init * below). */ local ct_data static_dtree[D_CODES]; /* The static distance tree. (Actually a trivial tree since all codes use * 5 bits.) */ local uch dist_code[512]; /* distance codes. The first 256 values correspond to the distances * 3 .. 258, the last 256 values correspond to the top 8 bits of * the 15 bit distances. */ local uch length_code[MAX_MATCH-MIN_MATCH+1]; /* length code for each normalized match length (0 == MIN_MATCH) */ local int base_length[LENGTH_CODES]; /* First normalized length for each code (0 = MIN_MATCH) */ local int base_dist[D_CODES]; /* First normalized distance for each code (0 = distance of 1) */ struct static_tree_desc_s { ct_data *static_tree; /* static tree or NULL */ intf *extra_bits; /* extra bits for each code or NULL */ int extra_base; /* base index for extra_bits */ int elems; /* max number of elements in the tree */ int max_length; /* max bit length for the codes */ }; local static_tree_desc static_l_desc = {static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; local static_tree_desc static_d_desc = {static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; local static_tree_desc static_bl_desc = {(ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; /* =========================================================================== * Local (static) routines in this file. */ local void tr_static_init OF((void)); local void init_block OF((deflate_state *s)); local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); local void build_tree OF((deflate_state *s, tree_desc *desc)); local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); local int build_bl_tree OF((deflate_state *s)); local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, int blcodes)); local void compress_block OF((deflate_state *s, ct_data *ltree, ct_data *dtree)); local void set_data_type OF((deflate_state *s)); local unsigned bi_reverse OF((unsigned value, int length)); local void bi_windup OF((deflate_state *s)); local void bi_flush OF((deflate_state *s)); local void copy_block OF((deflate_state *s, charf *buf, unsigned len, int header)); #ifndef DEBUG_ZLIB # define send_code(s, c, tree) send_bits(s, tree[(c)].Code, tree[(c)].Len) /* Send a code of the given tree. c and tree must not have side effects */ #else /* DEBUG_ZLIB */ # define send_code(s, c, tree) \ { if (verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ send_bits(s, tree[c].Code, tree[c].Len); } #endif #define d_code(dist) \ ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)]) /* Mapping from a distance to a distance code. dist is the distance - 1 and * must not have side effects. dist_code[256] and dist_code[257] are never * used. */ /* =========================================================================== * Output a short LSB first on the stream. * IN assertion: there is enough room in pendingBuf. 
*/ #define put_short(s, w) { \ put_byte(s, (uch)((w) & 0xff)); \ put_byte(s, (uch)((ush)(w) >> 8)); \ } /* =========================================================================== * Send a value on a given number of bits. * IN assertion: length <= 16 and value fits in length bits. */ #ifdef DEBUG_ZLIB local void send_bits OF((deflate_state *s, int value, int length)); local void send_bits(s, value, length) deflate_state *s; int value; /* value to send */ int length; /* number of bits */ { Tracevv((stderr," l %2d v %4x ", length, value)); Assert(length > 0 && length <= 15, "invalid length"); s->bits_sent += (ulg)length; /* If not enough room in bi_buf, use (valid) bits from bi_buf and * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) * unused bits in value. */ if (s->bi_valid > (int)Buf_size - length) { s->bi_buf |= (value << s->bi_valid); put_short(s, s->bi_buf); s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); s->bi_valid += length - Buf_size; } else { s->bi_buf |= value << s->bi_valid; s->bi_valid += length; } } #else /* !DEBUG_ZLIB */ #define send_bits(s, value, length) \ { int len = (length);\ if ((s)->bi_valid > (int)Buf_size - len) {\ int val = (value);\ (s)->bi_buf |= (val << (s)->bi_valid);\ put_short((s), (s)->bi_buf);\ (s)->bi_buf = (ush)val >> (Buf_size - (s)->bi_valid);\ (s)->bi_valid += len - Buf_size;\ } else {\ (s)->bi_buf |= (value) << (s)->bi_valid;\ (s)->bi_valid += len;\ }\ } #endif /* DEBUG_ZLIB */ - -#define MAX(a,b) (a >= b ? a : b) /* the arguments must not have side effects */ /* =========================================================================== * Initialize the various 'constant' tables. In a multi-threaded environment, * this function may be called by two threads concurrently, but this is * harmless since both invocations do exactly the same thing. 
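 *
 * Editor's illustration (a sketch, not part of the zlib sources; the
 * names are invented): the routine below tiles the 256 normalized match
 * lengths with the length codes, each code spanning 1<<extra_lbits[code]
 * lengths.  A standalone check of that tiling:
 *
 *     #include <assert.h>
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         static const int xl[29] =
 *             {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
 *         unsigned char lcode[256];
 *         int code, n, length = 0;
 *
 *         for (code = 0; code < 28; code++)      // LENGTH_CODES-1
 *             for (n = 0; n < (1 << xl[code]); n++)
 *                 lcode[length++] = (unsigned char)code;
 *         assert(length == 256);
 *         lcode[255] = 28;   // length 258 keeps its own code (285)
 *         printf("match length 10 -> code %d\n", lcode[10 - 3]);
 *         return 0;
 *     }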
*/ local void tr_static_init() { static int static_init_done = 0; int n; /* iterates over tree elements */ int bits; /* bit counter */ int length; /* length value */ int code; /* code value */ int dist; /* distance index */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ if (static_init_done) return; /* Initialize the mapping length (0..255) -> length code (0..28) */ length = 0; for (code = 0; code < LENGTH_CODES-1; code++) { base_length[code] = length; for (n = 0; n < (1<<extra_lbits[code]); n++) { length_code[length++] = (uch)code; } } Assert (length == 256, "tr_static_init: length != 256"); /* Note that the length 255 (match length 258) can be represented * in two different ways: code 284 + 5 bits or code 285, so we * overwrite length_code[255] to use the best encoding: */ length_code[length-1] = (uch)code; /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ dist = 0; for (code = 0 ; code < 16; code++) { base_dist[code] = dist; for (n = 0; n < (1<<extra_dbits[code]); n++) { dist_code[dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: dist != 256"); dist >>= 7; /* from now on, all distances are divided by 128 */ for ( ; code < D_CODES; code++) { base_dist[code] = dist << 7; for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { dist_code[256 + dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: 256+dist != 512"); /* Construct the codes of the static literal tree */ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; n = 0; while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; /* Codes 286 and 287 do not exist, but we must include them in the * tree construction to get a canonical Huffman tree (longest code * all ones) */ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); /* The static distance tree is trivial: */ for (n = 0; n < D_CODES; n++) { static_dtree[n].Len = 5; static_dtree[n].Code = bi_reverse((unsigned)n, 5); } static_init_done = 1; } /* =========================================================================== * Initialize the tree data structures for a new zlib stream. */ void _tr_init(s) deflate_state *s; { tr_static_init(); s->compressed_len = 0L; s->l_desc.dyn_tree = s->dyn_ltree; s->l_desc.stat_desc = &static_l_desc; s->d_desc.dyn_tree = s->dyn_dtree; s->d_desc.stat_desc = &static_d_desc; s->bl_desc.dyn_tree = s->bl_tree; s->bl_desc.stat_desc = &static_bl_desc; s->bi_buf = 0; s->bi_valid = 0; s->last_eob_len = 8; /* enough lookahead for inflate */ #ifdef DEBUG_ZLIB s->bits_sent = 0L; #endif /* Initialize the first block of the first file: */ init_block(s); } /* =========================================================================== * Initialize a new block. */ local void init_block(s) deflate_state *s; { int n; /* iterates over tree elements */ /* Initialize the trees. */ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; s->dyn_ltree[END_BLOCK].Freq = 1; s->opt_len = s->static_len = 0L; s->last_lit = s->matches = 0; } #define SMALLEST 1 /* Index within the heap array of least frequent node in the Huffman tree */ /* =========================================================================== * Remove the smallest element from the heap and recreate the heap with * one less element. Updates heap and heap_len. */ #define pqremove(s, tree, top) \ {\ top = s->heap[SMALLEST]; \ s->heap[SMALLEST] = s->heap[s->heap_len--]; \ pqdownheap(s, tree, SMALLEST); \ } /* =========================================================================== * Compares two subtrees, using the tree depth as tie breaker when * the subtrees have equal frequency. This minimizes the worst case length.
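 *
 * Editor's note (a worked example, not in the sources): with frequencies
 * {1,1,2,2}, the two 1s are merged first into a node of weight 2, leaving
 * three items of weight 2 tied.  Preferring the shallowest items (the
 * leaves) yields a balanced tree where every code is 2 bits; merging the
 * fresh node first costs the same total bits but leaves a longest code of
 * 3 bits.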
*/ #define smaller(tree, n, m, depth) \ (tree[n].Freq < tree[m].Freq || \ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) /* =========================================================================== * Restore the heap property by moving down the tree starting at node k, * exchanging a node with the smallest of its two sons if necessary, stopping * when the heap property is re-established (each father smaller than its * two sons). */ local void pqdownheap(s, tree, k) deflate_state *s; ct_data *tree; /* the tree to restore */ int k; /* node to move down */ { int v = s->heap[k]; int j = k << 1; /* left son of k */ while (j <= s->heap_len) { /* Set j to the smallest of the two sons: */ if (j < s->heap_len && smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { j++; } /* Exit if v is smaller than both sons */ if (smaller(tree, v, s->heap[j], s->depth)) break; /* Exchange v with the smallest son */ s->heap[k] = s->heap[j]; k = j; /* And continue down the tree, setting j to the left son of k */ j <<= 1; } s->heap[k] = v; } /* =========================================================================== * Compute the optimal bit lengths for a tree and update the total bit length * for the current block. * IN assertion: the fields freq and dad are set, heap[heap_max] and * above are the tree nodes sorted by increasing frequency. * OUT assertions: the field len is set to the optimal bit length, the * array bl_count contains the frequencies for each bit length. * The length opt_len is updated; static_len is also updated if stree is * not null. */ local void gen_bitlen(s, desc) deflate_state *s; tree_desc *desc; /* the tree descriptor */ { ct_data *tree = desc->dyn_tree; int max_code = desc->max_code; ct_data *stree = desc->stat_desc->static_tree; intf *extra = desc->stat_desc->extra_bits; int base = desc->stat_desc->extra_base; int max_length = desc->stat_desc->max_length; int h; /* heap index */ int n, m; /* iterate over the tree elements */ int bits; /* bit length */ int xbits; /* extra bits */ ush f; /* frequency */ int overflow = 0; /* number of elements with bit length too large */ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; /* In a first pass, compute the optimal bit lengths (which may * overflow in the case of the bit length tree). */ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ for (h = s->heap_max+1; h < HEAP_SIZE; h++) { n = s->heap[h]; bits = tree[tree[n].Dad].Len + 1; if (bits > max_length) bits = max_length, overflow++; tree[n].Len = (ush)bits; /* We overwrite tree[n].Dad which is no longer needed */ if (n > max_code) continue; /* not a leaf node */ s->bl_count[bits]++; xbits = 0; if (n >= base) xbits = extra[n-base]; f = tree[n].Freq; s->opt_len += (ulg)f * (bits + xbits); if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); } if (overflow == 0) return; Trace((stderr,"\nbit length overflow\n")); /* This happens for example on obj2 and pic of the Calgary corpus */ /* Find the first bit length which could increase: */ do { bits = max_length-1; while (s->bl_count[bits] == 0) bits--; s->bl_count[bits]--; /* move one leaf down the tree */ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ s->bl_count[max_length]--; /* The brother of the overflow item also moves one step up, * but this does not affect bl_count[max_length] */ overflow -= 2; } while (overflow > 0); /* Now recompute all bit lengths, scanning in increasing frequency. * h is still equal to HEAP_SIZE. 
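 *
 * Editor's illustration (a sketch, not part of the zlib sources; the toy
 * numbers are invented): the adjustment loop just above restores the
 * Kraft equality sum(bl_count[b] * 2^-b) == 1 after clamping.  A
 * standalone run, five codes with optimal lengths {1,2,3,4,4} clamped to
 * max_length 3:
 *
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         int bl_count[4] = {0, 1, 1, 3}; // lengths {1,2,3,3,3}: sum 9/8
 *         int overflow = 2;               // two codes clamped from 4 bits
 *         int max_length = 3, bits;
 *
 *         do {
 *             bits = max_length - 1;
 *             while (bl_count[bits] == 0) bits--;
 *             bl_count[bits]--;           // move one leaf down the tree
 *             bl_count[bits + 1] += 2;    // as two nodes one level deeper
 *             bl_count[max_length]--;
 *             overflow -= 2;
 *         } while (overflow > 0);
 *         printf("now {%d,%d,%d}, Kraft x8 = %d\n", bl_count[1],
 *                bl_count[2], bl_count[3],
 *                bl_count[1]*4 + bl_count[2]*2 + bl_count[3]);
 *         return 0;
 *     }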
(It is simpler to reconstruct all * lengths instead of fixing only the wrong ones. This idea is taken * from 'ar' written by Haruhiko Okumura.) */ for (bits = max_length; bits != 0; bits--) { n = s->bl_count[bits]; while (n != 0) { m = s->heap[--h]; if (m > max_code) continue; if (tree[m].Len != (unsigned) bits) { Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); s->opt_len += ((long)bits - (long)tree[m].Len) *(long)tree[m].Freq; tree[m].Len = (ush)bits; } n--; } } } /* =========================================================================== * Generate the codes for a given tree and bit counts (which need not be * optimal). * IN assertion: the array bl_count contains the bit length statistics for * the given tree and the field len is set for all tree elements. * OUT assertion: the field code is set for all tree elements of non * zero code length. */ local void gen_codes (tree, max_code, bl_count) ct_data *tree; /* the tree to decorate */ int max_code; /* largest code with non zero frequency */ ushf *bl_count; /* number of codes at each bit length */ { ush next_code[MAX_BITS+1]; /* next code value for each bit length */ ush code = 0; /* running code value */ int bits; /* bit index */ int n; /* code index */ /* The distribution counts are first used to generate the code values * without bit reversal. */ for (bits = 1; bits <= MAX_BITS; bits++) { next_code[bits] = code = (code + bl_count[bits-1]) << 1; } /* Check that the bit counts in bl_count are consistent. The last code * must be all ones. */ Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, "inconsistent bit counts"); Tracev((stderr,"\ngen_codes: max_code %d ", max_code)); for (n = 0; n <= max_code; n++) { int len = tree[n].Len; if (len == 0) continue; /* Now reverse the bits */ tree[n].Code = bi_reverse(next_code[len]++, len); Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); } } /* =========================================================================== * Construct one Huffman tree and assign the code bit strings and lengths. * Update the total bit length for the current block. * IN assertion: the field freq is set for all tree elements. * OUT assertions: the fields len and code are set to the optimal bit length * and corresponding code. The length opt_len is updated; static_len is * also updated if stree is not null. The field max_code is set. */ local void build_tree(s, desc) deflate_state *s; tree_desc *desc; /* the tree descriptor */ { ct_data *tree = desc->dyn_tree; ct_data *stree = desc->stat_desc->static_tree; int elems = desc->stat_desc->elems; int n, m; /* iterate over heap elements */ int max_code = -1; /* largest code with non zero frequency */ int node; /* new node being created */ /* Construct the initial heap, with least frequent element in * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. * heap[0] is not used. */ s->heap_len = 0, s->heap_max = HEAP_SIZE; for (n = 0; n < elems; n++) { if (tree[n].Freq != 0) { s->heap[++(s->heap_len)] = max_code = n; s->depth[n] = 0; } else { tree[n].Len = 0; } } /* The pkzip format requires that at least one distance code exists, * and that at least one bit should be sent even if there is only one * possible code. So to avoid special checks later on we force at least * two codes of non zero frequency. */ while (s->heap_len < 2) { node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); tree[node].Freq = 1; s->depth[node] = 0; s->opt_len--; if (stree) s->static_len -= stree[node].Len; /* node is 0 or 1 so it does not have extra bits */ } desc->max_code = max_code; /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, * establish sub-heaps of increasing lengths: */ for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); /* Construct the Huffman tree by repeatedly combining the least two * frequent nodes.
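 *
 * Editor's note (a worked example, not in the sources): for frequencies
 * A:5 B:2 C:1 D:1 the loop below first combines C and D into a node of
 * weight 2, then that node with B into one of weight 4, then that with A
 * into the root of weight 9, giving lengths A=1, B=2, C=D=3, and a block
 * cost of 5*1 + 2*2 + 1*3 + 1*3 = 15 bits.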
*/ node = elems; /* next internal node of the tree */ do { pqremove(s, tree, n); /* n = node of least frequency */ m = s->heap[SMALLEST]; /* m = node of next least frequency */ s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ s->heap[--(s->heap_max)] = m; /* Create a new node father of n and m */ tree[node].Freq = tree[n].Freq + tree[m].Freq; s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1); tree[n].Dad = tree[m].Dad = (ush)node; #ifdef DUMP_BL_TREE if (tree == s->bl_tree) { fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); } #endif /* and insert the new node in the heap */ s->heap[SMALLEST] = node++; pqdownheap(s, tree, SMALLEST); } while (s->heap_len >= 2); s->heap[--(s->heap_max)] = s->heap[SMALLEST]; /* At this point, the fields freq and dad are set. We can now * generate the bit lengths. */ gen_bitlen(s, (tree_desc *)desc); /* The field len is now set, we can generate the bit codes */ gen_codes ((ct_data *)tree, max_code, s->bl_count); } /* =========================================================================== * Scan a literal or distance tree to determine the frequencies of the codes * in the bit length tree. */ local void scan_tree (s, tree, max_code) deflate_state *s; ct_data *tree; /* the tree to be scanned */ int max_code; /* and its largest code of non zero frequency */ { int n; /* iterates over all tree elements */ int prevlen = -1; /* last emitted length */ int curlen; /* length of current code */ int nextlen = tree[0].Len; /* length of next code */ int count = 0; /* repeat count of the current code */ int max_count = 7; /* max repeat count */ int min_count = 4; /* min repeat count */ if (nextlen == 0) max_count = 138, min_count = 3; tree[max_code+1].Len = (ush)0xffff; /* guard */ for (n = 0; n <= max_code; n++) { curlen = nextlen; nextlen = tree[n+1].Len; if (++count < max_count && curlen == nextlen) { continue; } else if (count < min_count) { s->bl_tree[curlen].Freq += count; } else if (curlen != 0) { if (curlen != prevlen) s->bl_tree[curlen].Freq++; s->bl_tree[REP_3_6].Freq++; } else if (count <= 10) { s->bl_tree[REPZ_3_10].Freq++; } else { s->bl_tree[REPZ_11_138].Freq++; } count = 0; prevlen = curlen; if (nextlen == 0) { max_count = 138, min_count = 3; } else if (curlen == nextlen) { max_count = 6, min_count = 3; } else { max_count = 7, min_count = 4; } } } /* =========================================================================== * Send a literal or distance tree in compressed form, using the codes in * bl_tree. 
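 *
 * Editor's note (a worked example, not in the sources): for the length
 * sequence 8 8 8 8 8 0 0 0 0 5, scan_tree()/send_tree() produce: the
 * literal length 8, then code 16 with 2 extra bits (repeat 8 four more
 * times, count-3 = 1), then code 17 with 3 extra bits (four zeros,
 * count-3 = 1), then the literal length 5.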
*/ local void send_tree (s, tree, max_code) deflate_state *s; ct_data *tree; /* the tree to be scanned */ int max_code; /* and its largest code of non zero frequency */ { int n; /* iterates over all tree elements */ int prevlen = -1; /* last emitted length */ int curlen; /* length of current code */ int nextlen = tree[0].Len; /* length of next code */ int count = 0; /* repeat count of the current code */ int max_count = 7; /* max repeat count */ int min_count = 4; /* min repeat count */ /* tree[max_code+1].Len = -1; */ /* guard already set */ if (nextlen == 0) max_count = 138, min_count = 3; for (n = 0; n <= max_code; n++) { curlen = nextlen; nextlen = tree[n+1].Len; if (++count < max_count && curlen == nextlen) { continue; } else if (count < min_count) { do { send_code(s, curlen, s->bl_tree); } while (--count != 0); } else if (curlen != 0) { if (curlen != prevlen) { send_code(s, curlen, s->bl_tree); count--; } Assert(count >= 3 && count <= 6, " 3_6?"); send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); } else if (count <= 10) { send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); } else { send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); } count = 0; prevlen = curlen; if (nextlen == 0) { max_count = 138, min_count = 3; } else if (curlen == nextlen) { max_count = 6, min_count = 3; } else { max_count = 7, min_count = 4; } } } /* =========================================================================== * Construct the Huffman tree for the bit lengths and return the index in * bl_order of the last bit length code to send. */ local int build_bl_tree(s) deflate_state *s; { int max_blindex; /* index of last bit length code of non zero freq */ /* Determine the bit length frequencies for literal and distance trees */ scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); /* Build the bit length tree: */ build_tree(s, (tree_desc *)(&(s->bl_desc))); /* opt_len now includes the length of the tree representations, except * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. */ /* Determine the number of bit length codes to send. The pkzip format * requires that at least 4 bit length codes be sent. (appnote.txt says * 3 but the actual value used is 4.) */ for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; } /* Update opt_len to include the bit length tree and counts */ s->opt_len += 3*(max_blindex+1) + 5+5+4; Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", s->opt_len, s->static_len)); return max_blindex; } /* =========================================================================== * Send the header for a block using dynamic Huffman trees: the counts, the * lengths of the bit length codes, the literal tree and the distance tree. * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
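 *
 * Editor's note (a worked example, not in the sources; the counts are
 * invented): a block using 286 literal/length codes, 23 distance codes
 * and 17 bit length codes sends 286-257 = 29 in 5 bits, 23-1 = 22 in
 * 5 bits and 17-4 = 13 in 4 bits, then 17 three-bit lengths in bl_order;
 * the header therefore costs 5+5+4 + 3*17 = 65 bits before the two
 * compressed trees follow.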
*/ local void send_all_trees(s, lcodes, dcodes, blcodes) deflate_state *s; int lcodes, dcodes, blcodes; /* number of codes for each tree */ { int rank; /* index in bl_order */ Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, "too many codes"); Tracev((stderr, "\nbl counts: ")); send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ send_bits(s, dcodes-1, 5); send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ for (rank = 0; rank < blcodes; rank++) { Tracev((stderr, "\nbl code %2d ", bl_order[rank])); send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); } Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); } /* =========================================================================== * Send a stored block */ void _tr_stored_block(s, buf, stored_len, eof) deflate_state *s; charf *buf; /* input block */ ulg stored_len; /* length of input block */ int eof; /* true if this is the last block for a file */ { send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */ s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; s->compressed_len += (stored_len + 4) << 3; copy_block(s, buf, (unsigned)stored_len, 1); /* with header */ } /* Send just the `stored block' type code without any length bytes or data. */ void _tr_stored_type_only(s) deflate_state *s; { send_bits(s, (STORED_BLOCK << 1), 3); bi_windup(s); s->compressed_len = (s->compressed_len + 3) & ~7L; } /* =========================================================================== * Send one empty static block to give enough lookahead for inflate. * This takes 10 bits, of which 7 may remain in the bit buffer. * The current inflate code requires 9 bits of lookahead. If the * last two codes for the previous block (real code plus EOB) were coded * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode * the last real code. In this case we send two empty static blocks instead * of one. (There are no problems if the previous block is stored or fixed.) * To simplify the code, we assume the worst case of last real code encoded * on one bit only. */ void _tr_align(s) deflate_state *s; { send_bits(s, STATIC_TREES<<1, 3); send_code(s, END_BLOCK, static_ltree); s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ bi_flush(s); /* Of the 10 bits for the empty block, we have already sent * (10 - bi_valid) bits. The lookahead for the last real code (before * the EOB of the previous block) was thus at least one plus the length * of the EOB plus what we have just sent of the empty static block. */ if (1 + s->last_eob_len + 10 - s->bi_valid < 9) { send_bits(s, STATIC_TREES<<1, 3); send_code(s, END_BLOCK, static_ltree); s->compressed_len += 10L; bi_flush(s); } s->last_eob_len = 7; } /* =========================================================================== * Determine the best encoding for the current block: dynamic trees, static * trees or store, and output the encoded block to the zip file. This function * returns the total compressed length for the file so far. 
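 *
 * Editor's note (a worked example, not in the sources; the figures are
 * invented): with opt_len = 7400 bits, static_len = 8300 bits and
 * stored_len = 1000 bytes, the byte costs computed below are
 * opt_lenb = (7400+3+7)>>3 = 926 and static_lenb = (8300+3+7)>>3 = 1038,
 * while storing would take 1000+4 bytes plus the 3 type bits; the
 * dynamic encoding wins and the trees are worth sending.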
*/ ulg _tr_flush_block(s, buf, stored_len, eof) deflate_state *s; charf *buf; /* input block, or NULL if too old */ ulg stored_len; /* length of input block */ int eof; /* true if this is the last block for a file */ { ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ int max_blindex = 0; /* index of last bit length code of non zero freq */ /* Build the Huffman trees unless a stored block is forced */ if (s->level > 0) { /* Check if the file is ascii or binary */ if (s->data_type == Z_UNKNOWN) set_data_type(s); /* Construct the literal and distance trees */ build_tree(s, (tree_desc *)(&(s->l_desc))); Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, s->static_len)); build_tree(s, (tree_desc *)(&(s->d_desc))); Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, s->static_len)); /* At this point, opt_len and static_len are the total bit lengths of * the compressed block data, excluding the tree representations. */ /* Build the bit length tree for the above two trees, and get the index * in bl_order of the last bit length code to send. */ max_blindex = build_bl_tree(s); /* Determine the best encoding. Compute first the block length in bytes*/ opt_lenb = (s->opt_len+3+7)>>3; static_lenb = (s->static_len+3+7)>>3; Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, s->last_lit)); if (static_lenb <= opt_lenb) opt_lenb = static_lenb; } else { Assert(buf != (char*)0, "lost buf"); opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ } /* If compression failed and this is the first and last block, * and if the .zip file can be seeked (to rewrite the local header), * the whole file is transformed into a stored file: */ #ifdef STORED_FILE_OK # ifdef FORCE_STORED_FILE if (eof && s->compressed_len == 0L) { /* force stored file */ # else if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) { # endif /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */ if (buf == (charf*)0) error ("block vanished"); copy_block(s, buf, (unsigned)stored_len, 0); /* without header */ s->compressed_len = stored_len << 3; s->method = STORED; } else #endif /* STORED_FILE_OK */ #ifdef FORCE_STORED if (buf != (char*)0) { /* force stored block */ #else if (stored_len+4 <= opt_lenb && buf != (char*)0) { /* 4: two words for the lengths */ #endif /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. * Otherwise we can't have processed more than WSIZE input bytes since * the last block flush, because compression would have been * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to * transform a block into a stored block. 
*/ _tr_stored_block(s, buf, stored_len, eof); #ifdef FORCE_STATIC } else if (static_lenb >= 0) { /* force static trees */ #else } else if (static_lenb == opt_lenb) { #endif send_bits(s, (STATIC_TREES<<1)+eof, 3); compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree); s->compressed_len += 3 + s->static_len; } else { send_bits(s, (DYN_TREES<<1)+eof, 3); send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, max_blindex+1); compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree); s->compressed_len += 3 + s->opt_len; } Assert (s->compressed_len == s->bits_sent, "bad compressed size"); init_block(s); if (eof) { bi_windup(s); s->compressed_len += 7; /* align on byte boundary */ } Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, s->compressed_len-7*eof)); return s->compressed_len >> 3; } /* =========================================================================== * Save the match info and tally the frequency counts. Return true if * the current block must be flushed. */ int _tr_tally (s, dist, lc) deflate_state *s; unsigned dist; /* distance of matched string */ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ { s->d_buf[s->last_lit] = (ush)dist; s->l_buf[s->last_lit++] = (uch)lc; if (dist == 0) { /* lc is the unmatched char */ s->dyn_ltree[lc].Freq++; } else { s->matches++; /* Here, lc is the match length - MIN_MATCH */ dist--; /* dist = match distance - 1 */ Assert((ush)dist < (ush)MAX_DIST(s) && (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++; s->dyn_dtree[d_code(dist)].Freq++; } /* Try to guess if it is profitable to stop the current block here */ if (s->level > 2 && (s->last_lit & 0xfff) == 0) { /* Compute an upper bound for the compressed length */ ulg out_length = (ulg)s->last_lit*8L; ulg in_length = (ulg)((long)s->strstart - s->block_start); int dcode; for (dcode = 0; dcode < D_CODES; dcode++) { out_length += (ulg)s->dyn_dtree[dcode].Freq * (5L+extra_dbits[dcode]); } out_length >>= 3; Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", s->last_lit, in_length, out_length, 100L - out_length*100L/in_length)); if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; } return (s->last_lit == s->lit_bufsize-1); /* We avoid equality with lit_bufsize because of wraparound at 64K * on 16 bit machines and because stored blocks are restricted to * 64K-1 bytes. 
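 *
 * Editor's illustration (a sketch, not part of the zlib sources; names
 * and figures are invented): the early-flush guess in _tr_tally() above
 * prices every tallied item at 8 bits plus, per distance code, 5 code
 * bits and its extra bits.  A standalone version of the estimate:
 *
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         static const int xd[30] =
 *             {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,
 *              8,8,9,9,10,10,11,11,12,12,13,13};
 *         unsigned long last_lit = 4096, matches = 1024;
 *         unsigned long in_len = 36000, out_len, dfreq[30] = {0};
 *         int d;
 *
 *         dfreq[10] = matches;     // pretend one distance code is used
 *         out_len = last_lit * 8L; // 8 bits per tallied item
 *         for (d = 0; d < 30; d++)
 *             out_len += dfreq[d] * (5L + xd[d]);
 *         out_len >>= 3;           // bits to bytes
 *         printf("est %lu vs in %lu: flush=%d\n", out_len, in_len,
 *                matches < last_lit / 2 && out_len < in_len / 2);
 *         return 0;
 *     }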
*/ } /* =========================================================================== * Send the block data compressed using the given Huffman trees */ local void compress_block(s, ltree, dtree) deflate_state *s; ct_data *ltree; /* literal tree */ ct_data *dtree; /* distance tree */ { unsigned dist; /* distance of matched string */ int lc; /* match length or unmatched char (if dist == 0) */ unsigned lx = 0; /* running index in l_buf */ unsigned code; /* the code to send */ int extra; /* number of extra bits to send */ if (s->last_lit != 0) do { dist = s->d_buf[lx]; lc = s->l_buf[lx++]; if (dist == 0) { send_code(s, lc, ltree); /* send a literal byte */ Tracecv(isgraph(lc), (stderr," '%c' ", lc)); } else { /* Here, lc is the match length - MIN_MATCH */ code = length_code[lc]; send_code(s, code+LITERALS+1, ltree); /* send the length code */ extra = extra_lbits[code]; if (extra != 0) { lc -= base_length[code]; send_bits(s, lc, extra); /* send the extra length bits */ } dist--; /* dist is now the match distance - 1 */ code = d_code(dist); Assert (code < D_CODES, "bad d_code"); send_code(s, code, dtree); /* send the distance code */ extra = extra_dbits[code]; if (extra != 0) { dist -= base_dist[code]; send_bits(s, dist, extra); /* send the extra distance bits */ } } /* literal or match pair ? */ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow"); } while (lx < s->last_lit); send_code(s, END_BLOCK, ltree); s->last_eob_len = ltree[END_BLOCK].Len; } /* =========================================================================== * Set the data type to ASCII or BINARY, using a crude approximation: * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise. * IN assertion: the fields freq of dyn_ltree are set and the total of all * frequencies does not exceed 64K (to fit in an int on 16 bit machines). */ local void set_data_type(s) deflate_state *s; { int n = 0; unsigned ascii_freq = 0; unsigned bin_freq = 0; while (n < 7) bin_freq += s->dyn_ltree[n++].Freq; while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq; while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq; s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII); } /* =========================================================================== * Reverse the first len bits of a code, using straightforward code (a faster * method would use a table) * IN assertion: 1 <= len <= 15 */ local unsigned bi_reverse(code, len) unsigned code; /* the value to invert */ int len; /* its bit length */ { register unsigned res = 0; do { res |= code & 1; code >>= 1, res <<= 1; } while (--len > 0); return res >> 1; } /* =========================================================================== * Flush the bit buffer, keeping at most 7 bits in it. 
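 *
 * Editor's illustration (a sketch, not part of the zlib sources):
 * bi_reverse() above mirrors the low len bits of a code because DEFLATE
 * defines Huffman codes MSB first while the stream is written LSB first.
 * A copy of the routine with a few checks:
 *
 *     #include <assert.h>
 *
 *     static unsigned rev(unsigned code, int len)
 *     {
 *         unsigned res = 0;
 *         do {
 *             res |= code & 1;
 *             code >>= 1, res <<= 1;
 *         } while (--len > 0);
 *         return res >> 1;
 *     }
 *
 *     int main(void)
 *     {
 *         assert(rev(0x1, 3) == 0x4);    // 001 becomes 100
 *         assert(rev(0x6, 3) == 0x3);    // 110 becomes 011
 *         assert(rev(0x5, 5) == 0x14);   // 00101 becomes 10100
 *         return 0;
 *     }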
*/ local void bi_flush(s) deflate_state *s; { if (s->bi_valid == 16) { put_short(s, s->bi_buf); s->bi_buf = 0; s->bi_valid = 0; } else if (s->bi_valid >= 8) { put_byte(s, (Byte)s->bi_buf); s->bi_buf >>= 8; s->bi_valid -= 8; } } /* =========================================================================== * Flush the bit buffer and align the output on a byte boundary */ local void bi_windup(s) deflate_state *s; { if (s->bi_valid > 8) { put_short(s, s->bi_buf); } else if (s->bi_valid > 0) { put_byte(s, (Byte)s->bi_buf); } s->bi_buf = 0; s->bi_valid = 0; #ifdef DEBUG_ZLIB s->bits_sent = (s->bits_sent+7) & ~7; #endif } /* =========================================================================== * Copy a stored block, storing first the length and its * one's complement if requested. */ local void copy_block(s, buf, len, header) deflate_state *s; charf *buf; /* the input data */ unsigned len; /* its length */ int header; /* true if block header must be written */ { bi_windup(s); /* align on byte boundary */ s->last_eob_len = 8; /* enough lookahead for inflate */ if (header) { put_short(s, (ush)len); put_short(s, (ush)~len); #ifdef DEBUG_ZLIB s->bits_sent += 2*16; #endif } #ifdef DEBUG_ZLIB s->bits_sent += (ulg)len<<3; #endif /* bundle up the put_byte(s, *buf++) calls */ zmemcpy(&s->pending_buf[s->pending], buf, len); s->pending += len; } /* --- trees.c */ /* +++ inflate.c */ /* inflate.c -- zlib interface to inflate modules * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* #include "zutil.h" */ /* +++ infblock.h */ /* infblock.h -- header to use infblock.c * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. 
*/ struct inflate_blocks_state; typedef struct inflate_blocks_state FAR inflate_blocks_statef; extern inflate_blocks_statef * inflate_blocks_new OF(( z_streamp z, check_func c, /* check function */ uInt w)); /* window size */ extern int inflate_blocks OF(( inflate_blocks_statef *, z_streamp , int)); /* initial return code */ extern void inflate_blocks_reset OF(( inflate_blocks_statef *, z_streamp , uLongf *)); /* check value on output */ extern int inflate_blocks_free OF(( inflate_blocks_statef *, z_streamp , uLongf *)); /* check value on output */ extern void inflate_set_dictionary OF(( inflate_blocks_statef *s, const Bytef *d, /* dictionary */ uInt n)); /* dictionary length */ extern int inflate_addhistory OF(( inflate_blocks_statef *, z_streamp)); extern int inflate_packet_flush OF(( inflate_blocks_statef *)); /* --- infblock.h */ #ifndef NO_DUMMY_DECL struct inflate_blocks_state {int dummy;}; /* for buggy compilers */ #endif /* inflate private state */ struct internal_state { /* mode */ enum { METHOD, /* waiting for method byte */ FLAG, /* waiting for flag byte */ DICT4, /* four dictionary check bytes to go */ DICT3, /* three dictionary check bytes to go */ DICT2, /* two dictionary check bytes to go */ DICT1, /* one dictionary check byte to go */ DICT0, /* waiting for inflateSetDictionary */ BLOCKS, /* decompressing blocks */ CHECK4, /* four check bytes to go */ CHECK3, /* three check bytes to go */ CHECK2, /* two check bytes to go */ CHECK1, /* one check byte to go */ DONE, /* finished check, done */ BAD} /* got an error--stay here */ mode; /* current inflate mode */ /* mode dependent information */ union { uInt method; /* if FLAGS, method byte */ struct { uLong was; /* computed check value */ uLong need; /* stream check value */ } check; /* if CHECK, check values to compare */ uInt marker; /* if BAD, inflateSync's marker bytes count */ } sub; /* submode */ /* mode independent information */ int nowrap; /* flag for no wrapper */ uInt wbits; /* log2(window size) (8..15, defaults to 15) */ inflate_blocks_statef *blocks; /* current inflate_blocks state */ }; int inflateReset(z) z_streamp z; { uLong c; if (z == Z_NULL || z->state == Z_NULL) return Z_STREAM_ERROR; z->total_in = z->total_out = 0; z->msg = Z_NULL; z->state->mode = z->state->nowrap ?
BLOCKS : METHOD; inflate_blocks_reset(z->state->blocks, z, &c); Trace((stderr, "inflate: reset\n")); return Z_OK; } int inflateEnd(z) z_streamp z; { uLong c; if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL) return Z_STREAM_ERROR; if (z->state->blocks != Z_NULL) inflate_blocks_free(z->state->blocks, z, &c); ZFREE(z, z->state); z->state = Z_NULL; Trace((stderr, "inflate: end\n")); return Z_OK; } int inflateInit2_(z, w, version, stream_size) z_streamp z; int w; const char *version; int stream_size; { if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || stream_size != sizeof(z_stream)) return Z_VERSION_ERROR; /* initialize state */ if (z == Z_NULL) return Z_STREAM_ERROR; z->msg = Z_NULL; #ifndef NO_ZCFUNCS if (z->zalloc == Z_NULL) { z->zalloc = zcalloc; z->opaque = (voidpf)0; } if (z->zfree == Z_NULL) z->zfree = zcfree; #endif if ((z->state = (struct internal_state FAR *) ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL) return Z_MEM_ERROR; z->state->blocks = Z_NULL; /* handle undocumented nowrap option (no zlib header or check) */ z->state->nowrap = 0; if (w < 0) { w = - w; z->state->nowrap = 1; } /* set window size */ if (w < 8 || w > 15) { inflateEnd(z); return Z_STREAM_ERROR; } z->state->wbits = (uInt)w; /* create inflate_blocks state */ if ((z->state->blocks = inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, (uInt)1 << w)) == Z_NULL) { inflateEnd(z); return Z_MEM_ERROR; } Trace((stderr, "inflate: allocated\n")); /* reset state */ inflateReset(z); return Z_OK; } int inflateInit_(z, version, stream_size) z_streamp z; const char *version; int stream_size; { return inflateInit2_(z, DEF_WBITS, version, stream_size); } #define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;} #define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++) int inflate(z, f) z_streamp z; int f; { int r; uInt b; if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL || f < 0) return Z_STREAM_ERROR; r = Z_BUF_ERROR; while (1) switch (z->state->mode) { case METHOD: NEEDBYTE if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED) { z->state->mode = BAD; z->msg = (char*)"unknown compression method"; z->state->sub.marker = 5; /* can't try inflateSync */ break; } if ((z->state->sub.method >> 4) + 8 > z->state->wbits) { z->state->mode = BAD; z->msg = (char*)"invalid window size"; z->state->sub.marker = 5; /* can't try inflateSync */ break; } z->state->mode = FLAG; case FLAG: NEEDBYTE b = NEXTBYTE; if (((z->state->sub.method << 8) + b) % 31) { z->state->mode = BAD; z->msg = (char*)"incorrect header check"; z->state->sub.marker = 5; /* can't try inflateSync */ break; } Trace((stderr, "inflate: zlib header ok\n")); if (!(b & PRESET_DICT)) { z->state->mode = BLOCKS; break; } z->state->mode = DICT4; case DICT4: NEEDBYTE z->state->sub.check.need = (uLong)NEXTBYTE << 24; z->state->mode = DICT3; case DICT3: NEEDBYTE z->state->sub.check.need += (uLong)NEXTBYTE << 16; z->state->mode = DICT2; case DICT2: NEEDBYTE z->state->sub.check.need += (uLong)NEXTBYTE << 8; z->state->mode = DICT1; case DICT1: NEEDBYTE z->state->sub.check.need += (uLong)NEXTBYTE; z->adler = z->state->sub.check.need; z->state->mode = DICT0; return Z_NEED_DICT; case DICT0: z->state->mode = BAD; z->msg = (char*)"need dictionary"; z->state->sub.marker = 0; /* can try inflateSync */ return Z_STREAM_ERROR; case BLOCKS: r = inflate_blocks(z->state->blocks, z, r); if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0) r = inflate_packet_flush(z->state->blocks); if (r == Z_DATA_ERROR) { z->state->mode = BAD; 
z->state->sub.marker = 0; /* can try inflateSync */ break; } if (r != Z_STREAM_END) return r; r = Z_OK; inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was); if (z->state->nowrap) { z->state->mode = DONE; break; } z->state->mode = CHECK4; case CHECK4: NEEDBYTE z->state->sub.check.need = (uLong)NEXTBYTE << 24; z->state->mode = CHECK3; case CHECK3: NEEDBYTE z->state->sub.check.need += (uLong)NEXTBYTE << 16; z->state->mode = CHECK2; case CHECK2: NEEDBYTE z->state->sub.check.need += (uLong)NEXTBYTE << 8; z->state->mode = CHECK1; case CHECK1: NEEDBYTE z->state->sub.check.need += (uLong)NEXTBYTE; if (z->state->sub.check.was != z->state->sub.check.need) { z->state->mode = BAD; z->msg = (char*)"incorrect data check"; z->state->sub.marker = 5; /* can't try inflateSync */ break; } Trace((stderr, "inflate: zlib check ok\n")); z->state->mode = DONE; case DONE: return Z_STREAM_END; case BAD: return Z_DATA_ERROR; default: return Z_STREAM_ERROR; } empty: if (f != Z_PACKET_FLUSH) return r; z->state->mode = BAD; z->msg = (char *)"need more for packet flush"; z->state->sub.marker = 0; /* can try inflateSync */ return Z_DATA_ERROR; } int inflateSetDictionary(z, dictionary, dictLength) z_streamp z; const Bytef *dictionary; uInt dictLength; { uInt length = dictLength; if (z == Z_NULL || z->state == Z_NULL || z->state->mode != DICT0) return Z_STREAM_ERROR; if (adler32(1L, dictionary, dictLength) != z->adler) return Z_DATA_ERROR; z->adler = 1L; if (length >= ((uInt)1<<z->state->wbits)) { length = (1<<z->state->wbits)-1; dictionary += dictLength - length; } inflate_set_dictionary(z->state->blocks, dictionary, length); z->state->mode = BLOCKS; return Z_OK; } /* * This subroutine adds the data at next_in/avail_in to the output history * without performing any output. The output buffer must be "caught up"; * i.e. no pending output (hence s->read equals s->write), and the state must * be BLOCKS (i.e. we should be willing to see the start of a series of * BLOCKS). On exit, the output will also be caught up, and the checksum * will have been updated if need be. */ int inflateIncomp(z) z_stream *z; { if (z->state->mode != BLOCKS) return Z_DATA_ERROR; return inflate_addhistory(z->state->blocks, z); } int inflateSync(z) z_streamp z; { uInt n; /* number of bytes to look at */ Bytef *p; /* pointer to bytes */ uInt m; /* number of marker bytes found in a row */ uLong r, w; /* temporaries to save total_in and total_out */ /* set up */ if (z == Z_NULL || z->state == Z_NULL) return Z_STREAM_ERROR; if (z->state->mode != BAD) { z->state->mode = BAD; z->state->sub.marker = 0; } if ((n = z->avail_in) == 0) return Z_BUF_ERROR; p = z->next_in; m = z->state->sub.marker; /* search */ while (n && m < 4) { if (*p == (Byte)(m < 2 ?
0 : 0xff)) m++; else if (*p) m = 0; else m = 4 - m; p++, n--; } /* restore */ z->total_in += p - z->next_in; z->next_in = p; z->avail_in = n; z->state->sub.marker = m; /* return no joy or set up to restart on a new block */ if (m != 4) return Z_DATA_ERROR; r = z->total_in; w = z->total_out; inflateReset(z); z->total_in = r; z->total_out = w; z->state->mode = BLOCKS; return Z_OK; } #undef NEEDBYTE #undef NEXTBYTE /* --- inflate.c */ /* +++ infblock.c */ /* infblock.c -- interpret and process block types to last block * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* #include "zutil.h" */ /* #include "infblock.h" */ /* +++ inftrees.h */ /* inftrees.h -- header to use inftrees.c * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* Huffman code lookup table entry--this entry is four bytes for machines that have 16-bit pointers (e.g. PC's in the small or medium model). */ typedef struct inflate_huft_s FAR inflate_huft; struct inflate_huft_s { union { struct { Byte Exop; /* number of extra bits or operation */ Byte Bits; /* number of bits in this code or subcode */ } what; Bytef *pad; /* pad structure to a power of 2 (4 bytes for */ } word; /* 16-bit, 8 bytes for 32-bit machines) */ union { uInt Base; /* literal, length base, or distance base */ inflate_huft *Next; /* pointer to next level of table */ } more; }; #ifdef DEBUG_ZLIB extern uInt inflate_hufts; #endif extern int inflate_trees_bits OF(( uIntf *, /* 19 code lengths */ uIntf *, /* bits tree desired/actual depth */ inflate_huft * FAR *, /* bits tree result */ z_streamp )); /* for zalloc, zfree functions */ extern int inflate_trees_dynamic OF(( uInt, /* number of literal/length codes */ uInt, /* number of distance codes */ uIntf *, /* that many (total) code lengths */ uIntf *, /* literal desired/actual bit depth */ uIntf *, /* distance desired/actual bit depth */ inflate_huft * FAR *, /* literal/length tree result */ inflate_huft * FAR *, /* distance tree result */ z_streamp )); /* for zalloc, zfree functions */ extern int inflate_trees_fixed OF(( uIntf *, /* literal desired/actual bit depth */ uIntf *, /* distance desired/actual bit depth */ inflate_huft * FAR *, /* literal/length tree result */ inflate_huft * FAR *)); /* distance tree result */ extern int inflate_trees_free OF(( inflate_huft *, /* tables to free */ z_streamp )); /* for zfree function */ /* --- inftrees.h */ /* +++ infcodes.h */ /* infcodes.h -- header to use infcodes.c * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. 
*/ struct inflate_codes_state; typedef struct inflate_codes_state FAR inflate_codes_statef; extern inflate_codes_statef *inflate_codes_new OF(( uInt, uInt, inflate_huft *, inflate_huft *, z_streamp )); extern int inflate_codes OF(( inflate_blocks_statef *, z_streamp , int)); extern void inflate_codes_free OF(( inflate_codes_statef *, z_streamp )); /* --- infcodes.h */ /* +++ infutil.h */ /* infutil.h -- types and macros common to blocks and codes * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ #ifndef _INFUTIL_H #define _INFUTIL_H typedef enum { TYPE, /* get type bits (3, including end bit) */ LENS, /* get lengths for stored */ STORED, /* processing stored block */ TABLE, /* get table lengths */ BTREE, /* get bit lengths tree for a dynamic block */ DTREE, /* get length, distance trees for a dynamic block */ CODES, /* processing fixed or dynamic block */ DRY, /* output remaining window bytes */ DONEB, /* finished last block, done */ BADB} /* got a data error--stuck here */ inflate_block_mode; /* inflate blocks semi-private state */ struct inflate_blocks_state { /* mode */ inflate_block_mode mode; /* current inflate_block mode */ /* mode dependent information */ union { uInt left; /* if STORED, bytes left to copy */ struct { uInt table; /* table lengths (14 bits) */ uInt index; /* index into blens (or border) */ uIntf *blens; /* bit lengths of codes */ uInt bb; /* bit length tree depth */ inflate_huft *tb; /* bit length decoding tree */ } trees; /* if DTREE, decoding info for trees */ struct { inflate_huft *tl; inflate_huft *td; /* trees to free */ inflate_codes_statef *codes; } decode; /* if CODES, current state */ } sub; /* submode */ uInt last; /* true if this block is the last block */ /* mode independent information */ uInt bitk; /* bits in bit buffer */ uLong bitb; /* bit buffer */ Bytef *window; /* sliding window */ Bytef *end; /* one byte after sliding window */ Bytef *read; /* window read pointer */ Bytef *write; /* window write pointer */ check_func checkfn; /* check function */ uLong check; /* check on output */ }; /* defines for inflate input/output */ /* update pointers and return */ #define UPDBITS {s->bitb=b;s->bitk=k;} #define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;} #define UPDOUT {s->write=q;} #define UPDATE {UPDBITS UPDIN UPDOUT} #define LEAVE {UPDATE return inflate_flush(s,z,r);} /* get bytes and bits */ #define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;} #define NEEDBYTE {if(n)r=Z_OK;else LEAVE} #define NEXTBYTE (n--,*p++) #define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}} #define DUMPBITS(j) {b>>=(j);k-=(j);} /* output bytes */ #define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q) #define LOADOUT {q=s->write;m=(uInt)WAVAIL;} #define WWRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}} #define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT} #define NEEDOUT {if(m==0){WWRAP if(m==0){FLUSH WWRAP if(m==0) LEAVE}}r=Z_OK;} #define OUTBYTE(a) {*q++=(Byte)(a);m--;} /* load local pointers */ #define LOAD {LOADIN LOADOUT} /* masks for lower bits (size given to avoid silly warnings with Visual C++) */ extern uInt inflate_mask[17]; /* copy as much as possible from the sliding window to the output area */ extern int inflate_flush OF(( inflate_blocks_statef *, z_streamp , int)); #ifndef NO_DUMMY_DECL
struct internal_state {int dummy;}; /* for buggy compilers */ #endif #endif /* --- infutil.h */ #ifndef NO_DUMMY_DECL struct inflate_codes_state {int dummy;}; /* for buggy compilers */ #endif /* Table for deflate from PKZIP's appnote.txt. */ local const uInt border[] = { /* Order of the bit length code lengths */ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; /* Notes beyond the 1.93a appnote.txt: 1. Distance pointers never point before the beginning of the output stream. 2. Distance pointers can point back across blocks, up to 32k away. 3. There is an implied maximum of 7 bits for the bit length table and 15 bits for the actual data. 4. If only one code exists, then it is encoded using one bit. (Zero would be more efficient, but perhaps a little confusing.) If two codes exist, they are coded using one bit each (0 and 1). 5. There is no way of sending zero distance codes--a dummy must be sent if there are none. (History: a pre 2.0 version of PKZIP would store blocks with no distance codes, but this was discovered to be too harsh a criterion.) Valid only for 1.93a. 2.04c does allow zero distance codes, which is sent as one code of zero bits in length. 6. There are up to 286 literal/length codes. Code 256 represents the end-of-block. Note however that the static length tree defines 288 codes just to fill out the Huffman codes. Codes 286 and 287 cannot be used though, since there is no length base or extra bits defined for them. Similarly, there are up to 30 distance codes. However, static trees define 32 codes (all 5 bits) to fill out the Huffman codes, but the last two had better not show up in the data. 7. Unzip can check dynamic Huffman blocks for complete code sets. The exception is that a single code would not be complete (see #4). 8. The five bits following the block type are really the number of literal codes sent minus 257. 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits (1+6+6). Therefore, to output three times the length, you output three codes (1+1+1), whereas to output four times the same length, you only need two codes (1+3). Hmm. 10. In the tree reconstruction algorithm, Code = Code + Increment only if BitLength(i) is not zero. (Pretty obvious.) 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) 12. Note: length code 284 can represent 227-258, but length code 285 really is 258. The last length deserves its own, short code since it gets used a lot in very redundant files. The length 258 is special since 258 - 3 (the min match length) is 255. 13. The literal/length and distance code bit lengths are read as a single stream of lengths. It is possible (and advantageous) for a repeat code (16, 17, or 18) to go across the boundary between the two sets of lengths.
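
      (Editor's illustration, not part of the original notes; the header
      value 0x36dd is made up.)  The 14 bits read in the TABLE state of
      inflate_blocks() below pack three counts, low bits first:

          t = 0x36dd & 0x3fff;
          257 + (t & 0x1f)         ->  286 literal/length code lengths
          1 + ((t >> 5) & 0x1f)    ->   23 distance code lengths
          4 + (t >> 10)            ->   17 bit length code lengths,
                                        read in border[] order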
*/ void inflate_blocks_reset(s, z, c) inflate_blocks_statef *s; z_streamp z; uLongf *c; { if (s->checkfn != Z_NULL) *c = s->check; if (s->mode == BTREE || s->mode == DTREE) ZFREE(z, s->sub.trees.blens); if (s->mode == CODES) { inflate_codes_free(s->sub.decode.codes, z); inflate_trees_free(s->sub.decode.td, z); inflate_trees_free(s->sub.decode.tl, z); } s->mode = TYPE; s->bitk = 0; s->bitb = 0; s->read = s->write = s->window; if (s->checkfn != Z_NULL) z->adler = s->check = (*s->checkfn)(0L, Z_NULL, 0); Trace((stderr, "inflate: blocks reset\n")); } inflate_blocks_statef *inflate_blocks_new(z, c, w) z_streamp z; check_func c; uInt w; { inflate_blocks_statef *s; if ((s = (inflate_blocks_statef *)ZALLOC (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL) return s; if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL) { ZFREE(z, s); return Z_NULL; } s->end = s->window + w; s->checkfn = c; s->mode = TYPE; Trace((stderr, "inflate: blocks allocated\n")); inflate_blocks_reset(s, z, &s->check); return s; } #ifdef DEBUG_ZLIB extern uInt inflate_hufts; #endif int inflate_blocks(s, z, r) inflate_blocks_statef *s; z_streamp z; int r; { uInt t; /* temporary storage */ uLong b; /* bit buffer */ uInt k; /* bits in bit buffer */ Bytef *p; /* input data pointer */ uInt n; /* bytes available there */ Bytef *q; /* output window write pointer */ uInt m; /* bytes to end of window or read pointer */ /* copy input/output information to locals (UPDATE macro restores) */ LOAD /* process input based on current state */ while (1) switch (s->mode) { case TYPE: NEEDBITS(3) t = (uInt)b & 7; s->last = t & 1; switch (t >> 1) { case 0: /* stored */ Trace((stderr, "inflate: stored block%s\n", s->last ? " (last)" : "")); DUMPBITS(3) t = k & 7; /* go to byte boundary */ DUMPBITS(t) s->mode = LENS; /* get length of stored block */ break; case 1: /* fixed */ Trace((stderr, "inflate: fixed codes block%s\n", s->last ? " (last)" : "")); { uInt bl, bd; inflate_huft *tl, *td; inflate_trees_fixed(&bl, &bd, &tl, &td); s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z); if (s->sub.decode.codes == Z_NULL) { r = Z_MEM_ERROR; LEAVE } s->sub.decode.tl = Z_NULL; /* don't try to free these */ s->sub.decode.td = Z_NULL; } DUMPBITS(3) s->mode = CODES; break; case 2: /* dynamic */ Trace((stderr, "inflate: dynamic codes block%s\n", s->last ? " (last)" : "")); DUMPBITS(3) s->mode = TABLE; break; case 3: /* illegal */ DUMPBITS(3) s->mode = BADB; z->msg = (char*)"invalid block type"; r = Z_DATA_ERROR; LEAVE } break; case LENS: NEEDBITS(32) if ((((~b) >> 16) & 0xffff) != (b & 0xffff)) { s->mode = BADB; z->msg = (char*)"invalid stored block lengths"; r = Z_DATA_ERROR; LEAVE } s->sub.left = (uInt)b & 0xffff; b = k = 0; /* dump bits */ Tracev((stderr, "inflate: stored length %u\n", s->sub.left)); s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE); break; case STORED: if (n == 0) LEAVE NEEDOUT t = s->sub.left; if (t > n) t = n; if (t > m) t = m; zmemcpy(q, p, t); p += t; n -= t; q += t; m -= t; if ((s->sub.left -= t) != 0) break; Tracev((stderr, "inflate: stored end, %lu total out\n", z->total_out + (q >= s->read ? q - s->read : (s->end - s->read) + (q - s->window)))); s->mode = s->last ? 
DRY : TYPE; break; case TABLE: NEEDBITS(14) s->sub.trees.table = t = (uInt)b & 0x3fff; #ifndef PKZIP_BUG_WORKAROUND if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29) { s->mode = BADB; z->msg = (char*)"too many length or distance symbols"; r = Z_DATA_ERROR; LEAVE } #endif t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f); if (t < 19) t = 19; if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL) { r = Z_MEM_ERROR; LEAVE } DUMPBITS(14) s->sub.trees.index = 0; Tracev((stderr, "inflate: table sizes ok\n")); s->mode = BTREE; case BTREE: while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10)) { NEEDBITS(3) s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7; DUMPBITS(3) } while (s->sub.trees.index < 19) s->sub.trees.blens[border[s->sub.trees.index++]] = 0; s->sub.trees.bb = 7; t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb, &s->sub.trees.tb, z); if (t != Z_OK) { r = t; if (r == Z_DATA_ERROR) { ZFREE(z, s->sub.trees.blens); s->mode = BADB; } LEAVE } s->sub.trees.index = 0; Tracev((stderr, "inflate: bits tree ok\n")); s->mode = DTREE; case DTREE: while (t = s->sub.trees.table, s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f)) { inflate_huft *h; uInt i, j, c; t = s->sub.trees.bb; NEEDBITS(t) h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]); t = h->word.what.Bits; c = h->more.Base; if (c < 16) { DUMPBITS(t) s->sub.trees.blens[s->sub.trees.index++] = c; } else /* c == 16..18 */ { i = c == 18 ? 7 : c - 14; j = c == 18 ? 11 : 3; NEEDBITS(t + i) DUMPBITS(t) j += (uInt)b & inflate_mask[i]; DUMPBITS(i) i = s->sub.trees.index; t = s->sub.trees.table; if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || (c == 16 && i < 1)) { inflate_trees_free(s->sub.trees.tb, z); ZFREE(z, s->sub.trees.blens); s->mode = BADB; z->msg = (char*)"invalid bit length repeat"; r = Z_DATA_ERROR; LEAVE } c = c == 16 ? s->sub.trees.blens[i - 1] : 0; do { s->sub.trees.blens[i++] = c; } while (--j); s->sub.trees.index = i; } } inflate_trees_free(s->sub.trees.tb, z); s->sub.trees.tb = Z_NULL; { uInt bl, bd; inflate_huft *tl, *td; inflate_codes_statef *c; bl = 9; /* must be <= 9 for lookahead assumptions */ bd = 6; /* must be <= 9 for lookahead assumptions */ t = s->sub.trees.table; #ifdef DEBUG_ZLIB inflate_hufts = 0; #endif t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), s->sub.trees.blens, &bl, &bd, &tl, &td, z); if (t != Z_OK) { if (t == (uInt)Z_DATA_ERROR) { ZFREE(z, s->sub.trees.blens); s->mode = BADB; } r = t; LEAVE } Tracev((stderr, "inflate: trees ok, %d * %d bytes used\n", inflate_hufts, sizeof(inflate_huft))); if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL) { inflate_trees_free(td, z); inflate_trees_free(tl, z); r = Z_MEM_ERROR; LEAVE } /* * this ZFREE must occur *BEFORE* we mess with sub.decode, because * sub.trees is union'd with sub.decode. */ ZFREE(z, s->sub.trees.blens); s->sub.decode.codes = c; s->sub.decode.tl = tl; s->sub.decode.td = td; } s->mode = CODES; case CODES: UPDATE if ((r = inflate_codes(s, z, r)) != Z_STREAM_END) return inflate_flush(s, z, r); r = Z_OK; inflate_codes_free(s->sub.decode.codes, z); inflate_trees_free(s->sub.decode.td, z); inflate_trees_free(s->sub.decode.tl, z); LOAD Tracev((stderr, "inflate: codes end, %lu total out\n", z->total_out + (q >= s->read ? 
q - s->read : (s->end - s->read) + (q - s->window)))); if (!s->last) { s->mode = TYPE; break; } if (k > 7) /* return unused byte, if any */ { Assert(k < 16, "inflate_codes grabbed too many bytes") k -= 8; n++; p--; /* can always return one */ } s->mode = DRY; case DRY: FLUSH if (s->read != s->write) LEAVE s->mode = DONEB; case DONEB: r = Z_STREAM_END; LEAVE case BADB: r = Z_DATA_ERROR; LEAVE default: r = Z_STREAM_ERROR; LEAVE } } int inflate_blocks_free(s, z, c) inflate_blocks_statef *s; z_streamp z; uLongf *c; { inflate_blocks_reset(s, z, c); ZFREE(z, s->window); ZFREE(z, s); Trace((stderr, "inflate: blocks freed\n")); return Z_OK; } void inflate_set_dictionary(s, d, n) inflate_blocks_statef *s; const Bytef *d; uInt n; { zmemcpy((charf *)s->window, d, n); s->read = s->write = s->window + n; } /* * This subroutine adds the data at next_in/avail_in to the output history * without performing any output. The output buffer must be "caught up"; * i.e. no pending output (hence s->read equals s->write), and the state must * be BLOCKS (i.e. we should be willing to see the start of a series of * BLOCKS). On exit, the output will also be caught up, and the checksum * will have been updated if need be. */ int inflate_addhistory(s, z) inflate_blocks_statef *s; z_stream *z; { uLong b; /* bit buffer */ /* NOT USED HERE */ uInt k; /* bits in bit buffer */ /* NOT USED HERE */ uInt t; /* temporary storage */ Bytef *p; /* input data pointer */ uInt n; /* bytes available there */ Bytef *q; /* output window write pointer */ uInt m; /* bytes to end of window or read pointer */ if (s->read != s->write) return Z_STREAM_ERROR; if (s->mode != TYPE) return Z_DATA_ERROR; /* we're ready to rock */ LOAD /* while there is input ready, copy to output buffer, moving * pointers as needed. */ while (n) { t = n; /* how many to do */ /* is there room until end of buffer? */ if (t > m) t = m; /* update check information */ if (s->checkfn != Z_NULL) s->check = (*s->checkfn)(s->check, q, t); zmemcpy(q, p, t); q += t; p += t; n -= t; z->total_out += t; s->read = q; /* drag read pointer forward */ /* WWRAP */ /* expand WWRAP macro by hand to handle s->read */ if (q == s->end) { s->read = q = s->window; m = WAVAIL; } } UPDATE return Z_OK; } /* * At the end of a Deflate-compressed PPP packet, we expect to have seen * a `stored' block type value but not the (zero) length bytes. */ int inflate_packet_flush(s) inflate_blocks_statef *s; { if (s->mode != LENS) return Z_DATA_ERROR; s->mode = TYPE; return Z_OK; } /* --- infblock.c */ /* +++ inftrees.c */ /* inftrees.c -- generate Huffman trees for efficient decoding * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* #include "zutil.h" */ /* #include "inftrees.h" */ char inflate_copyright[] = " inflate 1.0.4 Copyright 1995-1996 Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot include such an acknowledgment, I would appreciate that you keep this copyright string in the executable of your product. 
*/ #ifndef NO_DUMMY_DECL struct internal_state {int dummy;}; /* for buggy compilers */ #endif /* simplify the use of the inflate_huft type with some defines */ #define base more.Base #define next more.Next #define exop word.what.Exop #define bits word.what.Bits local int huft_build OF(( uIntf *, /* code lengths in bits */ uInt, /* number of codes */ uInt, /* number of "simple" codes */ const uIntf *, /* list of base values for non-simple codes */ const uIntf *, /* list of extra bits for non-simple codes */ inflate_huft * FAR*,/* result: starting table */ uIntf *, /* maximum lookup bits (returns actual) */ z_streamp )); /* for zalloc function */ local voidpf falloc OF(( voidpf, /* opaque pointer (not used) */ uInt, /* number of items */ uInt)); /* size of item */ /* Tables for deflate from PKZIP's appnote.txt. */ local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; /* see note #13 above about 258 */ local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */ local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577}; local const uInt cpdext[30] = { /* Extra bits for distance codes */ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; /* Huffman code decoding is performed using a multi-level table lookup. The fastest way to decode is to simply build a lookup table whose size is determined by the longest code. However, the time it takes to build this table can also be a factor if the data being decoded is not very long. The most common codes are necessarily the shortest codes, so those codes dominate the decoding time, and hence the speed. The idea is you can have a shorter table that decodes the shorter, more probable codes, and then point to subsidiary tables for the longer codes. The time it costs to decode the longer codes is then traded against the time it takes to make longer tables. This results of this trade are in the variables lbits and dbits below. lbits is the number of bits the first level table for literal/ length codes can decode in one step, and dbits is the same thing for the distance codes. Subsequent tables are also less than or equal to those sizes. These values may be adjusted either when all of the codes are shorter than that, in which case the longest code length in bits is used, or when the shortest code is *longer* than the requested table size, in which case the length of the shortest code in bits is used. There are two different values for the two tables, since they code a different number of possibilities each. The literal/length table codes 286 possible values, or in a flat code, a little over eight bits. The distance table codes 30 possible values, or a little less than five bits, flat. The optimum values for speed end up being about one bit more than those, so lbits is 8+1 and dbits is 5+1. The optimum values may differ though from machine to machine, and possibly even between compilers. Your mileage may vary. */ /* If BMAX needs to be larger than 16, then h and x[] should be uLong. 
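 */

/*
 * [Ed. note] A schematic of the two-level lookup described above, under an
 * assumed entry layout (this is NOT zlib's inflate_huft): each entry holds
 * the bits it consumes, a flag marking it as a link, and either a decoded
 * symbol or a pointer to a sub-table. Root entries are indexed by the low
 * rootbits (the lbits/dbits above) of the bit buffer; longer codes take
 * one extra hop into a sub-table.
 */
struct hentry {
	unsigned char nbits;	/* leaf: full code length; link: sub-table width */
	unsigned char is_link;	/* nonzero: e->sub is a sub-table */
	unsigned short symbol;	/* decoded value when is_link == 0 */
	const struct hentry *sub;
};

static int
decode_symbol(const struct hentry *root, unsigned rootbits,
	      unsigned long bitbuf, unsigned *consumed)
{
	const struct hentry *e = &root[bitbuf & ((1u << rootbits) - 1u)];

	if (e->is_link) {
		/* longer code: drop the root bits, index the sub-table */
		unsigned subbits = e->nbits;
		e = &e->sub[(bitbuf >> rootbits) & ((1u << subbits) - 1u)];
	}
	*consumed = e->nbits;	/* leaves store the full code length */
	return e->symbol;
}

/* huft_build() and its constants follow.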
*/ #define BMAX 15 /* maximum bit length of any code */ #define N_MAX 288 /* maximum number of codes in any set */ #ifdef DEBUG_ZLIB uInt inflate_hufts; #endif local int huft_build(b, n, s, d, e, t, m, zs) uIntf *b; /* code lengths in bits (all assumed <= BMAX) */ uInt n; /* number of codes (assumed <= N_MAX) */ uInt s; /* number of simple-valued codes (0..s-1) */ const uIntf *d; /* list of base values for non-simple codes */ const uIntf *e; /* list of extra bits for non-simple codes */ inflate_huft * FAR *t; /* result: starting table */ uIntf *m; /* maximum lookup bits, returns actual */ z_streamp zs; /* for zalloc function */ /* Given a list of code lengths and a maximum table size, make a set of tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR if the given code set is incomplete (the tables are still built in this case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of lengths), or Z_MEM_ERROR if not enough memory. */ { uInt a; /* counter for codes of length k */ uInt c[BMAX+1]; /* bit length count table */ uInt f; /* i repeats in table every f entries */ int g; /* maximum code length */ int h; /* table level */ register uInt i; /* counter, current code */ register uInt j; /* counter */ register int k; /* number of bits in current code */ int l; /* bits per table (returned in m) */ register uIntf *p; /* pointer into c[], b[], or v[] */ inflate_huft *q; /* points to current table */ struct inflate_huft_s r; /* table entry for structure assignment */ inflate_huft *u[BMAX]; /* table stack */ uInt v[N_MAX]; /* values in order of bit length */ register int w; /* bits before this table == (l * h) */ uInt x[BMAX+1]; /* bit offsets, then code stack */ uIntf *xp; /* pointer into x */ int y; /* number of dummy codes added */ uInt z; /* number of entries in current table */ /* Generate counts for each bit length */ p = c; #define C0 *p++ = 0; #define C2 C0 C0 C0 C0 #define C4 C2 C2 C2 C2 C4 /* clear c[]--assume BMAX+1 is 16 */ p = b; i = n; do { c[*p++]++; /* assume all entries <= BMAX */ } while (--i); if (c[0] == n) /* null input--all zero length codes */ { *t = (inflate_huft *)Z_NULL; *m = 0; return Z_OK; } /* Find minimum and maximum length, bound *m by those */ l = *m; for (j = 1; j <= BMAX; j++) if (c[j]) break; k = j; /* minimum code length */ if ((uInt)l < j) l = j; for (i = BMAX; i; i--) if (c[i]) break; g = i; /* maximum code length */ if ((uInt)l > i) l = i; *m = l; /* Adjust last length count to fill out codes, if needed */ for (y = 1 << j; j < i; j++, y <<= 1) if ((y -= c[j]) < 0) return Z_DATA_ERROR; if ((y -= c[i]) < 0) return Z_DATA_ERROR; c[i] += y; /* Generate starting offsets into the value table for each length */ x[1] = j = 0; p = c + 1; xp = x + 2; while (--i) { /* note that i == g from above */ *xp++ = (j += *p++); } /* Make a table of values in order of bit lengths */ p = b; i = 0; do { if ((j = *p++) != 0) v[x[j]++] = i; } while (++i < n); n = x[g]; /* set n to length of v */ /* Generate the Huffman codes and for each, make the table entries */ x[0] = i = 0; /* first Huffman code is zero */ p = v; /* grab values in bit order */ h = -1; /* no tables yet--level -1 */ w = -l; /* bits decoded == (l * h) */ u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */ q = (inflate_huft *)Z_NULL; /* ditto */ z = 0; /* ditto */ /* go through the bit lengths (k already is bits in shortest code) */ for (; k <= g; k++) { a = c[k]; while (a--) { /* here i is the Huffman code of length k bits for value *p */ /* make tables up to required level 
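 * -- i.e. push new sub-tables until the current code of length k is
 * covered. The count and offset arrays prepared above (c[] and x[]) are
 * the standard canonical-Huffman construction; an editor's stand-alone
 * sketch of that step, with hypothetical names and lens[] holding the
 * per-symbol code lengths (all assumed <= SKETCH_MAXBITS):
 */
#define SKETCH_MAXBITS 15	/* mirrors BMAX */

static void
canonical_codes(const unsigned char *lens, unsigned nsyms, unsigned *codes)
{
	unsigned count[SKETCH_MAXBITS + 1] = {0};
	unsigned first[SKETCH_MAXBITS + 1];
	unsigned len, sym, code = 0;

	for (sym = 0; sym < nsyms; sym++)
		count[lens[sym]]++;	/* like c[]: codes per bit length */
	count[0] = 0;
	for (len = 1; len <= SKETCH_MAXBITS; len++) {
		code = (code + count[len - 1]) << 1;
		first[len] = code;	/* like x[]: first code of each length */
	}
	for (sym = 0; sym < nsyms; sym++)
		if (lens[sym] != 0)
			codes[sym] = first[lens[sym]]++;
}

/* make tables up to required level (huft_build's inner loop resumes)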
*/ while (k > w + l) { h++; w += l; /* previous table always l bits */ /* compute minimum size table less than or equal to l bits */ z = g - w; z = z > (uInt)l ? l : z; /* table size upper limit */ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ { /* too few codes for k-w bit table */ f -= a + 1; /* deduct codes from patterns left */ xp = c + k; if (j < z) while (++j < z) /* try smaller tables up to z bits */ { if ((f <<= 1) <= *++xp) break; /* enough codes to use up j bits */ f -= *xp; /* else deduct codes from patterns */ } } z = 1 << j; /* table entries for j-bit table */ /* allocate and link in new table */ if ((q = (inflate_huft *)ZALLOC (zs,z + 1,sizeof(inflate_huft))) == Z_NULL) { if (h) inflate_trees_free(u[0], zs); return Z_MEM_ERROR; /* not enough memory */ } #ifdef DEBUG_ZLIB inflate_hufts += z + 1; #endif *t = q + 1; /* link to list for huft_free() */ *(t = &(q->next)) = Z_NULL; u[h] = ++q; /* table starts after link */ /* connect to last table, if there is one */ if (h) { x[h] = i; /* save pattern for backing up */ r.bits = (Byte)l; /* bits to dump before this table */ r.exop = (Byte)j; /* bits in this table */ r.next = q; /* pointer to this table */ j = i >> (w - l); /* (get around Turbo C bug) */ u[h-1][j] = r; /* connect to last table */ } } /* set up table entry in r */ r.bits = (Byte)(k - w); if (p >= v + n) r.exop = 128 + 64; /* out of values--invalid code */ else if (*p < s) { r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */ r.base = *p++; /* simple code is just the value */ } else { r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */ r.base = d[*p++ - s]; } /* fill code-like entries with r */ f = 1 << (k - w); for (j = i >> w; j < z; j += f) q[j] = r; /* backwards increment the k-bit code i */ for (j = 1 << (k - 1); i & j; j >>= 1) i ^= j; i ^= j; /* backup over finished tables */ while ((i & ((1 << w) - 1)) != x[h]) { h--; /* don't need to update q */ w -= l; } } } /* Return Z_BUF_ERROR if we were given an incomplete table */ return y != 0 && g != 1 ? 
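/* an incomplete set of lengths (y != 0) is only tolerated in the degenerate case of a single one-bit code (g == 1) */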
Z_BUF_ERROR : Z_OK; } int inflate_trees_bits(c, bb, tb, z) uIntf *c; /* 19 code lengths */ uIntf *bb; /* bits tree desired/actual depth */ inflate_huft * FAR *tb; /* bits tree result */ z_streamp z; /* for zfree function */ { int r; r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z); if (r == Z_DATA_ERROR) z->msg = (char*)"oversubscribed dynamic bit lengths tree"; else if (r == Z_BUF_ERROR || *bb == 0) { inflate_trees_free(*tb, z); z->msg = (char*)"incomplete dynamic bit lengths tree"; r = Z_DATA_ERROR; } return r; } int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z) uInt nl; /* number of literal/length codes */ uInt nd; /* number of distance codes */ uIntf *c; /* that many (total) code lengths */ uIntf *bl; /* literal desired/actual bit depth */ uIntf *bd; /* distance desired/actual bit depth */ inflate_huft * FAR *tl; /* literal/length tree result */ inflate_huft * FAR *td; /* distance tree result */ z_streamp z; /* for zfree function */ { int r; /* build literal/length tree */ r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z); if (r != Z_OK || *bl == 0) { if (r == Z_DATA_ERROR) z->msg = (char*)"oversubscribed literal/length tree"; else if (r != Z_MEM_ERROR) { inflate_trees_free(*tl, z); z->msg = (char*)"incomplete literal/length tree"; r = Z_DATA_ERROR; } return r; } /* build distance tree */ r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z); if (r != Z_OK || (*bd == 0 && nl > 257)) { if (r == Z_DATA_ERROR) z->msg = (char*)"oversubscribed distance tree"; else if (r == Z_BUF_ERROR) { #ifdef PKZIP_BUG_WORKAROUND r = Z_OK; } #else inflate_trees_free(*td, z); z->msg = (char*)"incomplete distance tree"; r = Z_DATA_ERROR; } else if (r != Z_MEM_ERROR) { z->msg = (char*)"empty distance tree with lengths"; r = Z_DATA_ERROR; } inflate_trees_free(*tl, z); return r; #endif } /* done */ return Z_OK; } /* build fixed tables only once--keep them here */ local int fixed_built = 0; #define FIXEDH 530 /* number of hufts used by fixed tables */ local inflate_huft fixed_mem[FIXEDH]; local uInt fixed_bl; local uInt fixed_bd; local inflate_huft *fixed_tl; local inflate_huft *fixed_td; local voidpf falloc(q, n, s) voidpf q; /* opaque pointer */ uInt n; /* number of items */ uInt s; /* size of item */ { Assert(s == sizeof(inflate_huft) && n <= *(intf *)q, "inflate_trees falloc overflow"); *(intf *)q -= n+s-s; /* s-s to avoid warning */ return (voidpf)(fixed_mem + *(intf *)q); } int inflate_trees_fixed(bl, bd, tl, td) uIntf *bl; /* literal desired/actual bit depth */ uIntf *bd; /* distance desired/actual bit depth */ inflate_huft * FAR *tl; /* literal/length tree result */ inflate_huft * FAR *td; /* distance tree result */ { /* build fixed tables if not already (multiple overlapped executions ok) */ if (!fixed_built) { int k; /* temporary variable */ unsigned c[288]; /* length list for huft_build */ z_stream z; /* for falloc function */ int f = FIXEDH; /* number of hufts left in fixed_mem */ /* set up fake z_stream for memory routines */ z.zalloc = falloc; z.zfree = Z_NULL; z.opaque = (voidpf)&f; /* literal table */ for (k = 0; k < 144; k++) c[k] = 8; for (; k < 256; k++) c[k] = 9; for (; k < 280; k++) c[k] = 7; for (; k < 288; k++) c[k] = 8; fixed_bl = 7; huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z); /* distance table */ for (k = 0; k < 30; k++) c[k] = 5; fixed_bd = 5; huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z); /* done */ Assert(f == 0, "invalid build of fixed tables"); fixed_built = 1; } *bl = fixed_bl; *bd = fixed_bd; *tl = fixed_tl; 
*td = fixed_td; return Z_OK; } int inflate_trees_free(t, z) inflate_huft *t; /* table to free */ z_streamp z; /* for zfree function */ /* Free the malloc'ed tables built by huft_build(), which makes a linked list of the tables it made, with the links in a dummy first entry of each table. */ { register inflate_huft *p, *q, *r; /* Reverse linked list */ p = Z_NULL; q = t; while (q != Z_NULL) { r = (q - 1)->next; (q - 1)->next = p; p = q; q = r; } /* Go through linked list, freeing from the malloced (t[-1]) address. */ while (p != Z_NULL) { q = (--p)->next; ZFREE(z,p); p = q; } return Z_OK; } /* --- inftrees.c */ /* +++ infcodes.c */ /* infcodes.c -- process literals and length/distance pairs * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* #include "zutil.h" */ /* #include "inftrees.h" */ /* #include "infblock.h" */ /* #include "infcodes.h" */ /* #include "infutil.h" */ /* +++ inffast.h */ /* inffast.h -- header to use inffast.c * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ extern int inflate_fast OF(( uInt, uInt, inflate_huft *, inflate_huft *, inflate_blocks_statef *, z_streamp )); /* --- inffast.h */ /* simplify the use of the inflate_huft type with some defines */ #define base more.Base #define next more.Next #define exop word.what.Exop #define bits word.what.Bits /* inflate codes private state */ struct inflate_codes_state { /* mode */ enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */ START, /* x: set up for LEN */ LEN, /* i: get length/literal/eob next */ LENEXT, /* i: getting length extra (have base) */ DIST, /* i: get distance next */ DISTEXT, /* i: getting distance extra */ COPY, /* o: copying bytes in window, waiting for space */ LIT, /* o: got literal, waiting for output space */ WASH, /* o: got eob, possibly still output waiting */ END, /* x: got eob and all data flushed */ BADCODE} /* x: got error */ mode; /* current inflate_codes mode */ /* mode dependent information */ uInt len; union { struct { inflate_huft *tree; /* pointer into tree */ uInt need; /* bits needed */ } code; /* if LEN or DIST, where in tree */ uInt lit; /* if LIT, literal */ struct { uInt get; /* bits to get for extra */ uInt dist; /* distance back to copy from */ } copy; /* if EXT or COPY, where and how much */ } sub; /* submode */ /* mode independent information */ Byte lbits; /* ltree bits decoded per branch */ Byte dbits; /* dtree bits decoder per branch */ inflate_huft *ltree; /* literal/length/eob tree */ inflate_huft *dtree; /* distance tree */ }; inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z) uInt bl, bd; inflate_huft *tl; inflate_huft *td; /* need separate declaration for Borland C++ */ z_streamp z; { inflate_codes_statef *c; if ((c = (inflate_codes_statef *) ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL) { c->mode = START; c->lbits = (Byte)bl; c->dbits = (Byte)bd; c->ltree = tl; c->dtree = td; Tracev((stderr, "inflate: codes new\n")); } return c; } int inflate_codes(s, z, r) inflate_blocks_statef *s; z_streamp z; int r; { uInt j; /* temporary storage */ inflate_huft *t; /* temporary pointer */ uInt e; /* extra bits or operation */ uLong b; /* bit buffer */ uInt k; /* bits in bit buffer */ Bytef *p; /* input data pointer */ uInt n; /* bytes available 
there */ Bytef *q; /* output window write pointer */ uInt m; /* bytes to end of window or read pointer */ Bytef *f; /* pointer to copy strings from */ inflate_codes_statef *c = s->sub.decode.codes; /* codes state */ /* copy input/output information to locals (UPDATE macro restores) */ LOAD /* process input and output based on current state */ while (1) switch (c->mode) { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */ case START: /* x: set up for LEN */ #ifndef SLOW if (m >= 258 && n >= 10) { UPDATE r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z); LOAD if (r != Z_OK) { c->mode = r == Z_STREAM_END ? WASH : BADCODE; break; } } #endif /* !SLOW */ c->sub.code.need = c->lbits; c->sub.code.tree = c->ltree; c->mode = LEN; case LEN: /* i: get length/literal/eob next */ j = c->sub.code.need; NEEDBITS(j) t = c->sub.code.tree + ((uInt)b & inflate_mask[j]); DUMPBITS(t->bits) e = (uInt)(t->exop); if (e == 0) /* literal */ { c->sub.lit = t->base; Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", t->base)); c->mode = LIT; break; } if (e & 16) /* length */ { c->sub.copy.get = e & 15; c->len = t->base; c->mode = LENEXT; break; } if ((e & 64) == 0) /* next table */ { c->sub.code.need = e; c->sub.code.tree = t->next; break; } if (e & 32) /* end of block */ { Tracevv((stderr, "inflate: end of block\n")); c->mode = WASH; break; } c->mode = BADCODE; /* invalid code */ z->msg = (char*)"invalid literal/length code"; r = Z_DATA_ERROR; LEAVE case LENEXT: /* i: getting length extra (have base) */ j = c->sub.copy.get; NEEDBITS(j) c->len += (uInt)b & inflate_mask[j]; DUMPBITS(j) c->sub.code.need = c->dbits; c->sub.code.tree = c->dtree; Tracevv((stderr, "inflate: length %u\n", c->len)); c->mode = DIST; case DIST: /* i: get distance next */ j = c->sub.code.need; NEEDBITS(j) t = c->sub.code.tree + ((uInt)b & inflate_mask[j]); DUMPBITS(t->bits) e = (uInt)(t->exop); if (e & 16) /* distance */ { c->sub.copy.get = e & 15; c->sub.copy.dist = t->base; c->mode = DISTEXT; break; } if ((e & 64) == 0) /* next table */ { c->sub.code.need = e; c->sub.code.tree = t->next; break; } c->mode = BADCODE; /* invalid code */ z->msg = (char*)"invalid distance code"; r = Z_DATA_ERROR; LEAVE case DISTEXT: /* i: getting distance extra */ j = c->sub.copy.get; NEEDBITS(j) c->sub.copy.dist += (uInt)b & inflate_mask[j]; DUMPBITS(j) Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist)); c->mode = COPY; case COPY: /* o: copying bytes in window, waiting for space */ #ifndef __TURBOC__ /* Turbo C bug for following expression */ f = (uInt)(q - s->window) < c->sub.copy.dist ? 
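/* the string to copy starts before the window base, so the source wraps around to the tail of the window */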
s->end - (c->sub.copy.dist - (q - s->window)) : q - c->sub.copy.dist; #else f = q - c->sub.copy.dist; if ((uInt)(q - s->window) < c->sub.copy.dist) f = s->end - (c->sub.copy.dist - (uInt)(q - s->window)); #endif while (c->len) { NEEDOUT OUTBYTE(*f++) if (f == s->end) f = s->window; c->len--; } c->mode = START; break; case LIT: /* o: got literal, waiting for output space */ NEEDOUT OUTBYTE(c->sub.lit) c->mode = START; break; case WASH: /* o: got eob, possibly more output */ FLUSH if (s->read != s->write) LEAVE c->mode = END; case END: r = Z_STREAM_END; LEAVE case BADCODE: /* x: got error */ r = Z_DATA_ERROR; LEAVE default: r = Z_STREAM_ERROR; LEAVE } } void inflate_codes_free(c, z) inflate_codes_statef *c; z_streamp z; { ZFREE(z, c); Tracev((stderr, "inflate: codes free\n")); } /* --- infcodes.c */ /* +++ infutil.c */ /* inflate_util.c -- data and routines common to blocks and codes * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* #include "zutil.h" */ /* #include "infblock.h" */ /* #include "inftrees.h" */ /* #include "infcodes.h" */ /* #include "infutil.h" */ #ifndef NO_DUMMY_DECL struct inflate_codes_state {int dummy;}; /* for buggy compilers */ #endif /* And'ing with mask[n] masks the lower n bits */ uInt inflate_mask[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff }; /* copy as much as possible from the sliding window to the output area */ int inflate_flush(s, z, r) inflate_blocks_statef *s; z_streamp z; int r; { uInt n; Bytef *p; Bytef *q; /* local copies of source and destination pointers */ p = z->next_out; q = s->read; /* compute number of bytes to copy as far as end of window */ n = (uInt)((q <= s->write ? 
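/* the contiguous readable run ends at the write pointer when it lies ahead of q, otherwise at the physical end of the window */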
s->write : s->end) - q); if (n > z->avail_out) n = z->avail_out; if (n && r == Z_BUF_ERROR) r = Z_OK; /* update counters */ z->avail_out -= n; z->total_out += n; /* update check information */ if (s->checkfn != Z_NULL) z->adler = s->check = (*s->checkfn)(s->check, q, n); /* copy as far as end of window */ if (p != Z_NULL) { zmemcpy(p, q, n); p += n; } q += n; /* see if more to copy at beginning of window */ if (q == s->end) { /* wrap pointers */ q = s->window; if (s->write == s->end) s->write = s->window; /* compute bytes to copy */ n = (uInt)(s->write - q); if (n > z->avail_out) n = z->avail_out; if (n && r == Z_BUF_ERROR) r = Z_OK; /* update counters */ z->avail_out -= n; z->total_out += n; /* update check information */ if (s->checkfn != Z_NULL) z->adler = s->check = (*s->checkfn)(s->check, q, n); /* copy */ if (p != Z_NULL) { zmemcpy(p, q, n); p += n; } q += n; } /* update pointers */ z->next_out = p; s->read = q; /* done */ return r; } /* --- infutil.c */ /* +++ inffast.c */ /* inffast.c -- process literals and length/distance pairs fast * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* #include "zutil.h" */ /* #include "inftrees.h" */ /* #include "infblock.h" */ /* #include "infcodes.h" */ /* #include "infutil.h" */ /* #include "inffast.h" */ #ifndef NO_DUMMY_DECL struct inflate_codes_state {int dummy;}; /* for buggy compilers */ #endif /* simplify the use of the inflate_huft type with some defines */ #define base more.Base #define next more.Next #define exop word.what.Exop #define bits word.what.Bits /* macros for bit input with no checking and for returning unused bytes */ #define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}} #define UNGRAB {n+=(c=k>>3);p-=c;k&=7;} /* Called with number of bytes left to write in window at least 258 (the maximum string length) and number of input bytes available at least ten. The ten bytes are six bytes for the longest length/ distance pair plus four bytes for overloading the bit buffer. */ int inflate_fast(bl, bd, tl, td, s, z) uInt bl, bd; inflate_huft *tl; inflate_huft *td; /* need separate declaration for Borland C++ */ inflate_blocks_statef *s; z_streamp z; { inflate_huft *t; /* temporary pointer */ uInt e; /* extra bits or operation */ uLong b; /* bit buffer */ uInt k; /* bits in bit buffer */ Bytef *p; /* input data pointer */ uInt n; /* bytes available there */ Bytef *q; /* output window write pointer */ uInt m; /* bytes to end of window or read pointer */ uInt ml; /* mask for literal/length tree */ uInt md; /* mask for distance tree */ uInt c; /* bytes to copy */ uInt d; /* distance back to copy from */ Bytef *r; /* copy source pointer */ /* load input, output, bit values */ LOAD /* initialize masks */ ml = inflate_mask[bl]; md = inflate_mask[bd]; /* do until not enough input or output space for fast loop */ do { /* assume called with m >= 258 && n >= 10 */ /* get literal/length code */ GRABBITS(20) /* max bits for literal/length code */ if ((e = (t = tl + ((uInt)b & ml))->exop) == 0) { DUMPBITS(t->bits) Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
"inflate: * literal '%c'\n" : "inflate: * literal 0x%02x\n", t->base)); *q++ = (Byte)t->base; m--; continue; } do { DUMPBITS(t->bits) if (e & 16) { /* get extra bits for length */ e &= 15; c = t->base + ((uInt)b & inflate_mask[e]); DUMPBITS(e) Tracevv((stderr, "inflate: * length %u\n", c)); /* decode distance base of block to copy */ GRABBITS(15); /* max bits for distance code */ e = (t = td + ((uInt)b & md))->exop; do { DUMPBITS(t->bits) if (e & 16) { /* get extra bits to add to distance base */ e &= 15; GRABBITS(e) /* get extra bits (up to 13) */ d = t->base + ((uInt)b & inflate_mask[e]); DUMPBITS(e) Tracevv((stderr, "inflate: * distance %u\n", d)); /* do the copy */ m -= c; if ((uInt)(q - s->window) >= d) /* offset before dest */ { /* just copy */ r = q - d; *q++ = *r++; c--; /* minimum count is three, */ *q++ = *r++; c--; /* so unroll loop a little */ } else /* else offset after destination */ { e = d - (uInt)(q - s->window); /* bytes from offset to end */ r = s->end - e; /* pointer to offset */ if (c > e) /* if source crosses, */ { c -= e; /* copy to end of window */ do { *q++ = *r++; } while (--e); r = s->window; /* copy rest from start of window */ } } do { /* copy all or what's left */ *q++ = *r++; } while (--c); break; } else if ((e & 64) == 0) e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop; else { z->msg = (char*)"invalid distance code"; UNGRAB UPDATE return Z_DATA_ERROR; } } while (1); break; } if ((e & 64) == 0) { if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0) { DUMPBITS(t->bits) Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ? "inflate: * literal '%c'\n" : "inflate: * literal 0x%02x\n", t->base)); *q++ = (Byte)t->base; m--; break; } } else if (e & 32) { Tracevv((stderr, "inflate: * end of block\n")); UNGRAB UPDATE return Z_STREAM_END; } else { z->msg = (char*)"invalid literal/length code"; UNGRAB UPDATE return Z_DATA_ERROR; } } while (1); } while (m >= 258 && n >= 10); /* not enough input or output--restore pointers and return */ UNGRAB UPDATE return Z_OK; } /* --- inffast.c */ /* +++ zutil.c */ /* zutil.c -- target dependent utility functions for the compression library * Copyright (C) 1995-1996 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ /* From: zutil.c,v 1.17 1996/07/24 13:41:12 me Exp $ */ #ifdef DEBUG_ZLIB #include #endif /* #include "zutil.h" */ #ifndef NO_DUMMY_DECL struct internal_state {int dummy;}; /* for buggy compilers */ #endif #ifndef STDC extern void exit OF((int)); #endif static const char *z_errmsg[10] = { "need dictionary", /* Z_NEED_DICT 2 */ "stream end", /* Z_STREAM_END 1 */ "", /* Z_OK 0 */ "file error", /* Z_ERRNO (-1) */ "stream error", /* Z_STREAM_ERROR (-2) */ "data error", /* Z_DATA_ERROR (-3) */ "insufficient memory", /* Z_MEM_ERROR (-4) */ "buffer error", /* Z_BUF_ERROR (-5) */ "incompatible version",/* Z_VERSION_ERROR (-6) */ ""}; const char *zlibVersion() { return ZLIB_VERSION; } #ifdef DEBUG_ZLIB void z_error (m) char *m; { fprintf(stderr, "%s\n", m); exit(1); } #endif #ifndef HAVE_MEMCPY void zmemcpy(dest, source, len) Bytef* dest; Bytef* source; uInt len; { if (len == 0) return; do { *dest++ = *source++; /* ??? to be unrolled */ } while (--len != 0); } int zmemcmp(s1, s2, len) Bytef* s1; Bytef* s2; uInt len; { uInt j; for (j = 0; j < len; j++) { if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; } return 0; } void zmemzero(dest, len) Bytef* dest; uInt len; { if (len == 0) return; do { *dest++ = 0; /* ??? 
to be unrolled */ } while (--len != 0); } #endif #ifdef __TURBOC__ #if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__) /* Small and medium model in Turbo C are for now limited to near allocation * with reduced MAX_WBITS and MAX_MEM_LEVEL */ # define MY_ZCALLOC /* Turbo C malloc() does not allow dynamic allocation of 64K bytes * and farmalloc(64K) returns a pointer with an offset of 8, so we * must fix the pointer. Warning: the pointer must be put back to its * original form in order to free it, use zcfree(). */ #define MAX_PTR 10 /* 10*64K = 640K */ local int next_ptr = 0; typedef struct ptr_table_s { voidpf org_ptr; voidpf new_ptr; } ptr_table; local ptr_table table[MAX_PTR]; /* This table is used to remember the original form of pointers * to large buffers (64K). Such pointers are normalized with a zero offset. * Since MSDOS is not a preemptive multitasking OS, this table is not * protected from concurrent access. This hack doesn't work anyway on * a protected system like OS/2. Use Microsoft C instead. */ voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) { voidpf buf = opaque; /* just to make some compilers happy */ ulg bsize = (ulg)items*size; /* If we allocate less than 65520 bytes, we assume that farmalloc * will return a usable pointer which doesn't have to be normalized. */ if (bsize < 65520L) { buf = farmalloc(bsize); if (*(ush*)&buf != 0) return buf; } else { buf = farmalloc(bsize + 16L); } if (buf == NULL || next_ptr >= MAX_PTR) return NULL; table[next_ptr].org_ptr = buf; /* Normalize the pointer to seg:0 */ *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; *(ush*)&buf = 0; table[next_ptr++].new_ptr = buf; return buf; } void zcfree (voidpf opaque, voidpf ptr) { int n; if (*(ush*)&ptr != 0) { /* object < 64K */ farfree(ptr); return; } /* Find the original pointer */ for (n = 0; n < next_ptr; n++) { if (ptr != table[n].new_ptr) continue; farfree(table[n].org_ptr); while (++n < next_ptr) { table[n-1] = table[n]; } next_ptr--; return; } ptr = opaque; /* just to make some compilers happy */ Assert(0, "zcfree: ptr not found"); } #endif #endif /* __TURBOC__ */ #if defined(M_I86) && !defined(__32BIT__) /* Microsoft C in 16-bit mode */ # define MY_ZCALLOC #if (!defined(_MSC_VER) || (_MSC_VER < 600)) # define _halloc halloc # define _hfree hfree #endif voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) { if (opaque) opaque = 0; /* to make compiler happy */ return _halloc((long)items, size); } void zcfree (voidpf opaque, voidpf ptr) { if (opaque) opaque = 0; /* to make compiler happy */ _hfree(ptr); } #endif /* MSC */ #ifndef MY_ZCALLOC /* Any system without a special alloc function */ #ifndef STDC extern voidp calloc OF((uInt items, uInt size)); extern void free OF((voidpf ptr)); #endif voidpf zcalloc (opaque, items, size) voidpf opaque; unsigned items; unsigned size; { if (opaque) items += size - size; /* make compiler happy */ return (voidpf)calloc(items, size); } void zcfree (opaque, ptr) voidpf opaque; voidpf ptr; { free(ptr); if (opaque) return; /* make compiler happy */ } #endif /* MY_ZCALLOC */ /* --- zutil.c */ /* +++ adler32.c */ /* adler32.c -- compute the Adler-32 checksum of a data stream * Copyright (C) 1995-1996 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* From: adler32.c,v 1.10 1996/05/22 11:52:18 me Exp $ */ /* #include "zlib.h" */ #define BASE 65521L /* largest prime smaller than 65536 */ #define NMAX 5552 /* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 
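 */

/*
 * [Ed. note] A plain restatement of the checksum that the unrolled DO
 * macros below compute: s1 is the running byte sum, s2 the sum of the
 * successive s1 values, both reduced mod BASE. The modulo is deferred to
 * every NMAX bytes, which is safe precisely because of the bound quoted
 * above. adler32_sketch is an editor's name, not a zlib entry point.
 */
static unsigned long
adler32_sketch(unsigned long adler, const unsigned char *buf, unsigned len)
{
	unsigned long s1 = adler & 0xffff;
	unsigned long s2 = (adler >> 16) & 0xffff;

	while (len > 0) {
		unsigned k = len < 5552 ? len : 5552;	/* NMAX */
		len -= k;
		while (k--) {
			s1 += *buf++;
			s2 += s1;
		}
		s1 %= 65521UL;				/* BASE */
		s2 %= 65521UL;
	}
	return (s2 << 16) | s1;
}

/* the unrolled version follows.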
*/ #define DO1(buf,i) {s1 += buf[(i)]; s2 += s1;} #define DO2(buf,i) DO1(buf,i); DO1(buf,(i)+1); #define DO4(buf,i) DO2(buf,i); DO2(buf,(i)+2); #define DO8(buf,i) DO4(buf,i); DO4(buf,(i)+4); #define DO16(buf) DO8(buf,0); DO8(buf,8); /* ========================================================================= */ uLong adler32(adler, buf, len) uLong adler; const Bytef *buf; uInt len; { unsigned long s1 = adler & 0xffff; unsigned long s2 = (adler >> 16) & 0xffff; int k; if (buf == Z_NULL) return 1L; while (len > 0) { k = len < NMAX ? len : NMAX; len -= k; while (k >= 16) { DO16(buf); buf += 16; k -= 16; } if (k != 0) do { s1 += *buf++; s2 += s1; } while (--k); s1 %= BASE; s2 %= BASE; } return (s2 << 16) | s1; } /* --- adler32.c */ Index: head/sys/netinet6/nd6.c =================================================================== --- head/sys/netinet6/nd6.c (revision 110231) +++ head/sys/netinet6/nd6.c (revision 110232) @@ -1,2252 +1,2251 @@ /* $FreeBSD$ */ /* $KAME: nd6.c,v 1.144 2001/05/24 07:44:00 itojun Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * XXX * KAME 970409 note: * BSD/OS version heavily modifies this code, related to llinfo. * Since we don't have BSD/OS version of net/route.c in our hand, * I left the code mostly as it was in 970310. 
-- itojun */ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_mac.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */ #define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */ #define SIN6(s) ((struct sockaddr_in6 *)s) #define SDL(s) ((struct sockaddr_dl *)s) /* timer values */ int nd6_prune = 1; /* walk list every 1 seconds */ int nd6_delay = 5; /* delay first probe time 5 second */ int nd6_umaxtries = 3; /* maximum unicast query */ int nd6_mmaxtries = 3; /* maximum multicast query */ int nd6_useloopback = 1; /* use loopback interface for local traffic */ int nd6_gctimer = (60 * 60 * 24); /* 1 day: garbage collection timer */ /* preventing too many loops in ND option parsing */ int nd6_maxndopt = 10; /* max # of ND options allowed */ int nd6_maxnudhint = 0; /* max # of subsequent upper layer hints */ #ifdef ND6_DEBUG int nd6_debug = 1; #else int nd6_debug = 0; #endif /* for debugging? */ static int nd6_inuse, nd6_allocated; struct llinfo_nd6 llinfo_nd6 = {&llinfo_nd6, &llinfo_nd6}; static size_t nd_ifinfo_indexlim = 8; struct nd_ifinfo *nd_ifinfo = NULL; struct nd_drhead nd_defrouter; struct nd_prhead nd_prefix = { 0 }; int nd6_recalc_reachtm_interval = ND6_RECALC_REACHTM_INTERVAL; static struct sockaddr_in6 all1_sa; static void nd6_slowtimo __P((void *)); static int regen_tmpaddr __P((struct in6_ifaddr *)); struct callout nd6_slowtimo_ch; struct callout nd6_timer_ch; extern struct callout in6_tmpaddrtimer_ch; void nd6_init() { static int nd6_init_done = 0; int i; if (nd6_init_done) { log(LOG_NOTICE, "nd6_init called more than once(ignored)\n"); return; } all1_sa.sin6_family = AF_INET6; all1_sa.sin6_len = sizeof(struct sockaddr_in6); for (i = 0; i < sizeof(all1_sa.sin6_addr); i++) all1_sa.sin6_addr.s6_addr[i] = 0xff; /* initialization of the default router list */ TAILQ_INIT(&nd_defrouter); nd6_init_done = 1; /* start timer */ callout_reset(&nd6_slowtimo_ch, ND6_SLOWTIMER_INTERVAL * hz, nd6_slowtimo, NULL); } void nd6_ifattach(ifp) struct ifnet *ifp; { /* * We have some arrays that should be indexed by if_index. * since if_index will grow dynamically, they should grow too. */ if (nd_ifinfo == NULL || if_index >= nd_ifinfo_indexlim) { size_t n; caddr_t q; while (if_index >= nd_ifinfo_indexlim) nd_ifinfo_indexlim <<= 1; /* grow nd_ifinfo */ n = nd_ifinfo_indexlim * sizeof(struct nd_ifinfo); q = (caddr_t)malloc(n, M_IP6NDP, 0); bzero(q, n); if (nd_ifinfo) { bcopy((caddr_t)nd_ifinfo, q, n/2); free((caddr_t)nd_ifinfo, M_IP6NDP); } nd_ifinfo = (struct nd_ifinfo *)q; } #define ND nd_ifinfo[ifp->if_index] /* * Don't initialize if called twice. * XXX: to detect this, we should choose a member that is never set * before initialization of the ND structure itself. We formaly used * the linkmtu member, which was not suitable because it could be * initialized via "ifconfig mtu". */ if (ND.basereachable) return; ND.linkmtu = ifnet_byindex(ifp->if_index)->if_mtu; ND.chlim = IPV6_DEFHLIM; ND.basereachable = REACHABLE_TIME; ND.reachable = ND_COMPUTE_RTIME(ND.basereachable); ND.retrans = RETRANS_TIMER; ND.receivedra = 0; ND.flags = ND6_IFF_PERFORMNUD; nd6_setmtu(ifp); #undef ND } /* * Reset ND level link MTU. This function is called when the physical MTU * changes, which means we might have to adjust the ND level MTU. 
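 */

/*
 * [Ed. note] The substantive change in this revision is the removal of
 * per-file MIN/MAX macro definitions (one is deleted from nd6_setmtu()
 * just below, matching the MIN/MAX removal in busdma_machdep.c earlier in
 * this commit), since <sys/param.h> already provides kernel-wide MIN()
 * and MAX(). A sketch of the cleaned-up pattern; clamp_mtu is a
 * hypothetical illustration, not a function in this file:
 */
#include <sys/param.h>		/* supplies MIN() and MAX() */

static u_long
clamp_mtu(u_long link_max, u_long if_mtu)
{
	/* formerly each file carried its own copy, e.g.
	 *	#define MIN(a,b) ((a) < (b) ? (a) : (b))
	 * risking redefinition clashes; now the central macro is used. */
	return (MIN(link_max, if_mtu));
}

/* nd6_setmtu() follows, with its local MIN definition removed.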
*/ void nd6_setmtu(ifp) struct ifnet *ifp; { -#define MIN(a,b) ((a) < (b) ? (a) : (b)) struct nd_ifinfo *ndi = &nd_ifinfo[ifp->if_index]; u_long oldmaxmtu = ndi->maxmtu; u_long oldlinkmtu = ndi->linkmtu; switch (ifp->if_type) { case IFT_ARCNET: /* XXX MTU handling needs more work */ ndi->maxmtu = MIN(60480, ifp->if_mtu); break; case IFT_ETHER: ndi->maxmtu = MIN(ETHERMTU, ifp->if_mtu); break; case IFT_FDDI: ndi->maxmtu = MIN(FDDIIPMTU, ifp->if_mtu); break; case IFT_ATM: ndi->maxmtu = MIN(ATMMTU, ifp->if_mtu); break; case IFT_IEEE1394: /* XXX should be IEEE1394MTU(1500) */ ndi->maxmtu = MIN(ETHERMTU, ifp->if_mtu); break; #ifdef IFT_IEEE80211 case IFT_IEEE80211: /* XXX should be IEEE80211MTU(1500) */ ndi->maxmtu = MIN(ETHERMTU, ifp->if_mtu); break; #endif default: ndi->maxmtu = ifp->if_mtu; break; } if (oldmaxmtu != ndi->maxmtu) { /* * If the ND level MTU is not set yet, or if the maxmtu * is reset to a smaller value than the ND level MTU, * also reset the ND level MTU. */ if (ndi->linkmtu == 0 || ndi->maxmtu < ndi->linkmtu) { ndi->linkmtu = ndi->maxmtu; /* also adjust in6_maxmtu if necessary. */ if (oldlinkmtu == 0) { /* * XXX: the case analysis is grotty, but * it is not efficient to call in6_setmaxmtu() * here when we are during the initialization * procedure. */ if (in6_maxmtu < ndi->linkmtu) in6_maxmtu = ndi->linkmtu; } else in6_setmaxmtu(); } } #undef MIN } void nd6_option_init(opt, icmp6len, ndopts) void *opt; int icmp6len; union nd_opts *ndopts; { bzero(ndopts, sizeof(*ndopts)); ndopts->nd_opts_search = (struct nd_opt_hdr *)opt; ndopts->nd_opts_last = (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len); if (icmp6len == 0) { ndopts->nd_opts_done = 1; ndopts->nd_opts_search = NULL; } } /* * Take one ND option. */ struct nd_opt_hdr * nd6_option(ndopts) union nd_opts *ndopts; { struct nd_opt_hdr *nd_opt; int olen; if (!ndopts) panic("ndopts == NULL in nd6_option\n"); if (!ndopts->nd_opts_last) panic("uninitialized ndopts in nd6_option\n"); if (!ndopts->nd_opts_search) return NULL; if (ndopts->nd_opts_done) return NULL; nd_opt = ndopts->nd_opts_search; /* make sure nd_opt_len is inside the buffer */ if ((caddr_t)&nd_opt->nd_opt_len >= (caddr_t)ndopts->nd_opts_last) { bzero(ndopts, sizeof(*ndopts)); return NULL; } olen = nd_opt->nd_opt_len << 3; if (olen == 0) { /* * Message validation requires that all included * options have a length that is greater than zero. */ bzero(ndopts, sizeof(*ndopts)); return NULL; } ndopts->nd_opts_search = (struct nd_opt_hdr *)((caddr_t)nd_opt + olen); if (ndopts->nd_opts_search > ndopts->nd_opts_last) { /* option overruns the end of buffer, invalid */ bzero(ndopts, sizeof(*ndopts)); return NULL; } else if (ndopts->nd_opts_search == ndopts->nd_opts_last) { /* reached the end of options chain */ ndopts->nd_opts_done = 1; ndopts->nd_opts_search = NULL; } return nd_opt; } /* * Parse multiple ND options. * This function is much easier to use, for ND routines that do not need * multiple options of the same type. */ int nd6_options(ndopts) union nd_opts *ndopts; { struct nd_opt_hdr *nd_opt; int i = 0; if (!ndopts) panic("ndopts == NULL in nd6_options\n"); if (!ndopts->nd_opts_last) panic("uninitialized ndopts in nd6_options\n"); if (!ndopts->nd_opts_search) return 0; while (1) { nd_opt = nd6_option(ndopts); if (!nd_opt && !ndopts->nd_opts_last) { /* * Message validation requires that all included * options have a length that is greater than zero. 
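 */

/*
 * [Ed. note] The walk above leans on RFC 2461's encoding: every option
 * starts with a type byte and a length byte counted in 8-octet units
 * (hence olen = nd_opt_len << 3), and a zero length must abort parsing.
 * A stand-alone sketch of that validation, with hypothetical names:
 */
struct ndopt_hdr {
	unsigned char type;
	unsigned char len8;	/* length in 8-octet units */
};

/* Return the next option and advance *cur, or NULL when done/malformed. */
static const struct ndopt_hdr *
ndopt_next(const unsigned char **cur, const unsigned char *end)
{
	const struct ndopt_hdr *opt;
	unsigned olen;

	if (*cur + sizeof(*opt) > end)
		return (NULL);		/* header would overrun the buffer */
	opt = (const struct ndopt_hdr *)*cur;
	olen = (unsigned)opt->len8 << 3;
	if (olen == 0 || olen > (unsigned)(end - *cur))
		return (NULL);		/* zero-length or overrunning option */
	*cur += olen;
	return (opt);
}

/* back in nd6_options(): the malformed packet is counted and rejected.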
*/ icmp6stat.icp6s_nd_badopt++; bzero(ndopts, sizeof(*ndopts)); return -1; } if (!nd_opt) goto skip1; switch (nd_opt->nd_opt_type) { case ND_OPT_SOURCE_LINKADDR: case ND_OPT_TARGET_LINKADDR: case ND_OPT_MTU: case ND_OPT_REDIRECTED_HEADER: if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { nd6log((LOG_INFO, "duplicated ND6 option found (type=%d)\n", nd_opt->nd_opt_type)); /* XXX bark? */ } else { ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; } break; case ND_OPT_PREFIX_INFORMATION: if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) { ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; } ndopts->nd_opts_pi_end = (struct nd_opt_prefix_info *)nd_opt; break; default: /* * Unknown options must be silently ignored, * to accomodate future extension to the protocol. */ nd6log((LOG_DEBUG, "nd6_options: unsupported option %d - " "option ignored\n", nd_opt->nd_opt_type)); } skip1: i++; if (i > nd6_maxndopt) { icmp6stat.icp6s_nd_toomanyopt++; nd6log((LOG_INFO, "too many loop in nd opt\n")); break; } if (ndopts->nd_opts_done) break; } return 0; } /* * ND6 timer routine to expire default route list and prefix list */ void nd6_timer(ignored_arg) void *ignored_arg; { int s; struct llinfo_nd6 *ln; struct nd_defrouter *dr; struct nd_prefix *pr; struct ifnet *ifp; struct in6_ifaddr *ia6, *nia6; struct in6_addrlifetime *lt6; s = splnet(); callout_reset(&nd6_timer_ch, nd6_prune * hz, nd6_timer, NULL); ln = llinfo_nd6.ln_next; while (ln && ln != &llinfo_nd6) { struct rtentry *rt; struct sockaddr_in6 *dst; struct llinfo_nd6 *next = ln->ln_next; /* XXX: used for the DELAY case only: */ struct nd_ifinfo *ndi = NULL; if ((rt = ln->ln_rt) == NULL) { ln = next; continue; } if ((ifp = rt->rt_ifp) == NULL) { ln = next; continue; } ndi = &nd_ifinfo[ifp->if_index]; dst = (struct sockaddr_in6 *)rt_key(rt); if (ln->ln_expire > time_second) { ln = next; continue; } /* sanity check */ if (!rt) panic("rt=0 in nd6_timer(ln=%p)\n", ln); if (rt->rt_llinfo && (struct llinfo_nd6 *)rt->rt_llinfo != ln) panic("rt_llinfo(%p) is not equal to ln(%p)\n", rt->rt_llinfo, ln); if (!dst) panic("dst=0 in nd6_timer(ln=%p)\n", ln); switch (ln->ln_state) { case ND6_LLINFO_INCOMPLETE: if (ln->ln_asked < nd6_mmaxtries) { ln->ln_asked++; ln->ln_expire = time_second + nd_ifinfo[ifp->if_index].retrans / 1000; nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, 0); } else { struct mbuf *m = ln->ln_hold; if (m) { if (rt->rt_ifp) { /* * Fake rcvif to make ICMP error * more helpful in diagnosing * for the receiver. * XXX: should we consider * older rcvif? 
*/ m->m_pkthdr.rcvif = rt->rt_ifp; } icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 0); ln->ln_hold = NULL; } next = nd6_free(rt); } break; case ND6_LLINFO_REACHABLE: if (ln->ln_expire) { ln->ln_state = ND6_LLINFO_STALE; ln->ln_expire = time_second + nd6_gctimer; } break; case ND6_LLINFO_STALE: /* Garbage Collection(RFC 2461 5.3) */ if (ln->ln_expire) next = nd6_free(rt); break; case ND6_LLINFO_DELAY: if (ndi && (ndi->flags & ND6_IFF_PERFORMNUD) != 0) { /* We need NUD */ ln->ln_asked = 1; ln->ln_state = ND6_LLINFO_PROBE; ln->ln_expire = time_second + ndi->retrans / 1000; nd6_ns_output(ifp, &dst->sin6_addr, &dst->sin6_addr, ln, 0); } else { ln->ln_state = ND6_LLINFO_STALE; /* XXX */ ln->ln_expire = time_second + nd6_gctimer; } break; case ND6_LLINFO_PROBE: if (ln->ln_asked < nd6_umaxtries) { ln->ln_asked++; ln->ln_expire = time_second + nd_ifinfo[ifp->if_index].retrans / 1000; nd6_ns_output(ifp, &dst->sin6_addr, &dst->sin6_addr, ln, 0); } else { next = nd6_free(rt); } break; } ln = next; } /* expire default router list */ dr = TAILQ_FIRST(&nd_defrouter); while (dr) { if (dr->expire && dr->expire < time_second) { struct nd_defrouter *t; t = TAILQ_NEXT(dr, dr_entry); defrtrlist_del(dr); dr = t; } else { dr = TAILQ_NEXT(dr, dr_entry); } } /* * expire interface addresses. * in the past the loop was inside prefix expiry processing. * However, from a stricter speci-confrmance standpoint, we should * rather separate address lifetimes and prefix lifetimes. */ addrloop: for (ia6 = in6_ifaddr; ia6; ia6 = nia6) { nia6 = ia6->ia_next; /* check address lifetime */ lt6 = &ia6->ia6_lifetime; if (IFA6_IS_INVALID(ia6)) { int regen = 0; /* * If the expiring address is temporary, try * regenerating a new one. This would be useful when * we suspended a laptop PC, then turned it on after a * period that could invalidate all temporary * addresses. Although we may have to restart the * loop (see below), it must be after purging the * address. Otherwise, we'd see an infinite loop of * regeneration. */ if (ip6_use_tempaddr && (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0) { if (regen_tmpaddr(ia6) == 0) regen = 1; } in6_purgeaddr(&ia6->ia_ifa); if (regen) goto addrloop; /* XXX: see below */ } if (IFA6_IS_DEPRECATED(ia6)) { int oldflags = ia6->ia6_flags; ia6->ia6_flags |= IN6_IFF_DEPRECATED; /* * If a temporary address has just become deprecated, * regenerate a new one if possible. */ if (ip6_use_tempaddr && (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0 && (oldflags & IN6_IFF_DEPRECATED) == 0) { if (regen_tmpaddr(ia6) == 0) { /* * A new temporary address is * generated. * XXX: this means the address chain * has changed while we are still in * the loop. Although the change * would not cause disaster (because * it's not a deletion, but an * addition,) we'd rather restart the * loop just for safety. Or does this * significantly reduce performance?? */ goto addrloop; } } } else { /* * A new RA might have made a deprecated address * preferred. */ ia6->ia6_flags &= ~IN6_IFF_DEPRECATED; } } /* expire prefix list */ pr = nd_prefix.lh_first; while (pr) { /* * check prefix lifetime. * since pltime is just for autoconf, pltime processing for * prefix is not necessary. */ if (pr->ndpr_expire && pr->ndpr_expire < time_second) { struct nd_prefix *t; t = pr->ndpr_next; /* * address expiration and prefix expiration are * separate. NEVER perform in6_purgeaddr here. 
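 */

/*
 * [Ed. note] Every expiry loop in nd6_timer() uses the same discipline
 * seen here: fetch the successor before the current entry can be freed.
 * A generic sketch of deletion-safe traversal (node, expired, and destroy
 * are hypothetical):
 */
struct node {
	struct node *next;
	int expired;
};

static struct node *
reap(struct node *head, void (*destroy)(struct node *))
{
	struct node **pp = &head, *n;

	while ((n = *pp) != NULL) {
		if (n->expired) {
			*pp = n->next;	/* unlink via the saved successor */
			destroy(n);	/* only now may the node go away */
		} else {
			pp = &n->next;
		}
	}
	return (head);
}

/* nd6_timer() resumes: unlink the expired prefix.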
*/ prelist_remove(pr); pr = t; } else pr = pr->ndpr_next; } splx(s); } static int regen_tmpaddr(ia6) struct in6_ifaddr *ia6; /* deprecated/invalidated temporary address */ { struct ifaddr *ifa; struct ifnet *ifp; struct in6_ifaddr *public_ifa6 = NULL; ifp = ia6->ia_ifa.ifa_ifp; for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) { struct in6_ifaddr *it6; if (ifa->ifa_addr->sa_family != AF_INET6) continue; it6 = (struct in6_ifaddr *)ifa; /* ignore no autoconf addresses. */ if ((it6->ia6_flags & IN6_IFF_AUTOCONF) == 0) continue; /* ignore autoconf addresses with different prefixes. */ if (it6->ia6_ndpr == NULL || it6->ia6_ndpr != ia6->ia6_ndpr) continue; /* * Now we are looking at an autoconf address with the same * prefix as ours. If the address is temporary and is still * preferred, do not create another one. It would be rare, but * could happen, for example, when we resume a laptop PC after * a long period. */ if ((it6->ia6_flags & IN6_IFF_TEMPORARY) != 0 && !IFA6_IS_DEPRECATED(it6)) { public_ifa6 = NULL; break; } /* * This is a public autoconf address that has the same prefix * as ours. If it is preferred, keep it. We can't break the * loop here, because there may be a still-preferred temporary * address with the prefix. */ if (!IFA6_IS_DEPRECATED(it6)) public_ifa6 = it6; } if (public_ifa6 != NULL) { int e; if ((e = in6_tmpifadd(public_ifa6, 0)) != 0) { log(LOG_NOTICE, "regen_tmpaddr: failed to create a new" " tmp addr,errno=%d\n", e); return(-1); } return(0); } return(-1); } /* * Nuke neighbor cache/prefix/default router management table, right before * ifp goes away. */ void nd6_purge(ifp) struct ifnet *ifp; { struct llinfo_nd6 *ln, *nln; struct nd_defrouter *dr, *ndr, drany; struct nd_prefix *pr, *npr; /* Nuke default router list entries toward ifp */ if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) { /* * The first entry of the list may be stored in * the routing table, so we'll delete it later. */ for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = ndr) { ndr = TAILQ_NEXT(dr, dr_entry); if (dr->ifp == ifp) defrtrlist_del(dr); } dr = TAILQ_FIRST(&nd_defrouter); if (dr->ifp == ifp) defrtrlist_del(dr); } /* Nuke prefix list entries toward ifp */ for (pr = nd_prefix.lh_first; pr; pr = npr) { npr = pr->ndpr_next; if (pr->ndpr_ifp == ifp) { /* * Previously, pr->ndpr_addr is removed as well, * but I strongly believe we don't have to do it. * nd6_purge() is only called from in6_ifdetach(), * which removes all the associated interface addresses * by itself. * (jinmei@kame.net 20010129) */ prelist_remove(pr); } } /* cancel default outgoing interface setting */ if (nd6_defifindex == ifp->if_index) nd6_setdefaultiface(0); if (!ip6_forwarding && ip6_accept_rtadv) { /* XXX: too restrictive? */ /* refresh default router list */ bzero(&drany, sizeof(drany)); defrouter_delreq(&drany, 0); defrouter_select(); } /* * Nuke neighbor cache entries for the ifp. * Note that rt->rt_ifp may not be the same as ifp, * due to KAME goto ours hack. See RTM_RESOLVE case in * nd6_rtrequest(), and ip6_input(). 
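 */

/*
 * [Ed. note] llinfo_nd6 is a self-linked sentinel ({&llinfo_nd6,
 * &llinfo_nd6} at initialization), so the walk below terminates when it
 * comes back around to the head rather than on NULL. A minimal sketch of
 * the same structure; dnode and the helper names are hypothetical:
 */
struct dnode {
	struct dnode *next, *prev;
};

static void
dlist_init(struct dnode *head)
{
	head->next = head->prev = head;	/* empty list points at itself */
}

static void
dlist_insert_head(struct dnode *head, struct dnode *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

#define DLIST_FOREACH(n, head) \
	for ((n) = (head)->next; (n) != (head); (n) = (n)->next)

/* nd6_purge() resumes with exactly such a walk.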
*/ ln = llinfo_nd6.ln_next; while (ln && ln != &llinfo_nd6) { struct rtentry *rt; struct sockaddr_dl *sdl; nln = ln->ln_next; rt = ln->ln_rt; if (rt && rt->rt_gateway && rt->rt_gateway->sa_family == AF_LINK) { sdl = (struct sockaddr_dl *)rt->rt_gateway; if (sdl->sdl_index == ifp->if_index) nln = nd6_free(rt); } ln = nln; } } struct rtentry * nd6_lookup(addr6, create, ifp) struct in6_addr *addr6; int create; struct ifnet *ifp; { struct rtentry *rt; struct sockaddr_in6 sin6; bzero(&sin6, sizeof(sin6)); sin6.sin6_len = sizeof(struct sockaddr_in6); sin6.sin6_family = AF_INET6; sin6.sin6_addr = *addr6; #ifdef SCOPEDROUTING sin6.sin6_scope_id = in6_addr2scopeid(ifp, addr6); #endif rt = rtalloc1((struct sockaddr *)&sin6, create, 0UL); if (rt && (rt->rt_flags & RTF_LLINFO) == 0) { /* * This is the case for the default route. * If we want to create a neighbor cache for the address, we * should free the route for the destination and allocate an * interface route. */ if (create) { RTFREE(rt); rt = 0; } } if (!rt) { if (create && ifp) { int e; /* * If no route is available and create is set, * we allocate a host route for the destination * and treat it like an interface route. * This hack is necessary for a neighbor which can't * be covered by our own prefix. */ struct ifaddr *ifa = ifaof_ifpforaddr((struct sockaddr *)&sin6, ifp); if (ifa == NULL) return(NULL); /* * Create a new route. RTF_LLINFO is necessary * to create a Neighbor Cache entry for the * destination in nd6_rtrequest which will be * called in rtrequest via ifa->ifa_rtrequest. */ if ((e = rtrequest(RTM_ADD, (struct sockaddr *)&sin6, ifa->ifa_addr, (struct sockaddr *)&all1_sa, (ifa->ifa_flags | RTF_HOST | RTF_LLINFO) & ~RTF_CLONING, &rt)) != 0) log(LOG_ERR, "nd6_lookup: failed to add route for a " "neighbor(%s), errno=%d\n", ip6_sprintf(addr6), e); if (rt == NULL) return(NULL); if (rt->rt_llinfo) { struct llinfo_nd6 *ln = (struct llinfo_nd6 *)rt->rt_llinfo; ln->ln_state = ND6_LLINFO_NOSTATE; } } else return(NULL); } rt->rt_refcnt--; /* * Validation for the entry. * Note that the check for rt_llinfo is necessary because a cloned * route from a parent route that has the L flag (e.g. the default * route to a p2p interface) may have the flag, too, while the * destination is not actually a neighbor. * XXX: we can't use rt->rt_ifp to check for the interface, since * it might be the loopback interface if the entry is for our * own address on a non-loopback interface. Instead, we should * use rt->rt_ifa->ifa_ifp, which would specify the REAL * interface. */ if ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 || rt->rt_gateway->sa_family != AF_LINK || rt->rt_llinfo == NULL || (ifp && rt->rt_ifa->ifa_ifp != ifp)) { if (create) { log(LOG_DEBUG, "nd6_lookup: failed to lookup %s (if = %s)\n", ip6_sprintf(addr6), ifp ? if_name(ifp) : "unspec"); /* xxx more logs... kazu */ } return(NULL); } return(rt); } /* * Detect if a given IPv6 address identifies a neighbor on a given link. * XXX: should take care of the destination of a p2p link? */ int nd6_is_addr_neighbor(addr, ifp) struct sockaddr_in6 *addr; struct ifnet *ifp; { struct ifaddr *ifa; int i; #define IFADDR6(a) ((((struct in6_ifaddr *)(a))->ia_addr).sin6_addr) #define IFMASK6(a) ((((struct in6_ifaddr *)(a))->ia_prefixmask).sin6_addr) /* * A link-local address is always a neighbor. * XXX: we should use the sin6_scope_id field rather than the embedded * interface index. 
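 */

/*
 * [Ed. note] The check below relies on the KAME convention of embedding
 * the interface index in bytes 2-3 of a link-local address (those bytes
 * are always zero on the wire). A userland-style sketch of reading and
 * clearing the embedded index; the helper names are hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>		/* ntohs */

static uint16_t
embedded_scope_get(const struct in6_addr *a)
{
	uint16_t idx;

	memcpy(&idx, &a->s6_addr[2], sizeof(idx));
	return (ntohs(idx));	/* 0 when nothing is embedded */
}

static void
embedded_scope_clear(struct in6_addr *a)
{
	a->s6_addr[2] = 0;	/* must be restored before the wire */
	a->s6_addr[3] = 0;
}

/* nd6_is_addr_neighbor() resumes.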
*/ if (IN6_IS_ADDR_LINKLOCAL(&addr->sin6_addr) && ntohs(*(u_int16_t *)&addr->sin6_addr.s6_addr[2]) == ifp->if_index) return(1); /* * If the address matches one of our addresses, * it should be a neighbor. */ for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) { if (ifa->ifa_addr->sa_family != AF_INET6) next: continue; for (i = 0; i < 4; i++) { if ((IFADDR6(ifa).s6_addr32[i] ^ addr->sin6_addr.s6_addr32[i]) & IFMASK6(ifa).s6_addr32[i]) goto next; } return(1); } /* * Even if the address matches none of our addresses, it might be * in the neighbor cache. */ if (nd6_lookup(&addr->sin6_addr, 0, ifp) != NULL) return(1); return(0); #undef IFADDR6 #undef IFMASK6 } /* * Free an nd6 llinfo entry. */ struct llinfo_nd6 * nd6_free(rt) struct rtentry *rt; { struct llinfo_nd6 *ln = (struct llinfo_nd6 *)rt->rt_llinfo, *next; struct in6_addr in6 = ((struct sockaddr_in6 *)rt_key(rt))->sin6_addr; struct nd_defrouter *dr; /* * we used to have pfctlinput(PRC_HOSTDEAD) here. * even though it is not harmful, it was not really necessary. */ if (!ip6_forwarding && ip6_accept_rtadv) { /* XXX: too restrictive? */ int s; s = splnet(); dr = defrouter_lookup(&((struct sockaddr_in6 *)rt_key(rt))->sin6_addr, rt->rt_ifp); if (ln->ln_router || dr) { /* * rt6_flush must be called whether or not the neighbor * is in the Default Router List. * See a corresponding comment in nd6_na_input(). */ rt6_flush(&in6, rt->rt_ifp); } if (dr) { /* * Unreachablity of a router might affect the default * router selection and on-link detection of advertised * prefixes. */ /* * Temporarily fake the state to choose a new default * router and to perform on-link determination of * prefixes correctly. * Below the state will be set correctly, * or the entry itself will be deleted. */ ln->ln_state = ND6_LLINFO_INCOMPLETE; /* * Since defrouter_select() does not affect the * on-link determination and MIP6 needs the check * before the default router selection, we perform * the check now. */ pfxlist_onlink_check(); if (dr == TAILQ_FIRST(&nd_defrouter)) { /* * It is used as the current default router, * so we have to move it to the end of the * list and choose a new one. * XXX: it is not very efficient if this is * the only router. */ TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); TAILQ_INSERT_TAIL(&nd_defrouter, dr, dr_entry); defrouter_select(); } } splx(s); } /* * Before deleting the entry, remember the next entry as the * return value. We need this because pfxlist_onlink_check() above * might have freed other entries (particularly the old next entry) as * a side effect (XXX). */ next = ln->ln_next; /* * Detach the route from the routing tree and the list of neighbor * caches, and disable the route entry not to be used in already * cached routes. */ rtrequest(RTM_DELETE, rt_key(rt), (struct sockaddr *)0, rt_mask(rt), 0, (struct rtentry **)0); return(next); } /* * Upper-layer reachability hint for Neighbor Unreachability Detection. * * XXX cost-effective metods? */ void nd6_nud_hint(rt, dst6, force) struct rtentry *rt; struct in6_addr *dst6; int force; { struct llinfo_nd6 *ln; /* * If the caller specified "rt", use that. Otherwise, resolve the * routing table by supplied "dst6". */ if (!rt) { if (!dst6) return; if (!(rt = nd6_lookup(dst6, 0, NULL))) return; } if ((rt->rt_flags & RTF_GATEWAY) != 0 || (rt->rt_flags & RTF_LLINFO) == 0 || !rt->rt_llinfo || !rt->rt_gateway || rt->rt_gateway->sa_family != AF_LINK) { /* This is not a host route. 
*/ return; } ln = (struct llinfo_nd6 *)rt->rt_llinfo; if (ln->ln_state < ND6_LLINFO_REACHABLE) return; /* * if we get upper-layer reachability confirmation many times, * it is possible we have false information. */ if (!force) { ln->ln_byhint++; if (ln->ln_byhint > nd6_maxnudhint) return; } ln->ln_state = ND6_LLINFO_REACHABLE; if (ln->ln_expire) ln->ln_expire = time_second + nd_ifinfo[rt->rt_ifp->if_index].reachable; } void nd6_rtrequest(req, rt, info) int req; struct rtentry *rt; struct rt_addrinfo *info; /* xxx unused */ { struct sockaddr *gate = rt->rt_gateway; struct llinfo_nd6 *ln = (struct llinfo_nd6 *)rt->rt_llinfo; static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK}; struct ifnet *ifp = rt->rt_ifp; struct ifaddr *ifa; if ((rt->rt_flags & RTF_GATEWAY)) return; if (nd6_need_cache(ifp) == 0 && (rt->rt_flags & RTF_HOST) == 0) { /* * This is probably an interface direct route for a link * which does not need neighbor caches (e.g. fe80::%lo0/64). * We do not need special treatment below for such a route. * Moreover, the RTF_LLINFO flag which would be set below * would annoy the ndp(8) command. */ return; } if (req == RTM_RESOLVE && (nd6_need_cache(ifp) == 0 || /* stf case */ !nd6_is_addr_neighbor((struct sockaddr_in6 *)rt_key(rt), ifp))) { /* * FreeBSD and BSD/OS often make a cloned host route based * on a less-specific route (e.g. the default route). * If the less specific route does not have a "gateway" * (this is the case when the route just goes to a p2p or an * stf interface), we'll mistakenly make a neighbor cache for * the host route, and will see strange neighbor solicitation * for the corresponding destination. In order to avoid the * confusion, we check if the destination of the route is * a neighbor in terms of neighbor discovery, and stop the * process if not. Additionally, we remove the LLINFO flag * so that ndp(8) will not try to get the neighbor information * of the destination. */ rt->rt_flags &= ~RTF_LLINFO; return; } switch (req) { case RTM_ADD: /* * There is no backward compatibility :) * * if ((rt->rt_flags & RTF_HOST) == 0 && * SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff) * rt->rt_flags |= RTF_CLONING; */ if (rt->rt_flags & (RTF_CLONING | RTF_LLINFO)) { /* * Case 1: This route should come from * a route to interface. RTF_LLINFO flag is set * for a host route whose destination should be * treated as on-link. */ rt_setgate(rt, rt_key(rt), (struct sockaddr *)&null_sdl); gate = rt->rt_gateway; SDL(gate)->sdl_type = ifp->if_type; SDL(gate)->sdl_index = ifp->if_index; if (ln) ln->ln_expire = time_second; #if 1 if (ln && ln->ln_expire == 0) { /* kludge for desktops */ #if 0 printf("nd6_rtequest: time.tv_sec is zero; " "treat it as 1\n"); #endif ln->ln_expire = 1; } #endif if ((rt->rt_flags & RTF_CLONING)) break; } /* * In IPv4 code, we try to annonuce new RTF_ANNOUNCE entry here. * We don't do that here since llinfo is not ready yet. * * There are also couple of other things to be discussed: * - unsolicited NA code needs improvement beforehand * - RFC2461 says we MAY send multicast unsolicited NA * (7.2.6 paragraph 4), however, it also says that we * SHOULD provide a mechanism to prevent multicast NA storm. * we don't have anything like it right now. * note that the mechanism needs a mutual agreement * between proxies, which means that we need to implement * a new protocol, or a new kludge. * - from RFC2461 6.2.4, host MUST NOT send an unsolicited NA. * we need to check ip6forwarding before sending it. 
* (or should we allow proxy ND configuration only for * routers? there's no mention about proxy ND from hosts) */ #if 0 /* XXX it does not work */ if (rt->rt_flags & RTF_ANNOUNCE) nd6_na_output(ifp, &SIN6(rt_key(rt))->sin6_addr, &SIN6(rt_key(rt))->sin6_addr, ip6_forwarding ? ND_NA_FLAG_ROUTER : 0, 1, NULL); #endif /* FALLTHROUGH */ case RTM_RESOLVE: if ((ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) == 0) { /* * Address resolution isn't necessary for a point to * point link, so we can skip this test for a p2p link. */ if (gate->sa_family != AF_LINK || gate->sa_len < sizeof(null_sdl)) { log(LOG_DEBUG, "nd6_rtrequest: bad gateway value: %s\n", if_name(ifp)); break; } SDL(gate)->sdl_type = ifp->if_type; SDL(gate)->sdl_index = ifp->if_index; } if (ln != NULL) break; /* This happens on a route change */ /* * Case 2: This route may come from cloning, or a manual route * add with a LL address. */ R_Malloc(ln, struct llinfo_nd6 *, sizeof(*ln)); rt->rt_llinfo = (caddr_t)ln; if (!ln) { log(LOG_DEBUG, "nd6_rtrequest: malloc failed\n"); break; } nd6_inuse++; nd6_allocated++; Bzero(ln, sizeof(*ln)); ln->ln_rt = rt; /* this is required for "ndp" command. - shin */ if (req == RTM_ADD) { /* * gate should have some valid AF_LINK entry, * and ln->ln_expire should have some lifetime * which is specified by ndp command. */ ln->ln_state = ND6_LLINFO_REACHABLE; ln->ln_byhint = 0; } else { /* * When req == RTM_RESOLVE, rt is created and * initialized in rtrequest(), so rt_expire is 0. */ ln->ln_state = ND6_LLINFO_NOSTATE; ln->ln_expire = time_second; } rt->rt_flags |= RTF_LLINFO; ln->ln_next = llinfo_nd6.ln_next; llinfo_nd6.ln_next = ln; ln->ln_prev = &llinfo_nd6; ln->ln_next->ln_prev = ln; /* * check if rt_key(rt) is one of my address assigned * to the interface. */ ifa = (struct ifaddr *)in6ifa_ifpwithaddr(rt->rt_ifp, &SIN6(rt_key(rt))->sin6_addr); if (ifa) { caddr_t macp = nd6_ifptomac(ifp); ln->ln_expire = 0; ln->ln_state = ND6_LLINFO_REACHABLE; ln->ln_byhint = 0; if (macp) { Bcopy(macp, LLADDR(SDL(gate)), ifp->if_addrlen); SDL(gate)->sdl_alen = ifp->if_addrlen; } if (nd6_useloopback) { rt->rt_ifp = &loif[0]; /* XXX */ /* * Make sure rt_ifa be equal to the ifaddr * corresponding to the address. * We need this because when we refer * rt_ifa->ia6_flags in ip6_input, we assume * that the rt_ifa points to the address instead * of the loopback address. 
*/ if (ifa != rt->rt_ifa) { IFAFREE(rt->rt_ifa); IFAREF(ifa); rt->rt_ifa = ifa; } } } else if (rt->rt_flags & RTF_ANNOUNCE) { ln->ln_expire = 0; ln->ln_state = ND6_LLINFO_REACHABLE; ln->ln_byhint = 0; /* join solicited node multicast for proxy ND */ if (ifp->if_flags & IFF_MULTICAST) { struct in6_addr llsol; int error; llsol = SIN6(rt_key(rt))->sin6_addr; llsol.s6_addr16[0] = htons(0xff02); llsol.s6_addr16[1] = htons(ifp->if_index); llsol.s6_addr32[1] = 0; llsol.s6_addr32[2] = htonl(1); llsol.s6_addr8[12] = 0xff; if (!in6_addmulti(&llsol, ifp, &error)) { nd6log((LOG_ERR, "%s: failed to join " "%s (errno=%d)\n", if_name(ifp), ip6_sprintf(&llsol), error)); } } } break; case RTM_DELETE: if (!ln) break; /* leave from solicited node multicast for proxy ND */ if ((rt->rt_flags & RTF_ANNOUNCE) != 0 && (ifp->if_flags & IFF_MULTICAST) != 0) { struct in6_addr llsol; struct in6_multi *in6m; llsol = SIN6(rt_key(rt))->sin6_addr; llsol.s6_addr16[0] = htons(0xff02); llsol.s6_addr16[1] = htons(ifp->if_index); llsol.s6_addr32[1] = 0; llsol.s6_addr32[2] = htonl(1); llsol.s6_addr8[12] = 0xff; IN6_LOOKUP_MULTI(llsol, ifp, in6m); if (in6m) in6_delmulti(in6m); } nd6_inuse--; ln->ln_next->ln_prev = ln->ln_prev; ln->ln_prev->ln_next = ln->ln_next; ln->ln_prev = NULL; rt->rt_llinfo = 0; rt->rt_flags &= ~RTF_LLINFO; if (ln->ln_hold) m_freem(ln->ln_hold); Free((caddr_t)ln); } } int nd6_ioctl(cmd, data, ifp) u_long cmd; caddr_t data; struct ifnet *ifp; { struct in6_drlist *drl = (struct in6_drlist *)data; struct in6_prlist *prl = (struct in6_prlist *)data; struct in6_ndireq *ndi = (struct in6_ndireq *)data; struct in6_nbrinfo *nbi = (struct in6_nbrinfo *)data; struct in6_ndifreq *ndif = (struct in6_ndifreq *)data; struct nd_defrouter *dr, any; struct nd_prefix *pr; struct rtentry *rt; int i = 0, error = 0; int s; switch (cmd) { case SIOCGDRLST_IN6: /* * obsolete API, use sysctl under net.inet6.icmp6 */ bzero(drl, sizeof(*drl)); s = splnet(); dr = TAILQ_FIRST(&nd_defrouter); while (dr && i < DRLSTSIZ) { drl->defrouter[i].rtaddr = dr->rtaddr; if (IN6_IS_ADDR_LINKLOCAL(&drl->defrouter[i].rtaddr)) { /* XXX: need to this hack for KAME stack */ drl->defrouter[i].rtaddr.s6_addr16[1] = 0; } else log(LOG_ERR, "default router list contains a " "non-linklocal address(%s)\n", ip6_sprintf(&drl->defrouter[i].rtaddr)); drl->defrouter[i].flags = dr->flags; drl->defrouter[i].rtlifetime = dr->rtlifetime; drl->defrouter[i].expire = dr->expire; drl->defrouter[i].if_index = dr->ifp->if_index; i++; dr = TAILQ_NEXT(dr, dr_entry); } splx(s); break; case SIOCGPRLST_IN6: /* * obsolete API, use sysctl under net.inet6.icmp6 */ /* * XXX meaning of fields, especialy "raflags", is very * differnet between RA prefix list and RR/static prefix list. * how about separating ioctls into two? 
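The "hack for KAME stack" in the SIOCGDRLST_IN6 handler above undoes the KAME convention of keeping the interface index in bytes 2-3 of a link-local address while inside the kernel. A hedged sketch of both directions of that convention (embed_scope/clear_scope are invented names):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons */

/* Embed the outgoing interface index into a link-local address,
 * KAME-style: bytes 2 and 3 carry the index inside the kernel. */
static void
embed_scope(uint8_t a[16], uint16_t ifindex)
{
	uint16_t v = htons(ifindex);

	memcpy(&a[2], &v, sizeof(v));
}

/* Zero the embedded index before exporting the address to userland,
 * which is what the ioctl handler above does before copyout. */
static void
clear_scope(uint8_t a[16])
{
	a[2] = 0;
	a[3] = 0;
}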
*/ bzero(prl, sizeof(*prl)); s = splnet(); pr = nd_prefix.lh_first; while (pr && i < PRLSTSIZ) { struct nd_pfxrouter *pfr; int j; (void)in6_embedscope(&prl->prefix[i].prefix, &pr->ndpr_prefix, NULL, NULL); prl->prefix[i].raflags = pr->ndpr_raf; prl->prefix[i].prefixlen = pr->ndpr_plen; prl->prefix[i].vltime = pr->ndpr_vltime; prl->prefix[i].pltime = pr->ndpr_pltime; prl->prefix[i].if_index = pr->ndpr_ifp->if_index; prl->prefix[i].expire = pr->ndpr_expire; pfr = pr->ndpr_advrtrs.lh_first; j = 0; while (pfr) { if (j < DRLSTSIZ) { #define RTRADDR prl->prefix[i].advrtr[j] RTRADDR = pfr->router->rtaddr; if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) { /* XXX: hack for KAME */ RTRADDR.s6_addr16[1] = 0; } else log(LOG_ERR, "a router(%s) advertises " "a prefix with " "non-link local address\n", ip6_sprintf(&RTRADDR)); #undef RTRADDR } j++; pfr = pfr->pfr_next; } prl->prefix[i].advrtrs = j; prl->prefix[i].origin = PR_ORIG_RA; i++; pr = pr->ndpr_next; } { struct rr_prefix *rpp; for (rpp = LIST_FIRST(&rr_prefix); rpp; rpp = LIST_NEXT(rpp, rp_entry)) { if (i >= PRLSTSIZ) break; (void)in6_embedscope(&prl->prefix[i].prefix, &pr->ndpr_prefix, NULL, NULL); prl->prefix[i].raflags = rpp->rp_raf; prl->prefix[i].prefixlen = rpp->rp_plen; prl->prefix[i].vltime = rpp->rp_vltime; prl->prefix[i].pltime = rpp->rp_pltime; prl->prefix[i].if_index = rpp->rp_ifp->if_index; prl->prefix[i].expire = rpp->rp_expire; prl->prefix[i].advrtrs = 0; prl->prefix[i].origin = rpp->rp_origin; i++; } } splx(s); break; case OSIOCGIFINFO_IN6: if (!nd_ifinfo || i >= nd_ifinfo_indexlim) { error = EINVAL; break; } ndi->ndi.linkmtu = nd_ifinfo[ifp->if_index].linkmtu; ndi->ndi.maxmtu = nd_ifinfo[ifp->if_index].maxmtu; ndi->ndi.basereachable = nd_ifinfo[ifp->if_index].basereachable; ndi->ndi.reachable = nd_ifinfo[ifp->if_index].reachable; ndi->ndi.retrans = nd_ifinfo[ifp->if_index].retrans; ndi->ndi.flags = nd_ifinfo[ifp->if_index].flags; ndi->ndi.recalctm = nd_ifinfo[ifp->if_index].recalctm; ndi->ndi.chlim = nd_ifinfo[ifp->if_index].chlim; ndi->ndi.receivedra = nd_ifinfo[ifp->if_index].receivedra; break; case SIOCGIFINFO_IN6: if (!nd_ifinfo || i >= nd_ifinfo_indexlim) { error = EINVAL; break; } ndi->ndi = nd_ifinfo[ifp->if_index]; break; case SIOCSIFINFO_FLAGS: /* XXX: almost all other fields of ndi->ndi is unused */ if (!nd_ifinfo || i >= nd_ifinfo_indexlim) { error = EINVAL; break; } nd_ifinfo[ifp->if_index].flags = ndi->ndi.flags; break; case SIOCSNDFLUSH_IN6: /* XXX: the ioctl name is confusing... */ /* flush default router list */ /* * xxx sumikawa: should not delete route if default * route equals to the top of default router list */ bzero(&any, sizeof(any)); defrouter_delreq(&any, 0); defrouter_select(); /* xxx sumikawa: flush prefix list */ break; case SIOCSPFXFLUSH_IN6: { /* flush all the prefix advertised by routers */ struct nd_prefix *pr, *next; s = splnet(); for (pr = nd_prefix.lh_first; pr; pr = next) { struct in6_ifaddr *ia, *ia_next; next = pr->ndpr_next; if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr)) continue; /* XXX */ /* do we really have to remove addresses as well? */ for (ia = in6_ifaddr; ia; ia = ia_next) { /* ia might be removed. keep the next ptr. 
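The "keep the next ptr" note above is the classic delete-while-iterating guard: the next pointer must be read before the current element can be freed out from under the walk. A generic sketch of the idiom on a singly linked list (struct node and purge() are illustrative):

#include <stdlib.h>

struct node {
	struct node *next;
	int	doomed;
};

/* Delete matching nodes while walking: fetch 'next' first, because
 * free() would invalidate cur->next afterwards. */
static struct node *
purge(struct node *head)
{
	struct node *cur, *next, **prevp = &head;

	for (cur = head; cur != NULL; cur = next) {
		next = cur->next;
		if (cur->doomed) {
			*prevp = next;
			free(cur);
		} else
			prevp = &cur->next;
	}
	return (head);
}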
*/ ia_next = ia->ia_next; if ((ia->ia6_flags & IN6_IFF_AUTOCONF) == 0) continue; if (ia->ia6_ndpr == pr) in6_purgeaddr(&ia->ia_ifa); } prelist_remove(pr); } splx(s); break; } case SIOCSRTRFLUSH_IN6: { /* flush all the default routers */ struct nd_defrouter *dr, *next; s = splnet(); if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) { /* * The first entry of the list may be stored in * the routing table, so we'll delete it later. */ for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = next) { next = TAILQ_NEXT(dr, dr_entry); defrtrlist_del(dr); } defrtrlist_del(TAILQ_FIRST(&nd_defrouter)); } splx(s); break; } case SIOCGNBRINFO_IN6: { struct llinfo_nd6 *ln; struct in6_addr nb_addr = nbi->addr; /* make local for safety */ /* * XXX: KAME specific hack for scoped addresses * XXXX: for other scopes than link-local? */ if (IN6_IS_ADDR_LINKLOCAL(&nbi->addr) || IN6_IS_ADDR_MC_LINKLOCAL(&nbi->addr)) { u_int16_t *idp = (u_int16_t *)&nb_addr.s6_addr[2]; if (*idp == 0) *idp = htons(ifp->if_index); } s = splnet(); if ((rt = nd6_lookup(&nb_addr, 0, ifp)) == NULL) { error = EINVAL; splx(s); break; } ln = (struct llinfo_nd6 *)rt->rt_llinfo; nbi->state = ln->ln_state; nbi->asked = ln->ln_asked; nbi->isrouter = ln->ln_router; nbi->expire = ln->ln_expire; splx(s); break; } case SIOCGDEFIFACE_IN6: /* XXX: should be implemented as a sysctl? */ ndif->ifindex = nd6_defifindex; break; case SIOCSDEFIFACE_IN6: /* XXX: should be implemented as a sysctl? */ return(nd6_setdefaultiface(ndif->ifindex)); break; } return(error); } /* * Create neighbor cache entry and cache link-layer address, * on reception of inbound ND6 packets. (RS/RA/NS/redirect) */ struct rtentry * nd6_cache_lladdr(ifp, from, lladdr, lladdrlen, type, code) struct ifnet *ifp; struct in6_addr *from; char *lladdr; int lladdrlen; int type; /* ICMP6 type */ int code; /* type dependent information */ { struct rtentry *rt = NULL; struct llinfo_nd6 *ln = NULL; int is_newentry; struct sockaddr_dl *sdl = NULL; int do_update; int olladdr; int llchange; int newstate = 0; if (!ifp) panic("ifp == NULL in nd6_cache_lladdr"); if (!from) panic("from == NULL in nd6_cache_lladdr"); /* nothing must be updated for unspecified address */ if (IN6_IS_ADDR_UNSPECIFIED(from)) return NULL; /* * Validation about ifp->if_addrlen and lladdrlen must be done in * the caller. * * XXX If the link does not have link-layer adderss, what should * we do? (ifp->if_addrlen == 0) * Spec says nothing in sections for RA, RS and NA. There's small * description on it in NS section (RFC 2461 7.2.3). */ rt = nd6_lookup(from, 0, ifp); if (!rt) { #if 0 /* nothing must be done if there's no lladdr */ if (!lladdr || !lladdrlen) return NULL; #endif rt = nd6_lookup(from, 1, ifp); is_newentry = 1; } else { /* do nothing if static ndp is set */ if (rt->rt_flags & RTF_STATIC) return NULL; is_newentry = 0; } if (!rt) return NULL; if ((rt->rt_flags & (RTF_GATEWAY | RTF_LLINFO)) != RTF_LLINFO) { fail: (void)nd6_free(rt); return NULL; } ln = (struct llinfo_nd6 *)rt->rt_llinfo; if (!ln) goto fail; if (!rt->rt_gateway) goto fail; if (rt->rt_gateway->sa_family != AF_LINK) goto fail; sdl = SDL(rt->rt_gateway); olladdr = (sdl->sdl_alen) ? 
1 : 0; if (olladdr && lladdr) { if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen)) llchange = 1; else llchange = 0; } else llchange = 0; /* * newentry olladdr lladdr llchange (*=record) * 0 n n -- (1) * 0 y n -- (2) * 0 n y -- (3) * STALE * 0 y y n (4) * * 0 y y y (5) * STALE * 1 -- n -- (6) NOSTATE(= PASSIVE) * 1 -- y -- (7) * STALE */ if (lladdr) { /* (3-5) and (7) */ /* * Record source link-layer address * XXX is it dependent on ifp->if_type? */ sdl->sdl_alen = ifp->if_addrlen; bcopy(lladdr, LLADDR(sdl), ifp->if_addrlen); } if (!is_newentry) { if ((!olladdr && lladdr) /* (3) */ || (olladdr && lladdr && llchange)) { /* (5) */ do_update = 1; newstate = ND6_LLINFO_STALE; } else /* (1-2,4) */ do_update = 0; } else { do_update = 1; if (!lladdr) /* (6) */ newstate = ND6_LLINFO_NOSTATE; else /* (7) */ newstate = ND6_LLINFO_STALE; } if (do_update) { /* * Update the state of the neighbor cache. */ ln->ln_state = newstate; if (ln->ln_state == ND6_LLINFO_STALE) { /* * XXX: since nd6_output() below will cause * a state transition to DELAY and reset the timer, * we must set the timer now, although it is actually * meaningless. */ ln->ln_expire = time_second + nd6_gctimer; if (ln->ln_hold) { /* * we assume ifp is not a p2p here, so just * set the 2nd argument as the 1st one. */ nd6_output(ifp, ifp, ln->ln_hold, (struct sockaddr_in6 *)rt_key(rt), rt); ln->ln_hold = NULL; } } else if (ln->ln_state == ND6_LLINFO_INCOMPLETE) { /* probe right away */ ln->ln_expire = time_second; } } /* * ICMP6 type dependent behavior. * * NS: clear IsRouter if new entry * RS: clear IsRouter * RA: set IsRouter if there's lladdr * redir: clear IsRouter if new entry * * RA case, (1): * The spec says that we must set IsRouter in the following cases: * - If lladdr exists, set IsRouter. This means (1-5). * - If it is an old entry (!newentry), set IsRouter. This means (7). * So, based on the spec, in (1-5) and (7) cases we must set IsRouter. * A question arises for the (1) case: (1) has no lladdr in the * neighbor cache, so it is similar to (6). * This case is rare but we figured that we MUST NOT set IsRouter. * * newentry olladdr lladdr llchange NS RS RA redir * D R * 0 n n -- (1) c ? s * 0 y n -- (2) c s s * 0 n y -- (3) c s s * 0 y y n (4) c s s * 0 y y y (5) c s s * 1 -- n -- (6) c c c s * 1 -- y -- (7) c c s c s * * (c=clear s=set) */ switch (type & 0xff) { case ND_NEIGHBOR_SOLICIT: /* * New entry must have is_router flag cleared. */ if (is_newentry) /* (6-7) */ ln->ln_router = 0; break; case ND_REDIRECT: /* * If the icmp is a redirect to a better router, always set the * is_router flag. Otherwise, if the entry is newly created, * clear the flag. [RFC 2461, sec 8.3] */ if (code == ND_REDIRECT_ROUTER) ln->ln_router = 1; else if (is_newentry) /* (6-7) */ ln->ln_router = 0; break; case ND_ROUTER_SOLICIT: /* * is_router flag must always be cleared. */ ln->ln_router = 0; break; case ND_ROUTER_ADVERT: /* * Mark an entry with lladdr as a router. */ if ((!is_newentry && (olladdr || lladdr)) /* (2-5) */ || (is_newentry && lladdr)) { /* (7) */ ln->ln_router = 1; } break; } /* * When the link-layer address of a router changes, select the * best router again. In particular, when the neighbor entry is newly * created, it might affect the selection policy. * Question: can we restrict the first condition to the "is_newentry" * case? * XXX: when we hear an RA from a new router with the link-layer * address option, defrouter_select() is called twice, since * defrtrlist_update() called the function as well.
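The first table above encodes the whole nd6_cache_lladdr() update rule; restating it as a small pure function can make the seven cases easier to audit against the code (the constants stand in for the ND6_LLINFO_* values and the helper itself is hypothetical):

enum { NOSTATE, INCOMPLETE, STALE };	/* stand-ins for ND6_LLINFO_* */

/* Mirror of cases (1)-(7): decide whether the entry changes and
 * which state it moves to.  Returns 1 and sets *newstate when the
 * cache should be updated, 0 otherwise. */
static int
lladdr_update(int is_new, int olladdr, int lladdr, int llchange,
    int *newstate)
{
	if (!is_new) {
		if ((!olladdr && lladdr) ||		/* (3): learn addr */
		    (olladdr && lladdr && llchange)) {	/* (5): addr changed */
			*newstate = STALE;
			return (1);
		}
		return (0);				/* (1-2,4): no-op */
	}
	*newstate = lladdr ? STALE : NOSTATE;		/* (7) / (6) */
	return (1);
}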
However, I believe * the overhead is tolerable, since it only happens the first * time. * XXX: although defrouter_select() should not have a bad effect * for hosts that are not autoconfigured, we explicitly avoid such * cases for safety. */ if (do_update && ln->ln_router && !ip6_forwarding && ip6_accept_rtadv) defrouter_select(); return rt; } static void nd6_slowtimo(ignored_arg) void *ignored_arg; { int s = splnet(); int i; struct nd_ifinfo *nd6if; callout_reset(&nd6_slowtimo_ch, ND6_SLOWTIMER_INTERVAL * hz, nd6_slowtimo, NULL); for (i = 1; i < if_index + 1; i++) { if (!nd_ifinfo || i >= nd_ifinfo_indexlim) continue; nd6if = &nd_ifinfo[i]; if (nd6if->basereachable && /* already initialized */ (nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) { /* * Since the reachable time is rarely changed by router * advertisements, we SHOULD ensure that a new random * value gets recomputed at least once every few hours. * (RFC 2461, 6.3.4) */ nd6if->recalctm = nd6_recalc_reachtm_interval; nd6if->reachable = ND_COMPUTE_RTIME(nd6if->basereachable); } } splx(s); } #define senderr(e) { error = (e); goto bad;} int nd6_output(ifp, origifp, m0, dst, rt0) struct ifnet *ifp; struct ifnet *origifp; struct mbuf *m0; struct sockaddr_in6 *dst; struct rtentry *rt0; { struct mbuf *m = m0; struct rtentry *rt = rt0; struct sockaddr_in6 *gw6 = NULL; struct llinfo_nd6 *ln = NULL; int error = 0; if (IN6_IS_ADDR_MULTICAST(&dst->sin6_addr)) goto sendpkt; if (nd6_need_cache(ifp) == 0) goto sendpkt; /* * Next hop determination. This routine is derived from ether_output. */ if (rt) { if ((rt->rt_flags & RTF_UP) == 0) { if ((rt0 = rt = rtalloc1((struct sockaddr *)dst, 1, 0UL)) != NULL) { rt->rt_refcnt--; if (rt->rt_ifp != ifp) { /* XXX: loop care? */ return nd6_output(ifp, origifp, m0, dst, rt); } } else senderr(EHOSTUNREACH); } if (rt->rt_flags & RTF_GATEWAY) { gw6 = (struct sockaddr_in6 *)rt->rt_gateway; /* * We skip link-layer address resolution and NUD * if the gateway is not a neighbor from the ND point * of view, regardless of the value of nd_ifinfo.flags. * The second condition is a bit tricky; we skip * if the gateway is our own address, which is * sometimes used to install a route to a p2p link. */ if (!nd6_is_addr_neighbor(gw6, ifp) || in6ifa_ifpwithaddr(ifp, &gw6->sin6_addr)) { /* * We allow this kind of tricky route only * when the outgoing interface is p2p. * XXX: we may need a more generic rule here. */ if ((ifp->if_flags & IFF_POINTOPOINT) == 0) senderr(EHOSTUNREACH); goto sendpkt; } if (rt->rt_gwroute == 0) goto lookup; if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) { rtfree(rt); rt = rt0; lookup: rt->rt_gwroute = rtalloc1(rt->rt_gateway, 1, 0UL); if ((rt = rt->rt_gwroute) == 0) senderr(EHOSTUNREACH); } } } /* * Address resolution or Neighbor Unreachability Detection * for the next hop. * At this point, the destination of the packet must be a unicast * or an anycast address (i.e. not a multicast). */ /* Look up the neighbor cache for the nexthop */ if (rt && (rt->rt_flags & RTF_LLINFO) != 0) ln = (struct llinfo_nd6 *)rt->rt_llinfo; else { /* * Since nd6_is_addr_neighbor() internally calls nd6_lookup(), * the condition below is not very efficient. But we believe * it is tolerable, because this should be a rare case.
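The rt_gwroute dance above (the goto lookup block) caches the route to the gateway and re-resolves it whenever the cached entry is missing or no longer up. The same revalidate-or-refetch pattern, stripped of the kernel types, looks roughly like this (struct route_ent and resolve() are invented stand-ins):

#include <stddef.h>

struct route_ent {
	int	up;			/* route still usable? */
	struct route_ent *gwroute;	/* cached route to the gateway */
};

struct route_ent *resolve(void);	/* stand-in for rtalloc1() */

/* Return a usable route to the gateway, re-resolving when the cached
 * one is missing or down (the kernel additionally drops the stale
 * reference with rtfree() at this point). */
static struct route_ent *
gw_route(struct route_ent *rt)
{
	if (rt->gwroute == NULL || !rt->gwroute->up)
		rt->gwroute = resolve();	/* may be NULL on failure */
	return (rt->gwroute);
}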
*/ if (nd6_is_addr_neighbor(dst, ifp) && (rt = nd6_lookup(&dst->sin6_addr, 1, ifp)) != NULL) ln = (struct llinfo_nd6 *)rt->rt_llinfo; } if (!ln || !rt) { if ((ifp->if_flags & IFF_POINTOPOINT) == 0 && !(nd_ifinfo[ifp->if_index].flags & ND6_IFF_PERFORMNUD)) { log(LOG_DEBUG, "nd6_output: can't allocate llinfo for %s " "(ln=%p, rt=%p)\n", ip6_sprintf(&dst->sin6_addr), ln, rt); senderr(EIO); /* XXX: good error? */ } goto sendpkt; /* send anyway */ } /* We don't have to do link-layer address resolution on a p2p link. */ if ((ifp->if_flags & IFF_POINTOPOINT) != 0 && ln->ln_state < ND6_LLINFO_REACHABLE) { ln->ln_state = ND6_LLINFO_STALE; ln->ln_expire = time_second + nd6_gctimer; } /* * The first time we send a packet to a neighbor whose entry is * STALE, we have to change the state to DELAY and a sets a timer to * expire in DELAY_FIRST_PROBE_TIME seconds to ensure do * neighbor unreachability detection on expiration. * (RFC 2461 7.3.3) */ if (ln->ln_state == ND6_LLINFO_STALE) { ln->ln_asked = 0; ln->ln_state = ND6_LLINFO_DELAY; ln->ln_expire = time_second + nd6_delay; } /* * If the neighbor cache entry has a state other than INCOMPLETE * (i.e. its link-layer address is already resolved), just * send the packet. */ if (ln->ln_state > ND6_LLINFO_INCOMPLETE) goto sendpkt; /* * There is a neighbor cache entry, but no ethernet address * response yet. Replace the held mbuf (if any) with this * latest one. * * This code conforms to the rate-limiting rule described in Section * 7.2.2 of RFC 2461, because the timer is set correctly after sending * an NS below. */ if (ln->ln_state == ND6_LLINFO_NOSTATE) ln->ln_state = ND6_LLINFO_INCOMPLETE; if (ln->ln_hold) m_freem(ln->ln_hold); ln->ln_hold = m; if (ln->ln_expire) { if (ln->ln_asked < nd6_mmaxtries && ln->ln_expire < time_second) { ln->ln_asked++; ln->ln_expire = time_second + nd_ifinfo[ifp->if_index].retrans / 1000; nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, 0); } } return(0); sendpkt: #ifdef MAC mac_create_mbuf_linklayer(ifp, m); #endif if ((ifp->if_flags & IFF_LOOPBACK) != 0) { return((*ifp->if_output)(origifp, m, (struct sockaddr *)dst, rt)); } return((*ifp->if_output)(ifp, m, (struct sockaddr *)dst, rt)); bad: if (m) m_freem(m); return (error); } #undef senderr int nd6_need_cache(ifp) struct ifnet *ifp; { /* * XXX: we currently do not make neighbor cache on any interface * other than ARCnet, Ethernet, FDDI and GIF. * * RFC2893 says: * - unidirectional tunnels needs no ND */ switch (ifp->if_type) { case IFT_ARCNET: case IFT_ETHER: case IFT_FDDI: case IFT_IEEE1394: #ifdef IFT_L2VLAN case IFT_L2VLAN: #endif #ifdef IFT_IEEE80211 case IFT_IEEE80211: #endif case IFT_GIF: /* XXX need more cases? */ return(1); default: return(0); } } int nd6_storelladdr(ifp, rt, m, dst, desten) struct ifnet *ifp; struct rtentry *rt; struct mbuf *m; struct sockaddr *dst; u_char *desten; { int i; struct sockaddr_dl *sdl; if (m->m_flags & M_MCAST) { switch (ifp->if_type) { case IFT_ETHER: case IFT_FDDI: #ifdef IFT_L2VLAN case IFT_L2VLAN: #endif #ifdef IFT_IEEE80211 case IFT_IEEE80211: #endif ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr, desten); return(1); case IFT_IEEE1394: /* * netbsd can use if_broadcastaddr, but we don't do so * to reduce # of ifdef. 
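The ETHER_MAP_IPV6_MULTICAST() call used for the M_MCAST case above follows RFC 2464: the Ethernet destination is 33:33 followed by the low-order 32 bits of the IPv6 group address. A standalone sketch of that mapping:

#include <stdint.h>

/* RFC 2464: destination MAC for an IPv6 multicast group is
 * 33:33:<last four bytes of the group address>. */
static void
ipv6_mcast_to_ether(const uint8_t group[16], uint8_t mac[6])
{
	mac[0] = 0x33;
	mac[1] = 0x33;
	mac[2] = group[12];
	mac[3] = group[13];
	mac[4] = group[14];
	mac[5] = group[15];
}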
*/ for (i = 0; i < ifp->if_addrlen; i++) desten[i] = ~0; return(1); case IFT_ARCNET: *desten = 0; return(1); default: m_freem(m); return(0); } } if (rt == NULL) { /* this could happen, if we could not allocate memory */ m_freem(m); return(0); } if (rt->rt_gateway->sa_family != AF_LINK) { printf("nd6_storelladdr: something odd happens\n"); m_freem(m); return(0); } sdl = SDL(rt->rt_gateway); if (sdl->sdl_alen == 0) { /* this should be impossible, but we bark here for debugging */ printf("nd6_storelladdr: sdl_alen == 0\n"); m_freem(m); return(0); } bcopy(LLADDR(sdl), desten, sdl->sdl_alen); return(1); } static int nd6_sysctl_drlist(SYSCTL_HANDLER_ARGS); static int nd6_sysctl_prlist(SYSCTL_HANDLER_ARGS); #ifdef SYSCTL_DECL SYSCTL_DECL(_net_inet6_icmp6); #endif SYSCTL_NODE(_net_inet6_icmp6, ICMPV6CTL_ND6_DRLIST, nd6_drlist, CTLFLAG_RD, nd6_sysctl_drlist, ""); SYSCTL_NODE(_net_inet6_icmp6, ICMPV6CTL_ND6_PRLIST, nd6_prlist, CTLFLAG_RD, nd6_sysctl_prlist, ""); static int nd6_sysctl_drlist(SYSCTL_HANDLER_ARGS) { int error; char buf[1024]; struct in6_defrouter *d, *de; struct nd_defrouter *dr; if (req->newptr) return EPERM; error = 0; for (dr = TAILQ_FIRST(&nd_defrouter); dr; dr = TAILQ_NEXT(dr, dr_entry)) { d = (struct in6_defrouter *)buf; de = (struct in6_defrouter *)(buf + sizeof(buf)); if (d + 1 <= de) { bzero(d, sizeof(*d)); d->rtaddr.sin6_family = AF_INET6; d->rtaddr.sin6_len = sizeof(d->rtaddr); if (in6_recoverscope(&d->rtaddr, &dr->rtaddr, dr->ifp) != 0) log(LOG_ERR, "scope error in " "default router list (%s)\n", ip6_sprintf(&dr->rtaddr)); d->flags = dr->flags; d->rtlifetime = dr->rtlifetime; d->expire = dr->expire; d->if_index = dr->ifp->if_index; } else panic("buffer too short"); error = SYSCTL_OUT(req, buf, sizeof(*d)); if (error) break; } return error; } static int nd6_sysctl_prlist(SYSCTL_HANDLER_ARGS) { int error; char buf[1024]; struct in6_prefix *p, *pe; struct nd_prefix *pr; if (req->newptr) return EPERM; error = 0; for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { u_short advrtrs; size_t advance; struct sockaddr_in6 *sin6, *s6; struct nd_pfxrouter *pfr; p = (struct in6_prefix *)buf; pe = (struct in6_prefix *)(buf + sizeof(buf)); if (p + 1 <= pe) { bzero(p, sizeof(*p)); sin6 = (struct sockaddr_in6 *)(p + 1); p->prefix = pr->ndpr_prefix; if (in6_recoverscope(&p->prefix, &p->prefix.sin6_addr, pr->ndpr_ifp) != 0) log(LOG_ERR, "scope error in prefix list (%s)\n", ip6_sprintf(&p->prefix.sin6_addr)); p->raflags = pr->ndpr_raf; p->prefixlen = pr->ndpr_plen; p->vltime = pr->ndpr_vltime; p->pltime = pr->ndpr_pltime; p->if_index = pr->ndpr_ifp->if_index; p->expire = pr->ndpr_expire; p->refcnt = pr->ndpr_refcnt; p->flags = pr->ndpr_stateflags; p->origin = PR_ORIG_RA; advrtrs = 0; for (pfr = pr->ndpr_advrtrs.lh_first; pfr; pfr = pfr->pfr_next) { if ((void *)&sin6[advrtrs + 1] > (void *)pe) { advrtrs++; continue; } s6 = &sin6[advrtrs]; bzero(s6, sizeof(*s6)); s6->sin6_family = AF_INET6; s6->sin6_len = sizeof(*sin6); if (in6_recoverscope(s6, &pfr->router->rtaddr, pfr->router->ifp) != 0) log(LOG_ERR, "scope error in " "prefix list (%s)\n", ip6_sprintf(&pfr->router->rtaddr)); advrtrs++; } p->advrtrs = advrtrs; } else panic("buffer too short"); advance = sizeof(*p) + sizeof(*sin6) * advrtrs; error = SYSCTL_OUT(req, buf, advance); if (error) break; } return error; } Index: head/sys/pccard/pccard.c =================================================================== --- head/sys/pccard/pccard.c (revision 110231) +++ head/sys/pccard/pccard.c (revision 110232) @@ -1,701 +1,699 @@ /* * pccard.c - 
Interface code for PC-CARD controllers. * * June 1995, Andrew McRae (andrew@mega.com.au) *------------------------------------------------------------------------- * * Copyright (c) 2001 M. Warner Losh. All rights reserved. * Copyright (c) 1995 Andrew McRae. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#define MIN(a,b) ((a)<(b)?(a):(b)) - static int allocate_driver(struct slot *, struct dev_desc *); static void inserted(void *); static void disable_slot(struct slot *); static void disable_slot_to(struct slot *); static void power_off_slot(void *); /* * The driver interface for read/write uses a block * of memory in the ISA I/O memory space allocated via * an ioctl setting. * * Now that we have different bus attachments, we should really * use a better algorythm to allocate memory. */ static unsigned long pccard_mem; /* Physical memory */ static unsigned char *pccard_kmem; /* Kernel virtual address */ static struct resource *pccard_mem_res; static int pccard_mem_rid; static d_open_t crdopen; static d_close_t crdclose; static d_read_t crdread; static d_write_t crdwrite; static d_ioctl_t crdioctl; static d_poll_t crdpoll; #define CDEV_MAJOR 50 static struct cdevsw crd_cdevsw = { /* open */ crdopen, /* close */ crdclose, /* read */ crdread, /* write */ crdwrite, /* ioctl */ crdioctl, /* poll */ crdpoll, /* mmap */ nommap, /* strategy */ nostrategy, /* name */ "crd", /* maj */ CDEV_MAJOR, /* dump */ nodump, /* psize */ nopsize, /* flags */ 0, }; /* * Power off the slot. * (doing it immediately makes the removal of some cards unstable) */ static void power_off_slot(void *arg) { struct slot *slt = (struct slot *)arg; int s; /* * The following will generate an interrupt. So, to hold off * the interrupt unitl after disable runs so that we can get rid * rid of the interrupt before it becomes unsafe to touch the * device. * * XXX In current, the spl stuff is a nop. */ s = splhigh(); /* Power off the slot. 
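power_off_slot() above runs off a timer on purpose: disable_slot() below schedules it half a second out, and the insertion path cancels it again with untimeout(), a debounce against flaky card seating. A userland sketch of that cancel-or-fire pattern using a deadline timestamp (all names invented):

#include <time.h>

struct slot_sk {
	time_t	poweroff_at;	/* 0 = no power-off pending */
};

/* Card pulled: arrange to cut power shortly, not immediately, so a
 * card still bouncing in the socket is not hit mid-cycle. */
static void
sched_poweroff(struct slot_sk *s, time_t now)
{
	s->poweroff_at = now + 1;	/* coarse stand-in for hz / 2 */
}

/* Card (re)inserted: cancel any pending power-off first. */
static void
cancel_poweroff(struct slot_sk *s)
{
	s->poweroff_at = 0;
}

/* Timer tick: fire once the deadline has passed. */
static int
poweroff_due(const struct slot_sk *s, time_t now)
{
	return (s->poweroff_at != 0 && now >= s->poweroff_at);
}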
*/ slt->pwr_off_pending = 0; slt->ctrl->disable(slt); splx(s); } /* * disable_slot - Disables the slot by removing * the power and unmapping the I/O */ static void disable_slot(struct slot *slt) { device_t pccarddev; device_t *kids; int nkids; int i; int ret; /* * Note that a race condition is possible here; if a * driver is accessing the device and it is removed, then * all bets are off... */ pccarddev = slt->dev; device_get_children(pccarddev, &kids, &nkids); for (i = 0; i < nkids; i++) { if ((ret = device_delete_child(pccarddev, kids[i])) != 0) printf("pccard: delete of %s failed: %d\n", device_get_nameunit(kids[i]), ret); } free(kids, M_TEMP); /* Power off the slot 1/2 second after removal of the card */ slt->poff_ch = timeout(power_off_slot, (caddr_t)slt, hz / 2); slt->pwr_off_pending = 1; } static void disable_slot_to(struct slot *slt) { disable_slot(slt); if (slt->state == empty) printf("pccard: card removed, slot %d\n", slt->slotnum); else printf("pccard: card deactivated, slot %d\n", slt->slotnum); pccard_remove_beep(); selwakeup(&slt->selp); } /* * pccard_init_slot - Initialize the slot controller and attach various * things to it. We also make the device for it. We create the device that * will be exported to devfs. */ struct slot * pccard_init_slot(device_t dev, struct slot_ctrl *ctrl) { int slotno; struct slot *slt; slt = PCCARD_DEVICE2SOFTC(dev); slotno = device_get_unit(dev); slt->dev = dev; slt->d = make_dev(&crd_cdevsw, slotno, 0, 0, 0600, "card%d", slotno); slt->d->si_drv1 = slt; slt->ctrl = ctrl; slt->slotnum = slotno; callout_handle_init(&slt->insert_ch); callout_handle_init(&slt->poff_ch); return (slt); } /* * allocate_driver - Create a new device entry for this * slot, and attach a driver to it. */ static int allocate_driver(struct slot *slt, struct dev_desc *desc) { struct pccard_devinfo *devi; device_t pccarddev; int err, irq = 0; device_t child; device_t *devs; int count; pccarddev = slt->dev; err = device_get_children(pccarddev, &devs, &count); if (err != 0) return (err); free(devs, M_TEMP); if (count) { device_printf(pccarddev, "Can not attach more than one child.\n"); return (EIO); } irq = ffs(desc->irqmask) - 1; MALLOC(devi, struct pccard_devinfo *, sizeof(*devi), M_DEVBUF, M_ZERO); strcpy(devi->name, desc->name); /* * Create an entry for the device under this slot. */ devi->running = 1; devi->slt = slt; bcopy(desc->misc, devi->misc, sizeof(desc->misc)); strcpy(devi->manufstr, desc->manufstr); strcpy(devi->versstr, desc->versstr); devi->manufacturer = desc->manufacturer; devi->product = desc->product; devi->prodext = desc->prodext; resource_list_init(&devi->resources); child = device_add_child(pccarddev, devi->name, desc->unit); if (child == NULL) { if (desc->unit != -1) device_printf(pccarddev, "Unit %d failed for %s, try a different unit\n", desc->unit, devi->name); else device_printf(pccarddev, "No units available for %s. 
Impossible?\n", devi->name); return (EIO); } device_set_flags(child, desc->flags); device_set_ivars(child, devi); if (bootverbose) { device_printf(pccarddev, "Assigning %s:", device_get_nameunit(child)); if (desc->iobase) printf(" io 0x%x-0x%x", desc->iobase, desc->iobase + desc->iosize - 1); if (irq) printf(" irq %d", irq); if (desc->mem) printf(" mem 0x%lx-0x%lx", desc->mem, desc->mem + desc->memsize - 1); printf(" flags 0x%x\n", desc->flags); } err = bus_set_resource(child, SYS_RES_IOPORT, 0, desc->iobase, desc->iosize); if (err) goto err; if (irq) err = bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1); if (err) goto err; if (desc->memsize) { err = bus_set_resource(child, SYS_RES_MEMORY, 0, desc->mem, desc->memsize); if (err) goto err; } err = device_probe_and_attach(child); /* * XXX We unwisely assume that the detach code won't run while the * XXX the attach code is attaching. Someone should put some * XXX interlock code. This can happen if probe/attach takes a while * XXX and the user ejects the card, which causes the detach * XXX function to be called. */ strncpy(desc->name, device_get_nameunit(child), sizeof(desc->name)); desc->name[sizeof(desc->name) - 1] = '\0'; err: if (err) device_delete_child(pccarddev, child); return (err); } /* * card insert routine - Called from a timeout to debounce * insertion events. */ static void inserted(void *arg) { struct slot *slt = arg; slt->state = filled; /* * Disable any pending timeouts for this slot, and explicitly * power it off right now. Then, re-enable the power using * the (possibly new) power settings. */ untimeout(power_off_slot, (caddr_t)slt, slt->poff_ch); power_off_slot(slt); /* * Enable 5V to the card so that the CIS can be read. Well, * enable the most natural voltage so that the CIS can be read. */ slt->pwr.vcc = -1; slt->pwr.vpp = -1; slt->ctrl->power(slt); printf("pccard: card inserted, slot %d\n", slt->slotnum); pccard_insert_beep(); slt->ctrl->reset(slt); } /* * Card event callback. Called at splhigh to prevent * device interrupts from interceding. */ void pccard_event(struct slot *slt, enum card_event event) { if (slt->insert_seq) { slt->insert_seq = 0; untimeout(inserted, (void *)slt, slt->insert_ch); } switch(event) { case card_removed: case card_deactivated: if (slt->state == filled || slt->state == inactive) { if (event == card_removed) slt->state = empty; else slt->state = inactive; disable_slot_to(slt); } break; case card_inserted: slt->insert_seq = 1; slt->insert_ch = timeout(inserted, (void *)slt, hz/4); break; } } /* * Device driver interface. */ static int crdopen(dev_t dev, int oflags, int devtype, d_thread_t *td) { struct slot *slt = PCCARD_DEV2SOFTC(dev); if (slt == NULL) return (ENXIO); if (slt->rwmem == 0) slt->rwmem = MDF_ATTR; return (0); } /* * Close doesn't de-allocate any resources, since * slots may be assigned to drivers already. */ static int crdclose(dev_t dev, int fflag, int devtype, d_thread_t *td) { return (0); } /* * read interface. Map memory at lseek offset, * then transfer to user space. 
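crdread() and crdwrite() below move data through a single PCCARD_MEMSIZE-sized window: each pass re-maps the window at the current offset and clips the copy to what remains of the window. A sketch of just that arithmetic (WINDOW stands in for PCCARD_MEMSIZE and must be a power of two):

#include <stddef.h>
#include <sys/types.h>	/* off_t */

#define WINDOW 0x4000UL	/* stand-in for PCCARD_MEMSIZE */

/* For a transfer at byte offset 'off' with 'resid' bytes left,
 * return how much fits before the window must be re-mapped, and
 * report the offset within the window via 'offs'. */
static size_t
window_chunk(off_t off, size_t resid, size_t *offs)
{
	size_t room;

	*offs = (size_t)off & (WINDOW - 1);	/* offset inside window */
	room = WINDOW - *offs;			/* bytes to window end */
	return (resid < room ? resid : room);	/* MIN(), as in the loop */
}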
*/ static int crdread(dev_t dev, struct uio *uio, int ioflag) { struct slot *slt = PCCARD_DEV2SOFTC(dev); struct mem_desc *mp, oldmap; unsigned char *p; unsigned int offs; int error = 0, win, count; if (slt == 0 || slt->state != filled) return (ENXIO); if (pccard_mem == 0) return (ENOMEM); for (win = slt->ctrl->maxmem - 1; win >= 0; win--) if ((slt->mem[win].flags & MDF_ACTIVE) == 0) break; if (win < 0) return (EBUSY); mp = &slt->mem[win]; oldmap = *mp; mp->flags = slt->rwmem | MDF_ACTIVE; while (uio->uio_resid && error == 0) { mp->card = uio->uio_offset; mp->size = PCCARD_MEMSIZE; mp->start = (caddr_t)(void *)(uintptr_t)pccard_mem; if ((error = slt->ctrl->mapmem(slt, win)) != 0) break; offs = (unsigned int)uio->uio_offset & (PCCARD_MEMSIZE - 1); p = pccard_kmem + offs; count = MIN(PCCARD_MEMSIZE - offs, uio->uio_resid); error = uiomove(p, count, uio); } /* * Restore original map. */ *mp = oldmap; slt->ctrl->mapmem(slt, win); return (error); } /* * crdwrite - Write data to card memory. * Handles wrap around so that only one memory * window is used. */ static int crdwrite(dev_t dev, struct uio *uio, int ioflag) { struct slot *slt = PCCARD_DEV2SOFTC(dev); struct mem_desc *mp, oldmap; unsigned char *p; unsigned int offs; int error = 0, win, count; if (slt == 0 || slt->state != filled) return (ENXIO); if (pccard_mem == 0) return (ENOMEM); for (win = slt->ctrl->maxmem - 1; win >= 0; win--) if ((slt->mem[win].flags & MDF_ACTIVE) == 0) break; if (win < 0) return (EBUSY); mp = &slt->mem[win]; oldmap = *mp; mp->flags = slt->rwmem | MDF_ACTIVE; while (uio->uio_resid && error == 0) { mp->card = uio->uio_offset; mp->size = PCCARD_MEMSIZE; mp->start = (caddr_t)(void *)(uintptr_t)pccard_mem; if ((error = slt->ctrl->mapmem(slt, win)) != 0) break; offs = (unsigned int)uio->uio_offset & (PCCARD_MEMSIZE - 1); p = pccard_kmem + offs; count = MIN(PCCARD_MEMSIZE - offs, uio->uio_resid); error = uiomove(p, count, uio); } /* * Restore original map. */ *mp = oldmap; slt->ctrl->mapmem(slt, win); return (error); } /* * ioctl calls - allows setting/getting of memory and I/O * descriptors, and assignment of drivers. */ static int crdioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, d_thread_t *td) { u_int32_t addr; int err; struct io_desc *ip; struct mem_desc *mp; device_t pccarddev; int pwval; int s; struct slot *slt = PCCARD_DEV2SOFTC(dev); if (slt == 0 && cmd != PIOCRWMEM) return (ENXIO); switch(cmd) { default: if (slt->ctrl->ioctl) return (slt->ctrl->ioctl(slt, cmd, data)); return (ENOTTY); /* * Get slot state. */ case PIOCGSTATE: s = splhigh(); ((struct slotstate *)data)->state = slt->state; ((struct slotstate *)data)->laststate = slt->laststate; slt->laststate = slt->state; splx(s); ((struct slotstate *)data)->maxmem = slt->ctrl->maxmem; ((struct slotstate *)data)->maxio = slt->ctrl->maxio; ((struct slotstate *)data)->irqs = 0; break; /* * Get memory context. */ case PIOCGMEM: s = ((struct mem_desc *)data)->window; if (s < 0 || s >= slt->ctrl->maxmem) return (EINVAL); mp = &slt->mem[s]; ((struct mem_desc *)data)->flags = mp->flags; ((struct mem_desc *)data)->start = mp->start; ((struct mem_desc *)data)->size = mp->size; ((struct mem_desc *)data)->card = mp->card; break; /* * Set memory context. If context already active, then unmap it. * It is hard to see how the parameters can be checked. * At the very least, we only allow root to set the context. 
*/ case PIOCSMEM: if (suser(td)) return (EPERM); if (slt->state != filled) return (ENXIO); s = ((struct mem_desc *)data)->window; if (s < 0 || s >= slt->ctrl->maxmem) return (EINVAL); slt->mem[s] = *((struct mem_desc *)data); return (slt->ctrl->mapmem(slt, s)); /* * Get I/O port context. */ case PIOCGIO: s = ((struct io_desc *)data)->window; if (s < 0 || s >= slt->ctrl->maxio) return (EINVAL); ip = &slt->io[s]; ((struct io_desc *)data)->flags = ip->flags; ((struct io_desc *)data)->start = ip->start; ((struct io_desc *)data)->size = ip->size; break; /* * Set I/O port context. */ case PIOCSIO: if (suser(td)) return (EPERM); if (slt->state != filled) return (ENXIO); s = ((struct io_desc *)data)->window; if (s < 0 || s >= slt->ctrl->maxio) return (EINVAL); slt->io[s] = *((struct io_desc *)data); /* XXX Don't actually map */ return (0); break; /* * Set memory window flags for read/write interface. */ case PIOCRWFLAG: slt->rwmem = *(int *)data; break; /* * Set the memory window to be used for the read/write interface. */ case PIOCRWMEM: if (*(unsigned long *)data == 0) { *(unsigned long *)data = pccard_mem; break; } if (suser(td)) return (EPERM); /* * Validate the memory by checking it against the I/O * memory range. It must also start on an aligned block size. */ if (*(unsigned long *)data & (PCCARD_MEMSIZE-1)) return (EINVAL); pccarddev = PCCARD_DEV2SOFTC(dev)->dev; pccard_mem_rid = 0; addr = *(unsigned long *)data; if (pccard_mem_res) bus_release_resource(pccarddev, SYS_RES_MEMORY, pccard_mem_rid, pccard_mem_res); pccard_mem_res = bus_alloc_resource(pccarddev, SYS_RES_MEMORY, &pccard_mem_rid, addr, addr, PCCARD_MEMSIZE, RF_ACTIVE | rman_make_alignment_flags(PCCARD_MEMSIZE)); if (pccard_mem_res == NULL) return (EINVAL); pccard_mem = rman_get_start(pccard_mem_res); pccard_kmem = rman_get_virtual(pccard_mem_res); break; /* * Set power values. */ case PIOCSPOW: slt->pwr = *(struct power *)data; return (slt->ctrl->power(slt)); /* * Allocate a driver to this slot. */ case PIOCSDRV: if (suser(td)) return (EPERM); err = allocate_driver(slt, (struct dev_desc *)data); if (!err) pccard_success_beep(); else pccard_failure_beep(); return (err); /* * Virtual removal/insertion */ case PIOCSVIR: pwval = *(int *)data; if (!pwval) { if (slt->state != filled) return (EINVAL); pccard_event(slt, card_deactivated); } else { if (slt->state != empty && slt->state != inactive) return (EINVAL); pccard_event(slt, card_inserted); } break; case PIOCSBEEP: if (pccard_beep_select(*(int *)data)) { return (EINVAL); } break; } return (0); } /* * poll - Poll on exceptions will return true * when a change in card status occurs. */ static int crdpoll(dev_t dev, int events, d_thread_t *td) { int revents = 0; int s; struct slot *slt = PCCARD_DEV2SOFTC(dev); if (events & (POLLIN | POLLRDNORM)) revents |= events & (POLLIN | POLLRDNORM); if (events & (POLLOUT | POLLWRNORM)) revents |= events & (POLLIN | POLLRDNORM); s = splhigh(); /* * select for exception - card event. */ if (events & POLLRDBAND) if (slt == 0 || slt->laststate != slt->state) revents |= POLLRDBAND; if (revents == 0) selrecord(td, &slt->selp); splx(s); return (revents); } /* * APM hooks for suspending and resuming. 
*/ int pccard_suspend(device_t dev) { struct slot *slt = PCCARD_DEVICE2SOFTC(dev); /* This code stolen from pccard_event:card_removed */ if (slt->state == filled) { int s = splhigh(); /* nop on current */ disable_slot(slt); slt->laststate = suspend; /* for pccardd */ slt->state = empty; splx(s); printf("pccard: card disabled, slot %d\n", slt->slotnum); } /* * Disable any pending timeouts for this slot since we're * powering it down/disabling now. */ untimeout(power_off_slot, (caddr_t)slt, slt->poff_ch); slt->ctrl->disable(slt); return (0); } int pccard_resume(device_t dev) { struct slot *slt = PCCARD_DEVICE2SOFTC(dev); slt->ctrl->resume(slt); return (0); } Index: head/sys/sys/param.h =================================================================== --- head/sys/sys/param.h (revision 110231) +++ head/sys/sys/param.h (revision 110232) @@ -1,327 +1,325 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)param.h 8.3 (Berkeley) 4/4/95 * $FreeBSD$ */ #ifndef _SYS_PARAM_H_ #define _SYS_PARAM_H_ #define BSD 199506 /* System version (year & month). */ #define BSD4_3 1 #define BSD4_4 1 /* * __FreeBSD_version numbers are documented in the Porter's Handbook. * If you bump the version for any reason, you should update the documentation * there. 
* Currently this lives here: * * doc/en_US.ISO8859-1/books/porters-handbook/book.sgml */ #undef __FreeBSD_version #define __FreeBSD_version 500100 /* Master, propagated to newvers */ #ifndef NULL #define NULL 0 #endif #ifndef LOCORE #include #endif /* * Machine-independent constants (some used in following include files). * Redefined constants are from POSIX 1003.1 limits file. * * MAXCOMLEN should be >= sizeof(ac_comm) (see ) * MAXLOGNAME should be == UT_NAMESIZE+1 (see ) */ #include #define MAXCOMLEN 19 /* max command name remembered */ #define MAXINTERP 32 /* max interpreter file name length */ #define MAXLOGNAME 17 /* max login name length (incl. NUL) */ #define MAXUPRC CHILD_MAX /* max simultaneous processes */ #define NCARGS ARG_MAX /* max bytes for an exec function */ #define NGROUPS NGROUPS_MAX /* max number groups */ #define NOFILE OPEN_MAX /* max open files per process */ #define NOGROUP 65535 /* marker for empty group set member */ #define MAXHOSTNAMELEN 256 /* max hostname size */ #define SPECNAMELEN 15 /* max length of devicename */ /* More types and definitions used throughout the kernel. */ #ifdef _KERNEL #include #include #include #include #define FALSE 0 #define TRUE 1 #endif #ifndef _KERNEL /* Signals. */ #include #endif /* Machine type dependent parameters. */ #include #ifndef _KERNEL #include #endif #ifndef _NO_NAMESPACE_POLLUTION #ifndef DEV_BSHIFT #define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ #endif #define DEV_BSIZE (1<>PAGE_SHIFT) #endif /* * btodb() is messy and perhaps slow because `bytes' may be an off_t. We * want to shift an unsigned type to avoid sign extension and we don't * want to widen `bytes' unnecessarily. Assume that the result fits in * a daddr_t. */ #ifndef btodb #define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ (sizeof (bytes) > sizeof(long) \ ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \ : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT)) #endif #ifndef dbtob #define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ ((off_t)(db) << DEV_BSHIFT) #endif #endif /* _NO_NAMESPACE_POLLUTION */ #define PRIMASK 0x0ff #define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */ #define PDROP 0x200 /* OR'd with pri to stop re-entry of interlock mutex */ #define NZERO 0 /* default "nice" */ #define NBBY 8 /* number of bits in a byte */ #define NBPW sizeof(int) /* number of bytes per word (integer) */ #define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */ #define NODEV (dev_t)(-1) /* non-existent device */ #ifdef _KERNEL #define NOUDEV (udev_t)(-1) /* non-existent device */ #define NOMAJ 256 /* non-existent device */ #endif #define CBLOCK 128 /* Clist block size, must be a power of 2. */ #define CBQSIZE (CBLOCK/NBBY) /* Quote bytes/cblock - can do better. */ /* Data chars/clist. */ #define CBSIZE (CBLOCK - sizeof(struct cblock *) - CBQSIZE) #define CROUND (CBLOCK - 1) /* Clist rounding. */ /* * File system parameters and macros. * * MAXBSIZE - Filesystems are made out of blocks of at most MAXBSIZE bytes * per block. MAXBSIZE may be made larger without effecting * any existing filesystems as long as it does not exceed MAXPHYS, * and may be made smaller at the risk of not being able to use * filesystems which require a block size exceeding MAXBSIZE. * * BKVASIZE - Nominal buffer space per buffer, in bytes. BKVASIZE is the * minimum KVM memory reservation the kernel is willing to make. * Filesystems can of course request smaller chunks. Actual * backing memory uses a chunk size of a page (PAGE_SIZE). 
* * If you make BKVASIZE too small you risk seriously fragmenting * the buffer KVM map which may slow things down a bit. If you * make it too big the kernel will not be able to optimally use * the KVM memory reserved for the buffer cache and will wind * up with too-few buffers. * * The default is 16384, roughly 2x the block size used by a * normal UFS filesystem. */ #define MAXBSIZE 65536 /* must be power of 2 */ #define BKVASIZE 16384 /* must be power of 2 */ #define BKVAMASK (BKVASIZE-1) /* * MAXPATHLEN defines the longest permissible path length after expanding * symbolic links. It is used to allocate a temporary buffer from the buffer * pool in which to do the name expansion, hence should be a power of two, * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the * maximum number of symbolic links that may be expanded in a path name. * It should be set high enough to allow all legitimate uses, but halt * infinite loops reasonably quickly. */ #define MAXPATHLEN PATH_MAX #define MAXSYMLINKS 32 /* Bit map related macros. */ #define setbit(a,i) ((a)[(i)/NBBY] |= 1<<((i)%NBBY)) #define clrbit(a,i) ((a)[(i)/NBBY] &= ~(1<<((i)%NBBY))) #define isset(a,i) ((a)[(i)/NBBY] & (1<<((i)%NBBY))) #define isclr(a,i) (((a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0) /* Macros for counting and rounding. */ #ifndef howmany #define howmany(x, y) (((x)+((y)-1))/(y)) #endif #define rounddown(x, y) (((x)/(y))*(y)) #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */ #define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is powers of two */ #define powerof2(x) ((((x)-1)&(x))==0) /* Macros for min/max. */ -#ifndef _KERNEL #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) -#endif #ifdef _KERNEL /* * Basic byte order function prototypes for non-inline functions. */ #ifndef _BYTEORDER_PROTOTYPED #define _BYTEORDER_PROTOTYPED __BEGIN_DECLS __uint32_t htonl(__uint32_t); __uint16_t htons(__uint16_t); __uint32_t ntohl(__uint32_t); __uint16_t ntohs(__uint16_t); __END_DECLS #endif #ifndef lint #ifndef _BYTEORDER_FUNC_DEFINED #define _BYTEORDER_FUNC_DEFINED #define htonl(x) __htonl(x) #define htons(x) __htons(x) #define ntohl(x) __ntohl(x) #define ntohs(x) __ntohs(x) #endif /* !_BYTEORDER_FUNC_DEFINED */ #endif /* lint */ #endif /* _KERNEL */ /* * Constants for setting the parameters of the kernel memory allocator. * * 2 ** MINBUCKET is the smallest unit of memory that will be * allocated. It must be at least large enough to hold a pointer. * * Units of memory less or equal to MAXALLOCSAVE will permanently * allocate physical memory; requests for these size pieces of * memory are quite fast. Allocations greater than MAXALLOCSAVE must * always allocate and free physical memory; requests for these * size allocations should be done infrequently as they will be slow. * * Constraints: PAGE_SIZE <= MAXALLOCSAVE <= 2 ** (MINBUCKET + 14), and * MAXALLOCSIZE must be a power of two. */ #if defined(__alpha__) || defined(__ia64__) || defined(__sparc64__) #define MINBUCKET 5 /* 5 => min allocation of 32 bytes */ #else #define MINBUCKET 4 /* 4 => min allocation of 16 bytes */ #endif #define MAXALLOCSAVE (2 * PAGE_SIZE) /* * Scale factor for scaled integers used to count %cpu time and load avgs. * * The number of CPU `tick's that map to a unique `%age' can be expressed * by the formula (1 / (2 ^ (FSHIFT - 11))). The maximum load average that * can be calculated (assuming 32 bits) can be closely approximated using * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15). 
* For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age', * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024. */ #define FSHIFT 11 /* bits to right of fixed binary point */ #define FSCALE (1<<FSHIFT) #define dbtoc(db) /* calculates devblks to pages */ \ ((db + (ctodb(1) - 1)) >> (PAGE_SHIFT - DEV_BSHIFT)) #define ctodb(db) /* calculates pages to devblks */ \ ((db) << (PAGE_SHIFT - DEV_BSHIFT)) #endif /* _SYS_PARAM_H_ */
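Two quick checks of the arithmetic above, under assumed parameters (4 KB pages and 512-byte blocks; every *_EX name is invented for the sketch): FSCALE-style fixed point stores 1.25 as 1.25 * 2048 = 2560, and dbtoc() rounds a block count up to whole pages.

#include <stdio.h>

#define FSHIFT_EX 11			/* fraction bits, as in FSHIFT */
#define FSCALE_EX (1 << FSHIFT_EX)	/* 2048 */
#define PAGE_SHIFT_EX 12		/* assumed 4 KB pages */
#define DEV_BSHIFT_EX 9			/* assumed 512-byte blocks */
#define CTODB_EX(p)  ((p) << (PAGE_SHIFT_EX - DEV_BSHIFT_EX))
#define DBTOC_EX(db) (((db) + (CTODB_EX(1) - 1)) >> (PAGE_SHIFT_EX - DEV_BSHIFT_EX))

int
main(void)
{
	unsigned load = (unsigned)(1.25 * FSCALE_EX);	/* 2560 */

	/* prints "load 1.25": integer part plus two fraction digits */
	printf("load %u.%02u\n", load / FSCALE_EX,
	    (load % FSCALE_EX) * 100 / FSCALE_EX);
	/* prints "8 blocks/page, dbtoc(9)=2": 9 blocks need 2 pages */
	printf("%d blocks/page, dbtoc(9)=%d\n", CTODB_EX(1), DBTOC_EX(9));
	return (0);
}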