Index: projects/powernv/arm/arm/busdma_machdep-v6.c =================================================================== --- projects/powernv/arm/arm/busdma_machdep-v6.c (revision 291050) +++ projects/powernv/arm/arm/busdma_machdep-v6.c (revision 291051) @@ -1,1732 +1,1741 @@ /*- * Copyright (c) 2012-2015 Ian Lepore * Copyright (c) 2010 Mark Tinguely * Copyright (c) 2004 Olivier Houchard * Copyright (c) 2002 Peter Grehan * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb */ #include __FBSDID("$FreeBSD$"); #define _ARM32_BUS_DMA_PRIVATE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __ARM_ARCH < 6 #define BUSDMA_DCACHE_ALIGN arm_dcache_align #define BUSDMA_DCACHE_MASK arm_dcache_align_mask #else #define BUSDMA_DCACHE_ALIGN cpuinfo.dcache_line_size #define BUSDMA_DCACHE_MASK cpuinfo.dcache_line_mask #endif #define MAX_BPAGES 64 #define MAX_DMA_SEGMENTS 4096 #define BUS_DMA_EXCL_BOUNCE BUS_DMA_BUS2 #define BUS_DMA_ALIGN_BOUNCE BUS_DMA_BUS3 #define BUS_DMA_COULD_BOUNCE (BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE) #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 struct bounce_zone; struct bus_dma_tag { bus_dma_tag_t parent; bus_size_t alignment; bus_addr_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; int ref_count; int map_count; bus_dma_lock_t *lockfunc; void *lockfuncarg; struct bounce_zone *bounce_zone; /* * DMA range for this tag. If the page doesn't fall within * one of these ranges, an error is returned. The caller * may then decide what to do with the transfer. If the * range pointer is NULL, it is ignored. 
*/ struct arm32_dma_range *ranges; int _nranges; }; struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ vm_page_t datapage; /* physical page of client data */ vm_offset_t dataoffs; /* page offset of client data */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; struct sync_list { vm_offset_t vaddr; /* kva of client data */ + bus_addr_t paddr; /* physical address */ vm_page_t pages; /* starting page of client data */ - vm_offset_t dataoffs; /* page offset of client data */ bus_size_t datacount; /* client data count */ }; int busdma_swi_pending; struct bounce_zone { STAILQ_ENTRY(bounce_zone) links; STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; int total_bpages; int free_bpages; int reserved_bpages; int active_bpages; int total_bounced; int total_deferred; int map_count; bus_size_t alignment; bus_addr_t lowaddr; char zoneid[8]; char lowaddrid[20]; struct sysctl_ctx_list sysctl_tree; struct sysctl_oid *sysctl_tree_top; }; static struct mtx bounce_lock; static int total_bpages; static int busdma_zonecount; static uint32_t tags_total; static uint32_t maps_total; static uint32_t maps_dmamem; static uint32_t maps_coherent; static counter_u64_t maploads_total; static counter_u64_t maploads_bounced; static counter_u64_t maploads_coherent; static counter_u64_t maploads_dmamem; static counter_u64_t maploads_mbuf; static counter_u64_t maploads_physmem; static STAILQ_HEAD(, bounce_zone) bounce_zone_list; SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); SYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0, "Number of active tags"); SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0, "Number of active maps"); SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0, "Number of active maps for bus_dmamem_alloc buffers"); SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0, "Number of active maps with BUS_DMA_COHERENT flag set"); SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD, &maploads_total, "Number of load operations performed"); SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD, &maploads_bounced, "Number of load operations that used bounce buffers"); SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD, &maploads_coherent, "Number of load operations on BUS_DMA_COHERENT memory"); SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD, &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers"); SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD, &maploads_mbuf, "Number of load operations for mbufs"); SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD, &maploads_physmem, "Number of load operations on physical buffers"); SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, "Total bounce pages"); struct bus_dmamap { struct bp_list bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; struct memdesc mem; bus_dmamap_callback_t *callback; void *callback_arg; int flags; #define DMAMAP_COHERENT (1 << 0) #define DMAMAP_DMAMEM_ALLOC (1 << 1) #define DMAMAP_MBUF (1 << 2) STAILQ_ENTRY(bus_dmamap) links; bus_dma_segment_t *segments; int sync_count; struct sync_list slist[]; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; static void
init_bounce_pages(void *dummy); static int alloc_bounce_zone(bus_dma_tag_t dmat); static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map, void *buf, bus_size_t buflen, int flags); static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags); static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags); static void dma_preread_safe(vm_offset_t va, vm_paddr_t pa, vm_size_t size); static void dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op); static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages"); static void busdma_init(void *dummy) { int uma_flags; maploads_total = counter_u64_alloc(M_WAITOK); maploads_bounced = counter_u64_alloc(M_WAITOK); maploads_coherent = counter_u64_alloc(M_WAITOK); maploads_dmamem = counter_u64_alloc(M_WAITOK); maploads_mbuf = counter_u64_alloc(M_WAITOK); maploads_physmem = counter_u64_alloc(M_WAITOK); uma_flags = 0; /* Create a cache of buffers in standard (cacheable) memory. */ standard_allocator = busdma_bufalloc_create("buffer", BUSDMA_DCACHE_ALIGN,/* minimum_alignment */ NULL, /* uma_alloc func */ NULL, /* uma_free func */ uma_flags); /* uma_zcreate_flags */ #ifdef INVARIANTS /* * Force UMA zone to allocate service structures like * slabs using own allocator. uma_debug code performs * atomic ops on uma_slab_t fields and safety of this * operation is not guaranteed for write-back caches */ uma_flags = UMA_ZONE_OFFPAGE; #endif /* * Create a cache of buffers in uncacheable memory, to implement the * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. */ coherent_allocator = busdma_bufalloc_create("coherent", BUSDMA_DCACHE_ALIGN,/* minimum_alignment */ busdma_bufalloc_alloc_uncacheable, busdma_bufalloc_free_uncacheable, uma_flags); /* uma_zcreate_flags */ } /* * This init historically used SI_SUB_VM, but now the init code requires * malloc(9) using M_BUSDMA memory and the pcpu zones for counter(9), which get * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by * using SI_SUB_KMEM+1. */ SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL); /* * This routine checks the exclusion zone constraints from a tag against the * physical RAM available on the machine. If a tag specifies an exclusion zone * but there's no RAM in that zone, then we avoid allocating resources to bounce * a request, and we can use any memory allocator (as opposed to needing * kmem_alloc_contig() just because it can allocate pages in an address range). * * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the * same value on 32-bit architectures) as their lowaddr constraint, and we can't * possibly have RAM at an address higher than the highest address we can * express, so we take a fast out. 
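 *
 * A worked example (an editorial sketch, not part of the original source;
 * the phys_avail contents and addresses are made-up assumptions): with RAM
 * spanning 0x20000000-0x3fffffff, a tag whose lowaddr is 0x2fffffff has
 * real memory inside its exclusion zone and must be able to bounce, while
 * any tag using BUS_SPACE_MAXADDR takes the fast out:
 *
 *	phys_avail[] = { 0x20000000, 0x40000000, 0, 0 };
 *	exclusion_bounce_check(0x2fffffff, 0xffffffff);	returns 1
 *	exclusion_bounce_check(BUS_SPACE_MAXADDR, 0);	returns 0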
*/ static int exclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr) { int i; if (lowaddr >= BUS_SPACE_MAXADDR) return (0); for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) || (lowaddr < phys_avail[i] && highaddr >= phys_avail[i])) return (1); } return (0); } /* * Return true if the tag has an exclusion zone that could lead to bouncing. */ static __inline int exclusion_bounce(bus_dma_tag_t dmat) { return (dmat->flags & BUS_DMA_EXCL_BOUNCE); } /* * Return true if the given address does not fall on the alignment boundary. */ static __inline int alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr) { return (addr & (dmat->alignment - 1)); } /* * Return true if the DMA should bounce because the start or end does not fall * on a cacheline boundary (which would require a partial cacheline flush). * COHERENT memory doesn't trigger cacheline flushes. Memory allocated by * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a * strict rule that such memory cannot be accessed by the CPU while DMA is in * progress (or by multiple DMA engines at once), so that it's always safe to do * full cacheline flushes even if that affects memory outside the range of a * given DMA operation that doesn't involve the full allocated buffer. If we're * mapping an mbuf, that follows the same rules as a buffer we allocated. */ static __inline int cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size) { if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF)) return (0); return ((addr | size) & arm_dcache_align_mask); } /* * Return true if we might need to bounce the DMA described by addr and size. * * This is used to quick-check whether we need to do the more expensive work of * checking the DMA page-by-page looking for alignment and exclusion bounces. * * Note that the addr argument might be either virtual or physical. It doesn't * matter because we only look at the low-order bits, which are the same in both * address spaces. */ static __inline int might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr, bus_size_t size) { return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) || alignment_bounce(dmat, addr) || cacheline_bounce(map, addr, size)); } /* * Return true if we must bounce the DMA described by paddr and size. * * Bouncing can be triggered by DMA that doesn't begin and end on cacheline * boundaries, or doesn't begin on an alignment boundary, or falls within the * exclusion zone of any tag in the ancestry chain. * * For exclusions, walk the chain of tags comparing paddr to the exclusion zone * within each tag. If the tag has a filter function, use it to decide whether * the DMA needs to bounce, otherwise any DMA within the zone bounces. */ static int must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, bus_size_t size) { if (cacheline_bounce(map, paddr, size)) return (1); /* * The tag already contains ancestors' alignment restrictions so this * check doesn't need to be inside the loop. */ if (alignment_bounce(dmat, paddr)) return (1); /* * Even though each tag has an exclusion zone that is a superset of its * own and all its ancestors' exclusions, the exclusion zone of each tag * up the chain must be checked within the loop, because the busdma * rules say the filter function is called only when the address lies * within the low-highaddr range of the tag that filterfunc belongs to. 
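 *
 * (Editorial worked example, assuming a 32-byte cache line, mask 0x1f:
 * a non-COHERENT transfer at paddr 0x1000 of size 0x40 starts and ends on
 * line boundaries, so (0x1000 | 0x40) & 0x1f == 0 and cacheline_bounce()
 * is false; a transfer at 0x1010 of size 0x30 gives
 * (0x1010 | 0x30) & 0x1f == 0x10 and must bounce. Likewise, a tag with
 * alignment 8 forces a bounce for paddr 0x1004, since 0x1004 & 7 != 0.)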
*/ while (dmat != NULL && exclusion_bounce(dmat)) { if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) && (dmat->filter == NULL || dmat->filter(dmat->filterarg, paddr) != 0)) return (1); dmat = dmat->parent; } return (0); } static __inline struct arm32_dma_range * _bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, bus_addr_t curaddr) { struct arm32_dma_range *dr; int i; for (i = 0, dr = ranges; i < nranges; i++, dr++) { if (curaddr >= dr->dr_sysbase && round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) return (dr); } return (NULL); } /* * Convenience function for manipulating driver locks from busdma (during * busdma_swi, for example). Drivers that don't provide their own locks * should specify &Giant to dmat->lockfuncarg. Drivers that use their own * non-mutex locking scheme don't have to use this at all. */ void busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) { struct mtx *dmtx; dmtx = (struct mtx *)arg; switch (op) { case BUS_DMA_LOCK: mtx_lock(dmtx); break; case BUS_DMA_UNLOCK: mtx_unlock(dmtx); break; default: panic("Unknown operation 0x%x for busdma_lock_mutex!", op); } } /* * dflt_lock should never get called. It gets put into the dma tag when * lockfunc == NULL, which is only valid if the maps that are associated * with the tag are meant to never be deferred. * XXX Should have a way to identify which driver is responsible here. */ static void dflt_lock(void *arg, bus_dma_lock_op_t op) { panic("driver error: busdma dflt_lock called"); } /* * Allocate a device specific dma_tag. */ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error = 0; /* Basic sanity checking.
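 * (Before the checks, an editorial aside that is not part of the original
 * file: a typical caller might look like the hedged sketch below, where
 * "dev", the softc "sc", and the MCLBYTES sizes are illustrative
 * assumptions only.)
 */
#if 0	/* illustrative sketch, never compiled */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent: inherit platform limits */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1, MCLBYTES,	/* maxsize, nsegments, maxsegsz */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &sc->sc_mtx,		/* lockfuncarg */
	    &sc->sc_dtag);
#endif
/*
 * The sanity checks proper: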
*/ KASSERT(boundary == 0 || powerof2(boundary), ("dma tag boundary %lu, must be a power of 2", boundary)); KASSERT(boundary == 0 || boundary >= maxsegsz, ("dma tag boundary %lu is < maxsegsz %lu\n", boundary, maxsegsz)); KASSERT(alignment != 0 && powerof2(alignment), ("dma tag alignment %lu, must be non-zero power of 2", alignment)); KASSERT(maxsegsz != 0, ("dma tag maxsegsz must not be zero")); /* Return a NULL tag on failure */ *dmat = NULL; newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_ZERO | M_NOWAIT); if (newtag == NULL) { CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, 0, error); return (ENOMEM); } newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); newtag->filter = filter; newtag->filterarg = filterarg; newtag->maxsize = maxsize; newtag->nsegments = nsegments; newtag->maxsegsz = maxsegsz; newtag->flags = flags; newtag->ref_count = 1; /* Count ourself */ newtag->map_count = 0; newtag->ranges = bus_dma_get_range(); newtag->_nranges = bus_dma_get_range_nb(); if (lockfunc != NULL) { newtag->lockfunc = lockfunc; newtag->lockfuncarg = lockfuncarg; } else { newtag->lockfunc = dflt_lock; newtag->lockfuncarg = NULL; } /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); newtag->alignment = MAX(parent->alignment, newtag->alignment); newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE; if (newtag->boundary == 0) newtag->boundary = parent->boundary; else if (parent->boundary != 0) newtag->boundary = MIN(parent->boundary, newtag->boundary); if (newtag->filter == NULL) { /* * Short circuit to looking at our parent directly * since we have encapsulated all of its information */ newtag->filter = parent->filter; newtag->filterarg = parent->filterarg; newtag->parent = parent->parent; } if (newtag->parent != NULL) atomic_add_int(&parent->ref_count, 1); } if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr)) newtag->flags |= BUS_DMA_EXCL_BOUNCE; if (alignment_bounce(newtag, 1)) newtag->flags |= BUS_DMA_ALIGN_BOUNCE; /* * Any request can auto-bounce due to cacheline alignment, in addition * to any alignment or boundary specifications in the tag, so if the * ALLOCNOW flag is set, there's always work to do. */ if ((flags & BUS_DMA_ALLOCNOW) != 0) { struct bounce_zone *bz; /* * Round size up to a full page, and add one more page because * there can always be one more boundary crossing than the * number of pages in a transfer. */ maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE; if ((error = alloc_bounce_zone(newtag)) != 0) { free(newtag, M_BUSDMA); return (error); } bz = newtag->bounce_zone; if (ptoa(bz->total_bpages) < maxsize) { int pages; pages = atop(maxsize) - bz->total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(newtag, pages) < pages) error = ENOMEM; } /* Performed initial allocation */ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; } else newtag->bounce_zone = NULL; if (error != 0) { free(newtag, M_BUSDMA); } else { atomic_add_32(&tags_total, 1); *dmat = newtag; } CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, (newtag != NULL ? 
newtag->flags : 0), error); return (error); } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { bus_dma_tag_t dmat_copy; int error; error = 0; dmat_copy = dmat; if (dmat != NULL) { if (dmat->map_count != 0) { error = EBUSY; goto out; } while (dmat != NULL) { bus_dma_tag_t parent; parent = dmat->parent; atomic_subtract_int(&dmat->ref_count, 1); if (dmat->ref_count == 0) { atomic_subtract_32(&tags_total, 1); free(dmat, M_BUSDMA); /* * Last reference count, so * release our reference * count on our parent. */ dmat = parent; } else dmat = NULL; } } out: CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); return (error); } static int allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp) { struct bounce_zone *bz; int maxpages; int error; if (dmat->bounce_zone == NULL) if ((error = alloc_bounce_zone(dmat)) != 0) return (error); bz = dmat->bounce_zone; /* Initialize the new map */ STAILQ_INIT(&(mapp->bpages)); /* * Attempt to add pages to our pool on a per-instance basis up to a sane * limit. Even if the tag isn't flagged as COULD_BOUNCE due to * alignment and boundary constraints, it could still auto-bounce due to * cacheline alignment, which requires at most two bounce pages. */ if (dmat->flags & BUS_DMA_COULD_BOUNCE) maxpages = MAX_BPAGES; else maxpages = 2 * bz->map_count; if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || (bz->map_count > 0 && bz->total_bpages < maxpages)) { int pages; pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1; pages = MIN(maxpages - bz->total_bpages, pages); pages = MAX(pages, 2); if (alloc_bounce_pages(dmat, pages) < pages) return (ENOMEM); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; } bz->map_count++; return (0); } static bus_dmamap_t allocate_map(bus_dma_tag_t dmat, int mflags) { int mapsize, segsize; bus_dmamap_t map; /* * Allocate the map. The map structure ends with an embedded * variable-sized array of sync_list structures. Following that * we allocate enough extra space to hold the array of bus_dma_segments. */ KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS, ("cannot allocate %u dma segments (max is %u)", dmat->nsegments, MAX_DMA_SEGMENTS)); segsize = sizeof(struct bus_dma_segment) * dmat->nsegments; mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments; map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO); if (map == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (NULL); } map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize); return (map); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { bus_dmamap_t map; int error = 0; *mapp = map = allocate_map(dmat, M_NOWAIT); if (map == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } /* * Bouncing might be required if the driver asks for an exclusion * region, a data alignment that is stricter than 1, or DMA that begins * or ends with a partial cacheline. Whether bouncing will actually * happen can't be known until mapping time, but we need to pre-allocate * resources now because we might not be allowed to at mapping time. 
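 *
 * (Editorial sketch of the full life cycle from a driver's point of view,
 * with assumed names:
 *
 *	bus_dmamap_create(sc->sc_dtag, 0, &sc->sc_map);
 *	bus_dmamap_load(sc->sc_dtag, sc->sc_map, buf, len,
 *	    foo_dma_cb, sc, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_PREWRITE);
 *	...start the DMA and wait for the completion interrupt...
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dtag, sc->sc_map);
 *	bus_dmamap_destroy(sc->sc_dtag, sc->sc_map);
 *
 * The pre-allocation done here is what lets the load step succeed even
 * under BUS_DMA_NOWAIT.)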
*/ error = allocate_bz_and_pages(dmat, map); if (error != 0) { free(map, M_BUSDMA); *mapp = NULL; return (error); } if (map->flags & DMAMAP_COHERENT) atomic_add_32(&maps_coherent, 1); atomic_add_32(&maps_total, 1); dmat->map_count++; return (0); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); return (EBUSY); } if (dmat->bounce_zone) dmat->bounce_zone->map_count--; if (map->flags & DMAMAP_COHERENT) atomic_subtract_32(&maps_coherent, 1); atomic_subtract_32(&maps_total, 1); free(map, M_BUSDMA); dmat->map_count--; CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); return (0); } /* * Allocate a piece of memory that can be efficiently mapped into bus device * space based on the constraints listed in the dma tag. Returns a pointer to * the allocated memory, and a pointer to an associated bus_dmamap. */ int bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, bus_dmamap_t *mapp) { busdma_bufalloc_t ba; struct busdma_bufzone *bufzone; bus_dmamap_t map; vm_memattr_t memattr; int mflags; if (flags & BUS_DMA_NOWAIT) mflags = M_NOWAIT; else mflags = M_WAITOK; if (flags & BUS_DMA_ZERO) mflags |= M_ZERO; *mapp = map = allocate_map(dmat, mflags); if (map == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, ENOMEM); return (ENOMEM); } map->flags = DMAMAP_DMAMEM_ALLOC; /* Choose a busdma buffer allocator based on memory type flags. */ if (flags & BUS_DMA_COHERENT) { memattr = VM_MEMATTR_UNCACHEABLE; ba = coherent_allocator; map->flags |= DMAMAP_COHERENT; } else { memattr = VM_MEMATTR_DEFAULT; ba = standard_allocator; } /* * Try to find a bufzone in the allocator that holds a cache of buffers * of the right size for this request. If the buffer is too big to be * held in the allocator cache, this returns NULL. */ bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); /* * Allocate the buffer from the uma(9) allocator if... * - It's small enough to be in the allocator (bufzone not NULL). * - The alignment constraint isn't larger than the allocation size * (the allocator aligns buffers to their size boundaries). * - There's no need to handle lowaddr/highaddr exclusion zones. * else allocate non-contiguous pages if... * - The page count that could get allocated doesn't exceed nsegments. * - The alignment constraint isn't larger than a page boundary. * - There are no boundary-crossing constraints. * else allocate a block of contiguous pages because one or more of the * constraints is something that only the contig allocator can fulfill. 
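 *
 * (Editorial example with assumed names: a driver allocating a descriptor
 * ring typically calls
 *
 *	error = bus_dmamem_alloc(sc->sc_ring_dtag, (void **)&sc->sc_ring,
 *	    BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_NOWAIT,
 *	    &sc->sc_ring_map);
 *
 * and lands in one of the three branches below depending on the tag's
 * size, alignment, boundary, and exclusion constraints.)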
*/ if (bufzone != NULL && dmat->alignment <= bufzone->size && !exclusion_bounce(dmat)) { *vaddr = uma_zalloc(bufzone->umazone, mflags); } else if (dmat->nsegments >= btoc(dmat->maxsize) && dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) { *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize, mflags, 0, dmat->lowaddr, memattr); } else { *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize, mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, memattr); } if (*vaddr == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, ENOMEM); free(map, M_BUSDMA); *mapp = NULL; return (ENOMEM); } if (map->flags & DMAMAP_COHERENT) atomic_add_32(&maps_coherent, 1); atomic_add_32(&maps_dmamem, 1); atomic_add_32(&maps_total, 1); dmat->map_count++; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, 0); return (0); } /* * Free a piece of memory that was allocated via bus_dmamem_alloc, along with * its associated map. */ void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { struct busdma_bufzone *bufzone; busdma_bufalloc_t ba; if (map->flags & DMAMAP_COHERENT) ba = coherent_allocator; else ba = standard_allocator; bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); if (bufzone != NULL && dmat->alignment <= bufzone->size && !exclusion_bounce(dmat)) uma_zfree(bufzone->umazone, vaddr); else kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize); dmat->map_count--; if (map->flags & DMAMAP_COHERENT) atomic_subtract_32(&maps_coherent, 1); atomic_subtract_32(&maps_total, 1); atomic_subtract_32(&maps_dmamem, 1); free(map, M_BUSDMA); CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); } static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags) { bus_addr_t curaddr; bus_size_t sgsize; if (map->pagesneeded == 0) { CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" " map= %p, pagesneeded= %d", dmat->lowaddr, dmat->boundary, dmat->alignment, map, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ curaddr = buf; while (buflen != 0) { sgsize = MIN(buflen, dmat->maxsegsz); if (must_bounce(dmat, map, curaddr, sgsize) != 0) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); map->pagesneeded++; } curaddr += sgsize; buflen -= sgsize; } CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); } } static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map, void *buf, bus_size_t buflen, int flags) { vm_offset_t vaddr; vm_offset_t vendaddr; bus_addr_t paddr; if (map->pagesneeded == 0) { CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d" " map= %p, pagesneeded= %d", dmat->lowaddr, dmat->boundary, dmat->alignment, map, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ vaddr = (vm_offset_t)buf; vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { if (__predict_true(pmap == kernel_pmap)) paddr = pmap_kextract(vaddr); else paddr = pmap_extract(pmap, vaddr); if (must_bounce(dmat, map, paddr, min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)))) != 0) { map->pagesneeded++; } vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK)); } CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); } } static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) { /* Reserve Necessary Bounce Pages */ mtx_lock(&bounce_lock); if (flags & 
BUS_DMA_NOWAIT) { if (reserve_bounce_pages(dmat, map, 0) != 0) { map->pagesneeded = 0; mtx_unlock(&bounce_lock); return (ENOMEM); } } else { if (reserve_bounce_pages(dmat, map, 1) != 0) { /* Queue us for resources */ STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); mtx_unlock(&bounce_lock); return (EINPROGRESS); } } mtx_unlock(&bounce_lock); return (0); } /* * Add a single contiguous physical range to the segment list. */ static int _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) { bus_addr_t baddr, bmask; int seg; /* * Make sure we don't cross any boundaries. */ bmask = ~(dmat->boundary - 1); if (dmat->boundary > 0) { baddr = (curaddr + dmat->boundary) & bmask; if (sgsize > (baddr - curaddr)) sgsize = (baddr - curaddr); } if (dmat->ranges) { struct arm32_dma_range *dr; dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, curaddr); if (dr == NULL) { _bus_dmamap_unload(dmat, map); return (0); } /* * In a valid DMA range. Translate the physical * memory address to an address in the DMA window. */ curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; } /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ seg = *segp; if (seg == -1) { seg = 0; segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } else { if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && (dmat->boundary == 0 || (segs[seg].ds_addr & bmask) == (curaddr & bmask))) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->nsegments) return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } } *segp = seg; return (sgsize); } /* * Utility function to load a physical buffer. segp contains * the starting segment on entrance, and the ending segment on exit. */ int _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { bus_addr_t curaddr; bus_addr_t sl_end = 0; bus_size_t sgsize; struct sync_list *sl; int error; if (segs == NULL) segs = map->segments; counter_u64_add(maploads_total, 1); counter_u64_add(maploads_physmem, 1); if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); if (map->pagesneeded != 0) { counter_u64_add(maploads_bounced, 1); error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } sl = map->slist + map->sync_count - 1; while (buflen > 0) { curaddr = buf; sgsize = MIN(buflen, dmat->maxsegsz); if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, sgsize)) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); curaddr = add_bounce_page(dmat, map, 0, curaddr, sgsize); } else { if (map->sync_count > 0) - sl_end = VM_PAGE_TO_PHYS(sl->pages) + - sl->dataoffs + sl->datacount; + sl_end = sl->paddr + sl->datacount; if (map->sync_count == 0 || curaddr != sl_end) { if (++map->sync_count > dmat->nsegments) break; sl++; sl->vaddr = 0; + sl->paddr = curaddr; sl->datacount = sgsize; sl->pages = PHYS_TO_VM_PAGE(curaddr); - sl->dataoffs = curaddr & PAGE_MASK; + KASSERT(sl->pages != NULL, + ("%s: page at PA:0x%08lx is not in " + "vm_page_array", __func__, curaddr)); } else sl->datacount += sgsize; } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; buf += sgsize; buflen -= sgsize; } /* * Did we fit? */ if (buflen != 0) { _bus_dmamap_unload(dmat, map); return (EFBIG); /* XXX better return value here?
*/ } return (0); } int _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, segs, segp)); } /* * Utility function to load a linear buffer. segp contains * the starting segment on entrance, and the ending segment on exit. */ int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { bus_size_t sgsize; bus_addr_t curaddr; bus_addr_t sl_pend = 0; vm_offset_t kvaddr, vaddr, sl_vend = 0; struct sync_list *sl; int error; counter_u64_add(maploads_total, 1); if (map->flags & DMAMAP_COHERENT) counter_u64_add(maploads_coherent, 1); if (map->flags & DMAMAP_DMAMEM_ALLOC) counter_u64_add(maploads_dmamem, 1); if (segs == NULL) segs = map->segments; if (flags & BUS_DMA_LOAD_MBUF) { counter_u64_add(maploads_mbuf, 1); map->flags |= DMAMAP_MBUF; } if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { _bus_dmamap_count_pages(dmat, pmap, map, buf, buflen, flags); if (map->pagesneeded != 0) { counter_u64_add(maploads_bounced, 1); error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } sl = map->slist + map->sync_count - 1; vaddr = (vm_offset_t)buf; while (buflen > 0) { /* * Get the physical address for this segment. */ if (__predict_true(pmap == kernel_pmap)) { curaddr = pmap_kextract(vaddr); kvaddr = vaddr; } else { curaddr = pmap_extract(pmap, vaddr); kvaddr = 0; } /* * Compute the segment size, and adjust counts. */ sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); if (sgsize > dmat->maxsegsz) sgsize = dmat->maxsegsz; if (buflen < sgsize) sgsize = buflen; if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, sgsize)) { curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, sgsize); } else { if (map->sync_count > 0) { - sl_pend = VM_PAGE_TO_PHYS(sl->pages) + - sl->dataoffs + sl->datacount; + sl_pend = sl->paddr + sl->datacount; sl_vend = sl->vaddr + sl->datacount; } if (map->sync_count == 0 || (kvaddr != 0 && kvaddr != sl_vend) || (curaddr != sl_pend)) { if (++map->sync_count > dmat->nsegments) goto cleanup; sl++; sl->vaddr = kvaddr; + sl->paddr = curaddr; + if (kvaddr != 0) { + sl->pages = NULL; + } else { + sl->pages = PHYS_TO_VM_PAGE(curaddr); + KASSERT(sl->pages != NULL, + ("%s: page at PA:0x%08lx is not " + "in vm_page_array", __func__, + curaddr)); + } sl->datacount = sgsize; - sl->pages = PHYS_TO_VM_PAGE(curaddr); - sl->dataoffs = curaddr & PAGE_MASK; } else sl->datacount += sgsize; } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; vaddr += sgsize; buflen -= sgsize; } cleanup: /* * Did we fit? */ if (buflen != 0) { _bus_dmamap_unload(dmat, map); return (EFBIG); /* XXX better return value here? */ } return (0); } void __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { map->mem = *mem; map->dmat = dmat; map->callback = callback; map->callback_arg = callback_arg; } bus_dma_segment_t * _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error) { if (segs == NULL) segs = map->segments; return (segs); } /* * Release the mapping held by map. 
*/ void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { struct bounce_page *bpage; struct bounce_zone *bz; if ((bz = dmat->bounce_zone) != NULL) { while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } bz = dmat->bounce_zone; bz->free_bpages += map->pagesreserved; bz->reserved_bpages -= map->pagesreserved; map->pagesreserved = 0; map->pagesneeded = 0; } map->sync_count = 0; map->flags &= ~DMAMAP_MBUF; } static void dma_preread_safe(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { /* * Write back any partial cachelines immediately before and * after the DMA region. We don't need to round the address * down to the nearest cacheline or specify the exact size, * as dcache_wb_poc() will do the rounding for us and works * at cacheline granularity. */ if (va & BUSDMA_DCACHE_MASK) dcache_wb_poc(va, pa, 1); if ((va + size) & BUSDMA_DCACHE_MASK) dcache_wb_poc(va + size, pa + size, 1); dcache_inv_poc_dma(va, pa, size); } static void dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op) { uint32_t len, offset; vm_page_t m; vm_paddr_t pa; vm_offset_t va, tempva; bus_size_t size; - offset = sl->dataoffs; + offset = sl->paddr & PAGE_MASK; m = sl->pages; size = sl->datacount; - pa = VM_PAGE_TO_PHYS(m) | offset; + pa = sl->paddr; for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) { tempva = 0; if (sl->vaddr == 0) { len = min(PAGE_SIZE - offset, size); tempva = pmap_quick_enter_page(m); va = tempva | offset; + KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset), + ("unexpected vm_page_t phys: 0x%08x != 0x%08x", + VM_PAGE_TO_PHYS(m) | offset, pa)); } else { len = sl->datacount; va = sl->vaddr; } - KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset), - ("unexpected vm_page_t phys: 0x%08x != 0x%08x", - VM_PAGE_TO_PHYS(m) | offset, pa)); switch (op) { case BUS_DMASYNC_PREWRITE: case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD: dcache_wb_poc(va, pa, len); break; case BUS_DMASYNC_PREREAD: /* * An mbuf may start in the middle of a cacheline. There * will be no cpu writes to the beginning of that line * (which contains the mbuf header) while dma is in * progress. Handle that case by doing a writeback of * just the first cacheline before invalidating the * overall buffer. Any mbuf in a chain may have this * misalignment. Buffers which are not mbufs bounce if * they are not aligned to a cacheline. */ dma_preread_safe(va, pa, len); break; case BUS_DMASYNC_POSTREAD: case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE: dcache_inv_poc(va, pa, len); break; default: panic("unsupported combination of sync operations: " "0x%08x\n", op); } if (tempva != 0) pmap_quick_remove_page(tempva); } } void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; struct sync_list *sl, *end; vm_offset_t datavaddr, tempvaddr; if (op == BUS_DMASYNC_POSTWRITE) return; /* * If the buffer was from user space, it is possible that this is not * the same vm map, especially on a POST operation. It's not clear that * dma on userland buffers can work at all right now. To be safe, until * we're able to test direct userland dma, panic on a map mismatch. */ if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " "performing bounce", __func__, dmat, dmat->flags, op); /* * For PREWRITE do a writeback. Clean the caches from the * innermost to the outermost levels. 
*/ if (op & BUS_DMASYNC_PREWRITE) { while (bpage != NULL) { tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)datavaddr, (void *)bpage->vaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); dcache_wb_poc(bpage->vaddr, bpage->busaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } /* * Do an invalidate for PREREAD unless a writeback was already * done above due to PREWRITE also being set. The reason for a * PREREAD invalidate is to prevent dirty lines currently in the * cache from being evicted during the DMA. If a writeback was * done due to PREWRITE also being set there will be no dirty * lines and the POSTREAD invalidate handles the rest. The * invalidate is done from the innermost to outermost level. If * L2 were done first, a dirty cacheline could be automatically * evicted from L1 before we invalidated it, re-dirtying the L2. */ if ((op & BUS_DMASYNC_PREREAD) && !(op & BUS_DMASYNC_PREWRITE)) { bpage = STAILQ_FIRST(&map->bpages); while (bpage != NULL) { dcache_inv_poc_dma(bpage->vaddr, bpage->busaddr, bpage->datacount); bpage = STAILQ_NEXT(bpage, links); } } /* * Re-invalidate the caches on a POSTREAD, even though they were * already invalidated at PREREAD time. Aggressive prefetching * due to accesses to other data near the dma buffer could have * brought buffer data into the caches which is now stale. The * caches are invalidated from the outermost to innermost; the * prefetches could be happening right now, and if L1 were * invalidated first, stale L2 data could be prefetched into L1. */ if (op & BUS_DMASYNC_POSTREAD) { while (bpage != NULL) { dcache_inv_poc(bpage->vaddr, bpage->busaddr, bpage->datacount); tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)bpage->vaddr, (void *)datavaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } } /* * For COHERENT memory no cache maintenance is necessary, but ensure all * writes have reached memory for the PREWRITE case. No action is * needed for a PREREAD without PREWRITE also set, because that would * imply that the cpu had written to the COHERENT buffer and expected * the dma device to see that change, and by definition a PREWRITE sync * is required to make that happen. */ if (map->flags & DMAMAP_COHERENT) { if (op & BUS_DMASYNC_PREWRITE) { dsb(); cpu_l2cache_drain_writebuf(); } return; } /* * Cache maintenance for normal (non-COHERENT non-bounce) buffers. All * the comments about the sequences for flushing cache levels in the * bounce buffer code above apply here as well. In particular, the fact * that the sequence is inner-to-outer for PREREAD invalidation and * outer-to-inner for POSTREAD invalidation is not a mistake. 
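 *
 * (Editorial summary of the discipline this implies for drivers, using
 * the standard pairings:
 *
 *	CPU writes, device reads:  BUS_DMASYNC_PREWRITE before starting
 *	    the DMA, BUS_DMASYNC_POSTWRITE after it completes.
 *	Device writes, CPU reads:  BUS_DMASYNC_PREREAD before starting
 *	    the DMA, BUS_DMASYNC_POSTREAD before the CPU reads the data.)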
*/ if (map->sync_count != 0) { sl = &map->slist[0]; end = &map->slist[map->sync_count]; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " "performing sync", __func__, dmat, dmat->flags, op); for ( ; sl != end; ++sl) dma_dcache_sync(sl, op); } } static void init_bounce_pages(void *dummy __unused) { total_bpages = 0; STAILQ_INIT(&bounce_zone_list); STAILQ_INIT(&bounce_map_waitinglist); STAILQ_INIT(&bounce_map_callbacklist); mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); } SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); static struct sysctl_ctx_list * busdma_sysctl_tree(struct bounce_zone *bz) { return (&bz->sysctl_tree); } static struct sysctl_oid * busdma_sysctl_tree_top(struct bounce_zone *bz) { return (bz->sysctl_tree_top); } static int alloc_bounce_zone(bus_dma_tag_t dmat) { struct bounce_zone *bz; /* Check to see if we already have a suitable zone */ STAILQ_FOREACH(bz, &bounce_zone_list, links) { if ((dmat->alignment <= bz->alignment) && (dmat->lowaddr >= bz->lowaddr)) { dmat->bounce_zone = bz; return (0); } } if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA, M_NOWAIT | M_ZERO)) == NULL) return (ENOMEM); STAILQ_INIT(&bz->bounce_page_list); bz->free_bpages = 0; bz->reserved_bpages = 0; bz->active_bpages = 0; bz->lowaddr = dmat->lowaddr; bz->alignment = MAX(dmat->alignment, PAGE_SIZE); bz->map_count = 0; snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); busdma_zonecount++; snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); dmat->bounce_zone = bz; sysctl_ctx_init(&bz->sysctl_tree); bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, CTLFLAG_RD, 0, ""); if (bz->sysctl_tree_top == NULL) { sysctl_ctx_free(&bz->sysctl_tree); return (0); /* XXX error code? 
*/ } SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, "Total bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, "Free bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, "Reserved bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, "Active bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, "Total bounce requests (pages bounced)"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, "Total bounce requests that were deferred"); SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "alignment", CTLFLAG_RD, &bz->alignment, ""); return (0); } static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) { struct bounce_zone *bz; int count; bz = dmat->bounce_zone; count = 0; while (numpages > 0) { struct bounce_page *bpage; bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA, M_NOWAIT | M_ZERO); if (bpage == NULL) break; bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE, M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); if (bpage->vaddr == 0) { free(bpage, M_BUSDMA); break; } bpage->busaddr = pmap_kextract(bpage->vaddr); mtx_lock(&bounce_lock); STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); total_bpages++; bz->total_bpages++; bz->free_bpages++; mtx_unlock(&bounce_lock); count++; numpages--; } return (count); } static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) { struct bounce_zone *bz; int pages; mtx_assert(&bounce_lock, MA_OWNED); bz = dmat->bounce_zone; pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) return (map->pagesneeded - (map->pagesreserved + pages)); bz->free_bpages -= pages; bz->reserved_bpages += pages; map->pagesreserved += pages; pages = map->pagesneeded - map->pagesreserved; return (pages); } static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_addr_t addr, bus_size_t size) { struct bounce_zone *bz; struct bounce_page *bpage; KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); bz = dmat->bounce_zone; if (map->pagesneeded == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesneeded--; if (map->pagesreserved == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesreserved--; mtx_lock(&bounce_lock); bpage = STAILQ_FIRST(&bz->bounce_page_list); if (bpage == NULL) panic("add_bounce_page: free page list is empty"); STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); bz->reserved_bpages--; bz->active_bpages++; mtx_unlock(&bounce_lock); if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { /* Page offset needs to be preserved. 
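 * For example, with BUS_DMA_KEEP_PG_OFFSET a client address of 0x20001234
 * keeps offset 0x234 within the bounce page, so the device sees the same
 * low-order address bits; hardware such as some USB controllers embeds
 * those bits in its descriptors. (Editorial illustration, not part of the
 * original source.)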
*/ bpage->vaddr |= addr & PAGE_MASK; bpage->busaddr |= addr & PAGE_MASK; } bpage->datavaddr = vaddr; bpage->datapage = PHYS_TO_VM_PAGE(addr); bpage->dataoffs = addr & PAGE_MASK; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); } static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) { struct bus_dmamap *map; struct bounce_zone *bz; bz = dmat->bounce_zone; bpage->datavaddr = 0; bpage->datacount = 0; if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { /* * Reset the bounce page to start at offset 0. Other uses * of this bounce page may need to store a full page of * data and/or assume it starts on a page boundary. */ bpage->vaddr &= ~PAGE_MASK; bpage->busaddr &= ~PAGE_MASK; } mtx_lock(&bounce_lock); STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); bz->free_bpages++; bz->active_bpages--; if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { if (reserve_bounce_pages(map->dmat, map, 1) == 0) { STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links); busdma_swi_pending = 1; bz->total_deferred++; swi_sched(vm_ih, 0); } } mtx_unlock(&bounce_lock); } void busdma_swi(void) { bus_dma_tag_t dmat; struct bus_dmamap *map; mtx_lock(&bounce_lock); while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); mtx_unlock(&bounce_lock); dmat = map->dmat; dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK); bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, map->callback_arg, BUS_DMA_WAITOK); dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK); mtx_lock(&bounce_lock); } mtx_unlock(&bounce_lock); } Index: projects/powernv/boot/common/loader.8 =================================================================== --- projects/powernv/boot/common/loader.8 (revision 291050) +++ projects/powernv/boot/common/loader.8 (revision 291051) @@ -1,1089 +1,1097 @@ .\" Copyright (c) 1999 Daniel C. Sobral .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd July 25, 2015 +.Dd November 18, 2015 .Dt LOADER 8 .Os .Sh NAME .Nm loader .Nd kernel bootstrapping final stage .Sh DESCRIPTION The program called .Nm is the final stage of .Fx Ns 's kernel bootstrapping process. 
On IA32 (i386) architectures, it is a .Pa BTX client. It is linked statically to .Xr libstand 3 and usually located in the directory .Pa /boot . .Pp It provides a scripting language that can be used to automate tasks, do pre-configuration or assist in recovery procedures. This scripting language is roughly divided into two main components. The smaller one is a set of commands designed for direct use by the casual user, called "builtin commands" for historical reasons. The main drive behind these commands is user-friendliness. The bigger component is an .Tn ANS Forth compatible Forth interpreter based on FICL, by .An John Sadler . .Pp During initialization, .Nm will probe for a console and set the .Va console variable, or set it to serial console .Pq Dq Li comconsole if the previous boot stage used that. If multiple consoles are selected, they will be listed separated by spaces. Then, devices are probed, .Va currdev and .Va loaddev are set, and .Va LINES is set to 24. Next, .Tn FICL is initialized, the builtin words are added to its vocabulary, and .Pa /boot/boot.4th is processed if it exists. No disk switching is possible while that file is being read. The inner interpreter .Nm will use with .Tn FICL is then set to .Ic interpret , which is .Tn FICL Ns 's default. After that, .Pa /boot/loader.rc is processed if available, and, failing that, .Pa /boot/boot.conf is read for historical reasons. These files are processed through the .Ic include command, which reads all of them into memory before processing them, making disk changes possible. .Pp At this point, if an .Ic autoboot has not been tried, and if .Va autoboot_delay is not set to .Dq Li NO (not case sensitive), then an .Ic autoboot will be tried. If the system gets past this point, .Va prompt will be set and .Nm will engage interactive mode. Please note that historically, even when .Va autoboot_delay is set to .Dq Li 0 , the user will be able to interrupt the autoboot process by pressing some key on the console while the kernel and modules are being loaded. In some cases such behaviour may be undesirable; to prevent it, set .Va autoboot_delay to .Dq Li -1 , in which case .Nm will engage interactive mode only if .Ic autoboot has failed. .Sh BUILTIN COMMANDS In .Nm , builtin commands take parameters from the command line. Presently, the only way to call them from a script is by using .Pa evaluate on a string. If an error condition occurs, an exception will be generated, which can be intercepted using .Tn ANS Forth exception handling words. If not intercepted, an error message will be displayed and the interpreter's state will be reset, emptying the stack and restoring interpreting mode. .Pp The builtin commands available are: .Pp .Bl -tag -width Ds -compact .It Ic autoboot Op Ar seconds Op Ar prompt Proceeds to bootstrap the system after a number of seconds, if not interrupted by the user. Displays a countdown prompt warning the user the system is about to be booted, unless interrupted by a key press. The kernel will be loaded first if necessary. Defaults to 10 seconds. .Pp .It Ic bcachestat Displays statistics about disk cache usage. For debugging only. .Pp .It Ic boot .It Ic boot Ar kernelname Op Cm ... .It Ic boot Fl flag Cm ... Immediately proceeds to bootstrap the system, loading the kernel if necessary. Any flags or arguments are passed to the kernel, but they must precede the kernel name, if a kernel name is provided. .Pp .Em WARNING : The behavior of this builtin is changed if .Xr loader.4th 8 is loaded.
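.Pp
For example, to boot an alternate kernel in single-user mode, with the
flag preceding the kernel name as required:
.Pp
.Dl boot -s kernel.old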
.Pp .It Ic echo Xo .Op Fl n .Op Aq message .Xc Displays text on the screen. A new line will be printed unless .Fl n is specified. .Pp .It Ic heap Displays memory usage statistics. For debugging purposes only. .Pp .It Ic help Op topic Op subtopic Shows help messages read from .Pa /boot/loader.help . The special topic .Em index will list the topics available. .Pp .It Ic include Ar file Op Ar Process script files. Each file, in turn, is completely read into memory, and then each of its lines is passed to the command line interpreter. If any error is returned by the interpreter, the include command aborts immediately, without reading any other files, and returns an error itself (see .Sx ERRORS ) . .Pp .It Ic load Xo .Op Fl t Ar type .Ar file Cm ... .Xc Loads a kernel, kernel loadable module (kld), disk image, or file of opaque contents tagged as being of the type .Ar type . Kernel and modules can be either in a.out or ELF format. Any arguments passed after the name of the file to be loaded will be passed as arguments to that file. Use the .Li md_image type to make the kernel create a file-backed .Xr md 4 disk. This is useful for booting from a temporary rootfs. Currently, argument passing does not work for the kernel. .Pp .It Ic load_geli Xo .Op Fl n Ar keyno .Ar prov Ar file .Xc Loads a .Xr geli 8 encryption keyfile for the given provider name. The key index can be specified via .Ar keyno or will default to zero. .Pp .It Ic ls Xo .Op Fl l .Op Ar path .Xc Displays a listing of files in the directory .Ar path , or the root directory if .Ar path is not specified. If .Fl l is specified, file sizes will be shown too. .Pp .It Ic lsdev Op Fl v Lists all of the devices from which it may be possible to load modules. If .Fl v is specified, more details are printed. .Pp .It Ic lsmod Op Fl v Displays loaded modules. If .Fl v is specified, more details are shown. .Pp .It Ic more Ar file Op Ar Display the files specified, with a pause at each .Va LINES displayed. .Pp .It Ic pnpscan Op Fl v Scans for Plug-and-Play devices. This is not functional at present. .Pp .It Ic read Xo .Op Fl t Ar seconds .Op Fl p Ar prompt .Op Va variable .Xc Reads a line of input from the terminal, storing it in .Va variable if specified. A timeout can be specified with .Fl t , though it will be canceled at the first key pressed. A prompt may also be displayed through the .Fl p flag. .Pp .It Ic reboot Immediately reboots the system. .Pp .It Ic set Ar variable .It Ic set Ar variable Ns = Ns Ar value Set loader's environment variables. .Pp .It Ic show Op Va variable Displays the specified variable's value, or all variables and their values if .Va variable is not specified. .Pp .It Ic unload Remove all modules from memory. .Pp .It Ic unset Va variable Removes .Va variable from the environment. .Pp .It Ic \&? Lists available commands. .El .Ss BUILTIN ENVIRONMENT VARIABLES The .Nm has actually two different kinds of .Sq environment variables. There are ANS Forth's .Em environmental queries , and a separate space of environment variables used by builtins, which are not directly available to Forth words. It is the latter type that this section covers. .Pp Environment variables can be set and unset through the .Ic set and .Ic unset builtins, and can have their values interactively examined through the use of the .Ic show builtin. Their values can also be accessed as described in .Sx BUILTIN PARSER . .Pp Notice that these environment variables are not inherited by any shell after the system has been booted. 
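.Pp
For example, a builtin variable can be set, examined, and removed
interactively from the loader prompt:
.Bd -literal -offset indent
set comconsole_speed=115200
show comconsole_speed
unset comconsole_speed
.Ed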
.Pp A few variables are set automatically by .Nm . Others can affect the behavior of either .Nm or the kernel at boot. Some options may require a value, while others define behavior just by being set. Both types of builtin variables are described below. .Bl -tag -width bootfile .It Va autoboot_delay Number of seconds .Ic autoboot will wait before booting. If this variable is not defined, .Ic autoboot will default to 10 seconds. .Pp If set to .Dq Li NO , no .Ic autoboot will be automatically attempted after processing .Pa /boot/loader.rc , though explicit .Ic autoboot Ns 's will be processed normally, defaulting to a 10 second delay. .Pp If set to .Dq Li 0 , no delay will be inserted, but the user will still be able to interrupt the .Ic autoboot process and escape into interactive mode by pressing a key on the console while the kernel and modules are being loaded. .Pp If set to .Dq Li -1 , no delay will be inserted and .Nm will engage interactive mode only if .Ic autoboot has failed for some reason. .It Va boot_askname Instructs the kernel to prompt the user for the name of the root device when the kernel is booted. .It Va boot_cdrom Instructs the kernel to try to mount the root file system from CD-ROM. .It Va boot_ddb Instructs the kernel to start in the DDB debugger, rather than proceeding to initialize when booted. .It Va boot_dfltroot Instructs the kernel to mount the statically compiled-in root file system. .It Va boot_gdb Selects gdb-remote mode for the kernel debugger by default. .It Va boot_multicons Enables multiple console support in the kernel early in the boot process. In a running system, console configuration can be manipulated by the .Xr conscontrol 8 utility. .It Va boot_mute All console output is suppressed when the console is muted. In a running system, the state of console muting can be manipulated by the .Xr conscontrol 8 utility. .It Va boot_pause During the device probe, pauses after each line is printed. .It Va boot_serial Forces the use of a serial console even when an internal console is present. .It Va boot_single Prevents the kernel from initiating a multi-user startup; instead, single-user mode will be entered when the kernel has finished device probing. .It Va boot_verbose Setting this variable causes extra debugging information to be printed by the kernel during the boot phase. .It Va bootfile Semicolon-separated list of search paths for bootable kernels. The default is .Dq Li kernel . .It Va comconsole_speed Defines the speed of the serial console (i386 and amd64 only). If the previous boot stage indicated that a serial console is in use, then this variable is initialized to the current speed of the console serial port. Otherwise it is set to 9600, unless this was overridden using the .Va BOOT_COMCONSOLE_SPEED variable when .Nm was compiled. Changes to the .Va comconsole_speed variable take effect immediately. .It Va comconsole_port Defines the base i/o port used to access the console UART (i386 and amd64 only). If the variable is not set, its assumed value is 0x3F8, which corresponds to PC port COM1, unless overridden by the .Va BOOT_COMCONSOLE_PORT variable during the compilation of .Nm . Setting the .Va comconsole_port variable automatically sets the .Va hw.uart.console environment variable to provide a hint to the kernel about the location of the console. The loader console is changed immediately after the .Va comconsole_port variable is set. .It Va comconsole_pcidev Defines the location of a PCI device of the 'simple communication' class to be used as the serial console UART (i386 and amd64 only).
The syntax of the variable is .Li 'bus:device:function[:bar]' , where all members must be numeric, with a possible .Li 0x prefix to indicate a hexadecimal value. The .Va bar member is optional and assumed to be 0x10 if omitted. The BAR must decode i/o space. Setting the variable .Va comconsole_pcidev automatically sets the variable .Va comconsole_port to the base of the selected BAR, and the hint .Va hw.uart.console . The loader console is changed immediately after the .Va comconsole_pcidev variable is set. .It Va console Defines the current console or consoles. Multiple consoles may be specified. In that case, the first listed console will become the default console for userland output (e.g.\& from .Xr init 8 ) . .It Va currdev Selects the default device. The syntax for devices is unusual. +.It Va dumpdev +Sets the device for kernel dumps. +This can be used to ensure that a device is configured before the corresponding +.Va dumpdev +directive from +.Xr rc.conf 5 +has been processed, allowing kernel panics that happen during the early stages +of boot to be captured. .It Va init_chroot If set to a valid directory in the root file system, it causes .Xr init 8 to perform a .Xr chroot 2 operation on that directory, making it the new root directory. That happens before entering single-user mode or multi-user mode (but after executing the .Va init_script if enabled). .It Va init_path Sets the list of binaries which the kernel will try to run as the initial process. The first matching binary is used. The default list is .Dq Li /sbin/init:/sbin/oinit:/sbin/init.bak:\:/rescue/init . .It Va init_script If set to a valid file name in the root file system, instructs .Xr init 8 to run that script as the very first action, before doing anything else. Signal handling and exit code interpretation are similar to running the .Pa /etc/rc script. In particular, single-user operation is enforced if the script terminates with a non-zero exit code, or if a SIGTERM is delivered to the .Xr init 8 process (PID 1). .It Va init_shell Defines the shell binary to be used for executing the various shell scripts. The default is .Dq Li /bin/sh . It is used for running the .Va init_script if set, as well as for the .Pa /etc/rc and .Pa /etc/rc.shutdown scripts. The value of the corresponding .Xr kenv 2 variable is evaluated every time .Xr init 8 calls a shell script, so it can be changed later on using the .Xr kenv 1 utility. In particular, if a non-default shell is used for running an .Va init_script , it might be desirable to have that script reset the value of .Va init_shell back to the default, so that the .Pa /etc/rc script is executed with the standard shell .Pa /bin/sh . .It Va interpret Has the value .Dq Li OK if Forth's current state is interpreting. .It Va LINES Defines the number of lines on the screen, to be used by the pager. .It Va module_path Sets the list of directories which will be searched for modules named in a load command or implicitly required by a dependency. The default value for this variable is .Dq Li /boot/kernel;/boot/modules . .It Va num_ide_disks Sets the number of IDE disks as a workaround for some problems in finding the root disk at boot. This has been deprecated in favor of .Va root_disk_unit . .It Va prompt Value of .Nm Ns 's prompt. Defaults to .Dq Li "${interpret}" . If the .Va prompt variable is unset, the default prompt is .Ql > .
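.Pp
For example, to include the current device in the prompt (a sketch assuming that the variable expansion used by the default prompt above also applies to custom values; the backslash defers expansion to display time, as described in
.Sx BUILTIN PARSER ) :
.Bd -literal -offset indent
set prompt="loader at \e${currdev}"
.Ed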
.It Va root_disk_unit If the code which detects the disk unit number for the root disk is confused, e.g.\& by a mix of SCSI and IDE disks, or IDE disks with gaps in the sequence (e.g.\& no primary slave), the unit number can be forced by setting this variable. .It Va rootdev By default the value of .Va currdev is used to set the root file system when the kernel is booted. This can be overridden by setting .Va rootdev explicitly. .El .Pp Other variables are used to override kernel tunable parameters. The following tunables are available: .Bl -tag -width Va .It Va hw.physmem Limits the amount of physical memory the system will use. By default the size is in bytes, but the .Cm k , K , m , M , g and .Cm G suffixes are also accepted and indicate kilobytes, megabytes and gigabytes respectively. An invalid suffix will result in the variable being ignored by the kernel. .It Va hw.pci.host_start_mem , hw.acpi.host_start_mem When not otherwise constrained, this limits the memory start address. The default is 0x80000000; it should be set to at least the size of the memory and must not conflict with other resources. Typically, only systems without PCI bridges need to set this variable, since PCI bridges usually constrain the memory starting address (and the variable is only used when bridges do not constrain this address). .It Va hw.pci.enable_io_modes Enables PCI resources which are left off by some BIOSes or are not enabled correctly by the device driver. The tunable is set to ON (1) by default, but this may cause problems with some peripherals. .It Va kern.maxusers Sets the size of a number of statically allocated system tables; see .Xr tuning 7 for a description of how to select an appropriate value for this tunable. When set, this tunable replaces the value declared in the kernel compile-time configuration file. .It Va kern.ipc.nmbclusters Sets the number of mbuf clusters to be allocated. The value cannot be set below the default determined when the kernel was compiled. .It Va kern.ipc.nsfbufs Sets the number of .Xr sendfile 2 buffers to be allocated. Overrides .Dv NSFBUFS . Not all architectures use such buffers; see .Xr sendfile 2 for details. .It Va kern.maxswzone Limits the amount of KVM to be used to hold swap metadata, which directly governs the maximum amount of swap the system can support, at the rate of approximately 200 MB of swap space per 1 MB of metadata. This value is specified in bytes of KVA space. If no value is provided, the system allocates enough memory to handle an amount of swap that corresponds to eight times the amount of physical memory present in the system. .Pp Note that swap metadata can be fragmented, which means that the system can run out of space before it reaches the theoretical limit. Therefore, care should be taken not to configure more swap than approximately half of the theoretical maximum. .Pp Running out of space for swap metadata can leave the system in an unrecoverable state. Therefore, you should only change this parameter if you need to greatly extend the KVM reservation for other resources such as the buffer cache or .Va kern.ipc.nmbclusters . Modifies kernel option .Dv VM_SWZONE_SIZE_MAX . .It Va kern.maxbcache Limits the amount of KVM reserved for use by the buffer cache, specified in bytes. The default maximum is 200 MB on i386, and 400 MB on amd64 and sparc64. This parameter is used to prevent the buffer cache from eating too much KVM in large-memory machine configurations.
Change this parameter only if you need to greatly extend the KVM reservation for other resources such as the swap zone or .Va kern.ipc.nmbclusters . Note that the NBUF parameter will override this limit. Modifies .Dv VM_BCACHE_SIZE_MAX . .It Va kern.msgbufsize Sets the size of the kernel message buffer. The default limit of 64 KB is usually sufficient unless large amounts of trace data need to be collected between opportunities to examine the buffer or dump it to a file. Overrides kernel option .Dv MSGBUF_SIZE . .It Va machdep.disable_mtrrs Disables the use of i686 MTRRs (x86 only). .It Va net.inet.tcp.tcbhashsize Overrides the compile-time value of .Dv TCBHASHSIZE or the preset default of 512. Must be a power of 2. .It Va twiddle_divisor Throttles the output of the .Sq twiddle I/O progress indicator displayed while loading the kernel and modules. This is useful on slow serial consoles where the time spent waiting for these characters to be written can add up to many seconds. The default is 1 (full speed); a value of 2 spins half as fast, and so on. .It Va vm.kmem_size Sets the size of kernel memory (in bytes). This overrides the value determined when the kernel was compiled. Modifies .Dv VM_KMEM_SIZE . .It Va vm.kmem_size_min .It Va vm.kmem_size_max Sets the minimum and maximum (respectively) amount of kernel memory that will be automatically allocated by the kernel. These override the values determined when the kernel was compiled. Modifies .Dv VM_KMEM_SIZE_MIN and .Dv VM_KMEM_SIZE_MAX . .El .Ss BUILTIN PARSER When a builtin command is executed, the rest of the line is taken by it as arguments, and it is processed by a special parser which is not used for regular Forth commands. .Pp This special parser applies the following rules to the parsed text: .Bl -enum .It All backslash characters are preprocessed. .Bl -bullet .It \eb , \ef , \er , \en and \et are processed as in C. .It \es is converted to a space. .It \ev is converted to .Tn ASCII 11. .It \ez is just skipped. Useful for things like .Dq \e0xf\ez\e0xf . .It \e0xN and \e0xNN are replaced by the hexadecimal values N or NN. .It \eNNN is replaced by the octal NNN .Tn ASCII character. .It \e" , \e' and \e$ will escape these characters, preventing them from receiving special treatment in Step 2, described below. .It \e\e will be replaced with a single \e . .It In any other case, the backslash is simply removed. .El .It Every string between non-escaped quotes or double-quotes will be treated as a single word for the purposes of the remaining steps. .It Replace any .Li $VARIABLE or .Li ${VARIABLE} with the value of the environment variable .Va VARIABLE . .It Space-delimited arguments are passed to the called builtin command. Spaces can also be escaped through the use of \e\e . .El .Pp An exception to these parsing rules exists, and is described in .Sx BUILTINS AND FORTH . .Ss BUILTINS AND FORTH All builtin words are state-smart, immediate words. If interpreted, they behave exactly as described previously. If they are compiled, though, they extract their arguments from the stack instead of the command line. .Pp If compiled, the builtin words expect to find, at execution time, the following parameters on the stack: .D1 Ar addrN lenN ... addr2 len2 addr1 len1 N where .Ar addrX lenX are strings which will compose the command line that will be parsed into the builtin's arguments. Internally, these strings are concatenated in order, from 1 to N, with a space placed between each one.
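.Pp
For example, the following colon definition (a minimal sketch; the name
.Ic boot-single
is hypothetical) passes a single argument string to the
.Ic boot
builtin through this stack protocol; at execution time the string and the count 1 are pushed on the stack before the compiled builtin extracts them:
.Bd -literal -offset indent
: boot-single s" -s" 1 boot ;
.Ed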
.Pp If no arguments are passed, a count of 0 .Em must be passed, even if the builtin accepts no arguments. .Pp While this behavior has benefits, it has its trade-offs. If the execution token of a builtin is acquired (through .Ic ' or .Ic ['] ) , and then passed to .Ic catch or .Ic execute , the builtin behavior will depend on the system state .Bf Em at the time .Ic catch or .Ic execute is processed! .Ef This is particularly annoying for programs that want or need to handle exceptions. In this case, the use of a proxy is recommended. For example: .Dl : (boot) boot ; .Sh FICL .Tn FICL is a Forth interpreter written in C, in the form of a Forth virtual machine library that can be called by C functions and vice versa. .Pp In .Nm , each line read interactively is fed to .Tn FICL , which may call .Nm back to execute the builtin words. The builtin .Ic include will also feed .Tn FICL , one line at a time. .Pp The words available to .Tn FICL can be classified into four groups: the .Tn ANS Forth standard words, extra .Tn FICL words, extra .Fx words, and the builtin commands; the last group has already been described. The .Tn ANS Forth standard words are listed in the .Sx STANDARDS section. The words falling in the two other groups are described in the following subsections. .Ss FICL EXTRA WORDS .Bl -tag -width wid-set-super .It Ic .env .It Ic .ver .It Ic -roll .It Ic 2constant .It Ic >name .It Ic body> .It Ic compare This is the STRING word set's .Ic compare . .It Ic compile-only .It Ic endif .It Ic forget-wid .It Ic parse-word .It Ic sliteral This is the STRING word set's .Ic sliteral . .It Ic wid-set-super .It Ic w@ .It Ic w! .It Ic x. .It Ic empty .It Ic cell- .It Ic -rot .El .Ss FREEBSD EXTRA WORDS .Bl -tag -width XXXXXXXX .It Ic \&$ Pq -- Evaluates the remainder of the input buffer, after having printed it first. .It Ic \&% Pq -- Evaluates the remainder of the input buffer under a .Ic catch exception guard. .It Ic .# Works like .Ic "." but without outputting a trailing space. .It Ic fclose Pq Ar fd -- Closes a file. .It Ic fkey Pq Ar fd -- char Reads a single character from a file. .It Ic fload Pq Ar fd -- Processes the file .Em fd . .It Ic fopen Pq Ar addr len mode Li -- Ar fd Opens a file. Returns a file descriptor, or \-1 in case of failure (see the example at the end of this list). The .Ar mode parameter selects whether the file is to be opened for read access, write access, or both. The constants .Dv O_RDONLY , O_WRONLY , and .Dv O_RDWR are defined in .Pa /boot/support.4th , indicating read only, write only, and read-write access, respectively. .It Xo .Ic fread .Pq Ar fd addr len -- len' .Xc Tries to read .Em len bytes from file .Em fd into buffer .Em addr . Returns the actual number of bytes read, or -1 in case of error or end of file. .It Ic heap? Pq -- Ar cells Returns the space remaining in the dictionary heap, in cells. This is not related to the heap used by dynamic memory allocation words. .It Ic inb Pq Ar port -- char Reads a byte from a port. .It Ic key Pq -- Ar char Reads a single character from the console. .It Ic key? Pq -- Ar flag Returns .Ic true if there is a character available to be read from the console. .It Ic ms Pq Ar u -- Waits .Em u microseconds. .It Ic outb Pq Ar port char -- Writes a byte to a port. .It Ic seconds Pq -- Ar u Returns the number of seconds since midnight. .It Ic tib> Pq -- Ar addr len Returns the remainder of the input buffer as a string on the stack. .It Ic trace! Pq Ar flag -- Activates or deactivates tracing. Does not work with .Ic catch .
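.Pp
As an example of the file access words above (a sketch assuming that
.Pa /boot/loader.rc
exists and that the
.Dv O_RDONLY
constant from
.Pa /boot/support.4th
has been loaded; error checking is omitted), the following opens the file, prints its first character, and closes it:
.Bd -literal -offset indent
s" /boot/loader.rc" O_RDONLY fopen  ( -- fd )
dup fkey emit  ( read and print the first character )
fclose         ( close the file )
.Ed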
.El .Ss FREEBSD DEFINED ENVIRONMENTAL QUERIES .Bl -tag -width Ds .It arch-i386 .Ic TRUE if the architecture is IA32. .It FreeBSD_version .Fx version at compile time. .It loader_version .Nm version. .El .Ss SYSTEM DOCUMENTATION .Sh FILES .Bl -tag -width /boot/defaults/loader.conf -compact .It Pa /boot/loader .Nm itself. .It Pa /boot/boot.4th Additional .Tn FICL initialization. .It Pa /boot/boot.conf .Nm bootstrapping script. Deprecated. .It Pa /boot/defaults/loader.conf .It Pa /boot/loader.conf .It Pa /boot/loader.conf.local .Nm configuration files, as described in .Xr loader.conf 5 . .It Pa /boot/loader.rc .Nm bootstrapping script. .It Pa /boot/loader.help Loaded by .Ic help . Contains the help messages. .El .Sh EXAMPLES Boot in single user mode: .Pp .Dl boot -s .Pp Load the kernel, a splash screen, and then autoboot in five seconds. Notice that a kernel must be loaded before any other .Ic load command is attempted. .Bd -literal -offset indent load kernel load splash_bmp load -t splash_image_data /boot/chuckrulez.bmp autoboot 5 .Ed .Pp Set the disk unit of the root device to 2, and then boot. This would be needed in a system with two IDE disks, with the second IDE disk hardwired to ada2 instead of ada1. .Bd -literal -offset indent set root_disk_unit=2 boot /boot/kernel/kernel .Ed .Pp See also: .Bl -tag -width /usr/share/examples/bootforth/X .It Pa /boot/loader.4th Extra builtin-like words. .It Pa /boot/support.4th .Pa loader.conf processing words. .It Pa /usr/share/examples/bootforth/ Assorted examples. .El .Sh ERRORS The following values are thrown by .Nm : .Bl -tag -width XXXXX -offset indent .It 100 Any type of error in the processing of a builtin. .It -1 .Ic Abort executed. .It -2 .Ic Abort" executed. .It -56 .Ic Quit executed. .It -256 Out of interpreting text. .It -257 Need more text to succeed -- will finish on next run. .It -258 .Ic Bye executed. .It -259 Unspecified error. .El .Sh SEE ALSO .Xr libstand 3 , .Xr loader.conf 5 , .Xr tuning 7 , .Xr boot 8 , .Xr btxld 8 .Sh STANDARDS For the purposes of ANS Forth compliance, loader is an .Bf Em ANS Forth System with Environmental Restrictions, Providing .Ef .Bf Li .No .( , .No :noname , .No ?do , parse, pick, roll, refill, to, value, \e, false, true, .No <> , .No 0<> , compile\&, , erase, nip, tuck .Ef .Em and .Li marker .Bf Em from the Core Extensions word set, Providing the Exception Extensions word set, Providing the Locals Extensions word set, Providing the Memory-Allocation Extensions word set, Providing .Ef .Bf Li \&.s, bye, forget, see, words, \&[if], \&[else] .Ef .Em and .Li [then] .Bf Em from the Programming-Tools extension word set, Providing the Search-Order extensions word set. .Ef .Sh HISTORY The .Nm first appeared in .Fx 3.1 . .Sh AUTHORS .An -nosplit The .Nm was written by .An Michael Smith Aq msmith@FreeBSD.org . .Pp .Tn FICL was written by .An John Sadler Aq john_sadler@alum.mit.edu . .Sh BUGS The .Ic expect and .Ic accept words will read from the input buffer instead of the console. The latter will be fixed, but the former will not. 
Index: projects/powernv/boot/powerpc/kboot/Makefile =================================================================== --- projects/powernv/boot/powerpc/kboot/Makefile (revision 291050) +++ projects/powernv/boot/powerpc/kboot/Makefile (revision 291051) @@ -1,122 +1,117 @@ # $FreeBSD$ .include MK_SSP= no MAN= PROG= loader.kboot NEWVERSWHAT= "kboot loader" ${MACHINE_ARCH} BINDIR?= /boot INSTALLFLAGS= -b # Architecture-specific loader code SRCS= conf.c metadata.c vers.c main.c ppc64_elf_freebsd.c SRCS+= host_syscall.S hostcons.c hostdisk.c kerneltramp.S kbootfdt.c SRCS+= ucmpdi2.c LOADER_DISK_SUPPORT?= yes LOADER_UFS_SUPPORT?= yes LOADER_CD9660_SUPPORT?= yes LOADER_EXT2FS_SUPPORT?= yes LOADER_NET_SUPPORT?= yes LOADER_NFS_SUPPORT?= yes LOADER_TFTP_SUPPORT?= no LOADER_GZIP_SUPPORT?= yes LOADER_FDT_SUPPORT= yes LOADER_BZIP2_SUPPORT?= no .if ${LOADER_DISK_SUPPORT} == "yes" CFLAGS+= -DLOADER_DISK_SUPPORT .endif .if ${LOADER_UFS_SUPPORT} == "yes" CFLAGS+= -DLOADER_UFS_SUPPORT .endif .if ${LOADER_CD9660_SUPPORT} == "yes" CFLAGS+= -DLOADER_CD9660_SUPPORT .endif .if ${LOADER_EXT2FS_SUPPORT} == "yes" CFLAGS+= -DLOADER_EXT2FS_SUPPORT .endif .if ${LOADER_GZIP_SUPPORT} == "yes" CFLAGS+= -DLOADER_GZIP_SUPPORT .endif .if ${LOADER_BZIP2_SUPPORT} == "yes" CFLAGS+= -DLOADER_BZIP2_SUPPORT .endif .if ${LOADER_NET_SUPPORT} == "yes" CFLAGS+= -DLOADER_NET_SUPPORT .endif .if ${LOADER_NFS_SUPPORT} == "yes" CFLAGS+= -DLOADER_NFS_SUPPORT .endif .if ${LOADER_TFTP_SUPPORT} == "yes" CFLAGS+= -DLOADER_TFTP_SUPPORT .endif .if ${LOADER_FDT_SUPPORT} == "yes" CFLAGS+= -I${.CURDIR}/../../fdt CFLAGS+= -I${.OBJDIR}/../../fdt CFLAGS+= -I${.CURDIR}/../../../contrib/libfdt CFLAGS+= -DLOADER_FDT_SUPPORT LIBFDT= ${.OBJDIR}/../../fdt/libfdt.a .endif .if ${MK_FORTH} != "no" # Enable BootForth BOOT_FORTH= yes CFLAGS+= -DBOOT_FORTH -I${.CURDIR}/../../ficl -I${.CURDIR}/../../ficl/powerpc LIBFICL= ${.OBJDIR}/../../ficl/libficl.a .endif # Avoid the open-close-dance for every file access, as some firmware performs # an auto-negotiation on every open of the network interface and thus causes # netbooting to take horribly long. CFLAGS+= -DNETIF_OPEN_CLOSE_ONCE -mcpu=powerpc64 # Always add MI sources .PATH: ${.CURDIR}/../../common ${.CURDIR}/../../../libkern .include "${.CURDIR}/../../common/Makefile.inc" CFLAGS+= -I${.CURDIR}/../../common -I${.CURDIR}/../../.. CFLAGS+= -I. CLEANFILES+= vers.c loader.help CFLAGS+= -Wall -ffreestanding -msoft-float -DAIM # load address.
set in linker script RELOC?= 0x0 CFLAGS+= -DRELOC=${RELOC} LDFLAGS= -nostdlib -static -T ${.CURDIR}/ldscript.powerpc # 64-bit bridge extensions CFLAGS+= -Wa,-mppc64bridge # Pull in common loader code #.PATH: ${.CURDIR}/../../ofw/common #.include "${.CURDIR}/../../ofw/common/Makefile.inc" # where to get libstand from LIBSTAND= ${.OBJDIR}/../../libstand32/libstand.a CFLAGS+= -I${.CURDIR}/../../../../lib/libstand/ DPADD= ${LIBFICL} ${LIBOFW} ${LIBFDT} ${LIBSTAND} LDADD= ${LIBFICL} ${LIBOFW} ${LIBFDT} ${LIBSTAND} -SC_DFLT_FONT=cp437 - -font.h: - uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h - vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT} loader.help: help.common help.kboot ${.CURDIR}/../../fdt/help.fdt cat ${.ALLSRC} | \ awk -f ${.CURDIR}/../../common/merge_help.awk > ${.TARGET} .PATH: ${.CURDIR}/../../forth .include "${.CURDIR}/../../forth/Makefile.inc" FILES+= loader.rc menu.rc .include Index: projects/powernv/boot/powerpc/kboot =================================================================== --- projects/powernv/boot/powerpc/kboot (revision 291050) +++ projects/powernv/boot/powerpc/kboot (revision 291051) Property changes on: projects/powernv/boot/powerpc/kboot ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/boot/powerpc/kboot:r290829-291050 Index: projects/powernv/boot =================================================================== --- projects/powernv/boot (revision 291050) +++ projects/powernv/boot (revision 291051) Property changes on: projects/powernv/boot ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/boot:r290829-291050 Index: projects/powernv/conf/files.powerpc =================================================================== --- projects/powernv/conf/files.powerpc (revision 291050) +++ projects/powernv/conf/files.powerpc (revision 291051) @@ -1,260 +1,260 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. # # font.h optional sc \ compile-with "uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" # # There is only an asm version on ppc64. 
cddl/compat/opensolaris/kern/opensolaris_atomic.c optional zfs powerpc | dtrace powerpc compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/atomic/powerpc64/opensolaris_atomic.S optional zfs powerpc64 | dtrace powerpc64 compile-with "${ZFS_S}" cddl/dev/dtrace/powerpc/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/powerpc/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/powerpc/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" crypto/blowfish/bf_enc.c optional crypto | ipsec crypto/des/des_enc.c optional crypto | ipsec | netsmb dev/bm/if_bm.c optional bm powermac dev/adb/adb_bus.c optional adb dev/adb/adb_kbd.c optional adb dev/adb/adb_mouse.c optional adb dev/adb/adb_hb_if.m optional adb dev/adb/adb_if.m optional adb dev/adb/adb_buttons.c optional adb dev/agp/agp_apple.c optional agp powermac dev/fb/fb.c optional sc dev/fdt/fdt_powerpc.c optional fdt # ofwbus depends on simplebus. dev/fdt/simplebus.c optional aim | fdt dev/hwpmc/hwpmc_e500.c optional hwpmc dev/hwpmc/hwpmc_mpc7xxx.c optional hwpmc dev/hwpmc/hwpmc_powerpc.c optional hwpmc dev/hwpmc/hwpmc_ppc970.c optional hwpmc dev/iicbus/ad7417.c optional ad7417 powermac dev/iicbus/adm1030.c optional powermac windtunnel | adm1030 powermac dev/iicbus/adt746x.c optional adt746x powermac dev/iicbus/ds1631.c optional ds1631 powermac dev/iicbus/ds1775.c optional ds1775 powermac dev/iicbus/max6690.c optional max6690 powermac dev/nand/nfc_fsl.c optional nand mpc85xx dev/nand/nfc_rb.c optional nand mpc85xx # ofw can be either aim or fdt: fdt case handled in files. aim only powerpc specific. dev/ofw/openfirm.c optional aim dev/ofw/openfirmio.c optional aim dev/ofw/ofw_bus_if.m optional aim dev/ofw/ofw_cpu.c optional aim dev/ofw/ofw_if.m optional aim dev/ofw/ofw_bus_subr.c optional aim dev/ofw/ofw_console.c optional aim dev/ofw/ofw_disk.c optional ofwd aim dev/ofw/ofw_iicbus.c optional iicbus aim dev/ofw/ofwbus.c optional aim | fdt dev/ofw/ofw_standard.c optional aim powerpc dev/powermac_nvram/powermac_nvram.c optional powermac_nvram powermac dev/quicc/quicc_bfe_fdt.c optional quicc mpc85xx dev/scc/scc_bfe_macio.c optional scc powermac dev/sec/sec.c optional sec mpc85xx dev/sound/macio/aoa.c optional snd_davbus | snd_ai2s powermac dev/sound/macio/davbus.c optional snd_davbus powermac dev/sound/macio/i2s.c optional snd_ai2s powermac dev/sound/macio/onyx.c optional snd_ai2s iicbus powermac dev/sound/macio/snapper.c optional snd_ai2s iicbus powermac dev/sound/macio/tumbler.c optional snd_ai2s iicbus powermac dev/syscons/scgfbrndr.c optional sc dev/syscons/scterm-teken.c optional sc dev/syscons/scvtb.c optional sc dev/tsec/if_tsec.c optional tsec dev/tsec/if_tsec_fdt.c optional tsec fdt dev/uart/uart_cpu_powerpc.c optional uart -dev/usb/controller/ehci_fsl.c optional ehci mpc85xx +dev/usb/controller/ehci_fsl.c optional ehci mpc85xx | ehci qoriq_dpaa dev/vt/hw/ofwfb/ofwfb.c optional vt aim kern/kern_clocksource.c standard kern/subr_dummy_vdso_tc.c standard kern/syscalls.c optional ktr kern/subr_sfbuf.c standard libkern/ashldi3.c optional powerpc libkern/ashrdi3.c optional powerpc libkern/bcmp.c standard libkern/cmpdi2.c optional powerpc libkern/divdi3.c optional powerpc libkern/ffs.c standard libkern/ffsl.c standard libkern/ffsll.c standard libkern/fls.c standard libkern/flsl.c standard libkern/flsll.c standard libkern/lshrdi3.c optional powerpc libkern/memmove.c standard libkern/memset.c standard libkern/moddi3.c optional powerpc libkern/qdivrem.c optional powerpc libkern/ucmpdi2.c 
optional powerpc libkern/udivdi3.c optional powerpc libkern/umoddi3.c optional powerpc powerpc/aim/locore.S optional aim no-obj powerpc/aim/aim_machdep.c optional aim powerpc/aim/mmu_oea.c optional aim powerpc powerpc/aim/mmu_oea64.c optional aim powerpc/aim/moea64_if.m optional aim powerpc/aim/moea64_native.c optional aim powerpc/aim/mp_cpudep.c optional aim powerpc/aim/slb.c optional aim powerpc64 powerpc/booke/locore.S optional booke no-obj powerpc/booke/booke_machdep.c optional booke powerpc/booke/machdep_e500.c optional booke_e500 powerpc/booke/mp_cpudep.c optional booke smp powerpc/booke/platform_bare.c optional booke powerpc/booke/pmap.c optional booke powerpc/cpufreq/dfs.c optional cpufreq powerpc/cpufreq/pcr.c optional cpufreq aim powerpc/cpufreq/pmufreq.c optional cpufreq aim pmu powerpc/fpu/fpu_add.c optional fpu_emu powerpc/fpu/fpu_compare.c optional fpu_emu powerpc/fpu/fpu_div.c optional fpu_emu powerpc/fpu/fpu_emu.c optional fpu_emu powerpc/fpu/fpu_explode.c optional fpu_emu powerpc/fpu/fpu_implode.c optional fpu_emu powerpc/fpu/fpu_mul.c optional fpu_emu powerpc/fpu/fpu_sqrt.c optional fpu_emu powerpc/fpu/fpu_subr.c optional fpu_emu powerpc/mambo/mambocall.S optional mambo powerpc/mambo/mambo.c optional mambo powerpc/mambo/mambo_console.c optional mambo powerpc/mambo/mambo_disk.c optional mambo powerpc/mikrotik/platform_rb.c optional mikrotik powerpc/mpc85xx/atpic.c optional mpc85xx isa powerpc/mpc85xx/ds1553_bus_fdt.c optional ds1553 fdt powerpc/mpc85xx/ds1553_core.c optional ds1553 -powerpc/mpc85xx/fsl_sdhc.c optional mpc85xx sdhc +powerpc/mpc85xx/fsl_sdhc.c optional mpc85xx sdhc | qoriq_dpaa sdhc powerpc/mpc85xx/i2c.c optional iicbus fdt powerpc/mpc85xx/isa.c optional mpc85xx isa -powerpc/mpc85xx/lbc.c optional mpc85xx -powerpc/mpc85xx/mpc85xx.c optional mpc85xx +powerpc/mpc85xx/lbc.c optional mpc85xx | qoriq_dpaa +powerpc/mpc85xx/mpc85xx.c optional mpc85xx | qoriq_dpaa powerpc/mpc85xx/mpc85xx_gpio.c optional mpc85xx gpio -powerpc/mpc85xx/platform_mpc85xx.c optional mpc85xx -powerpc/mpc85xx/pci_mpc85xx.c optional pci mpc85xx -powerpc/mpc85xx/pci_mpc85xx_pcib.c optional pci mpc85xx +powerpc/mpc85xx/platform_mpc85xx.c optional mpc85xx | qoriq_dpaa +powerpc/mpc85xx/pci_mpc85xx.c optional pci mpc85xx | pci qoriq_dpaa +powerpc/mpc85xx/pci_mpc85xx_pcib.c optional pci mpc85xx | pci qoriq_dpaa powerpc/ofw/ofw_machdep.c standard powerpc/ofw/ofw_pci.c optional pci powerpc/ofw/ofw_pcibus.c optional pci powerpc/ofw/ofw_pcib_pci.c optional pci powerpc/ofw/ofw_real.c optional aim powerpc/ofw/ofw_syscons.c optional sc aim powerpc/ofw/ofwcall32.S optional aim powerpc powerpc/ofw/ofwcall64.S optional aim powerpc64 powerpc/ofw/ofwmagic.S optional aim powerpc/ofw/openpic_ofw.c optional aim | fdt powerpc/ofw/rtas.c optional aim powerpc/powermac/ata_kauai.c optional powermac ata | powermac atamacio powerpc/powermac/ata_macio.c optional powermac ata | powermac atamacio powerpc/powermac/ata_dbdma.c optional powermac ata | powermac atamacio powerpc/powermac/atibl.c optional powermac atibl powerpc/powermac/cuda.c optional powermac cuda powerpc/powermac/cpcht.c optional powermac pci powerpc/powermac/dbdma.c optional powermac pci powerpc/powermac/fcu.c optional powermac fcu powerpc/powermac/grackle.c optional powermac pci powerpc/powermac/hrowpic.c optional powermac pci powerpc/powermac/kiic.c optional powermac kiic powerpc/powermac/macgpio.c optional powermac pci powerpc/powermac/macio.c optional powermac pci powerpc/powermac/nvbl.c optional powermac nvbl powerpc/powermac/platform_powermac.c 
optional powermac powerpc/powermac/powermac_thermal.c optional powermac powerpc/powermac/pswitch.c optional powermac pswitch powerpc/powermac/pmu.c optional powermac pmu powerpc/powermac/smu.c optional powermac smu powerpc/powermac/smusat.c optional powermac smu powerpc/powermac/uninorth.c optional powermac powerpc/powermac/uninorthpci.c optional powermac pci powerpc/powermac/vcoregpio.c optional powermac powerpc/powernv/opal.c optional powernv powerpc/powernv/opal_console.c optional powernv powerpc/powernv/opal_dev.c optional powernv powerpc/powernv/opal_pci.c optional powernv pci powerpc/powernv/opalcall.S optional powernv powerpc/powernv/platform_powernv.c optional powernv powerpc/powerpc/altivec.c standard powerpc/powerpc/autoconf.c standard powerpc/powerpc/bcopy.c standard powerpc/powerpc/bus_machdep.c standard powerpc/powerpc/busdma_machdep.c standard powerpc/powerpc/clock.c standard powerpc/powerpc/copyinout.c standard powerpc/powerpc/copystr.c standard powerpc/powerpc/cpu.c standard powerpc/powerpc/db_disasm.c optional ddb powerpc/powerpc/db_hwwatch.c optional ddb powerpc/powerpc/db_interface.c optional ddb powerpc/powerpc/db_trace.c optional ddb powerpc/powerpc/dump_machdep.c standard powerpc/powerpc/elf32_machdep.c optional powerpc | compat_freebsd32 powerpc/powerpc/elf64_machdep.c optional powerpc64 powerpc/powerpc/exec_machdep.c standard powerpc/powerpc/fpu.c standard powerpc/powerpc/fuswintr.c standard powerpc/powerpc/gdb_machdep.c optional gdb powerpc/powerpc/in_cksum.c optional inet | inet6 powerpc/powerpc/interrupt.c standard powerpc/powerpc/intr_machdep.c standard powerpc/powerpc/iommu_if.m standard powerpc/powerpc/machdep.c standard powerpc/powerpc/mem.c optional mem powerpc/powerpc/mmu_if.m standard powerpc/powerpc/mp_machdep.c optional smp powerpc/powerpc/nexus.c standard powerpc/powerpc/openpic.c standard powerpc/powerpc/pic_if.m standard powerpc/powerpc/pmap_dispatch.c standard powerpc/powerpc/platform.c standard powerpc/powerpc/platform_if.m standard powerpc/powerpc/ptrace_machdep.c standard powerpc/powerpc/sc_machdep.c optional sc powerpc/powerpc/setjmp.S standard powerpc/powerpc/sigcode32.S optional powerpc | compat_freebsd32 powerpc/powerpc/sigcode64.S optional powerpc64 powerpc/powerpc/swtch32.S optional powerpc powerpc/powerpc/swtch64.S optional powerpc64 powerpc/powerpc/stack_machdep.c optional ddb | stack powerpc/powerpc/suswintr.c standard powerpc/powerpc/syncicache.c standard powerpc/powerpc/sys_machdep.c standard powerpc/powerpc/trap.c standard powerpc/powerpc/uio_machdep.c standard powerpc/powerpc/uma_machdep.c standard powerpc/powerpc/vm_machdep.c standard powerpc/ps3/ehci_ps3.c optional ps3 ehci powerpc/ps3/ohci_ps3.c optional ps3 ohci powerpc/ps3/if_glc.c optional ps3 glc powerpc/ps3/mmu_ps3.c optional ps3 powerpc/ps3/platform_ps3.c optional ps3 powerpc/ps3/ps3bus.c optional ps3 powerpc/ps3/ps3cdrom.c optional ps3 scbus powerpc/ps3/ps3disk.c optional ps3 powerpc/ps3/ps3pic.c optional ps3 powerpc/ps3/ps3_syscons.c optional ps3 vt powerpc/ps3/ps3-hvcall.S optional ps3 powerpc/pseries/phyp-hvcall.S optional pseries powerpc64 powerpc/pseries/mmu_phyp.c optional pseries powerpc64 powerpc/pseries/phyp_console.c optional pseries powerpc64 uart powerpc/pseries/phyp_llan.c optional llan powerpc/pseries/phyp_vscsi.c optional pseries powerpc64 scbus powerpc/pseries/platform_chrp.c optional pseries powerpc/pseries/plpar_iommu.c optional pseries powerpc64 powerpc/pseries/plpar_pcibus.c optional pseries powerpc64 pci powerpc/pseries/rtas_dev.c optional pseries 
powerpc/pseries/rtas_pci.c optional pseries pci powerpc/pseries/vdevice.c optional pseries powerpc64 powerpc/pseries/xics.c optional pseries powerpc64 powerpc/psim/iobus.c optional psim powerpc/psim/ata_iobus.c optional ata psim powerpc/psim/openpic_iobus.c optional psim powerpc/psim/uart_iobus.c optional uart psim Index: projects/powernv/conf/options =================================================================== --- projects/powernv/conf/options (revision 291050) +++ projects/powernv/conf/options (revision 291051) @@ -1,961 +1,962 @@ # $FreeBSD$ # # On the handling of kernel options # # All kernel options should be listed in NOTES, with suitable # descriptions. Negative options (options that make some code not # compile) should be commented out; LINT (generated from NOTES) should # compile as much code as possible. Try to structure option-using # code so that a single option only switches code on, or only switches # code off, to make it possible to have a full compile-test. If # necessary, you can check for COMPILING_LINT to get maximum code # coverage. # # All new options shall also be listed in either "conf/options" or # "conf/options.<arch>". Options that affect a single source file # <file>.[c|s] should be directed into "opt_<file>.h", while options # that affect multiple files should either go in "opt_global.h" if # this is a kernel-wide option (used just about everywhere), or in # "opt_<option>.h" if it affects only some files. # Note that the effect of listing only an option without a # header-file-name in conf/options (and cousins) is that the last # convention is followed. # # This handling scheme is not yet fully implemented. # # # Format of this file: # Option name filename # # If filename is missing, the default is # opt_<option>.h AAC_DEBUG opt_aac.h AACRAID_DEBUG opt_aacraid.h AHC_ALLOW_MEMIO opt_aic7xxx.h AHC_TMODE_ENABLE opt_aic7xxx.h AHC_DUMP_EEPROM opt_aic7xxx.h AHC_DEBUG opt_aic7xxx.h AHC_DEBUG_OPTS opt_aic7xxx.h AHC_REG_PRETTY_PRINT opt_aic7xxx.h AHD_DEBUG opt_aic79xx.h AHD_DEBUG_OPTS opt_aic79xx.h AHD_TMODE_ENABLE opt_aic79xx.h AHD_REG_PRETTY_PRINT opt_aic79xx.h ADW_ALLOW_MEMIO opt_adw.h TWA_DEBUG opt_twa.h TWA_FLASH_FIRMWARE opt_twa.h # Debugging options. ALT_BREAK_TO_DEBUGGER opt_kdb.h BREAK_TO_DEBUGGER opt_kdb.h DDB DDB_BUFR_SIZE opt_ddb.h DDB_CAPTURE_DEFAULTBUFSIZE opt_ddb.h DDB_CAPTURE_MAXBUFSIZE opt_ddb.h DDB_CTF opt_ddb.h DDB_NUMSYM opt_ddb.h GDB KDB opt_global.h KDB_TRACE opt_kdb.h KDB_UNATTENDED opt_kdb.h KLD_DEBUG opt_kld.h SYSCTL_DEBUG opt_sysctl.h EARLY_PRINTF opt_global.h TEXTDUMP_PREFERRED opt_ddb.h TEXTDUMP_VERBOSE opt_ddb.h # Miscellaneous options.
ADAPTIVE_LOCKMGRS ALQ ALTERA_SDCARD_FAST_SIM opt_altera_sdcard.h ATSE_CFI_HACK opt_cfi.h AUDIT opt_global.h BOOTHOWTO opt_global.h BOOTVERBOSE opt_global.h CALLOUT_PROFILING CAPABILITIES opt_capsicum.h CAPABILITY_MODE opt_capsicum.h COMPAT_43 opt_compat.h COMPAT_43TTY opt_compat.h COMPAT_FREEBSD4 opt_compat.h COMPAT_FREEBSD5 opt_compat.h COMPAT_FREEBSD6 opt_compat.h COMPAT_FREEBSD7 opt_compat.h COMPAT_FREEBSD9 opt_compat.h COMPAT_FREEBSD10 opt_compat.h COMPAT_CLOUDABI64 opt_dontuse.h COMPAT_LINUXKPI opt_compat.h COMPILING_LINT opt_global.h CY_PCI_FASTINTR DEADLKRES opt_watchdog.h DIRECTIO FILEMON opt_dontuse.h FFCLOCK FULL_PREEMPTION opt_sched.h GZIO opt_gzio.h IMAGACT_BINMISC opt_dontuse.h IPI_PREEMPTION opt_sched.h GEOM_AES opt_geom.h GEOM_BDE opt_geom.h GEOM_BSD opt_geom.h GEOM_CACHE opt_geom.h GEOM_CONCAT opt_geom.h GEOM_ELI opt_geom.h GEOM_FOX opt_geom.h GEOM_GATE opt_geom.h GEOM_JOURNAL opt_geom.h GEOM_LABEL opt_geom.h GEOM_LABEL_GPT opt_geom.h GEOM_LINUX_LVM opt_geom.h GEOM_MAP opt_geom.h GEOM_MBR opt_geom.h GEOM_MIRROR opt_geom.h GEOM_MULTIPATH opt_geom.h GEOM_NOP opt_geom.h GEOM_PART_APM opt_geom.h GEOM_PART_BSD opt_geom.h GEOM_PART_BSD64 opt_geom.h GEOM_PART_EBR opt_geom.h GEOM_PART_EBR_COMPAT opt_geom.h GEOM_PART_GPT opt_geom.h GEOM_PART_LDM opt_geom.h GEOM_PART_MBR opt_geom.h GEOM_PART_PC98 opt_geom.h GEOM_PART_VTOC8 opt_geom.h GEOM_PC98 opt_geom.h GEOM_RAID opt_geom.h GEOM_RAID3 opt_geom.h GEOM_SHSEC opt_geom.h GEOM_STRIPE opt_geom.h GEOM_SUNLABEL opt_geom.h GEOM_UNCOMPRESS opt_geom.h GEOM_UNCOMPRESS_DEBUG opt_geom.h GEOM_UZIP opt_geom.h GEOM_VINUM opt_geom.h GEOM_VIRSTOR opt_geom.h GEOM_VOL opt_geom.h GEOM_ZERO opt_geom.h KDTRACE_HOOKS opt_global.h KDTRACE_FRAME opt_kdtrace.h KN_HASHSIZE opt_kqueue.h KSTACK_MAX_PAGES KSTACK_PAGES KSTACK_USAGE_PROF KTRACE KTRACE_REQUEST_POOL opt_ktrace.h LIBICONV MAC opt_global.h MAC_BIBA opt_dontuse.h MAC_BSDEXTENDED opt_dontuse.h MAC_IFOFF opt_dontuse.h MAC_LOMAC opt_dontuse.h MAC_MLS opt_dontuse.h MAC_NONE opt_dontuse.h MAC_PARTITION opt_dontuse.h MAC_PORTACL opt_dontuse.h MAC_SEEOTHERUIDS opt_dontuse.h MAC_STATIC opt_mac.h MAC_STUB opt_dontuse.h MAC_TEST opt_dontuse.h MD_ROOT opt_md.h MD_ROOT_FSTYPE opt_md.h MD_ROOT_SIZE opt_md.h MFI_DEBUG opt_mfi.h MFI_DECODE_LOG opt_mfi.h MPROF_BUFFERS opt_mprof.h MPROF_HASH_SIZE opt_mprof.h NEW_PCIB opt_global.h NO_ADAPTIVE_MUTEXES opt_adaptive_mutexes.h NO_ADAPTIVE_RWLOCKS NO_ADAPTIVE_SX NO_EVENTTIMERS opt_timer.h NO_SYSCTL_DESCR opt_global.h NSWBUF_MIN opt_swap.h MBUF_PACKET_ZONE_DISABLE opt_global.h PANIC_REBOOT_WAIT_TIME opt_panic.h PCI_IOV opt_global.h PPC_DEBUG opt_ppc.h PPC_PROBE_CHIPSET opt_ppc.h PPS_SYNC opt_ntp.h PREEMPTION opt_sched.h QUOTA SCHED_4BSD opt_sched.h SCHED_STATS opt_sched.h SCHED_ULE opt_sched.h SLEEPQUEUE_PROFILING SLHCI_DEBUG opt_slhci.h SPX_HACK STACK opt_stack.h SUIDDIR MSGMNB opt_sysvipc.h MSGMNI opt_sysvipc.h MSGSEG opt_sysvipc.h MSGSSZ opt_sysvipc.h MSGTQL opt_sysvipc.h SEMMNI opt_sysvipc.h SEMMNS opt_sysvipc.h SEMMNU opt_sysvipc.h SEMMSL opt_sysvipc.h SEMOPM opt_sysvipc.h SEMUME opt_sysvipc.h SHMALL opt_sysvipc.h SHMMAX opt_sysvipc.h SHMMAXPGS opt_sysvipc.h SHMMIN opt_sysvipc.h SHMMNI opt_sysvipc.h SHMSEG opt_sysvipc.h SYSVMSG opt_sysvipc.h SYSVSEM opt_sysvipc.h SYSVSHM opt_sysvipc.h SW_WATCHDOG opt_watchdog.h TURNSTILE_PROFILING UMTX_PROFILING VFS_AIO VERBOSE_SYSINIT WLCACHE opt_wavelan.h WLDEBUG opt_wavelan.h # POSIX kernel options P1003_1B_MQUEUE opt_posix.h P1003_1B_SEMAPHORES opt_posix.h _KPOSIX_PRIORITY_SCHEDULING opt_posix.h # Do we want the config file compiled 
into the kernel? INCLUDE_CONFIG_FILE opt_config.h # Options for static filesystems. These should only be used at config # time, since the corresponding lkms cannot work if there are any static # dependencies. Unusability is enforced by hiding the defines for the # options in a never-included header. AUTOFS opt_dontuse.h CD9660 opt_dontuse.h EXT2FS opt_dontuse.h FDESCFS opt_dontuse.h FFS opt_dontuse.h FUSE opt_dontuse.h MSDOSFS opt_dontuse.h NANDFS opt_dontuse.h NULLFS opt_dontuse.h PROCFS opt_dontuse.h PSEUDOFS opt_dontuse.h REISERFS opt_dontuse.h SMBFS opt_dontuse.h TMPFS opt_dontuse.h UDF opt_dontuse.h UNIONFS opt_dontuse.h ZFS opt_dontuse.h # Pseudofs debugging PSEUDOFS_TRACE opt_pseudofs.h # In-kernel GSS-API KGSSAPI opt_kgssapi.h KGSSAPI_DEBUG opt_kgssapi.h # These static filesystems have one slightly bogus static dependency in # sys/i386/i386/autoconf.c. If any of these filesystems are # statically compiled into the kernel, code for mounting them as root # filesystems will be enabled - but look below. # NFSCL - client # NFSD - server NFSCL opt_nfs.h NFSD opt_nfs.h # filesystems and libiconv bridge CD9660_ICONV opt_dontuse.h MSDOSFS_ICONV opt_dontuse.h UDF_ICONV opt_dontuse.h # If you are following the conditions in the copyright, # you can enable soft-updates which will speed up a lot of things # and make the system safer from crashes at the same time. # Otherwise a STUB module will be compiled in. SOFTUPDATES opt_ffs.h # On small, embedded systems, it can be useful to turn off support for # snapshots. It saves about 30-40k for a feature that would be lightly # used, if it is used at all. NO_FFS_SNAPSHOT opt_ffs.h # Enabling this option turns on support for Access Control Lists in UFS, # which can be used to support high security configurations. Depends on # UFS_EXTATTR. UFS_ACL opt_ufs.h # Enabling this option turns on support for extended attributes in UFS-based # filesystems, which can be used to support high security configurations # as well as new filesystem features. UFS_EXTATTR opt_ufs.h UFS_EXTATTR_AUTOSTART opt_ufs.h # Enable fast hash lookups for large directories on UFS-based filesystems. UFS_DIRHASH opt_ufs.h # Enable gjournal-based UFS journal. UFS_GJOURNAL opt_ufs.h # We plan to remove the static dependencies above, with a # <filesystem>_ROOT option to control if it is usable as root. This list # allows these options to be present in config files already (though # they won't make any difference yet). NFS_ROOT opt_nfsroot.h # SMB/CIFS requester NETSMB opt_netsmb.h # Options used only in subr_param.c. HZ opt_param.h MAXFILES opt_param.h NBUF opt_param.h NSFBUFS opt_param.h VM_BCACHE_SIZE_MAX opt_param.h VM_SWZONE_SIZE_MAX opt_param.h MAXUSERS DFLDSIZ opt_param.h MAXDSIZ opt_param.h MAXSSIZ opt_param.h # Generic SCSI options. CAM_MAX_HIGHPOWER opt_cam.h CAMDEBUG opt_cam.h CAM_DEBUG_COMPILE opt_cam.h CAM_DEBUG_DELAY opt_cam.h CAM_DEBUG_BUS opt_cam.h CAM_DEBUG_TARGET opt_cam.h CAM_DEBUG_LUN opt_cam.h CAM_DEBUG_FLAGS opt_cam.h CAM_BOOT_DELAY opt_cam.h SCSI_DELAY opt_scsi.h SCSI_NO_SENSE_STRINGS opt_scsi.h SCSI_NO_OP_STRINGS opt_scsi.h # Options used only in cam/ata/ata_da.c ADA_TEST_FAILURE opt_ada.h ATA_STATIC_ID opt_ada.h # Options used only in cam/scsi/scsi_cd.c CHANGER_MIN_BUSY_SECONDS opt_cd.h CHANGER_MAX_BUSY_SECONDS opt_cd.h # Options used only in cam/scsi/scsi_sa.c.
SA_IO_TIMEOUT opt_sa.h SA_SPACE_TIMEOUT opt_sa.h SA_REWIND_TIMEOUT opt_sa.h SA_ERASE_TIMEOUT opt_sa.h SA_1FM_AT_EOD opt_sa.h # Options used only in cam/scsi/scsi_pt.c SCSI_PT_DEFAULT_TIMEOUT opt_pt.h # Options used only in cam/scsi/scsi_ses.c SES_ENABLE_PASSTHROUGH opt_ses.h # Options used in dev/sym/ (Symbios SCSI driver). SYM_SETUP_LP_PROBE_MAP opt_sym.h #-Low Priority Probe Map (bits) # Allows the ncr to take precedence # 1 (1<<0) -> 810a, 860 # 2 (1<<1) -> 825a, 875, 885, 895 # 4 (1<<2) -> 895a, 896, 1510d SYM_SETUP_SCSI_DIFF opt_sym.h #-HVD support for 825a, 875, 885 # disabled:0 (default), enabled:1 SYM_SETUP_PCI_PARITY opt_sym.h #-PCI parity checking # disabled:0, enabled:1 (default) SYM_SETUP_MAX_LUN opt_sym.h #-Number of LUNs supported # default:8, range:[1..64] # Options used only in dev/ncr/* SCSI_NCR_DEBUG opt_ncr.h SCSI_NCR_MAX_SYNC opt_ncr.h SCSI_NCR_MAX_WIDE opt_ncr.h SCSI_NCR_MYADDR opt_ncr.h # Options used only in dev/isp/* ISP_TARGET_MODE opt_isp.h ISP_FW_CRASH_DUMP opt_isp.h ISP_DEFAULT_ROLES opt_isp.h ISP_INTERNAL_TARGET opt_isp.h # Options used only in dev/iscsi ISCSI_INITIATOR_DEBUG opt_iscsi_initiator.h # Net stuff. ACCEPT_FILTER_DATA ACCEPT_FILTER_DNS ACCEPT_FILTER_HTTP ALTQ opt_global.h ALTQ_CBQ opt_altq.h ALTQ_CDNR opt_altq.h ALTQ_CODEL opt_altq.h ALTQ_DEBUG opt_altq.h ALTQ_HFSC opt_altq.h ALTQ_FAIRQ opt_altq.h ALTQ_NOPCC opt_altq.h ALTQ_PRIQ opt_altq.h ALTQ_RED opt_altq.h ALTQ_RIO opt_altq.h BOOTP opt_bootp.h BOOTP_BLOCKSIZE opt_bootp.h BOOTP_COMPAT opt_bootp.h BOOTP_NFSROOT opt_bootp.h BOOTP_NFSV3 opt_bootp.h BOOTP_WIRED_TO opt_bootp.h DEVICE_POLLING DUMMYNET opt_ipdn.h INET opt_inet.h INET6 opt_inet6.h IPDIVERT IPFILTER opt_ipfilter.h IPFILTER_DEFAULT_BLOCK opt_ipfilter.h IPFILTER_LOG opt_ipfilter.h IPFILTER_LOOKUP opt_ipfilter.h IPFIREWALL opt_ipfw.h IPFIREWALL_DEFAULT_TO_ACCEPT opt_ipfw.h IPFIREWALL_NAT opt_ipfw.h IPFIREWALL_VERBOSE opt_ipfw.h IPFIREWALL_VERBOSE_LIMIT opt_ipfw.h IPSEC opt_ipsec.h IPSEC_DEBUG opt_ipsec.h IPSEC_FILTERTUNNEL opt_ipsec.h IPSEC_NAT_T opt_ipsec.h IPSTEALTH KRPC LIBALIAS LIBMBPOOL LIBMCHAIN MBUF_PROFILING MBUF_STRESS_TEST MROUTING opt_mrouting.h NFSLOCKD PCBGROUP opt_pcbgroup.h PF_DEFAULT_TO_DROP opt_pf.h RADIX_MPATH opt_mpath.h ROUTETABLES opt_route.h RSS opt_rss.h SLIP_IFF_OPTS opt_slip.h TCPDEBUG TCPPCAP opt_global.h SIFTR TCP_OFFLOAD opt_inet.h # Enable code to dispatch TCP offloading TCP_SIGNATURE opt_inet.h VLAN_ARRAY opt_vlan.h XBONEHACK FLOWTABLE opt_route.h FLOWTABLE_HASH_ALL opt_route.h # # SCTP # SCTP opt_sctp.h SCTP_DEBUG opt_sctp.h # Enable debug printfs SCTP_WITH_NO_CSUM opt_sctp.h # Use this at your peril SCTP_LOCK_LOGGING opt_sctp.h # Log to KTR lock activity SCTP_MBUF_LOGGING opt_sctp.h # Log to KTR general mbuf alloc/free SCTP_MBCNT_LOGGING opt_sctp.h # Log to KTR mbcnt activity SCTP_PACKET_LOGGING opt_sctp.h # Log the last N packets to a packet buffer SCTP_LTRACE_CHUNKS opt_sctp.h # Log to KTR chunks processed SCTP_LTRACE_ERRORS opt_sctp.h # Log to KTR error returns. SCTP_USE_PERCPU_STAT opt_sctp.h # Use per cpu stats. SCTP_MCORE_INPUT opt_sctp.h # Have multiple input threads for input mbufs SCTP_LOCAL_TRACE_BUF opt_sctp.h # Use tracebuffer exported via sysctl SCTP_DETAILED_STR_STATS opt_sctp.h # Use per PR-SCTP policy stream stats # # # # Netgraph(4). Use option NETGRAPH to enable the base netgraph code. # Each netgraph node type can either be compiled into the kernel # or loaded dynamically. To get the former, include the corresponding # option below. Each type has its own man page, e.g. ng_async(4).
NETGRAPH NETGRAPH_DEBUG opt_netgraph.h NETGRAPH_ASYNC opt_netgraph.h NETGRAPH_ATMLLC opt_netgraph.h NETGRAPH_ATM_ATMPIF opt_netgraph.h NETGRAPH_BLUETOOTH opt_netgraph.h NETGRAPH_BLUETOOTH_BT3C opt_netgraph.h NETGRAPH_BLUETOOTH_H4 opt_netgraph.h NETGRAPH_BLUETOOTH_HCI opt_netgraph.h NETGRAPH_BLUETOOTH_L2CAP opt_netgraph.h NETGRAPH_BLUETOOTH_SOCKET opt_netgraph.h NETGRAPH_BLUETOOTH_UBT opt_netgraph.h NETGRAPH_BLUETOOTH_UBTBCMFW opt_netgraph.h NETGRAPH_BPF opt_netgraph.h NETGRAPH_BRIDGE opt_netgraph.h NETGRAPH_CAR opt_netgraph.h NETGRAPH_CISCO opt_netgraph.h NETGRAPH_DEFLATE opt_netgraph.h NETGRAPH_DEVICE opt_netgraph.h NETGRAPH_ECHO opt_netgraph.h NETGRAPH_EIFACE opt_netgraph.h NETGRAPH_ETHER opt_netgraph.h NETGRAPH_ETHER_ECHO opt_netgraph.h NETGRAPH_FEC opt_netgraph.h NETGRAPH_FRAME_RELAY opt_netgraph.h NETGRAPH_GIF opt_netgraph.h NETGRAPH_GIF_DEMUX opt_netgraph.h NETGRAPH_HOLE opt_netgraph.h NETGRAPH_IFACE opt_netgraph.h NETGRAPH_IP_INPUT opt_netgraph.h NETGRAPH_IPFW opt_netgraph.h NETGRAPH_KSOCKET opt_netgraph.h NETGRAPH_L2TP opt_netgraph.h NETGRAPH_LMI opt_netgraph.h # MPPC compression requires proprietary files (not included) NETGRAPH_MPPC_COMPRESSION opt_netgraph.h NETGRAPH_MPPC_ENCRYPTION opt_netgraph.h NETGRAPH_NAT opt_netgraph.h NETGRAPH_NETFLOW opt_netgraph.h NETGRAPH_ONE2MANY opt_netgraph.h NETGRAPH_PATCH opt_netgraph.h NETGRAPH_PIPE opt_netgraph.h NETGRAPH_PPP opt_netgraph.h NETGRAPH_PPPOE opt_netgraph.h NETGRAPH_PPTPGRE opt_netgraph.h NETGRAPH_PRED1 opt_netgraph.h NETGRAPH_RFC1490 opt_netgraph.h NETGRAPH_SOCKET opt_netgraph.h NETGRAPH_SPLIT opt_netgraph.h NETGRAPH_SPPP opt_netgraph.h NETGRAPH_TAG opt_netgraph.h NETGRAPH_TCPMSS opt_netgraph.h NETGRAPH_TEE opt_netgraph.h NETGRAPH_TTY opt_netgraph.h NETGRAPH_UI opt_netgraph.h NETGRAPH_VJC opt_netgraph.h NETGRAPH_VLAN opt_netgraph.h # NgATM options NGATM_ATM opt_netgraph.h NGATM_ATMBASE opt_netgraph.h NGATM_SSCOP opt_netgraph.h NGATM_SSCFU opt_netgraph.h NGATM_UNI opt_netgraph.h NGATM_CCATM opt_netgraph.h # DRM options DRM_DEBUG opt_drm.h TI_SF_BUF_JUMBO opt_ti.h TI_JUMBO_HDRSPLIT opt_ti.h # XXX Conflict: # of devices vs network protocol (Native ATM). # This makes "atm.h" unusable. NATM # DPT driver debug flags DPT_MEASURE_PERFORMANCE opt_dpt.h DPT_RESET_HBA opt_dpt.h # Misc debug flags. Most of these should probably be replaced with # 'DEBUG', and then let people recompile just the interesting modules # with 'make CC="cc -DDEBUG"'. 
CLUSTERDEBUG opt_debug_cluster.h DEBUG_1284 opt_ppb_1284.h VP0_DEBUG opt_vpo.h LPT_DEBUG opt_lpt.h PLIP_DEBUG opt_plip.h LOCKF_DEBUG opt_debug_lockf.h SI_DEBUG opt_debug_si.h IFMEDIA_DEBUG opt_ifmedia.h # Fb options FB_DEBUG opt_fb.h FB_INSTALL_CDEV opt_fb.h # ppbus related options PERIPH_1284 opt_ppb_1284.h DONTPROBE_1284 opt_ppb_1284.h # smbus related options ENABLE_ALART opt_intpm.h # These cause changes all over the kernel BLKDEV_IOSIZE opt_global.h BURN_BRIDGES opt_global.h DEBUG opt_global.h DEBUG_LOCKS opt_global.h DEBUG_VFS_LOCKS opt_global.h DFLTPHYS opt_global.h DIAGNOSTIC opt_global.h INVARIANT_SUPPORT opt_global.h INVARIANTS opt_global.h MAXCPU opt_global.h MAXMEMDOM opt_global.h MAXPHYS opt_global.h MCLSHIFT opt_global.h MUTEX_DEBUG opt_global.h MUTEX_NOINLINE opt_global.h LOCK_PROFILING opt_global.h LOCK_PROFILING_FAST opt_global.h MSIZE opt_global.h REGRESSION opt_global.h RWLOCK_NOINLINE opt_global.h SX_NOINLINE opt_global.h VFS_BIO_DEBUG opt_global.h # These are VM related options VM_KMEM_SIZE opt_vm.h VM_KMEM_SIZE_SCALE opt_vm.h VM_KMEM_SIZE_MAX opt_vm.h VM_NRESERVLEVEL opt_vm.h VM_LEVEL_0_ORDER opt_vm.h NO_SWAPPING opt_vm.h MALLOC_MAKE_FAILURES opt_vm.h MALLOC_PROFILE opt_vm.h MALLOC_DEBUG_MAXZONES opt_vm.h # The MemGuard replacement allocator used for tamper-after-free detection DEBUG_MEMGUARD opt_vm.h # The RedZone malloc(9) protection DEBUG_REDZONE opt_vm.h # Standard SMP options SMP opt_global.h # Size of the kernel message buffer MSGBUF_SIZE opt_msgbuf.h # NFS options NFS_MINATTRTIMO opt_nfs.h NFS_MAXATTRTIMO opt_nfs.h NFS_MINDIRATTRTIMO opt_nfs.h NFS_MAXDIRATTRTIMO opt_nfs.h NFS_DEBUG opt_nfs.h # For the Bt848/Bt848A/Bt849/Bt878/Bt879 driver OVERRIDE_CARD opt_bktr.h OVERRIDE_TUNER opt_bktr.h OVERRIDE_DBX opt_bktr.h OVERRIDE_MSP opt_bktr.h BROOKTREE_SYSTEM_DEFAULT opt_bktr.h BROOKTREE_ALLOC_PAGES opt_bktr.h BKTR_OVERRIDE_CARD opt_bktr.h BKTR_OVERRIDE_TUNER opt_bktr.h BKTR_OVERRIDE_DBX opt_bktr.h BKTR_OVERRIDE_MSP opt_bktr.h BKTR_SYSTEM_DEFAULT opt_bktr.h BKTR_ALLOC_PAGES opt_bktr.h BKTR_USE_PLL opt_bktr.h BKTR_GPIO_ACCESS opt_bktr.h BKTR_NO_MSP_RESET opt_bktr.h BKTR_430_FX_MODE opt_bktr.h BKTR_SIS_VIA_MODE opt_bktr.h BKTR_USE_FREEBSD_SMBUS opt_bktr.h BKTR_NEW_MSP34XX_DRIVER opt_bktr.h # Options for uart(4) UART_PPS_ON_CTS opt_uart.h UART_POLL_FREQ opt_uart.h +UART_DEV_TOLERANCE_PCT opt_uart.h # options for bus/device framework BUS_DEBUG opt_bus.h # options for USB support USB_DEBUG opt_usb.h USB_HOST_ALIGN opt_usb.h USB_REQ_DEBUG opt_usb.h USB_TEMPLATE opt_usb.h USB_VERBOSE opt_usb.h USB_DMA_SINGLE_ALLOC opt_usb.h USB_EHCI_BIG_ENDIAN_DESC opt_usb.h U3G_DEBUG opt_u3g.h UKBD_DFLT_KEYMAP opt_ukbd.h UPLCOM_INTR_INTERVAL opt_uplcom.h UVSCOM_DEFAULT_OPKTSIZE opt_uvscom.h UVSCOM_INTR_INTERVAL opt_uvscom.h # Embedded system options INIT_PATH ROOTDEVNAME FDC_DEBUG opt_fdc.h PCFCLOCK_VERBOSE opt_pcfclock.h PCFCLOCK_MAX_RETRIES opt_pcfclock.h KTR opt_global.h KTR_ALQ opt_ktr.h KTR_MASK opt_ktr.h KTR_CPUMASK opt_ktr.h KTR_COMPILE opt_global.h KTR_BOOT_ENTRIES opt_global.h KTR_ENTRIES opt_global.h KTR_VERBOSE opt_ktr.h WITNESS opt_global.h WITNESS_KDB opt_witness.h WITNESS_NO_VNODE opt_witness.h WITNESS_SKIPSPIN opt_witness.h WITNESS_COUNT opt_witness.h OPENSOLARIS_WITNESS opt_global.h # options for ACPI support ACPI_DEBUG opt_acpi.h ACPI_MAX_TASKS opt_acpi.h ACPI_MAX_THREADS opt_acpi.h ACPI_DMAR opt_acpi.h DEV_ACPI opt_acpi.h # ISA support DEV_ISA opt_isa.h ISAPNP opt_isa.h # various 'device presence' options. 
DEV_BPF opt_bpf.h DEV_CARP opt_carp.h DEV_ENC opt_enc.h DEV_MCA opt_mca.h DEV_NETMAP opt_global.h DEV_PCI opt_pci.h DEV_PF opt_pf.h DEV_PFLOG opt_pf.h DEV_PFSYNC opt_pf.h DEV_RANDOM opt_global.h DEV_SPLASH opt_splash.h DEV_VLAN opt_vlan.h # EISA support DEV_EISA opt_eisa.h EISA_SLOTS opt_eisa.h # ed driver ED_HPP opt_ed.h ED_3C503 opt_ed.h ED_SIC opt_ed.h # bce driver BCE_DEBUG opt_bce.h BCE_NVRAM_WRITE_SUPPORT opt_bce.h SOCKBUF_DEBUG opt_global.h # options for ubsec driver UBSEC_DEBUG opt_ubsec.h UBSEC_RNDTEST opt_ubsec.h UBSEC_NO_RNG opt_ubsec.h # options for hifn driver HIFN_DEBUG opt_hifn.h HIFN_RNDTEST opt_hifn.h # options for safenet driver SAFE_DEBUG opt_safe.h SAFE_NO_RNG opt_safe.h SAFE_RNDTEST opt_safe.h # syscons/vt options MAXCONS opt_syscons.h SC_ALT_MOUSE_IMAGE opt_syscons.h SC_CUT_SPACES2TABS opt_syscons.h SC_CUT_SEPCHARS opt_syscons.h SC_DEBUG_LEVEL opt_syscons.h SC_DFLT_FONT opt_syscons.h SC_DISABLE_KDBKEY opt_syscons.h SC_DISABLE_REBOOT opt_syscons.h SC_HISTORY_SIZE opt_syscons.h SC_KERNEL_CONS_ATTR opt_syscons.h SC_KERNEL_CONS_REV_ATTR opt_syscons.h SC_MOUSE_CHAR opt_syscons.h SC_NO_CUTPASTE opt_syscons.h SC_NO_FONT_LOADING opt_syscons.h SC_NO_HISTORY opt_syscons.h SC_NO_MODE_CHANGE opt_syscons.h SC_NO_SUSPEND_VTYSWITCH opt_syscons.h SC_NO_SYSMOUSE opt_syscons.h SC_NORM_ATTR opt_syscons.h SC_NORM_REV_ATTR opt_syscons.h SC_PIXEL_MODE opt_syscons.h SC_RENDER_DEBUG opt_syscons.h SC_TWOBUTTON_MOUSE opt_syscons.h VT_ALT_TO_ESC_HACK opt_syscons.h VT_FB_DEFAULT_WIDTH opt_syscons.h VT_FB_DEFAULT_HEIGHT opt_syscons.h VT_MAXWINDOWS opt_syscons.h VT_TWOBUTTON_MOUSE opt_syscons.h DEV_SC opt_syscons.h DEV_VT opt_syscons.h # teken terminal emulator options TEKEN_CONS25 opt_teken.h TEKEN_UTF8 opt_teken.h TERMINAL_KERN_ATTR opt_teken.h TERMINAL_NORM_ATTR opt_teken.h # options for printf PRINTF_BUFR_SIZE opt_printf.h # kbd options KBD_DISABLE_KEYMAP_LOAD opt_kbd.h KBD_INSTALL_CDEV opt_kbd.h KBD_MAXRETRY opt_kbd.h KBD_MAXWAIT opt_kbd.h KBD_RESETDELAY opt_kbd.h KBDIO_DEBUG opt_kbd.h # options for the Atheros driver ATH_DEBUG opt_ath.h ATH_TXBUF opt_ath.h ATH_RXBUF opt_ath.h ATH_DIAGAPI opt_ath.h ATH_TX99_DIAG opt_ath.h ATH_ENABLE_11N opt_ath.h ATH_ENABLE_DFS opt_ath.h ATH_EEPROM_FIRMWARE opt_ath.h ATH_ENABLE_RADIOTAP_VENDOR_EXT opt_ath.h ATH_DEBUG_ALQ opt_ath.h ATH_KTR_INTR_DEBUG opt_ath.h # options for the Atheros hal AH_SUPPORT_AR5416 opt_ah.h # XXX For now, this breaks non-AR9130 chipsets, so only use it # XXX when actually targeting AR9130.
AH_SUPPORT_AR9130 opt_ah.h # This is required for AR933x SoC support AH_SUPPORT_AR9330 opt_ah.h AH_SUPPORT_AR9340 opt_ah.h AH_SUPPORT_QCA9550 opt_ah.h AH_DEBUG opt_ah.h AH_ASSERT opt_ah.h AH_DEBUG_ALQ opt_ah.h AH_REGOPS_FUNC opt_ah.h AH_WRITE_REGDOMAIN opt_ah.h AH_DEBUG_COUNTRY opt_ah.h AH_WRITE_EEPROM opt_ah.h AH_PRIVATE_DIAG opt_ah.h AH_NEED_DESC_SWAP opt_ah.h AH_USE_INIPDGAIN opt_ah.h AH_MAXCHAN opt_ah.h AH_RXCFG_SDMAMW_4BYTES opt_ah.h AH_INTERRUPT_DEBUGGING opt_ah.h # AR5416 and later interrupt mitigation # XXX do not use this for AR9130 AH_AR5416_INTERRUPT_MITIGATION opt_ah.h # options for the Broadcom BCM43xx driver (bwi) BWI_DEBUG opt_bwi.h BWI_DEBUG_VERBOSE opt_bwi.h # options for the Marvell 8335 wireless driver MALO_DEBUG opt_malo.h MALO_TXBUF opt_malo.h MALO_RXBUF opt_malo.h # options for the Marvell wireless driver MWL_DEBUG opt_mwl.h MWL_TXBUF opt_mwl.h MWL_RXBUF opt_mwl.h MWL_DIAGAPI opt_mwl.h MWL_AGGR_SIZE opt_mwl.h MWL_TX_NODROP opt_mwl.h # Options for the Intel 802.11ac wireless driver IWM_DEBUG opt_iwm.h # Options for the Intel 802.11n wireless driver IWN_DEBUG opt_iwn.h # Options for the Intel 3945ABG wireless driver WPI_DEBUG opt_wpi.h # dcons options DCONS_BUF_SIZE opt_dcons.h DCONS_POLL_HZ opt_dcons.h DCONS_FORCE_CONSOLE opt_dcons.h DCONS_FORCE_GDB opt_dcons.h # HWPMC options HWPMC_DEBUG opt_global.h HWPMC_HOOKS HWPMC_MIPS_BACKTRACE opt_hwpmc_hooks.h # XBOX options for FreeBSD/i386, but some files are MI XBOX opt_xbox.h # Interrupt filtering INTR_FILTER # 802.11 support layer IEEE80211_DEBUG opt_wlan.h IEEE80211_DEBUG_REFCNT opt_wlan.h IEEE80211_AMPDU_AGE opt_wlan.h IEEE80211_SUPPORT_MESH opt_wlan.h IEEE80211_SUPPORT_SUPERG opt_wlan.h IEEE80211_SUPPORT_TDMA opt_wlan.h IEEE80211_ALQ opt_wlan.h IEEE80211_DFS_DEBUG opt_wlan.h # 802.11 TDMA support TDMA_SLOTLEN_DEFAULT opt_tdma.h TDMA_SLOTCNT_DEFAULT opt_tdma.h TDMA_BINTVAL_DEFAULT opt_tdma.h TDMA_TXRATE_11B_DEFAULT opt_tdma.h TDMA_TXRATE_11G_DEFAULT opt_tdma.h TDMA_TXRATE_11A_DEFAULT opt_tdma.h TDMA_TXRATE_TURBO_DEFAULT opt_tdma.h TDMA_TXRATE_HALF_DEFAULT opt_tdma.h TDMA_TXRATE_QUARTER_DEFAULT opt_tdma.h TDMA_TXRATE_11NA_DEFAULT opt_tdma.h TDMA_TXRATE_11NG_DEFAULT opt_tdma.h # VideoMode PICKMODE_DEBUG opt_videomode.h # Network stack virtualization options VIMAGE opt_global.h VNET_DEBUG opt_global.h # Common Flash Interface (CFI) options CFI_SUPPORT_STRATAFLASH opt_cfi.h CFI_ARMEDANDDANGEROUS opt_cfi.h # Sound options SND_DEBUG opt_snd.h SND_DIAGNOSTIC opt_snd.h SND_FEEDER_MULTIFORMAT opt_snd.h SND_FEEDER_FULL_MULTIFORMAT opt_snd.h SND_FEEDER_RATE_HP opt_snd.h SND_PCM_64 opt_snd.h SND_OLDSTEREO opt_snd.h X86BIOS # Flattened device tree options FDT opt_platform.h FDT_DTB_STATIC opt_platform.h # OFED Infiniband stack OFED opt_ofed.h OFED_DEBUG_INIT opt_ofed.h SDP opt_ofed.h SDP_DEBUG opt_ofed.h IPOIB opt_ofed.h IPOIB_DEBUG opt_ofed.h IPOIB_CM opt_ofed.h # Resource Accounting RACCT opt_global.h RACCT_DEFAULT_TO_DISABLED opt_global.h # Resource Limits RCTL opt_global.h # Random number generator(s) # Which CSPRNG hash we get. # If Yarrow is not chosen, Fortuna is selected. RANDOM_YARROW opt_global.h # With this, no entropy processor is loaded, but the entropy # harvesting infrastructure is present. This means an entropy # processor may be loaded as a module. RANDOM_LOADABLE opt_global.h # This turns on high-rate and potentially expensive harvesting in # the uma slab allocator. 
RANDOM_ENABLE_UMA opt_global.h # Intel em(4) driver EM_MULTIQUEUE opt_em.h Index: projects/powernv/conf/options.powerpc =================================================================== --- projects/powernv/conf/options.powerpc (revision 291050) +++ projects/powernv/conf/options.powerpc (revision 291051) @@ -1,36 +1,37 @@ # $FreeBSD$ # Options specific to the powerpc platform kernels AIM opt_global.h BOOKE opt_global.h BOOKE_E500 opt_global.h BOOKE_PPC4XX opt_global.h CELL POWERPC POWERPC64 FPU_EMU COMPAT_FREEBSD32 opt_compat.h GFB_DEBUG opt_gfb.h GFB_NO_FONT_LOADING opt_gfb.h GFB_NO_MODE_CHANGE opt_gfb.h MPC85XX opt_platform.h POWERMAC opt_platform.h PS3 opt_platform.h +QORIQ_DPAA opt_platform.h MAMBO POWERNV PSERIES PSIM SC_OFWFB opt_ofwfb.h OFWCONS_POLL_HZ opt_ofw.h # AGP debugging support AGP_DEBUG opt_agp.h MIKROTIK Index: projects/powernv/conf =================================================================== --- projects/powernv/conf (revision 291050) +++ projects/powernv/conf (revision 291051) Property changes on: projects/powernv/conf ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/conf:r290991-291050 Index: projects/powernv/dev/isp/isp.c =================================================================== --- projects/powernv/dev/isp/isp.c (revision 291050) +++ projects/powernv/dev/isp/isp.c (revision 291051) @@ -1,8553 +1,8553 @@ /*- * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Machine and OS Independent (well, as best as possible) * code for the Qlogic ISP SCSI and FC-SCSI adapters. */ /* * Inspiration and ideas about this driver are from Erik Moe's Linux driver * (qlogicisp.c) and Dave Miller's SBus version of same (qlogicisp.c). Some * ideas dredged from the Solaris driver. */ /* * Include header file appropriate for platform we're building on. 
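A standalone sketch of that compile-time platform dispatch; the macro and string values here are illustrative, not the driver's real glue headers:

#include <stdio.h>

/*
 * Exactly one branch survives preprocessing, so the rest of the file
 * can use ISP_PLATFORM without further #ifdefs.
 */
#if defined(__FreeBSD__)
#define ISP_PLATFORM "freebsd"
#elif defined(__linux__)
#define ISP_PLATFORM "linux"
#else
#define ISP_PLATFORM "generic"
#endif

int
main(void)
{
	printf("built for: %s\n", ISP_PLATFORM);
	return (0);
}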
*/ #ifdef __NetBSD__ #include __KERNEL_RCSID(0, "$NetBSD$"); #include #endif #ifdef __FreeBSD__ #include __FBSDID("$FreeBSD$"); #include #endif #ifdef __OpenBSD__ #include #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef __svr4__ #include "isp_solaris.h" #endif /* * General defines */ #define MBOX_DELAY_COUNT 1000000 / 100 -#define ISP_MARK_PORTDB(a, b, c) \ - do { \ - isp_prt(isp, ISP_LOG_SANCFG, \ - "Chan %d ISP_MARK_PORTDB@LINE %d", (b), __LINE__); \ - isp_mark_portdb((a), (b), (c)); \ - } while (0) /* * Local static data */ static const char fconf[] = "Chan %d PortDB[%d] changed:\n current =(0x%x@0x%06x 0x%08x%08x 0x%08x%08x)\n database=(0x%x@0x%06x 0x%08x%08x 0x%08x%08x)"; static const char notresp[] = "Not RESPONSE in RESPONSE Queue (type 0x%x) @ idx %d (next %d) nlooked %d"; -static const char topology[] = "Chan %d WWPN 0x%08x%08x PortID 0x%06x handle 0x%x, Connection '%s'"; +static const char topology[] = "Chan %d WWPN 0x%08x%08x PortID 0x%06x LoopID 0x%x Connection '%s'"; static const char bun[] = "bad underrun (count %d, resid %d, status %s)"; static const char lipd[] = "Chan %d LIP destroyed %d active commands"; static const char sacq[] = "unable to acquire scratch area"; static const uint8_t alpa_map[] = { 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6, 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca, 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5, 0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9, 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97, 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79, 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56, 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17, 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01, 0x00 }; /* * Local function prototypes. 
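Just above the prototypes, the alpa_map table orders the valid arbitrated-loop physical addresses by loop ID, highest priority first. A minimal sketch of both directions of that lookup, against a deliberately truncated copy of the table:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical excerpt of an AL_PA priority table such as alpa_map above. */
static const uint8_t alpa_tab[] = { 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0 };

/* Loop ID to AL_PA: a direct index. */
static int
loopid_to_alpa(unsigned loopid)
{
	if (loopid >= sizeof (alpa_tab))
		return (-1);
	return (alpa_tab[loopid]);
}

/* AL_PA to loop ID: a linear search, since the map isn't invertible by math. */
static int
alpa_to_loopid(uint8_t alpa)
{
	unsigned i;

	for (i = 0; i < sizeof (alpa_tab); i++) {
		if (alpa_tab[i] == alpa)
			return ((int) i);
	}
	return (-1);
}

int
main(void)
{
	printf("loop 2 -> AL_PA 0x%x\n", loopid_to_alpa(2));
	printf("AL_PA 0xe1 -> loop %d\n", alpa_to_loopid(0xe1));
	return (0);
}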
*/ static int isp_parse_async(ispsoftc_t *, uint16_t); static int isp_parse_async_fc(ispsoftc_t *, uint16_t); static int isp_handle_other_response(ispsoftc_t *, int, isphdr_t *, uint32_t *); static void isp_parse_status(ispsoftc_t *, ispstatusreq_t *, XS_T *, long *); static void isp_parse_status_24xx(ispsoftc_t *, isp24xx_statusreq_t *, XS_T *, long *); static void isp_fastpost_complete(ispsoftc_t *, uint32_t); static int isp_mbox_continue(ispsoftc_t *); static void isp_scsi_init(ispsoftc_t *); static void isp_scsi_channel_init(ispsoftc_t *, int); static void isp_fibre_init(ispsoftc_t *); static void isp_fibre_init_2400(ispsoftc_t *); static void isp_mark_portdb(ispsoftc_t *, int, int); static int isp_plogx(ispsoftc_t *, int, uint16_t, uint32_t, int, int); static int isp_port_login(ispsoftc_t *, uint16_t, uint32_t); static int isp_port_logout(ispsoftc_t *, uint16_t, uint32_t); static int isp_getpdb(ispsoftc_t *, int, uint16_t, isp_pdb_t *, int); static int isp_gethandles(ispsoftc_t *, int, uint16_t *, int *, int, int); static void isp_dump_chip_portdb(ispsoftc_t *, int, int); static uint64_t isp_get_wwn(ispsoftc_t *, int, int, int); static int isp_fclink_test(ispsoftc_t *, int, int); static int isp_pdb_sync(ispsoftc_t *, int); static int isp_scan_loop(ispsoftc_t *, int); static int isp_gid_ft_sns(ispsoftc_t *, int); static int isp_gid_ft_ct_passthru(ispsoftc_t *, int); static int isp_scan_fabric(ispsoftc_t *, int); static int isp_login_device(ispsoftc_t *, int, uint32_t, isp_pdb_t *, uint16_t *); static int isp_register_fc4_type(ispsoftc_t *, int); static int isp_register_fc4_type_24xx(ispsoftc_t *, int); +static int isp_register_fc4_features_24xx(ispsoftc_t *, int); static uint16_t isp_next_handle(ispsoftc_t *, uint16_t *); -static void isp_fw_state(ispsoftc_t *, int); +static int isp_fw_state(ispsoftc_t *, int); static void isp_mboxcmd_qnw(ispsoftc_t *, mbreg_t *, int); static void isp_mboxcmd(ispsoftc_t *, mbreg_t *); static void isp_spi_update(ispsoftc_t *, int); static void isp_setdfltsdparm(ispsoftc_t *); static void isp_setdfltfcparm(ispsoftc_t *, int); static int isp_read_nvram(ispsoftc_t *, int); static int isp_read_nvram_2400(ispsoftc_t *, uint8_t *); static void isp_rdnvram_word(ispsoftc_t *, int, uint16_t *); static void isp_rd_2400_nvram(ispsoftc_t *, uint32_t, uint32_t *); static void isp_parse_nvram_1020(ispsoftc_t *, uint8_t *); static void isp_parse_nvram_1080(ispsoftc_t *, int, uint8_t *); static void isp_parse_nvram_12160(ispsoftc_t *, int, uint8_t *); static void isp_parse_nvram_2100(ispsoftc_t *, uint8_t *); static void isp_parse_nvram_2400(ispsoftc_t *, uint8_t *); +static void +isp_change_fw_state(ispsoftc_t *isp, int chan, int state) +{ + fcparam *fcp = FCPARAM(isp, chan); + + if (fcp->isp_fwstate == state) + return; + isp_prt(isp, ISP_LOGCONFIG|ISP_LOG_SANCFG, + "Chan %d Firmware state <%s->%s>", chan, + isp_fc_fw_statename(fcp->isp_fwstate), isp_fc_fw_statename(state)); + fcp->isp_fwstate = state; +} + /* * Reset Hardware. * * Hit the chip over the head, download new f/w if available and set it running. * * Locking done elsewhere. */ void isp_reset(ispsoftc_t *isp, int do_load_defaults) { mbreg_t mbs; char *buf; uint64_t fwt; uint32_t code_org, val; int loops, i, dodnld = 1; const char *btype = "????"; static const char dcrc[] = "Downloaded RISC Code Checksum Failure"; isp->isp_state = ISP_NILSTATE; if (isp->isp_dead) { isp_shutdown(isp); ISP_DISABLE_INTS(isp); return; } /* * Basic types (SCSI, FibreChannel and PCI or SBus) * have been set in the MD code. 
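The isp_change_fw_state() helper added above is a compact idiom worth noting: update a cached state and emit exactly one log line per real transition, staying silent on no-op updates. The same shape in a standalone sketch, with invented state names:

#include <stdio.h>

static const char *statename[] = { "CONFIG_WAIT", "WAIT_LINK", "READY" };

struct chan { int fwstate; };

/* Update the state, logging once per real transition only. */
static void
change_fw_state(struct chan *c, int state)
{
	if (c->fwstate == state)
		return;			/* no-op updates stay silent */
	printf("Firmware state <%s->%s>\n",
	    statename[c->fwstate], statename[state]);
	c->fwstate = state;
}

int
main(void)
{
	struct chan c = { 0 };

	change_fw_state(&c, 0);		/* silent: already there */
	change_fw_state(&c, 2);		/* logs one transition */
	return (0);
}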
We figure out more * here. Possibly more refined types based upon PCI * identification. Chip revision has been gathered. * * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and do other settings for the 2100. */ ISP_DISABLE_INTS(isp); /* * Pick an initial maxcmds value which will be used * to allocate xflist pointer space. It may be changed * later by the firmware. */ if (IS_24XX(isp)) { isp->isp_maxcmds = 4096; } else if (IS_2322(isp)) { isp->isp_maxcmds = 2048; } else if (IS_23XX(isp) || IS_2200(isp)) { isp->isp_maxcmds = 1024; } else { isp->isp_maxcmds = 512; } /* * Set up DMA for the request and response queues. * * We do this now so we can use the request queue * for dma to load firmware from. */ if (ISP_MBOXDMASETUP(isp) != 0) { isp_prt(isp, ISP_LOGERR, "Cannot setup DMA"); return; } /* * Set up default request/response queue in-pointer/out-pointer * register indices. */ if (IS_24XX(isp)) { isp->isp_rqstinrp = BIU2400_REQINP; isp->isp_rqstoutrp = BIU2400_REQOUTP; isp->isp_respinrp = BIU2400_RSPINP; isp->isp_respoutrp = BIU2400_RSPOUTP; } else if (IS_23XX(isp)) { isp->isp_rqstinrp = BIU_REQINP; isp->isp_rqstoutrp = BIU_REQOUTP; isp->isp_respinrp = BIU_RSPINP; isp->isp_respoutrp = BIU_RSPOUTP; } else { isp->isp_rqstinrp = INMAILBOX4; isp->isp_rqstoutrp = OUTMAILBOX4; isp->isp_respinrp = OUTMAILBOX5; isp->isp_respoutrp = INMAILBOX5; } /* * Put the board into PAUSE mode (so we can read the SXP registers * or write FPM/FBM registers). */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_HOST_INT); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_PAUSE); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); } if (IS_FC(isp)) { switch (isp->isp_type) { case ISP_HA_FC_2100: btype = "2100"; break; case ISP_HA_FC_2200: btype = "2200"; break; case ISP_HA_FC_2300: btype = "2300"; break; case ISP_HA_FC_2312: btype = "2312"; break; case ISP_HA_FC_2322: btype = "2322"; break; case ISP_HA_FC_2400: btype = "2422"; break; case ISP_HA_FC_2500: btype = "2532"; break; default: break; } if (!IS_24XX(isp)) { /* * While we're paused, reset the FPM module and FBM * fifos. */ ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } else if (IS_1240(isp)) { sdparam *sdp; btype = "1240"; isp->isp_clock = 60; sdp = SDPARAM(isp, 0); sdp->isp_ultramode = 1; sdp = SDPARAM(isp, 1); sdp->isp_ultramode = 1; /* * XXX: Should probably do some bus sensing. 
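Earlier in isp_reset(), the request/response in/out-pointer register indices are resolved once per chip family and cached in the softc, so the I/O paths never re-test the chip type. A sketch of that select-once pattern, with made-up offsets standing in for the real BIU_* and mailbox registers:

#include <stdio.h>

/* Illustrative register offsets; the real BIU_* values differ. */
struct qregs { int reqin, reqout, rspin, rspout; };

static const struct qregs qregs_24xx = { 0x1c, 0x20, 0x24, 0x28 };
static const struct qregs qregs_23xx = { 0x10, 0x12, 0x14, 0x16 };
static const struct qregs qregs_mbox = { 0x88, 0x8a, 0x8c, 0x8e };

/* Resolve the family once at reset time; callers just dereference. */
static const struct qregs *
pick_qregs(int is24xx, int is23xx)
{
	if (is24xx)
		return (&qregs_24xx);
	if (is23xx)
		return (&qregs_23xx);
	return (&qregs_mbox);	/* older parts overlay mailbox registers */
}

int
main(void)
{
	const struct qregs *q = pick_qregs(0, 1);

	printf("request-in pointer lives at offset 0x%x\n", q->reqin);
	return (0);
}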
*/ } else if (IS_ULTRA3(isp)) { sdparam *sdp = isp->isp_param; isp->isp_clock = 100; if (IS_10160(isp)) btype = "10160"; else if (IS_12160(isp)) btype = "12160"; else btype = ""; sdp->isp_lvdmode = 1; if (IS_DUALBUS(isp)) { sdp++; sdp->isp_lvdmode = 1; } } else if (IS_ULTRA2(isp)) { static const char m[] = "bus %d is in %s Mode"; uint16_t l; sdparam *sdp = SDPARAM(isp, 0); isp->isp_clock = 100; if (IS_1280(isp)) btype = "1280"; else if (IS_1080(isp)) btype = "1080"; else btype = ""; l = ISP_READ(isp, SXP_PINS_DIFF) & ISP1080_MODE_MASK; switch (l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 0, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 0, l); break; } if (IS_DUALBUS(isp)) { sdp = SDPARAM(isp, 1); l = ISP_READ(isp, SXP_PINS_DIFF|SXP_BANK1_SELECT); l &= ISP1080_MODE_MASK; switch (l) { case ISP1080_LVD_MODE: sdp->isp_lvdmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "LVD"); break; case ISP1080_HVD_MODE: sdp->isp_diffmode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Differential"); break; case ISP1080_SE_MODE: sdp->isp_ultramode = 1; isp_prt(isp, ISP_LOGCONFIG, m, 1, "Single-Ended"); break; default: isp_prt(isp, ISP_LOGERR, "unknown mode on bus %d (0x%x)", 1, l); break; } } } else { sdparam *sdp = SDPARAM(isp, 0); i = ISP_READ(isp, BIU_CONF0) & BIU_CONF0_HW_MASK; switch (i) { default: isp_prt(isp, ISP_LOGALL, "Unknown Chip Type 0x%x", i); /* FALLTHROUGH */ case 1: btype = "1020"; isp->isp_type = ISP_HA_SCSI_1020; isp->isp_clock = 40; break; case 2: /* * Some 1020A chips are Ultra Capable, but don't * run the clock rate up for that unless told to * do so by the Ultra Capable bits being set. */ btype = "1020A"; isp->isp_type = ISP_HA_SCSI_1020A; isp->isp_clock = 40; break; case 3: btype = "1040"; isp->isp_type = ISP_HA_SCSI_1040; isp->isp_clock = 60; break; case 4: btype = "1040A"; isp->isp_type = ISP_HA_SCSI_1040A; isp->isp_clock = 60; break; case 5: btype = "1040B"; isp->isp_type = ISP_HA_SCSI_1040B; isp->isp_clock = 60; break; case 6: btype = "1040C"; isp->isp_type = ISP_HA_SCSI_1040C; isp->isp_clock = 60; break; } /* * Now, while we're at it, gather info about ultra * and/or differential mode. */ if (ISP_READ(isp, SXP_PINS_DIFF) & SXP_PINS_DIFF_MODE) { isp_prt(isp, ISP_LOGCONFIG, "Differential Mode"); sdp->isp_diffmode = 1; } else { sdp->isp_diffmode = 0; } i = ISP_READ(isp, RISC_PSR); if (isp->isp_bustype == ISP_BT_SBUS) { i &= RISC_PSR_SBUS_ULTRA; } else { i &= RISC_PSR_PCI_ULTRA; } if (i != 0) { isp_prt(isp, ISP_LOGCONFIG, "Ultra Mode Capable"); sdp->isp_ultramode = 1; /* * If we're in Ultra Mode, we have to be 60MHz clock- * even for the SBus version. */ isp->isp_clock = 60; } else { sdp->isp_ultramode = 0; /* * Clock is known. Gronk. */ } /* * Machine dependent clock (if set) overrides * our generic determinations. */ if (isp->isp_mdvec->dv_clock) { if (isp->isp_mdvec->dv_clock < isp->isp_clock) { isp->isp_clock = isp->isp_mdvec->dv_clock; } } } /* * Clear instrumentation */ isp->isp_intcnt = isp->isp_intbogus = 0; /* * Do MD specific pre initialization */ ISP_RESET0(isp); /* * Hit the chip over the head with hammer, * and give it a chance to recover. */ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_ICR, BIU_ICR_SOFT_RESET); /* * A slight delay... */ ISP_DELAY(100); /* * Clear data && control DMA engines. 
*/ ISP_WRITE(isp, CDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); ISP_WRITE(isp, DDMA_CONTROL, DMA_CNTRL_CLEAR_CHAN | DMA_CNTRL_RESET_INT); } else if (IS_24XX(isp)) { /* * Stop DMA and wait for it to stop. */ ISP_WRITE(isp, BIU2400_CSR, BIU2400_DMA_STOP|(3 << 4)); for (val = loops = 0; loops < 30000; loops++) { ISP_DELAY(10); val = ISP_READ(isp, BIU2400_CSR); if ((val & BIU2400_DMA_ACTIVE) == 0) { break; } } if (val & BIU2400_DMA_ACTIVE) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "DMA Failed to Stop on Reset"); return; } /* * Hold it in SOFT_RESET and STOP state for 100us. */ ISP_WRITE(isp, BIU2400_CSR, BIU2400_SOFT_RESET|BIU2400_DMA_STOP|(3 << 4)); ISP_DELAY(100); for (loops = 0; loops < 10000; loops++) { ISP_DELAY(5); val = ISP_READ(isp, OUTMAILBOX0); } for (val = loops = 0; loops < 500000; loops ++) { val = ISP_READ(isp, BIU2400_CSR); if ((val & BIU2400_SOFT_RESET) == 0) { break; } } if (val & BIU2400_SOFT_RESET) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "Failed to come out of reset"); return; } } else { ISP_WRITE(isp, BIU2100_CSR, BIU2100_SOFT_RESET); /* * A slight delay... */ ISP_DELAY(100); /* * Clear data && control DMA engines. */ ISP_WRITE(isp, CDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); ISP_WRITE(isp, TDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); ISP_WRITE(isp, RDMA2100_CONTROL, DMA_CNTRL2100_CLEAR_CHAN | DMA_CNTRL2100_RESET_INT); } /* * Wait for ISP to be ready to go... */ loops = MBOX_DELAY_COUNT; for (;;) { if (IS_SCSI(isp)) { if (!(ISP_READ(isp, BIU_ICR) & BIU_ICR_SOFT_RESET)) { break; } } else if (IS_24XX(isp)) { if (ISP_READ(isp, OUTMAILBOX0) == 0) { break; } } else { if (!(ISP_READ(isp, BIU2100_CSR) & BIU2100_SOFT_RESET)) break; } ISP_DELAY(100); if (--loops < 0) { ISP_DUMPREGS(isp, "chip reset timed out"); ISP_RESET0(isp); return; } } /* * After we've fired this chip up, zero out the conf1 register * for SCSI adapters and other settings for the 2100. */ if (IS_SCSI(isp)) { ISP_WRITE(isp, BIU_CONF1, 0); } else if (!IS_24XX(isp)) { ISP_WRITE(isp, BIU2100_CSR, 0); } /* * Reset RISC Processor */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RELEASE); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RESET); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); ISP_DELAY(100); ISP_WRITE(isp, BIU_SEMA, 0); } /* * Post-RISC Reset stuff. */ if (IS_24XX(isp)) { for (val = loops = 0; loops < 5000000; loops++) { ISP_DELAY(5); val = ISP_READ(isp, OUTMAILBOX0); if (val == 0) { break; } } if (val != 0) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "reset didn't clear"); return; } } else if (IS_SCSI(isp)) { uint16_t tmp = isp->isp_mdvec->dv_conf1; /* * Busted FIFO. Turn off all but burst enables. 
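The 24XX branch above repeats one shape several times: issue a command, then poll a status register until a bit drops or a bounded loop count expires. That wait loop in isolation, with an invented register stub standing in for ISP_READ() and ISP_DELAY():

#include <stdint.h>
#include <stdio.h>

#define DMA_ACTIVE	(1u << 4)	/* illustrative status bit */

static uint32_t fake_csr = DMA_ACTIVE;

/* Stand-in for a device register read; the bit decays after a few polls. */
static uint32_t
read_csr(void)
{
	static int polls;

	if (++polls > 3)
		fake_csr &= ~DMA_ACTIVE;
	return (fake_csr);
}

/* Poll until `bits` clear or `loops` iterations pass; 0 on success. */
static int
wait_bits_clear(uint32_t bits, int loops)
{
	while (loops-- > 0) {
		if ((read_csr() & bits) == 0)
			return (0);
		/* a real driver would delay here between reads */
	}
	return (-1);		/* timed out; the caller resets and bails */
}

int
main(void)
{
	printf("wait: %s\n",
	    wait_bits_clear(DMA_ACTIVE, 30000) ? "timeout" : "ok");
	return (0);
}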
*/ if (isp->isp_type == ISP_HA_SCSI_1040A) { tmp &= BIU_BURST_ENABLE; } ISP_SETBITS(isp, BIU_CONF1, tmp); if (tmp & BIU_BURST_ENABLE) { ISP_SETBITS(isp, CDMA_CONF, DMA_ENABLE_BURST); ISP_SETBITS(isp, DDMA_CONF, DMA_ENABLE_BURST); } if (SDPARAM(isp, 0)->isp_ptisp) { if (SDPARAM(isp, 0)->isp_ultramode) { while (ISP_READ(isp, RISC_MTR) != 0x1313) { ISP_WRITE(isp, RISC_MTR, 0x1313); ISP_WRITE(isp, HCCR, HCCR_CMD_STEP); } } else { ISP_WRITE(isp, RISC_MTR, 0x1212); } /* * PTI specific register */ ISP_WRITE(isp, RISC_EMB, DUAL_BANK); } else { ISP_WRITE(isp, RISC_MTR, 0x1212); } ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } else { ISP_WRITE(isp, RISC_MTR2100, 0x1212); if (IS_2200(isp) || IS_23XX(isp)) { ISP_WRITE(isp, HCCR, HCCR_2X00_DISABLE_PARITY_PAUSE); } ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); } ISP_WRITE(isp, isp->isp_rqstinrp, 0); ISP_WRITE(isp, isp->isp_rqstoutrp, 0); ISP_WRITE(isp, isp->isp_respinrp, 0); ISP_WRITE(isp, isp->isp_respoutrp, 0); if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_PRI_REQINP, 0); ISP_WRITE(isp, BIU2400_PRI_REQOUTP, 0); ISP_WRITE(isp, BIU2400_ATIO_RSPINP, 0); ISP_WRITE(isp, BIU2400_ATIO_RSPOUTP, 0); } /* * Do MD specific post initialization */ ISP_RESET1(isp); /* * Wait for everything to finish firing up. * * Avoid doing this on early 2312s because you can generate a PCI * parity error (chip breakage). */ if (IS_2312(isp) && isp->isp_revision < 2) { ISP_DELAY(100); } else { loops = MBOX_DELAY_COUNT; while (ISP_READ(isp, OUTMAILBOX0) == MBOX_BUSY) { ISP_DELAY(100); if (--loops < 0) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "MBOX_BUSY never cleared on reset"); return; } } } /* * Up until this point we've done everything by just reading or * setting registers. From this point on we rely on at least *some* * kind of firmware running in the card. */ /* * Do some sanity checking by running a NOP command. * If it succeeds, the ROM firmware is now running. */ MBSINIT(&mbs, MBOX_NO_OP, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "NOP command failed (%x)", mbs.param[0]); ISP_RESET0(isp); return; } /* * Do some operational tests */ if (IS_SCSI(isp) || IS_24XX(isp)) { static const uint16_t patterns[MAX_MAILBOX] = { 0x0000, 0xdead, 0xbeef, 0xffff, 0xa5a5, 0x5a5a, 0x7f7f, 0x7ff7, 0x3421, 0xabcd, 0xdcba, 0xfeef, 0xbead, 0xdebe, 0x2222, 0x3333, 0x5555, 0x6666, 0x7777, 0xaaaa, 0xffff, 0xdddd, 0x9999, 0x1fbc, 0x6666, 0x6677, 0x1122, 0x33ff, 0x0000, 0x0001, 0x1000, 0x1010, }; int nmbox = ISP_NMBOX(isp); if (IS_SCSI(isp)) nmbox = 6; MBSINIT(&mbs, MBOX_MAILBOX_REG_TEST, MBLOGALL, 0); for (i = 1; i < nmbox; i++) { mbs.param[i] = patterns[i]; } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } for (i = 1; i < nmbox; i++) { if (mbs.param[i] != patterns[i]) { ISP_RESET0(isp); isp_prt(isp, ISP_LOGERR, "Register Test Failed at Register %d: should have 0x%04x but got 0x%04x", i, patterns[i], mbs.param[i]); return; } } } /* * Download new Firmware, unless requested not to do so. * This is made slightly trickier in some cases where the * firmware of the ROM revision is newer than the revision * compiled into the driver. So, where we used to compare * versions of our f/w and the ROM f/w, now we just see * whether we have f/w at all and whether a config flag * has disabled our download. 
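Just above, the driver validates the mailbox interface by writing known patterns through MBOX_MAILBOX_REG_TEST and insisting every one echoes back intact. A self-contained rendition of that echo test against a simulated device:

#include <stdint.h>
#include <stdio.h>

#define NMBOX	6

static uint16_t mbox[NMBOX];		/* simulated mailbox registers */

/* A well-behaved device echoes what was written; corrupt mbox[] to see failure. */
static void
device_echo(void)
{
}

static int
mbox_selftest(const uint16_t *patterns)
{
	int i;

	for (i = 1; i < NMBOX; i++)
		mbox[i] = patterns[i];	/* load the outgoing registers */
	device_echo();			/* the real test is a mailbox command */
	for (i = 1; i < NMBOX; i++) {
		if (mbox[i] != patterns[i]) {
			printf("register %d: want 0x%04x got 0x%04x\n",
			    i, patterns[i], mbox[i]);
			return (-1);
		}
	}
	return (0);
}

int
main(void)
{
	static const uint16_t patterns[NMBOX] =
	    { 0x0000, 0xdead, 0xbeef, 0xffff, 0xa5a5, 0x5a5a };

	printf("selftest: %s\n", mbox_selftest(patterns) ? "FAILED" : "ok");
	return (0);
}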
*/ if ((isp->isp_mdvec->dv_ispfw == NULL) || (isp->isp_confopts & ISP_CFG_NORELOAD)) { dodnld = 0; } if (IS_24XX(isp)) { code_org = ISP_CODE_ORG_2400; } else if (IS_23XX(isp)) { code_org = ISP_CODE_ORG_2300; } else { code_org = ISP_CODE_ORG; } if (dodnld && IS_24XX(isp)) { const uint32_t *ptr = isp->isp_mdvec->dv_ispfw; int wordload; /* * Keep loading until we run out of f/w. */ code_org = ptr[2]; /* 1st load address is our start addr */ wordload = 0; for (;;) { uint32_t la, wi, wl; isp_prt(isp, ISP_LOGDEBUG0, "load 0x%x words of code at load address 0x%x", ptr[3], ptr[2]); wi = 0; la = ptr[2]; wl = ptr[3]; while (wi < ptr[3]) { uint32_t *cp; uint32_t nw; nw = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) >> 2; if (nw > wl) { nw = wl; } cp = isp->isp_rquest; for (i = 0; i < nw; i++) { ISP_IOXPUT_32(isp, ptr[wi++], &cp[i]); wl--; } MEMORYBARRIER(isp, SYNC_REQUEST, 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), -1); again: MBSINIT(&mbs, 0, MBLOGALL, 0); if (la < 0x10000 && nw < 0x10000) { mbs.param[0] = MBOX_LOAD_RISC_RAM_2100; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); isp_prt(isp, ISP_LOGDEBUG0, "LOAD RISC RAM 2100 %u words at load address 0x%x", nw, la); } else if (wordload) { union { const uint32_t *cp; uint32_t *np; } ucd; ucd.cp = (const uint32_t *)cp; mbs.param[0] = MBOX_WRITE_RAM_WORD_EXTENDED; mbs.param[1] = la; mbs.param[2] = (*ucd.np); mbs.param[3] = (*ucd.np) >> 16; mbs.param[8] = la >> 16; isp->isp_mbxwrk0 = nw - 1; isp->isp_mbxworkp = ucd.np+1; isp->isp_mbxwrk1 = (la + 1); isp->isp_mbxwrk8 = (la + 1) >> 16; isp_prt(isp, ISP_LOGDEBUG0, "WRITE RAM WORD EXTENDED %u words at load address 0x%x", nw, la); } else { mbs.param[0] = MBOX_LOAD_RISC_RAM; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw >> 16; mbs.param[5] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); mbs.param[8] = la >> 16; isp_prt(isp, ISP_LOGDEBUG0, "LOAD RISC RAM %u words at load address 0x%x", nw, la); } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { if (mbs.param[0] == MBOX_HOST_INTERFACE_ERROR) { isp_prt(isp, ISP_LOGERR, "switching to word load"); wordload = 1; goto again; } isp_prt(isp, ISP_LOGERR, "F/W Risc Ram Load Failed"); ISP_RESET0(isp); return; } la += nw; } if (ptr[1] == 0) { break; } ptr += ptr[3]; } isp->isp_loaded_fw = 1; } else if (dodnld && IS_23XX(isp)) { const uint16_t *ptr = isp->isp_mdvec->dv_ispfw; uint16_t wi, wl, segno; uint32_t la; la = code_org; segno = 0; for (;;) { uint32_t nxtaddr; isp_prt(isp, ISP_LOGDEBUG0, "load 0x%x words of code at load address 0x%x", ptr[3], la); wi = 0; wl = ptr[3]; while (wi < ptr[3]) { uint16_t *cp; uint16_t nw; nw = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) >> 1; if (nw > wl) { nw = wl; } if (nw > (1 << 15)) { nw = 1 << 15; } cp = isp->isp_rquest; for (i = 0; i < nw; i++) { ISP_IOXPUT_16(isp, ptr[wi++], &cp[i]); wl--; } MEMORYBARRIER(isp, SYNC_REQUEST, 0, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), -1); MBSINIT(&mbs, 0, MBLOGALL, 0); if (la < 0x10000) { mbs.param[0] = MBOX_LOAD_RISC_RAM_2100; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); isp_prt(isp, ISP_LOGDEBUG1, "LOAD RISC RAM 2100 %u words at 
load address 0x%x\n", nw, la); } else { mbs.param[0] = MBOX_LOAD_RISC_RAM; mbs.param[1] = la; mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[4] = nw; mbs.param[6] = DMA_WD3(isp->isp_rquest_dma); mbs.param[7] = DMA_WD2(isp->isp_rquest_dma); mbs.param[8] = la >> 16; isp_prt(isp, ISP_LOGDEBUG1, "LOAD RISC RAM %u words at load address 0x%x\n", nw, la); } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "F/W Risc Ram Load Failed"); ISP_RESET0(isp); return; } la += nw; } if (!IS_2322(isp)) { break; } if (++segno == 3) { break; } /* * If we're a 2322, the firmware actually comes in * three chunks. We loaded the first at the code_org * address. The other two chunks, which follow right * after each other in memory here, get loaded at * addresses specfied at offset 0x9..0xB. */ nxtaddr = ptr[3]; ptr = &ptr[nxtaddr]; la = ptr[5] | ((ptr[4] & 0x3f) << 16); } isp->isp_loaded_fw = 1; } else if (dodnld) { union { const uint16_t *cp; uint16_t *np; } ucd; ucd.cp = isp->isp_mdvec->dv_ispfw; isp->isp_mbxworkp = &ucd.np[1]; isp->isp_mbxwrk0 = ucd.np[3] - 1; isp->isp_mbxwrk1 = code_org + 1; MBSINIT(&mbs, MBOX_WRITE_RAM_WORD, MBLOGNONE, 0); mbs.param[1] = code_org; mbs.param[2] = ucd.np[0]; isp_prt(isp, ISP_LOGDEBUG1, "WRITE RAM %u words at load address 0x%x", ucd.np[3], code_org); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "F/W download failed at word %d", isp->isp_mbxwrk1 - code_org); ISP_RESET0(isp); return; } } else { isp->isp_loaded_fw = 0; isp_prt(isp, ISP_LOGDEBUG2, "skipping f/w download"); } /* * If we loaded firmware, verify its checksum */ if (isp->isp_loaded_fw) { MBSINIT(&mbs, MBOX_VERIFY_CHECKSUM, MBLOGNONE, 0); mbs.param[0] = MBOX_VERIFY_CHECKSUM; if (IS_24XX(isp)) { mbs.param[1] = code_org >> 16; mbs.param[2] = code_org; } else { mbs.param[1] = code_org; } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, dcrc); ISP_RESET0(isp); return; } } /* * Now start it rolling. * * If we didn't actually download f/w, * we still need to (re)start it. */ MBSINIT(&mbs, MBOX_EXEC_FIRMWARE, MBLOGALL, 5000000); if (IS_24XX(isp)) { mbs.param[1] = code_org >> 16; mbs.param[2] = code_org; if (isp->isp_loaded_fw) { mbs.param[3] = 0; } else { mbs.param[3] = 1; } if (IS_25XX(isp)) { mbs.ibits |= 0x10; } } else if (IS_2322(isp)) { mbs.param[1] = code_org; if (isp->isp_loaded_fw) { mbs.param[2] = 0; } else { mbs.param[2] = 1; } } else { mbs.param[1] = code_org; } isp_mboxcmd(isp, &mbs); if (IS_2322(isp) || IS_24XX(isp)) { if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } } if (IS_SCSI(isp)) { /* * Set CLOCK RATE, but only if asked to. */ if (isp->isp_clock) { MBSINIT(&mbs, MBOX_SET_CLOCK_RATE, MBLOGALL, 0); mbs.param[1] = isp->isp_clock; isp_mboxcmd(isp, &mbs); /* we will try not to care if this fails */ } } /* * Ask the chip for the current firmware version. * This should prove that the new firmware is working. */ MBSINIT(&mbs, MBOX_ABOUT_FIRMWARE, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } /* * The SBus firmware that we are using apparently does not return * major, minor, micro revisions in the mailbox registers, which * is really, really, annoying. 
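Earlier in this download path, a 2322 image arrives as three chunks: word 3 of each chunk header gives its length in words, and words 4-5 of the next chunk encode where that chunk is to be loaded. A minimal walker for that layout, over a fabricated two-chunk image:

#include <stdint.h>
#include <stdio.h>

static void
walk_chunks(const uint16_t *ptr, int nchunks, uint32_t first_la)
{
	uint32_t la = first_la;
	int segno;

	for (segno = 0; segno < nchunks; segno++) {
		printf("chunk %d: %u words at load address 0x%x\n",
		    segno, (unsigned) ptr[3], la);
		if (segno + 1 == nchunks)
			break;
		ptr += ptr[3];				/* hop over this chunk */
		la = ptr[5] | ((ptr[4] & 0x3f) << 16);	/* next load address */
	}
}

int
main(void)
{
	/* Two tiny fake chunks: header words only, lengths chosen to line up. */
	static const uint16_t img[] = {
		0, 0, 0, 8, 0, 0, 0, 0,			/* chunk 0: 8 words */
		0, 0, 0, 4, 0x0002, 0x1000, 0, 0,	/* chunk 1 @ 0x21000 */
	};

	walk_chunks(img, 2, 0x0800);
	return (0);
}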
*/ if (ISP_SBUS_SUPPORTED && isp->isp_bustype == ISP_BT_SBUS) { if (dodnld) { #ifdef ISP_TARGET_MODE isp->isp_fwrev[0] = 7; isp->isp_fwrev[1] = 55; #else isp->isp_fwrev[0] = 1; isp->isp_fwrev[1] = 37; #endif isp->isp_fwrev[2] = 0; } } else { isp->isp_fwrev[0] = mbs.param[1]; isp->isp_fwrev[1] = mbs.param[2]; isp->isp_fwrev[2] = mbs.param[3]; } if (IS_FC(isp)) { /* * We do not believe firmware attributes for 2100 code less * than 1.17.0, unless it's the firmware we specifically * are loading. * * Note that all 22XX and later f/w is greater than 1.X.0. */ if ((ISP_FW_OLDER_THAN(isp, 1, 17, 1))) { #ifdef USE_SMALLER_2100_FIRMWARE isp->isp_fwattr = ISP_FW_ATTR_SCCLUN; #else isp->isp_fwattr = 0; #endif } else { isp->isp_fwattr = mbs.param[6]; } if (IS_24XX(isp)) { isp->isp_fwattr |= ((uint64_t) mbs.param[15]) << 16; if (isp->isp_fwattr & ISP2400_FW_ATTR_EXTNDED) { isp->isp_fwattr |= (((uint64_t) mbs.param[16]) << 32) | (((uint64_t) mbs.param[17]) << 48); } } } else if (IS_SCSI(isp)) { #ifndef ISP_TARGET_MODE isp->isp_fwattr = ISP_FW_ATTR_TMODE; #else isp->isp_fwattr = 0; #endif } isp_prt(isp, ISP_LOGCONFIG, "Board Type %s, Chip Revision 0x%x, %s F/W Revision %d.%d.%d", btype, isp->isp_revision, dodnld? "loaded" : "resident", isp->isp_fwrev[0], isp->isp_fwrev[1], isp->isp_fwrev[2]); fwt = isp->isp_fwattr; if (IS_24XX(isp)) { buf = FCPARAM(isp, 0)->isp_scratch; ISP_SNPRINTF(buf, ISP_FC_SCRLEN, "Attributes:"); if (fwt & ISP2400_FW_ATTR_CLASS2) { fwt ^=ISP2400_FW_ATTR_CLASS2; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s Class2", buf); } if (fwt & ISP2400_FW_ATTR_IP) { fwt ^=ISP2400_FW_ATTR_IP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s IP", buf); } if (fwt & ISP2400_FW_ATTR_MULTIID) { fwt ^=ISP2400_FW_ATTR_MULTIID; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s MultiID", buf); } if (fwt & ISP2400_FW_ATTR_SB2) { fwt ^=ISP2400_FW_ATTR_SB2; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s SB2", buf); } if (fwt & ISP2400_FW_ATTR_T10CRC) { fwt ^=ISP2400_FW_ATTR_T10CRC; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s T10CRC", buf); } if (fwt & ISP2400_FW_ATTR_VI) { fwt ^=ISP2400_FW_ATTR_VI; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VI", buf); } if (fwt & ISP2400_FW_ATTR_MQ) { fwt ^=ISP2400_FW_ATTR_MQ; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s MQ", buf); } if (fwt & ISP2400_FW_ATTR_MSIX) { fwt ^=ISP2400_FW_ATTR_MSIX; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s MSIX", buf); } if (fwt & ISP2400_FW_ATTR_FCOE) { fwt ^=ISP2400_FW_ATTR_FCOE; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s FCOE", buf); } if (fwt & ISP2400_FW_ATTR_VP0) { fwt ^= ISP2400_FW_ATTR_VP0; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VP0_Decoupling", buf); } if (fwt & ISP2400_FW_ATTR_EXPFW) { fwt ^= ISP2400_FW_ATTR_EXPFW; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s (Experimental)", buf); } if (fwt & ISP2400_FW_ATTR_HOTFW) { fwt ^= ISP2400_FW_ATTR_HOTFW; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s HotFW", buf); } fwt &= ~ISP2400_FW_ATTR_EXTNDED; if (fwt & ISP2400_FW_ATTR_EXTVP) { fwt ^= ISP2400_FW_ATTR_EXTVP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s ExtVP", buf); } if (fwt & ISP2400_FW_ATTR_VN2VN) { fwt ^= ISP2400_FW_ATTR_VN2VN; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VN2VN", buf); } if (fwt & ISP2400_FW_ATTR_EXMOFF) { fwt ^= ISP2400_FW_ATTR_EXMOFF; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s EXMOFF", buf); } if (fwt & ISP2400_FW_ATTR_NPMOFF) { fwt ^= ISP2400_FW_ATTR_NPMOFF; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s NPMOFF", buf); } 
if (fwt & ISP2400_FW_ATTR_DIFCHOP) { fwt ^= ISP2400_FW_ATTR_DIFCHOP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s DIFCHOP", buf); } if (fwt & ISP2400_FW_ATTR_SRIOV) { fwt ^= ISP2400_FW_ATTR_SRIOV; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s SRIOV", buf); } if (fwt & ISP2400_FW_ATTR_ASICTMP) { fwt ^= ISP2400_FW_ATTR_ASICTMP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s ASICTMP", buf); } if (fwt & ISP2400_FW_ATTR_ATIOMQ) { fwt ^= ISP2400_FW_ATTR_ATIOMQ; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s ATIOMQ", buf); } if (fwt) { ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s (unknown 0x%08x%08x)", buf, (uint32_t) (fwt >> 32), (uint32_t) fwt); } isp_prt(isp, ISP_LOGCONFIG, "%s", buf); } else if (IS_FC(isp)) { buf = FCPARAM(isp, 0)->isp_scratch; ISP_SNPRINTF(buf, ISP_FC_SCRLEN, "Attributes:"); if (fwt & ISP_FW_ATTR_TMODE) { fwt ^=ISP_FW_ATTR_TMODE; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s TargetMode", buf); } if (fwt & ISP_FW_ATTR_SCCLUN) { fwt ^=ISP_FW_ATTR_SCCLUN; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s SCC-Lun", buf); } if (fwt & ISP_FW_ATTR_FABRIC) { fwt ^=ISP_FW_ATTR_FABRIC; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s Fabric", buf); } if (fwt & ISP_FW_ATTR_CLASS2) { fwt ^=ISP_FW_ATTR_CLASS2; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s Class2", buf); } if (fwt & ISP_FW_ATTR_FCTAPE) { fwt ^=ISP_FW_ATTR_FCTAPE; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s FC-Tape", buf); } if (fwt & ISP_FW_ATTR_IP) { fwt ^=ISP_FW_ATTR_IP; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s IP", buf); } if (fwt & ISP_FW_ATTR_VI) { fwt ^=ISP_FW_ATTR_VI; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VI", buf); } if (fwt & ISP_FW_ATTR_VI_SOLARIS) { fwt ^=ISP_FW_ATTR_VI_SOLARIS; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s VI_SOLARIS", buf); } if (fwt & ISP_FW_ATTR_2KLOGINS) { fwt ^=ISP_FW_ATTR_2KLOGINS; ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s 2K-Login", buf); } if (fwt != 0) { ISP_SNPRINTF(buf, ISP_FC_SCRLEN - strlen(buf), "%s (unknown 0x%08x%08x)", buf, (uint32_t) (fwt >> 32), (uint32_t) fwt); } isp_prt(isp, ISP_LOGCONFIG, "%s", buf); } if (IS_24XX(isp)) { MBSINIT(&mbs, MBOX_GET_RESOURCE_COUNT, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } if (isp->isp_maxcmds >= mbs.param[3]) { isp->isp_maxcmds = mbs.param[3]; } } else { MBSINIT(&mbs, MBOX_GET_FIRMWARE_STATUS, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { ISP_RESET0(isp); return; } if (isp->isp_maxcmds >= mbs.param[2]) { isp->isp_maxcmds = mbs.param[2]; } } isp_prt(isp, ISP_LOGCONFIG, "%d max I/O command limit set", isp->isp_maxcmds); /* * If we don't have Multi-ID f/w loaded, we need to restrict channels to one. * Only make this check for non-SCSI cards (I'm not sure firmware attributes * work for them). 
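The attribute banners above test each known bit, clear it as it is named, and let any residue print as unknown. The same technique reads more compactly table-driven; a sketch with invented bit values rather than the real ISP2400_FW_ATTR_* definitions:

#include <stdint.h>
#include <stdio.h>

static const struct { uint64_t bit; const char *name; } attrs[] = {
	{ 1u << 0, "Class2" },
	{ 1u << 1, "IP" },
	{ 1u << 2, "MultiID" },
};

static void
print_attrs(uint64_t fwt)
{
	size_t i;

	printf("Attributes:");
	for (i = 0; i < sizeof (attrs) / sizeof (attrs[0]); i++) {
		if (fwt & attrs[i].bit) {
			fwt ^= attrs[i].bit;	/* consume the bit once named */
			printf(" %s", attrs[i].name);
		}
	}
	if (fwt)				/* anything left is unrecognized */
		printf(" (unknown 0x%08x%08x)",
		    (uint32_t) (fwt >> 32), (uint32_t) fwt);
	printf("\n");
}

int
main(void)
{
	print_attrs((1u << 0) | (1u << 2) | (1u << 9));
	return (0);
}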
*/ if (IS_FC(isp) && isp->isp_nchan > 1) { if (!ISP_CAP_MULTI_ID(isp)) { isp_prt(isp, ISP_LOGWARN, "non-MULTIID f/w loaded, " "only can enable 1 of %d channels", isp->isp_nchan); isp->isp_nchan = 1; } else if (!ISP_CAP_VP0(isp)) { isp_prt(isp, ISP_LOGWARN, "We can not use MULTIID " "feature properly without VP0_Decoupling"); isp->isp_nchan = 1; } } - for (i = 0; i < isp->isp_nchan; i++) { - isp_fw_state(isp, i); + if (IS_FC(isp)) { + for (i = 0; i < isp->isp_nchan; i++) + isp_change_fw_state(isp, i, FW_CONFIG_WAIT); } if (isp->isp_dead) { isp_shutdown(isp); ISP_DISABLE_INTS(isp); return; } isp->isp_state = ISP_RESETSTATE; /* * Okay- now that we have new firmware running, we now (re)set our * notion of how many luns we support. This is somewhat tricky because * if we haven't loaded firmware, we sometimes do not have an easy way * of knowing how many luns we support. * * Expanded lun firmware gives you 32 luns for SCSI cards and * 16384 luns for Fibre Channel cards. * * It turns out that even for QLogic 2100s with ROM 1.10 and above * we do get a firmware attributes word returned in mailbox register 6. * * Because the lun is in a different position in the Request Queue * Entry structure for Fibre Channel with expanded lun firmware, we * can only support one lun (lun zero) when we don't know what kind * of firmware we're running. */ if (IS_SCSI(isp)) { if (dodnld) { if (IS_ULTRA2(isp) || IS_ULTRA3(isp)) { isp->isp_maxluns = 32; } else { isp->isp_maxluns = 8; } } else { isp->isp_maxluns = 8; } } else { if (ISP_CAP_SCCFW(isp)) { isp->isp_maxluns = 0; /* No limit -- 2/8 bytes */ } else { isp->isp_maxluns = 16; } } /* * We get some default values established. As a side * effect, NVRAM is read here (unless overriden by * a configuration flag). */ if (do_load_defaults) { if (IS_SCSI(isp)) { isp_setdfltsdparm(isp); } else { for (i = 0; i < isp->isp_nchan; i++) { isp_setdfltfcparm(isp, i); } } } } /* * Initialize Parameters of Hardware to a known state. * * Locks are held before coming here. */ void isp_init(ispsoftc_t *isp) { if (IS_FC(isp)) { if (IS_24XX(isp)) { isp_fibre_init_2400(isp); } else { isp_fibre_init(isp); } } else { isp_scsi_init(isp); } GET_NANOTIME(&isp->isp_init_time); } static void isp_scsi_init(ispsoftc_t *isp) { sdparam *sdp_chan0, *sdp_chan1; mbreg_t mbs; isp->isp_state = ISP_INITSTATE; sdp_chan0 = SDPARAM(isp, 0); sdp_chan1 = sdp_chan0; if (IS_DUALBUS(isp)) { sdp_chan1 = SDPARAM(isp, 1); } /* First do overall per-card settings. */ /* * If we have fast memory timing enabled, turn it on. */ if (sdp_chan0->isp_fast_mttr) { ISP_WRITE(isp, RISC_MTR, 0x1313); } /* * Set Retry Delay and Count. * You set both channels at the same time. */ MBSINIT(&mbs, MBOX_SET_RETRY_COUNT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_retry_count; mbs.param[2] = sdp_chan0->isp_retry_delay; mbs.param[6] = sdp_chan1->isp_retry_count; mbs.param[7] = sdp_chan1->isp_retry_delay; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ASYNC DATA SETUP time. This is very important. */ MBSINIT(&mbs, MBOX_SET_ASYNC_DATA_SETUP_TIME, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_async_data_setup; mbs.param[2] = sdp_chan1->isp_async_data_setup; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* * Set ACTIVE Negation State. 
*/ MBSINIT(&mbs, MBOX_SET_ACT_NEG_STATE, MBLOGNONE, 0); mbs.param[1] = (sdp_chan0->isp_req_ack_active_neg << 4) | (sdp_chan0->isp_data_line_active_neg << 5); mbs.param[2] = (sdp_chan1->isp_req_ack_active_neg << 4) | (sdp_chan1->isp_data_line_active_neg << 5); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set active negation state (%d,%d), (%d,%d)", sdp_chan0->isp_req_ack_active_neg, sdp_chan0->isp_data_line_active_neg, sdp_chan1->isp_req_ack_active_neg, sdp_chan1->isp_data_line_active_neg); /* * But don't return. */ } /* * Set the Tag Aging limit */ MBSINIT(&mbs, MBOX_SET_TAG_AGE_LIMIT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_tag_aging; mbs.param[2] = sdp_chan1->isp_tag_aging; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "failed to set tag age limit (%d,%d)", sdp_chan0->isp_tag_aging, sdp_chan1->isp_tag_aging); return; } /* * Set selection timeout. */ MBSINIT(&mbs, MBOX_SET_SELECT_TIMEOUT, MBLOGALL, 0); mbs.param[1] = sdp_chan0->isp_selection_timeout; mbs.param[2] = sdp_chan1->isp_selection_timeout; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } /* now do per-channel settings */ isp_scsi_channel_init(isp, 0); if (IS_DUALBUS(isp)) isp_scsi_channel_init(isp, 1); /* * Now enable request/response queues */ if (IS_ULTRA2(isp) || IS_1240(isp)) { MBSINIT(&mbs, MBOX_INIT_RES_QUEUE_A64, MBLOGALL, 0); mbs.param[1] = RESULT_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_result_dma); mbs.param[3] = DMA_WD0(isp->isp_result_dma); mbs.param[4] = 0; mbs.param[6] = DMA_WD3(isp->isp_result_dma); mbs.param[7] = DMA_WD2(isp->isp_result_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_residx = isp->isp_resodx = mbs.param[5]; MBSINIT(&mbs, MBOX_INIT_REQ_QUEUE_A64, MBLOGALL, 0); mbs.param[1] = RQUEST_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[5] = 0; mbs.param[6] = DMA_WD3(isp->isp_result_dma); mbs.param[7] = DMA_WD2(isp->isp_result_dma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = mbs.param[4]; } else { MBSINIT(&mbs, MBOX_INIT_RES_QUEUE, MBLOGALL, 0); mbs.param[1] = RESULT_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_result_dma); mbs.param[3] = DMA_WD0(isp->isp_result_dma); mbs.param[4] = 0; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_residx = isp->isp_resodx = mbs.param[5]; MBSINIT(&mbs, MBOX_INIT_REQ_QUEUE, MBLOGALL, 0); mbs.param[1] = RQUEST_QUEUE_LEN(isp); mbs.param[2] = DMA_WD1(isp->isp_rquest_dma); mbs.param[3] = DMA_WD0(isp->isp_rquest_dma); mbs.param[5] = 0; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = isp->isp_reqodx = mbs.param[4]; } /* * Turn on LVD transitions for ULTRA2 or better and other features * * Now that we have 32 bit handles, don't do any fast posting * any more. For Ultra2/Ultra3 cards, we can turn on 32 bit RIO * operation or use fast posting. To be conservative, we'll only * do this for Ultra3 cards now because the other cards are so * rare for this author to find and test with. 
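The queue-initialization mailboxes above pass a 64-bit DMA address as four 16-bit words via the DMA_WD* macros. In sketch form, with the macros reimplemented here for illustration (the driver's own definitions are authoritative):

#include <stdint.h>
#include <stdio.h>

/* Slice a 64-bit bus address into the 16-bit words the mailboxes carry. */
#define DMA_WD0(x)	((uint16_t)((x) & 0xffff))
#define DMA_WD1(x)	((uint16_t)(((x) >> 16) & 0xffff))
#define DMA_WD2(x)	((uint16_t)(((x) >> 32) & 0xffff))
#define DMA_WD3(x)	((uint16_t)(((x) >> 48) & 0xffff))

int
main(void)
{
	uint64_t busaddr = 0x0000000123456789ULL;

	/* param[2]/[3] carry the low half, param[6]/[7] the high half. */
	printf("WD0 0x%04x WD1 0x%04x WD2 0x%04x WD3 0x%04x\n",
	    DMA_WD0(busaddr), DMA_WD1(busaddr),
	    DMA_WD2(busaddr), DMA_WD3(busaddr));
	return (0);
}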
*/ MBSINIT(&mbs, MBOX_SET_FW_FEATURES, MBLOGALL, 0); if (IS_ULTRA2(isp)) mbs.param[1] |= FW_FEATURE_LVD_NOTIFY; #ifdef ISP_NO_RIO if (IS_ULTRA3(isp)) mbs.param[1] |= FW_FEATURE_FAST_POST; #else if (IS_ULTRA3(isp)) mbs.param[1] |= FW_FEATURE_RIO_32BIT; #endif if (mbs.param[1] != 0) { uint16_t sfeat = mbs.param[1]; isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGINFO, "Enabled FW features (0x%x)", sfeat); } } isp->isp_state = ISP_RUNSTATE; } static void isp_scsi_channel_init(ispsoftc_t *isp, int chan) { sdparam *sdp; mbreg_t mbs; int tgt; sdp = SDPARAM(isp, chan); /* * Set (possibly new) Initiator ID. */ MBSINIT(&mbs, MBOX_SET_INIT_SCSI_ID, MBLOGALL, 0); mbs.param[1] = (chan << 7) | sdp->isp_initiator_id; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp_prt(isp, ISP_LOGINFO, "Chan %d Initiator ID is %d", chan, sdp->isp_initiator_id); /* * Set current per-target parameters to an initial safe minimum. */ for (tgt = 0; tgt < MAX_TARGETS; tgt++) { int lun; uint16_t sdf; if (sdp->isp_devparam[tgt].dev_enable == 0) { continue; } #ifndef ISP_TARGET_MODE sdf = sdp->isp_devparam[tgt].goal_flags; sdf &= DPARM_SAFE_DFLT; /* * It is not quite clear when this changed over so that * we could force narrow and async for 1000/1020 cards, * but assume that this is only the case for loaded * firmware. */ if (isp->isp_loaded_fw) { sdf |= DPARM_NARROW | DPARM_ASYNC; } #else /* * The !$*!)$!$)* f/w uses the same index into some * internal table to decide how to respond to negotiations, * so if we've said "let's be safe" for ID X, and ID X * selects *us*, the negotiations will back to 'safe' * (as in narrow/async). What the f/w *should* do is * use the initiator id settings to decide how to respond. */ sdp->isp_devparam[tgt].goal_flags = sdf = DPARM_DEFAULT; #endif MBSINIT(&mbs, MBOX_SET_TARGET_PARAMS, MBLOGNONE, 0); mbs.param[1] = (chan << 15) | (tgt << 8); mbs.param[2] = sdf; if ((sdf & DPARM_SYNC) == 0) { mbs.param[3] = 0; } else { mbs.param[3] = (sdp->isp_devparam[tgt].goal_offset << 8) | (sdp->isp_devparam[tgt].goal_period); } isp_prt(isp, ISP_LOGDEBUG0, "Initial Settings bus%d tgt%d flags 0x%x off 0x%x per 0x%x", chan, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { sdf = DPARM_SAFE_DFLT; MBSINIT(&mbs, MBOX_SET_TARGET_PARAMS, MBLOGALL, 0); mbs.param[1] = (tgt << 8) | (chan << 15); mbs.param[2] = sdf; mbs.param[3] = 0; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { continue; } } /* * We don't update any information directly from the f/w * because we need to run at least one command to cause a * new state to be latched up. So, we just assume that we * converge to the values we just had set. * * Ensure that we don't believe tagged queuing is enabled yet. * It turns out that sometimes the ISP just ignores our * attempts to set parameters for devices that it hasn't * seen yet. */ sdp->isp_devparam[tgt].actv_flags = sdf & ~DPARM_TQING; for (lun = 0; lun < (int) isp->isp_maxluns; lun++) { MBSINIT(&mbs, MBOX_SET_DEV_QUEUE_PARAMS, MBLOGALL, 0); mbs.param[1] = (chan << 15) | (tgt << 8) | lun; mbs.param[2] = sdp->isp_max_queue_depth; mbs.param[3] = sdp->isp_devparam[tgt].exc_throttle; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } } } for (tgt = 0; tgt < MAX_TARGETS; tgt++) { if (sdp->isp_devparam[tgt].dev_refresh) { sdp->sendmarker = 1; sdp->update = 1; break; } } } /* * Fibre Channel specific initialization. 
*/ static void isp_fibre_init(ispsoftc_t *isp) { fcparam *fcp; isp_icb_t local, *icbp = &local; mbreg_t mbs; /* * We only support one channel on non-24XX cards */ fcp = FCPARAM(isp, 0); if (fcp->role == ISP_ROLE_NONE) return; isp->isp_state = ISP_INITSTATE; ISP_MEMZERO(icbp, sizeof (*icbp)); icbp->icb_version = ICB_VERSION1; icbp->icb_fwoptions = fcp->isp_fwoptions; /* * Firmware Options are either retrieved from NVRAM or * are patched elsewhere. We check them for sanity here * and make changes based on board revision, but otherwise * let others decide policy. */ /* * If this is a 2100 < revision 5, we have to turn off FAIRNESS. */ if (IS_2100(isp) && isp->isp_revision < 5) { icbp->icb_fwoptions &= ~ICBOPT_FAIRNESS; } /* * We have to use FULL LOGIN even though it resets the loop too much * because otherwise port database entries don't get updated after * a LIP- this is a known f/w bug for 2100 f/w less than 1.17.0. */ if (!ISP_FW_NEWER_THAN(isp, 1, 17, 0)) { icbp->icb_fwoptions |= ICBOPT_FULL_LOGIN; } /* * Insist on Port Database Update Async notifications */ icbp->icb_fwoptions |= ICBOPT_PDBCHANGE_AE; /* * Make sure that target role reflects into fwoptions. */ if (fcp->role & ISP_ROLE_TARGET) { icbp->icb_fwoptions |= ICBOPT_TGT_ENABLE; } else { icbp->icb_fwoptions &= ~ICBOPT_TGT_ENABLE; } if (fcp->role & ISP_ROLE_INITIATOR) { icbp->icb_fwoptions &= ~ICBOPT_INI_DISABLE; } else { icbp->icb_fwoptions |= ICBOPT_INI_DISABLE; } icbp->icb_maxfrmlen = DEFAULT_FRAMESIZE(isp); if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) { isp_prt(isp, ISP_LOGERR, "bad frame length (%d) from NVRAM- using %d", DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN); icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN; } icbp->icb_maxalloc = fcp->isp_maxalloc; if (icbp->icb_maxalloc < 1) { isp_prt(isp, ISP_LOGERR, "bad maximum allocation (%d)- using 16", fcp->isp_maxalloc); icbp->icb_maxalloc = 16; } icbp->icb_execthrottle = DEFAULT_EXEC_THROTTLE(isp); if (icbp->icb_execthrottle < 1) { isp_prt(isp, ISP_LOGERR, "bad execution throttle of %d- using %d", DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE); icbp->icb_execthrottle = ICB_DFLT_THROTTLE; } icbp->icb_retry_delay = fcp->isp_retry_delay; icbp->icb_retry_count = fcp->isp_retry_count; if (fcp->isp_loopid < LOCAL_LOOP_LIM) { icbp->icb_hardaddr = fcp->isp_loopid; if (isp->isp_confopts & ISP_CFG_OWNLOOPID) icbp->icb_fwoptions |= ICBOPT_HARD_ADDRESS; else icbp->icb_fwoptions |= ICBOPT_PREV_ADDRESS; } /* * Right now we just set extended options to prefer point-to-point * over loop based upon some soft config options. * * NB: for the 2300, ICBOPT_EXTENDED is required. */ if (IS_2100(isp)) { /* * We can't have Fast Posting any more- we now * have 32 bit handles. */ icbp->icb_fwoptions &= ~ICBOPT_FAST_POST; } else if (IS_2200(isp) || IS_23XX(isp)) { icbp->icb_fwoptions |= ICBOPT_EXTENDED; icbp->icb_xfwoptions = fcp->isp_xfwoptions; if (ISP_CAP_FCTAPE(isp)) { if (isp->isp_confopts & ISP_CFG_NOFCTAPE) icbp->icb_xfwoptions &= ~ICBXOPT_FCTAPE; if (isp->isp_confopts & ISP_CFG_FCTAPE) icbp->icb_xfwoptions |= ICBXOPT_FCTAPE; if (icbp->icb_xfwoptions & ICBXOPT_FCTAPE) { icbp->icb_fwoptions &= ~ICBOPT_FULL_LOGIN; /* per documents */ icbp->icb_xfwoptions |= ICBXOPT_FCTAPE_CCQ|ICBXOPT_FCTAPE_CONFIRM; FCPARAM(isp, 0)->fctape_enabled = 1; } else { FCPARAM(isp, 0)->fctape_enabled = 0; } } else { icbp->icb_xfwoptions &= ~ICBXOPT_FCTAPE; FCPARAM(isp, 0)->fctape_enabled = 0; } /* * Prefer or force Point-To-Point instead Loop? 
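The switch that follows always clears the whole topology field before setting a new value, so stale NVRAM bits cannot combine with the new setting. The mask-then-set idiom in isolation, with an illustrative field layout rather than the real ICBXOPT_* encoding:

#include <stdint.h>
#include <stdio.h>

#define TOPO_MASK	(0x7u << 4)
#define TOPO_LOOP_2_PTP	(0x2u << 4)
#define TOPO_PTP_ONLY	(0x3u << 4)

/* Replace a multi-bit field: clear the whole mask, then or in the value. */
static uint32_t
set_topo(uint32_t xfwoptions, uint32_t topo)
{
	xfwoptions &= ~TOPO_MASK;
	return (xfwoptions | topo);
}

int
main(void)
{
	uint32_t opts = 0xff;	/* pretend NVRAM left stale topology bits */

	opts = set_topo(opts, TOPO_PTP_ONLY);
	printf("options now 0x%x\n", opts);
	return (0);
}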
*/ switch (isp->isp_confopts & ISP_CFG_PORT_PREF) { case ISP_CFG_NPORT: icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK; icbp->icb_xfwoptions |= ICBXOPT_PTP_2_LOOP; break; case ISP_CFG_NPORT_ONLY: icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK; icbp->icb_xfwoptions |= ICBXOPT_PTP_ONLY; break; case ISP_CFG_LPORT_ONLY: icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK; icbp->icb_xfwoptions |= ICBXOPT_LOOP_ONLY; break; default: /* * Let NVRAM settings define it if they are sane */ switch (icbp->icb_xfwoptions & ICBXOPT_TOPO_MASK) { case ICBXOPT_PTP_2_LOOP: case ICBXOPT_PTP_ONLY: case ICBXOPT_LOOP_ONLY: case ICBXOPT_LOOP_2_PTP: break; default: icbp->icb_xfwoptions &= ~ICBXOPT_TOPO_MASK; icbp->icb_xfwoptions |= ICBXOPT_LOOP_2_PTP; } break; } if (IS_2200(isp)) { /* * We can't have Fast Posting any more- we now * have 32 bit handles. * * RIO seemed to have to much breakage. * * Just opt for safety. */ icbp->icb_xfwoptions &= ~ICBXOPT_RIO_16BIT; icbp->icb_fwoptions &= ~ICBOPT_FAST_POST; } else { /* * QLogic recommends that FAST Posting be turned * off for 23XX cards and instead allow the HBA * to write response queue entries and interrupt * after a delay (ZIO). */ icbp->icb_fwoptions &= ~ICBOPT_FAST_POST; if ((fcp->isp_xfwoptions & ICBXOPT_TIMER_MASK) == ICBXOPT_ZIO) { icbp->icb_xfwoptions |= ICBXOPT_ZIO; icbp->icb_idelaytimer = 10; } icbp->icb_zfwoptions = fcp->isp_zfwoptions; if (isp->isp_confopts & ISP_CFG_ONEGB) { icbp->icb_zfwoptions &= ~ICBZOPT_RATE_MASK; icbp->icb_zfwoptions |= ICBZOPT_RATE_ONEGB; } else if (isp->isp_confopts & ISP_CFG_TWOGB) { icbp->icb_zfwoptions &= ~ICBZOPT_RATE_MASK; icbp->icb_zfwoptions |= ICBZOPT_RATE_TWOGB; } else { switch (icbp->icb_zfwoptions & ICBZOPT_RATE_MASK) { case ICBZOPT_RATE_ONEGB: case ICBZOPT_RATE_TWOGB: case ICBZOPT_RATE_AUTO: break; default: icbp->icb_zfwoptions &= ~ICBZOPT_RATE_MASK; icbp->icb_zfwoptions |= ICBZOPT_RATE_AUTO; break; } } } } /* * For 22XX > 2.1.26 && 23XX, set some options. 
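The block below layers options by firmware revision using ISP_FW_NEWER_THAN(). The gating pattern in isolation, assuming a simple major/minor/micro triple comparison:

#include <stdio.h>

/* Compare a firmware revision triple against a threshold, high field first. */
static int
fw_newer_than(const int rev[3], int maj, int min, int mic)
{
	if (rev[0] != maj)
		return (rev[0] > maj);
	if (rev[1] != min)
		return (rev[1] > min);
	return (rev[2] > mic);
}

int
main(void)
{
	int rev[3] = { 3, 20, 1 };
	unsigned opts = 0;

	if (fw_newer_than(rev, 2, 26, 0))
		opts |= 1u << 0;	/* base option set */
	if (fw_newer_than(rev, 3, 16, 0))
		opts |= 1u << 1;	/* later firmware: extra async events */
	printf("options 0x%x\n", opts);
	return (0);
}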
*/ if (ISP_FW_NEWER_THAN(isp, 2, 26, 0)) { MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0); mbs.param[1] = IFCOPT1_DISF7SWTCH|IFCOPT1_LIPASYNC|IFCOPT1_LIPF8; mbs.param[2] = 0; mbs.param[3] = 0; if (ISP_FW_NEWER_THAN(isp, 3, 16, 0)) { mbs.param[1] |= IFCOPT1_EQFQASYNC|IFCOPT1_CTIO_RETRY; if (fcp->role & ISP_ROLE_TARGET) { if (ISP_FW_NEWER_THAN(isp, 3, 25, 0)) { mbs.param[1] |= IFCOPT1_ENAPURE; } mbs.param[3] = IFCOPT3_NOPRLI; } } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } } icbp->icb_logintime = ICB_LOGIN_TOV; #ifdef ISP_TARGET_MODE if (IS_23XX(isp) && (icbp->icb_fwoptions & ICBOPT_TGT_ENABLE)) { icbp->icb_lunenables = 0xffff; icbp->icb_ccnt = DFLT_CMND_CNT; icbp->icb_icnt = DFLT_INOT_CNT; icbp->icb_lunetimeout = ICB_LUN_ENABLE_TOV; } #endif if (fcp->isp_wwnn && fcp->isp_wwpn) { icbp->icb_fwoptions |= ICBOPT_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, fcp->isp_wwnn); MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwnn >> 32)), ((uint32_t) (fcp->isp_wwnn)), ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else if (fcp->isp_wwpn) { icbp->icb_fwoptions &= ~ICBOPT_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else { isp_prt(isp, ISP_LOGERR, "No valid WWNs to use"); return; } icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp); if (icbp->icb_rqstqlen < 1) { isp_prt(isp, ISP_LOGERR, "bad request queue length"); } icbp->icb_rsltqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_rsltqlen < 1) { isp_prt(isp, ISP_LOGERR, "bad result queue length"); } icbp->icb_rqstaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_rquest_dma); icbp->icb_respaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_result_dma); if (FC_SCRATCH_ACQUIRE(isp, 0)) { isp_prt(isp, ISP_LOGERR, sacq); return; } isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init: fwopt 0x%x xfwopt 0x%x zfwopt 0x%x", icbp->icb_fwoptions, icbp->icb_xfwoptions, icbp->icb_zfwoptions); isp_put_icb(isp, icbp, (isp_icb_t *)fcp->isp_scratch); /* * Init the firmware */ MBSINIT(&mbs, MBOX_INIT_FIRMWARE, MBLOGALL, 30000000); mbs.param[1] = 0; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); mbs.logval = MBLOGALL; isp_prt(isp, ISP_LOGDEBUG0, "INIT F/W from %p (%08x%08x)", fcp->isp_scratch, (uint32_t) ((uint64_t)fcp->isp_scdma >> 32), (uint32_t) fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (*icbp), 0); isp_mboxcmd(isp, &mbs); FC_SCRATCH_RELEASE(isp, 0); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_print_bytes(isp, "isp_fibre_init", sizeof (*icbp), icbp); return; } isp->isp_reqidx = 0; isp->isp_reqodx = 0; isp->isp_residx = 0; isp->isp_resodx = 0; /* * Whatever happens, we're now committed to being here. 
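Note how the ICB itself never crosses the mailboxes above: it is serialized into the DMA-visible scratch area, and only its bus address travels in MBOX_INIT_FIRMWARE, split across the 16-bit params. A schematic of that handoff with fake types and a pretend bus address:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_icb { uint16_t version; uint16_t maxfrmlen; };

/* The "scratch" region a real driver would allocate DMA-coherent. */
static unsigned char scratch[64];

static void
init_firmware(const struct fake_icb *icb, uint64_t scratch_bus)
{
	uint16_t param[8] = { 0 };

	/* Serialize the ICB where the device can see it... */
	memcpy(scratch, icb, sizeof (*icb));

	/* ...and hand over only its bus address, 16 bits at a time. */
	param[2] = (uint16_t) (scratch_bus >> 16);
	param[3] = (uint16_t) scratch_bus;
	param[6] = (uint16_t) (scratch_bus >> 48);
	param[7] = (uint16_t) (scratch_bus >> 32);
	printf("INIT F/W: icb at bus 0x%llx (params %04x %04x %04x %04x)\n",
	    (unsigned long long) scratch_bus,
	    param[2], param[3], param[6], param[7]);
}

int
main(void)
{
	struct fake_icb icb = { 1, 1024 };

	init_firmware(&icb, 0x12345678ULL);
	return (0);
}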
*/ isp->isp_state = ISP_RUNSTATE; } static void isp_fibre_init_2400(ispsoftc_t *isp) { fcparam *fcp; isp_icb_2400_t local, *icbp = &local; mbreg_t mbs; int chan; /* * Check to see whether all channels have *some* kind of role */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role != ISP_ROLE_NONE) { break; } } if (chan == isp->isp_nchan) { isp_prt(isp, ISP_LOG_WARN1, "all %d channels with role 'none'", chan); return; } isp->isp_state = ISP_INITSTATE; /* * Start with channel 0. */ fcp = FCPARAM(isp, 0); /* * Turn on LIP F8 async event (1) */ MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0); mbs.param[1] = 1; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } ISP_MEMZERO(icbp, sizeof (*icbp)); icbp->icb_fwoptions1 = fcp->isp_fwoptions; icbp->icb_fwoptions2 = fcp->isp_xfwoptions; icbp->icb_fwoptions3 = fcp->isp_zfwoptions; if (isp->isp_nchan > 1 && ISP_CAP_VP0(isp)) { icbp->icb_fwoptions1 &= ~ICB2400_OPT1_INI_DISABLE; icbp->icb_fwoptions1 |= ICB2400_OPT1_TGT_ENABLE; } else { if (fcp->role & ISP_ROLE_TARGET) icbp->icb_fwoptions1 |= ICB2400_OPT1_TGT_ENABLE; else icbp->icb_fwoptions1 &= ~ICB2400_OPT1_TGT_ENABLE; if (fcp->role & ISP_ROLE_INITIATOR) icbp->icb_fwoptions1 &= ~ICB2400_OPT1_INI_DISABLE; else icbp->icb_fwoptions1 |= ICB2400_OPT1_INI_DISABLE; } icbp->icb_version = ICB_VERSION1; icbp->icb_maxfrmlen = DEFAULT_FRAMESIZE(isp); if (icbp->icb_maxfrmlen < ICB_MIN_FRMLEN || icbp->icb_maxfrmlen > ICB_MAX_FRMLEN) { isp_prt(isp, ISP_LOGERR, "bad frame length (%d) from NVRAM- using %d", DEFAULT_FRAMESIZE(isp), ICB_DFLT_FRMLEN); icbp->icb_maxfrmlen = ICB_DFLT_FRMLEN; } icbp->icb_execthrottle = DEFAULT_EXEC_THROTTLE(isp); if (icbp->icb_execthrottle < 1) { isp_prt(isp, ISP_LOGERR, "bad execution throttle of %d- using %d", DEFAULT_EXEC_THROTTLE(isp), ICB_DFLT_THROTTLE); icbp->icb_execthrottle = ICB_DFLT_THROTTLE; } /* * Set target exchange count. Take half if we are supporting both roles. 
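Reduced to its core, the split below gives the target role every exchange when it runs alone and half when an initiator shares the port:

#include <stdio.h>

/* Apportion exchange resources between target and initiator roles. */
static int
target_exchanges(int maxcmds, int tgt_enabled, int ini_enabled)
{
	if (!tgt_enabled)
		return (0);
	return (ini_enabled ? maxcmds >> 1 : maxcmds);
}

int
main(void)
{
	printf("dual role: %d of 4096\n", target_exchanges(4096, 1, 1));
	printf("target only: %d of 4096\n", target_exchanges(4096, 1, 0));
	return (0);
}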
*/ if (icbp->icb_fwoptions1 & ICB2400_OPT1_TGT_ENABLE) { icbp->icb_xchgcnt = isp->isp_maxcmds; if ((icbp->icb_fwoptions1 & ICB2400_OPT1_INI_DISABLE) == 0) icbp->icb_xchgcnt >>= 1; } if (fcp->isp_loopid < LOCAL_LOOP_LIM) { icbp->icb_hardaddr = fcp->isp_loopid; if (isp->isp_confopts & ISP_CFG_OWNLOOPID) icbp->icb_fwoptions1 |= ICB2400_OPT1_HARD_ADDRESS; else icbp->icb_fwoptions1 |= ICB2400_OPT1_PREV_ADDRESS; } if (isp->isp_confopts & ISP_CFG_NOFCTAPE) { icbp->icb_fwoptions2 &= ~ICB2400_OPT2_FCTAPE; } if (isp->isp_confopts & ISP_CFG_FCTAPE) { icbp->icb_fwoptions2 |= ICB2400_OPT2_FCTAPE; } for (chan = 0; chan < isp->isp_nchan; chan++) { if (icbp->icb_fwoptions2 & ICB2400_OPT2_FCTAPE) FCPARAM(isp, chan)->fctape_enabled = 1; else FCPARAM(isp, chan)->fctape_enabled = 0; } switch (isp->isp_confopts & ISP_CFG_PORT_PREF) { case ISP_CFG_NPORT_ONLY: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_PTP_ONLY; break; case ISP_CFG_LPORT_ONLY: icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_ONLY; break; default: /* ISP_CFG_PTP_2_LOOP not available in 24XX/25XX */ icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TOPO_MASK; icbp->icb_fwoptions2 |= ICB2400_OPT2_LOOP_2_PTP; break; } switch (icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK) { case ICB2400_OPT2_ZIO: case ICB2400_OPT2_ZIO1: icbp->icb_idelaytimer = 0; break; case 0: break; default: isp_prt(isp, ISP_LOGWARN, "bad value %x in fwopt2 timer field", icbp->icb_fwoptions2 & ICB2400_OPT2_TIMER_MASK); icbp->icb_fwoptions2 &= ~ICB2400_OPT2_TIMER_MASK; break; } if ((icbp->icb_fwoptions3 & ICB2400_OPT3_RSPSZ_MASK) == 0) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RSPSZ_24; } icbp->icb_fwoptions3 &= ~ICB2400_OPT3_RATE_AUTO; if (isp->isp_confopts & ISP_CFG_ONEGB) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_ONEGB; } else if (isp->isp_confopts & ISP_CFG_TWOGB) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_TWOGB; } else if (isp->isp_confopts & ISP_CFG_FOURGB) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_FOURGB; } else if (IS_25XX(isp) && (isp->isp_confopts & ISP_CFG_EIGHTGB)) { icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_EIGHTGB; } else { icbp->icb_fwoptions3 |= ICB2400_OPT3_RATE_AUTO; } icbp->icb_logintime = ICB_LOGIN_TOV; if (fcp->isp_wwnn && fcp->isp_wwpn) { icbp->icb_fwoptions1 |= ICB2400_OPT1_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); MAKE_NODE_NAME_FROM_WWN(icbp->icb_nodename, fcp->isp_wwnn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node 0x%08x%08x Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwnn >> 32)), ((uint32_t) (fcp->isp_wwnn)), ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else if (fcp->isp_wwpn) { icbp->icb_fwoptions1 &= ~ICB2400_OPT1_BOTH_WWNS; MAKE_NODE_NAME_FROM_WWN(icbp->icb_portname, fcp->isp_wwpn); isp_prt(isp, ISP_LOGDEBUG1, "Setting ICB Node to be same as Port 0x%08x%08x", ((uint32_t) (fcp->isp_wwpn >> 32)), ((uint32_t) (fcp->isp_wwpn))); } else { isp_prt(isp, ISP_LOGERR, "No valid WWNs to use"); return; } icbp->icb_retry_count = fcp->isp_retry_count; icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp); if (icbp->icb_rqstqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad request queue length %d", icbp->icb_rqstqlen); return; } icbp->icb_rsltqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_rsltqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad result queue length %d", icbp->icb_rsltqlen); return; } icbp->icb_rqstaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR3247] = 
DMA_WD2(isp->isp_rquest_dma); icbp->icb_rqstaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_rquest_dma); icbp->icb_respaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_result_dma); icbp->icb_respaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_result_dma); #ifdef ISP_TARGET_MODE /* unconditionally set up the ATIO queue if we support target mode */ icbp->icb_atioqlen = RESULT_QUEUE_LEN(isp); if (icbp->icb_atioqlen < 8) { isp_prt(isp, ISP_LOGERR, "bad ATIO queue length %d", icbp->icb_atioqlen); return; } icbp->icb_atioqaddr[RQRSP_ADDR0015] = DMA_WD0(isp->isp_atioq_dma); icbp->icb_atioqaddr[RQRSP_ADDR1631] = DMA_WD1(isp->isp_atioq_dma); icbp->icb_atioqaddr[RQRSP_ADDR3247] = DMA_WD2(isp->isp_atioq_dma); icbp->icb_atioqaddr[RQRSP_ADDR4863] = DMA_WD3(isp->isp_atioq_dma); isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: atioq %04x%04x%04x%04x", DMA_WD3(isp->isp_atioq_dma), DMA_WD2(isp->isp_atioq_dma), DMA_WD1(isp->isp_atioq_dma), DMA_WD0(isp->isp_atioq_dma)); #endif isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: fwopt1 0x%x fwopt2 0x%x fwopt3 0x%x", icbp->icb_fwoptions1, icbp->icb_fwoptions2, icbp->icb_fwoptions3); isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init_2400: rqst %04x%04x%04x%04x rsp %04x%04x%04x%04x", DMA_WD3(isp->isp_rquest_dma), DMA_WD2(isp->isp_rquest_dma), DMA_WD1(isp->isp_rquest_dma), DMA_WD0(isp->isp_rquest_dma), DMA_WD3(isp->isp_result_dma), DMA_WD2(isp->isp_result_dma), DMA_WD1(isp->isp_result_dma), DMA_WD0(isp->isp_result_dma)); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "isp_fibre_init_2400", sizeof (*icbp), icbp); } if (FC_SCRATCH_ACQUIRE(isp, 0)) { isp_prt(isp, ISP_LOGERR, sacq); return; } ISP_MEMZERO(fcp->isp_scratch, ISP_FC_SCRLEN); isp_put_icb_2400(isp, icbp, fcp->isp_scratch); /* * Now fill in information about any additional channels */ if (isp->isp_nchan > 1) { isp_icb_2400_vpinfo_t vpinfo, *vdst; vp_port_info_t pi, *pdst; size_t amt = 0; uint8_t *off; vpinfo.vp_global_options = ICB2400_VPGOPT_GEN_RIDA; if (ISP_CAP_VP0(isp)) { vpinfo.vp_global_options |= ICB2400_VPGOPT_VP0_DECOUPLE; vpinfo.vp_count = isp->isp_nchan; chan = 0; } else { vpinfo.vp_count = isp->isp_nchan - 1; chan = 1; } off = fcp->isp_scratch; off += ICB2400_VPINFO_OFF; vdst = (isp_icb_2400_vpinfo_t *) off; isp_put_icb_2400_vpinfo(isp, &vpinfo, vdst); amt = ICB2400_VPINFO_OFF + sizeof (isp_icb_2400_vpinfo_t); for (; chan < isp->isp_nchan; chan++) { fcparam *fcp2; ISP_MEMZERO(&pi, sizeof (pi)); fcp2 = FCPARAM(isp, chan); if (fcp2->role != ISP_ROLE_NONE) { pi.vp_port_options = ICB2400_VPOPT_ENABLED | ICB2400_VPOPT_ENA_SNSLOGIN; if (fcp2->role & ISP_ROLE_INITIATOR) pi.vp_port_options |= ICB2400_VPOPT_INI_ENABLE; if ((fcp2->role & ISP_ROLE_TARGET) == 0) pi.vp_port_options |= ICB2400_VPOPT_TGT_DISABLE; } if (fcp2->isp_loopid < LOCAL_LOOP_LIM) { pi.vp_port_loopid = fcp2->isp_loopid; if (isp->isp_confopts & ISP_CFG_OWNLOOPID) pi.vp_port_options |= ICB2400_VPOPT_HARD_ADDRESS; else pi.vp_port_options |= ICB2400_VPOPT_PREV_ADDRESS; } MAKE_NODE_NAME_FROM_WWN(pi.vp_port_portname, fcp2->isp_wwpn); MAKE_NODE_NAME_FROM_WWN(pi.vp_port_nodename, fcp2->isp_wwnn); off = fcp->isp_scratch; if (ISP_CAP_VP0(isp)) off += ICB2400_VPINFO_PORT_OFF(chan); else off += ICB2400_VPINFO_PORT_OFF(chan - 1); pdst = (vp_port_info_t *) off; isp_put_vp_port_info(isp, &pi, pdst); amt += ICB2400_VPOPT_WRITE_SIZE; } if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "isp_fibre_init_2400", amt - ICB2400_VPINFO_OFF, 
(char *)fcp->isp_scratch + ICB2400_VPINFO_OFF); } } /* * Init the firmware */ MBSINIT(&mbs, 0, MBLOGALL, 30000000); if (isp->isp_nchan > 1) { mbs.param[0] = MBOX_INIT_FIRMWARE_MULTI_ID; } else { mbs.param[0] = MBOX_INIT_FIRMWARE; } mbs.param[1] = 0; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); isp_prt(isp, ISP_LOGDEBUG0, "INIT F/W from %04x%04x%04x%04x", DMA_WD3(fcp->isp_scdma), DMA_WD2(fcp->isp_scdma), DMA_WD1(fcp->isp_scdma), DMA_WD0(fcp->isp_scdma)); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (*icbp), 0); isp_mboxcmd(isp, &mbs); FC_SCRATCH_RELEASE(isp, 0); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return; } isp->isp_reqidx = 0; isp->isp_reqodx = 0; isp->isp_residx = 0; isp->isp_resodx = 0; isp->isp_atioodx = 0; /* * Whatever happens, we're now committed to being here. */ isp->isp_state = ISP_RUNSTATE; } static void isp_mark_portdb(ispsoftc_t *isp, int chan, int disposition) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; int i; for (i = 0; i < MAX_FC_TARG; i++) { lp = &fcp->portdb[i]; switch (lp->state) { case FC_PORTDB_STATE_PROBATIONAL: case FC_PORTDB_STATE_DEAD: case FC_PORTDB_STATE_CHANGED: case FC_PORTDB_STATE_PENDING_VALID: case FC_PORTDB_STATE_VALID: if (disposition > 0) lp->state = FC_PORTDB_STATE_PROBATIONAL; else { lp->state = FC_PORTDB_STATE_NIL; isp_async(isp, ISPASYNC_DEV_GONE, chan, lp); } break; case FC_PORTDB_STATE_ZOMBIE: break; case FC_PORTDB_STATE_NIL: case FC_PORTDB_STATE_NEW: default: ISP_MEMZERO(lp, sizeof(*lp)); lp->state = FC_PORTDB_STATE_NIL; break; } } } /* * Perform an IOCB PLOGI or LOGO via EXECUTE IOCB A64 for 24XX cards * or via FABRIC LOGIN/FABRIC LOGOUT for other cards. */ static int isp_plogx(ispsoftc_t *isp, int chan, uint16_t handle, uint32_t portid, int flags, int gs) { mbreg_t mbs; uint8_t q[QENTRY_LEN]; isp_plogx_t *plp; fcparam *fcp; uint8_t *scp; uint32_t sst, parm1; int rval, lev; const char *msg; char buf[64]; if (!IS_24XX(isp)) { int action = flags & PLOGX_FLG_CMD_MASK; if (action == PLOGX_FLG_CMD_PLOGI) { return (isp_port_login(isp, handle, portid)); } else if (action == PLOGX_FLG_CMD_LOGO) { return (isp_port_logout(isp, handle, portid)); } else { return (MBOX_INVALID_COMMAND); } } ISP_MEMZERO(q, QENTRY_LEN); plp = (isp_plogx_t *) q; plp->plogx_header.rqs_entry_count = 1; plp->plogx_header.rqs_entry_type = RQSTYPE_LOGIN; plp->plogx_handle = 0xffffffff; plp->plogx_nphdl = handle; plp->plogx_vphdl = chan; plp->plogx_portlo = portid; plp->plogx_rspsz_porthi = (portid >> 16) & 0xff; plp->plogx_flags = flags; if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "IOCB LOGX", QENTRY_LEN, plp); } if (gs == 0) { if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } } fcp = FCPARAM(isp, chan); scp = fcp->isp_scratch; isp_put_plogx(isp, plp, (isp_plogx_t *) scp); MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 500000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, QENTRY_LEN, chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { rval = mbs.param[0]; goto out; } MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan); scp += QENTRY_LEN; isp_get_plogx(isp, (isp_plogx_t *) scp, plp); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "IOCB LOGX response", QENTRY_LEN, plp); } if 
(plp->plogx_status == PLOGX_STATUS_OK) { rval = 0; goto out; } else if (plp->plogx_status != PLOGX_STATUS_IOCBERR) { isp_prt(isp, ISP_LOGWARN, "status 0x%x on port login IOCB channel %d", plp->plogx_status, chan); rval = -1; goto out; } sst = plp->plogx_ioparm[0].lo16 | (plp->plogx_ioparm[0].hi16 << 16); parm1 = plp->plogx_ioparm[1].lo16 | (plp->plogx_ioparm[1].hi16 << 16); rval = -1; lev = ISP_LOGERR; msg = NULL; switch (sst) { case PLOGX_IOCBERR_NOLINK: msg = "no link"; break; case PLOGX_IOCBERR_NOIOCB: msg = "no IOCB buffer"; break; case PLOGX_IOCBERR_NOXGHG: msg = "no Exchange Control Block"; break; case PLOGX_IOCBERR_FAILED: ISP_SNPRINTF(buf, sizeof (buf), "reason 0x%x (last LOGIN state 0x%x)", parm1 & 0xff, (parm1 >> 8) & 0xff); msg = buf; break; case PLOGX_IOCBERR_NOFABRIC: msg = "no fabric"; break; case PLOGX_IOCBERR_NOTREADY: msg = "firmware not ready"; break; case PLOGX_IOCBERR_NOLOGIN: ISP_SNPRINTF(buf, sizeof (buf), "not logged in (last state 0x%x)", parm1); msg = buf; rval = MBOX_NOT_LOGGED_IN; break; case PLOGX_IOCBERR_REJECT: ISP_SNPRINTF(buf, sizeof (buf), "LS_RJT = 0x%x", parm1); msg = buf; break; case PLOGX_IOCBERR_NOPCB: msg = "no PCB allocated"; break; case PLOGX_IOCBERR_EINVAL: ISP_SNPRINTF(buf, sizeof (buf), "invalid parameter at offset 0x%x", parm1); msg = buf; break; case PLOGX_IOCBERR_PORTUSED: lev = ISP_LOG_SANCFG|ISP_LOG_WARN1; ISP_SNPRINTF(buf, sizeof (buf), "already logged in with N-Port handle 0x%x", parm1); msg = buf; rval = MBOX_PORT_ID_USED | (parm1 << 16); break; case PLOGX_IOCBERR_HNDLUSED: lev = ISP_LOG_SANCFG|ISP_LOG_WARN1; ISP_SNPRINTF(buf, sizeof (buf), "handle already used for PortID 0x%06x", parm1); msg = buf; rval = MBOX_LOOP_ID_USED; break; case PLOGX_IOCBERR_NOHANDLE: msg = "no handle allocated"; break; case PLOGX_IOCBERR_NOFLOGI: msg = "no FLOGI_ACC"; break; default: ISP_SNPRINTF(buf, sizeof (buf), "status %x from %x", plp->plogx_status, flags); msg = buf; break; } if (msg) { isp_prt(isp, ISP_LOGERR, "Chan %d PLOGX PortID 0x%06x to N-Port handle 0x%x: %s", chan, portid, handle, msg); } out: if (gs == 0) { FC_SCRATCH_RELEASE(isp, chan); } return (rval); } static int isp_port_login(ispsoftc_t *isp, uint16_t handle, uint32_t portid) { mbreg_t mbs; MBSINIT(&mbs, MBOX_FABRIC_LOGIN, MBLOGNONE, 500000); if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = handle; mbs.ibits = (1 << 10); } else { mbs.param[1] = handle << 8; } mbs.param[2] = portid >> 16; mbs.param[3] = portid; mbs.logval = MBLOGNONE; mbs.timeout = 500000; isp_mboxcmd(isp, &mbs); switch (mbs.param[0]) { case MBOX_PORT_ID_USED: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: portid 0x%06x already logged in as %u", portid, mbs.param[1]); return (MBOX_PORT_ID_USED | (mbs.param[1] << 16)); case MBOX_LOOP_ID_USED: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: handle 0x%x in use for port id 0x%02xXXXX", handle, mbs.param[1] & 0xff); return (MBOX_LOOP_ID_USED); case MBOX_COMMAND_COMPLETE: return (0); case MBOX_COMMAND_ERROR: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: error 0x%x in PLOGI to port 0x%06x", mbs.param[1], portid); return (MBOX_COMMAND_ERROR); case MBOX_ALL_IDS_USED: isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "isp_port_login: all IDs used for fabric login"); return (MBOX_ALL_IDS_USED); default: isp_prt(isp, ISP_LOG_SANCFG, "isp_port_login: error 0x%x on port login of 0x%06x@0x%0x", mbs.param[0], portid, handle); return (mbs.param[0]); } } /* * Pre-24XX fabric port logout * * Note that portid is not used */ static int isp_port_logout(ispsoftc_t *isp, 
uint16_t handle, uint32_t portid) { mbreg_t mbs; MBSINIT(&mbs, MBOX_FABRIC_LOGOUT, MBLOGNONE, 500000); if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = handle; mbs.ibits = (1 << 10); } else { mbs.param[1] = handle << 8; } isp_mboxcmd(isp, &mbs); return (mbs.param[0] == MBOX_COMMAND_COMPLETE? 0 : mbs.param[0]); } static int isp_getpdb(ispsoftc_t *isp, int chan, uint16_t id, isp_pdb_t *pdb, int dolock) { fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; union { isp_pdb_21xx_t fred; isp_pdb_24xx_t bill; } un; MBSINIT(&mbs, MBOX_GET_PORT_DB, MBLOGALL & ~MBLOGMASK(MBOX_COMMAND_PARAM_ERROR), 250000); if (IS_24XX(isp)) { mbs.ibits = (1 << 9)|(1 << 10); mbs.param[1] = id; mbs.param[9] = chan; } else if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = id; } else { mbs.param[1] = id << 8; } mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); if (dolock) { if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } } MEMORYBARRIER(isp, SYNC_SFORDEV, 0, sizeof (un), chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { if (dolock) { FC_SCRATCH_RELEASE(isp, chan); } return (mbs.param[0] | (mbs.param[1] << 16)); } if (IS_24XX(isp)) { isp_get_pdb_24xx(isp, fcp->isp_scratch, &un.bill); pdb->handle = un.bill.pdb_handle; pdb->prli_word3 = un.bill.pdb_prli_svc3; pdb->portid = BITS2WORD_24XX(un.bill.pdb_portid_bits); ISP_MEMCPY(pdb->portname, un.bill.pdb_portname, 8); ISP_MEMCPY(pdb->nodename, un.bill.pdb_nodename, 8); - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d handle 0x%x Port 0x%06x flags 0x%x curstate %x", chan, id, pdb->portid, un.bill.pdb_flags, un.bill.pdb_curstate); + isp_prt(isp, ISP_LOGDEBUG1, + "Chan %d handle 0x%x Port 0x%06x flags 0x%x curstate %x", + chan, id, pdb->portid, un.bill.pdb_flags, + un.bill.pdb_curstate); if (un.bill.pdb_curstate < PDB2400_STATE_PLOGI_DONE || un.bill.pdb_curstate > PDB2400_STATE_LOGGED_IN) { mbs.param[0] = MBOX_NOT_LOGGED_IN; if (dolock) { FC_SCRATCH_RELEASE(isp, chan); } return (mbs.param[0]); } } else { isp_get_pdb_21xx(isp, fcp->isp_scratch, &un.fred); pdb->handle = un.fred.pdb_loopid; pdb->prli_word3 = un.fred.pdb_prli_svc3; pdb->portid = BITS2WORD(un.fred.pdb_portid_bits); ISP_MEMCPY(pdb->portname, un.fred.pdb_portname, 8); ISP_MEMCPY(pdb->nodename, un.fred.pdb_nodename, 8); + isp_prt(isp, ISP_LOGDEBUG1, + "Chan %d handle 0x%x Port 0x%06x", chan, id, pdb->portid); } if (dolock) { FC_SCRATCH_RELEASE(isp, chan); } return (0); } static int isp_gethandles(ispsoftc_t *isp, int chan, uint16_t *handles, int *num, int dolock, int loop) { fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; isp_pnhle_21xx_t el1, *elp1; isp_pnhle_23xx_t el3, *elp3; isp_pnhle_24xx_t el4, *elp4; int i, j; uint32_t p; uint16_t h; MBSINIT(&mbs, MBOX_GET_ID_LIST, MBLOGALL, 250000); if (IS_24XX(isp)) { mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); mbs.param[8] = ISP_FC_SCRLEN; mbs.param[9] = chan; } else { mbs.ibits = (1 << 1)|(1 << 2)|(1 << 3)|(1 << 6); mbs.param[1] = DMA_WD1(fcp->isp_scdma); mbs.param[2] = DMA_WD0(fcp->isp_scdma); mbs.param[3] = DMA_WD3(fcp->isp_scdma); mbs.param[6] = DMA_WD2(fcp->isp_scdma); } if (dolock) { if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } } MEMORYBARRIER(isp, SYNC_SFORDEV, 0, ISP_FC_SCRLEN, chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { if (dolock) { 
FC_SCRATCH_RELEASE(isp, chan); } return (mbs.param[0] | (mbs.param[1] << 16)); } elp1 = fcp->isp_scratch; elp3 = fcp->isp_scratch; elp4 = fcp->isp_scratch; for (i = 0, j = 0; i < mbs.param[1] && j < *num; i++) { if (IS_24XX(isp)) { isp_get_pnhle_24xx(isp, &elp4[i], &el4); p = el4.pnhle_port_id_lo | (el4.pnhle_port_id_hi << 16); h = el4.pnhle_handle; } else if (IS_23XX(isp)) { isp_get_pnhle_23xx(isp, &elp3[i], &el3); p = el3.pnhle_port_id_lo | (el3.pnhle_port_id_hi << 16); h = el3.pnhle_handle; } else { /* 21xx */ isp_get_pnhle_21xx(isp, &elp1[i], &el1); p = el1.pnhle_port_id_lo | ((el1.pnhle_port_id_hi_handle & 0xff) << 16); h = el1.pnhle_port_id_hi_handle >> 8; } if (loop && (p >> 8) != (fcp->isp_portid >> 8)) continue; handles[j++] = h; } *num = j; if (dolock) FC_SCRATCH_RELEASE(isp, chan); return (0); } static void isp_dump_chip_portdb(ispsoftc_t *isp, int chan, int dolock) { isp_pdb_t pdb; - int lim, loopid; + uint16_t lim, nphdl; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGINFO, "Chan %d chip port dump", chan); if (ISP_CAP_2KLOGIN(isp)) { lim = NPH_MAX_2K; } else { lim = NPH_MAX; } - for (loopid = 0; loopid != lim; loopid++) { - if (isp_getpdb(isp, chan, loopid, &pdb, dolock)) { + for (nphdl = 0; nphdl != lim; nphdl++) { + if (isp_getpdb(isp, chan, nphdl, &pdb, dolock)) { continue; } - isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGINFO, "Chan %d Loopid 0x%04x " + isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGINFO, "Chan %d Handle 0x%04x " "PortID 0x%06x WWPN 0x%02x%02x%02x%02x%02x%02x%02x%02x", - chan, loopid, pdb.portid, pdb.portname[0], pdb.portname[1], + chan, nphdl, pdb.portid, pdb.portname[0], pdb.portname[1], pdb.portname[2], pdb.portname[3], pdb.portname[4], pdb.portname[5], pdb.portname[6], pdb.portname[7]); } } static uint64_t -isp_get_wwn(ispsoftc_t *isp, int chan, int loopid, int nodename) +isp_get_wwn(ispsoftc_t *isp, int chan, int nphdl, int nodename) { uint64_t wwn = INI_NONE; - fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; - if (fcp->isp_fwstate < FW_READY || - fcp->isp_loopstate < LOOP_PDB_RCVD) { - return (wwn); - } MBSINIT(&mbs, MBOX_GET_PORT_NAME, MBLOGALL & ~MBLOGMASK(MBOX_COMMAND_PARAM_ERROR), 500000); if (ISP_CAP_2KLOGIN(isp)) { - mbs.param[1] = loopid; + mbs.param[1] = nphdl; if (nodename) { mbs.param[10] = 1; } mbs.param[9] = chan; } else { mbs.ibitm = 3; - mbs.param[1] = loopid << 8; + mbs.param[1] = nphdl << 8; if (nodename) { mbs.param[1] |= 1; } } isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (wwn); } if (IS_24XX(isp)) { wwn = (((uint64_t)(mbs.param[2] >> 8)) << 56) | (((uint64_t)(mbs.param[2] & 0xff)) << 48) | (((uint64_t)(mbs.param[3] >> 8)) << 40) | (((uint64_t)(mbs.param[3] & 0xff)) << 32) | (((uint64_t)(mbs.param[6] >> 8)) << 24) | (((uint64_t)(mbs.param[6] & 0xff)) << 16) | (((uint64_t)(mbs.param[7] >> 8)) << 8) | (((uint64_t)(mbs.param[7] & 0xff))); } else { wwn = (((uint64_t)(mbs.param[2] & 0xff)) << 56) | (((uint64_t)(mbs.param[2] >> 8)) << 48) | (((uint64_t)(mbs.param[3] & 0xff)) << 40) | (((uint64_t)(mbs.param[3] >> 8)) << 32) | (((uint64_t)(mbs.param[6] & 0xff)) << 24) | (((uint64_t)(mbs.param[6] >> 8)) << 16) | (((uint64_t)(mbs.param[7] & 0xff)) << 8) | (((uint64_t)(mbs.param[7] >> 8))); } return (wwn); } /* * Make sure we have good FC link. 
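* This amounts to polling the firmware state until it reports FW_READY
* (or the caller's microsecond budget runs out), then fetching our
* loop ID, Port ID and topology, and registering FC4 types with the
* name server when a fabric is present.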
*/ static int isp_fclink_test(ispsoftc_t *isp, int chan, int usdelay) { mbreg_t mbs; - int check_for_fabric, r; - uint8_t lwfs; - int loopid; + int r; + uint16_t nphdl; fcparam *fcp; - fcportdb_t *lp; isp_pdb_t pdb; NANOTIME_T hra, hrb; fcp = FCPARAM(isp, chan); - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC Link Test Entry", chan); - ISP_MARK_PORTDB(isp, chan, 1); + /* Mark port database entries for following scan. */ + isp_mark_portdb(isp, chan, 1); + if (fcp->isp_loopstate >= LOOP_LTEST_DONE) + return (0); + + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC link test", chan); + fcp->isp_loopstate = LOOP_TESTING_LINK; + /* * Wait up to N microseconds for F/W to go to a ready state. */ - lwfs = FW_CONFIG_WAIT; GET_NANOTIME(&hra); while (1) { - isp_fw_state(isp, chan); - if (lwfs != fcp->isp_fwstate) { - isp_prt(isp, ISP_LOGCONFIG|ISP_LOG_SANCFG, "Chan %d Firmware State <%s->%s>", chan, isp_fc_fw_statename((int)lwfs), isp_fc_fw_statename((int)fcp->isp_fwstate)); - lwfs = fcp->isp_fwstate; - } + isp_change_fw_state(isp, chan, isp_fw_state(isp, chan)); if (fcp->isp_fwstate == FW_READY) { break; } GET_NANOTIME(&hrb); if ((NANOTIME_SUB(&hrb, &hra) / 1000 + 1000 >= usdelay)) break; ISP_SLEEP(isp, 1000); } /* * If we haven't gone to 'ready' state, return. */ if (fcp->isp_fwstate != FW_READY) { - isp_prt(isp, ISP_LOG_SANCFG, "%s: chan %d not at FW_READY state", __func__, chan); + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d Firmware is not ready (%s)", + chan, isp_fc_fw_statename(fcp->isp_fwstate)); return (-1); } /* * Get our Loop ID and Port ID. */ MBSINIT(&mbs, MBOX_GET_LOOP_ID, MBLOGALL, 0); mbs.param[9] = chan; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (-1); } if (ISP_CAP_2KLOGIN(isp)) { fcp->isp_loopid = mbs.param[1]; } else { fcp->isp_loopid = mbs.param[1] & 0xff; } if (IS_2100(isp)) { - fcp->isp_topo = TOPO_NL_PORT; + /* + * Don't bother with fabric if we are using really old + * 2100 firmware. It's just not worth it. + */ + if (ISP_FW_NEWER_THAN(isp, 1, 15, 37)) + fcp->isp_topo = TOPO_FL_PORT; + else + fcp->isp_topo = TOPO_NL_PORT; } else { int topo = (int) mbs.param[6]; if (topo < TOPO_NL_PORT || topo > TOPO_PTP_STUB) { topo = TOPO_PTP_STUB; } fcp->isp_topo = topo; } fcp->isp_portid = mbs.param[2] | (mbs.param[3] << 16); - if (IS_2100(isp)) { - /* - * Don't bother with fabric if we are using really old - * 2100 firmware. It's just not worth it. - */ - if (ISP_FW_NEWER_THAN(isp, 1, 15, 37)) { - check_for_fabric = 1; - } else { - check_for_fabric = 0; - } - } else if (fcp->isp_topo == TOPO_FL_PORT || fcp->isp_topo == TOPO_F_PORT) { - check_for_fabric = 1; - } else { - check_for_fabric = 0; - } - /* * Check to make sure we got a valid loopid * The 24XX seems to mess this up for multiple channels. */ if (fcp->isp_topo == TOPO_FL_PORT || fcp->isp_topo == TOPO_NL_PORT) { uint8_t alpa = fcp->isp_portid; if (alpa == 0) { /* "Cannot Happen" */ isp_prt(isp, ISP_LOGWARN, "Zero AL_PA for Loop Topology?"); } else { int i; for (i = 0; alpa_map[i]; i++) { if (alpa_map[i] == alpa) { break; } } if (alpa_map[i] && fcp->isp_loopid != i) { isp_prt(isp, ISP_LOG_SANCFG, - "Chan %d deriving loopid %d from AL_PA map (AL_PA 0x%x) and ignoring returned value %d (AL_PA 0x%x)", + "Chan %d Deriving loopid %d from AL_PA map (AL_PA 0x%x) and ignoring returned value %d (AL_PA 0x%x)", chan, i, alpa_map[i], fcp->isp_loopid, alpa); fcp->isp_loopid = i; } } } - - if (IS_24XX(isp)) { /* XXX SHOULDN'T THIS BE FOR 2K F/W? 
XXX */ - loopid = NPH_FL_ID; - } else { - loopid = FL_ID; - } - if (check_for_fabric) { - r = isp_getpdb(isp, chan, loopid, &pdb, 1); - if (r && (fcp->isp_topo == TOPO_F_PORT || fcp->isp_topo == TOPO_FL_PORT)) { - isp_prt(isp, ISP_LOGWARN, "fabric topology but cannot get info about fabric controller (0x%x)", r); - fcp->isp_topo = TOPO_PTP_STUB; - } - } else { - r = -1; - } - if (r == 0) { - if (IS_2100(isp)) { - fcp->isp_topo = TOPO_FL_PORT; - } - if (pdb.portid == 0) { - /* - * Crock. - */ - fcp->isp_topo = TOPO_NL_PORT; + if (fcp->isp_topo == TOPO_F_PORT || fcp->isp_topo == TOPO_FL_PORT) { + nphdl = IS_24XX(isp) ? NPH_FL_ID : FL_ID; + r = isp_getpdb(isp, chan, nphdl, &pdb, 1); + if (r != 0 || pdb.portid == 0) { + if (IS_2100(isp)) { + fcp->isp_topo = TOPO_NL_PORT; + } else { + isp_prt(isp, ISP_LOGWARN, + "fabric topology, but cannot get info about fabric controller (0x%x)", r); + fcp->isp_topo = TOPO_PTP_STUB; + } goto not_on_fabric; } - /* - * Save the Fabric controller's port database entry. - */ - lp = &fcp->portdb[FL_ID]; - lp->state = FC_PORTDB_STATE_PENDING_VALID; - MAKE_WWN_FROM_NODE_NAME(lp->node_wwn, pdb.nodename); - MAKE_WWN_FROM_NODE_NAME(lp->port_wwn, pdb.portname); - lp->prli_word3 = pdb.prli_word3; - lp->portid = pdb.portid; - lp->handle = pdb.handle; - lp->new_portid = lp->portid; - lp->new_prli_word3 = lp->prli_word3; if (IS_24XX(isp)) { - if (check_for_fabric) { - /* - * The mbs is still hanging out from the MBOX_GET_LOOP_ID above. - */ - fcp->isp_fabric_params = mbs.param[7]; - } else { - fcp->isp_fabric_params = 0; - } + fcp->isp_fabric_params = mbs.param[7]; fcp->isp_sns_hdl = NPH_SNS_ID; r = isp_register_fc4_type_24xx(isp, chan); + if (r == 0) + isp_register_fc4_features_24xx(isp, chan); } else { fcp->isp_sns_hdl = SNS_ID; r = isp_register_fc4_type(isp, chan); } if (r) { isp_prt(isp, ISP_LOGWARN|ISP_LOG_SANCFG, "%s: register fc4 type failed", __func__); return (-1); } - } else { -not_on_fabric: - fcp->portdb[FL_ID].state = FC_PORTDB_STATE_NIL; } +not_on_fabric: fcp->isp_gbspeed = 1; if (IS_23XX(isp) || IS_24XX(isp)) { MBSINIT(&mbs, MBOX_GET_SET_DATA_RATE, MBLOGALL, 3000000); mbs.param[1] = MBGSD_GET_RATE; /* mbs.param[2] undefined if we're just getting rate */ isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { if (mbs.param[1] == MBGSD_EIGHTGB) { isp_prt(isp, ISP_LOGINFO, "Chan %d 8Gb link speed", chan); fcp->isp_gbspeed = 8; } else if (mbs.param[1] == MBGSD_FOURGB) { isp_prt(isp, ISP_LOGINFO, "Chan %d 4Gb link speed", chan); fcp->isp_gbspeed = 4; } else if (mbs.param[1] == MBGSD_TWOGB) { isp_prt(isp, ISP_LOGINFO, "Chan %d 2Gb link speed", chan); fcp->isp_gbspeed = 2; } else if (mbs.param[1] == MBGSD_ONEGB) { isp_prt(isp, ISP_LOGINFO, "Chan %d 1Gb link speed", chan); fcp->isp_gbspeed = 1; } } } + fcp->isp_loopstate = LOOP_LTEST_DONE; /* * Announce ourselves, too. */ isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGCONFIG, topology, chan, (uint32_t) (fcp->isp_wwpn >> 32), (uint32_t) fcp->isp_wwpn, fcp->isp_portid, fcp->isp_loopid, isp_fc_toponame(fcp)); - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC Link Test Complete", chan); + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC link test done", chan); return (0); } /* * Complete the synchronization of our Port Database. * * At this point, we've scanned the local loop (if any) and the fabric * and performed fabric logins on all new devices. 
* - * Our task here is to go through our port database and remove any entities + * Our task here is to go through our port database removing any entities * that are still marked probational (issuing PLOGO for ones which we had - * PLOGI'd into) or are dead. - * - * Our task here is to also check policy to decide whether devices which - * have *changed* in some way should still be kept active. For example, - * if a device has just changed PortID, we can either elect to treat it - * as an old device or as a newly arrived device (and notify the outer - * layer appropriately). - * - * We also do initiator map target id assignment here for new initiator - * devices and refresh old ones ot make sure that they point to the correct - * entities. + * PLOGI'd into) or are dead, and notifying upper layers about new/changed + * devices. */ static int isp_pdb_sync(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; uint16_t dbidx; - if (fcp->isp_loopstate == LOOP_READY) { - return (0); - } - - /* - * Make sure we're okay for doing this right now. - */ - if (fcp->isp_loopstate != LOOP_PDB_RCVD && - fcp->isp_loopstate != LOOP_FSCAN_DONE && - fcp->isp_loopstate != LOOP_LSCAN_DONE) { - isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: bad loopstate %d", - fcp->isp_loopstate); + if (fcp->isp_loopstate < LOOP_FSCAN_DONE) { return (-1); } - - if (fcp->isp_topo == TOPO_FL_PORT || - fcp->isp_topo == TOPO_NL_PORT || - fcp->isp_topo == TOPO_N_PORT) { - if (fcp->isp_loopstate < LOOP_LSCAN_DONE) { - if (isp_scan_loop(isp, chan) != 0) { - isp_prt(isp, ISP_LOGWARN, - "isp_pdb_sync: isp_scan_loop failed"); - return (-1); - } - } + if (fcp->isp_loopstate > LOOP_SYNCING_PDB) { + return (0); } - if (fcp->isp_topo == TOPO_F_PORT || fcp->isp_topo == TOPO_FL_PORT) { - if (fcp->isp_loopstate < LOOP_FSCAN_DONE) { - if (isp_scan_fabric(isp, chan) != 0) { - isp_prt(isp, ISP_LOGWARN, - "isp_pdb_sync: isp_scan_fabric failed"); - return (-1); - } - } - } + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC PDB sync", chan); - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Synchronizing PDBs", chan); - fcp->isp_loopstate = LOOP_SYNCING_PDB; for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &fcp->portdb[dbidx]; if (lp->state == FC_PORTDB_STATE_NIL || lp->state == FC_PORTDB_STATE_VALID) { continue; } switch (lp->state) { case FC_PORTDB_STATE_PROBATIONAL: case FC_PORTDB_STATE_DEAD: lp->state = FC_PORTDB_STATE_NIL; isp_async(isp, ISPASYNC_DEV_GONE, chan, lp); if (lp->autologin == 0) { (void) isp_plogx(isp, chan, lp->handle, lp->portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT | PLOGX_FLG_FREE_NPHDL, 0); } else { lp->autologin = 0; } lp->new_prli_word3 = 0; lp->new_portid = 0; /* * Note that we might come out of this with our state * set to FC_PORTDB_STATE_ZOMBIE. 
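* The cases below promote NEW, CHANGED and PENDING_VALID entries to
* VALID, notifying the outer layers with ISPASYNC_DEV_ARRIVED,
* ISPASYNC_DEV_CHANGED and ISPASYNC_DEV_STAYED respectively.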
*/ break; case FC_PORTDB_STATE_NEW: lp->portid = lp->new_portid; lp->prli_word3 = lp->new_prli_word3; lp->state = FC_PORTDB_STATE_VALID; isp_async(isp, ISPASYNC_DEV_ARRIVED, chan, lp); lp->new_prli_word3 = 0; lp->new_portid = 0; break; case FC_PORTDB_STATE_CHANGED: lp->state = FC_PORTDB_STATE_VALID; isp_async(isp, ISPASYNC_DEV_CHANGED, chan, lp); lp->portid = lp->new_portid; lp->prli_word3 = lp->new_prli_word3; lp->new_prli_word3 = 0; lp->new_portid = 0; break; case FC_PORTDB_STATE_PENDING_VALID: lp->portid = lp->new_portid; lp->prli_word3 = lp->new_prli_word3; lp->state = FC_PORTDB_STATE_VALID; isp_async(isp, ISPASYNC_DEV_STAYED, chan, lp); - if (dbidx != FL_ID) { - lp->new_prli_word3 = 0; - lp->new_portid = 0; - } break; case FC_PORTDB_STATE_ZOMBIE: break; default: isp_prt(isp, ISP_LOGWARN, "isp_pdb_sync: state %d for idx %d", lp->state, dbidx); isp_dump_portdb(isp, chan); } } /* * If we get here, we've for sure seen not only a valid loop * but know what is or isn't on it, so mark this for usage * in isp_start. */ fcp->loop_seen_once = 1; fcp->isp_loopstate = LOOP_READY; + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC PDB sync done", chan); return (0); } /* * Scan local loop for devices. */ static int isp_scan_loop(ispsoftc_t *isp, int chan) { fcportdb_t *lp, tmp; fcparam *fcp = FCPARAM(isp, chan); int i, idx, lim, r; isp_pdb_t pdb; uint16_t handles[LOCAL_LOOP_LIM]; uint16_t handle; - if (fcp->isp_fwstate < FW_READY || - fcp->isp_loopstate < LOOP_PDB_RCVD) { + if (fcp->isp_loopstate < LOOP_LTEST_DONE) { return (-1); } if (fcp->isp_loopstate > LOOP_SCANNING_LOOP) { return (0); } + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC loop scan", chan); if (fcp->isp_topo != TOPO_NL_PORT && fcp->isp_topo != TOPO_FL_PORT && fcp->isp_topo != TOPO_N_PORT) { isp_prt(isp, ISP_LOG_SANCFG, - "Chan %d no loop topology to scan", chan); + "Chan %d FC loop scan done (no loop)", chan); fcp->isp_loopstate = LOOP_LSCAN_DONE; return (0); } - fcp->isp_loopstate = LOOP_SCANNING_LOOP; lim = LOCAL_LOOP_LIM; r = isp_gethandles(isp, chan, handles, &lim, 1, 1); if (r != 0) { isp_prt(isp, ISP_LOG_SANCFG, - "Chan %d getting list of handles failed with %x", chan, r); + "Chan %d Getting list of handles failed with %x", chan, r); fail: - ISP_MARK_PORTDB(isp, chan, 1); isp_prt(isp, ISP_LOG_SANCFG, - "Chan %d FC scan loop DONE (bad)", chan); + "Chan %d FC loop scan done (bad)", chan); return (-1); } - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC scan loop -- %d ports", + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Got %d handles", chan, lim); /* * Run through the list and get the port database info for each one. */ for (idx = 0; idx < lim; idx++) { handle = handles[idx]; /* * Don't scan "special" ids. */ if (ISP_CAP_2KLOGIN(isp)) { if (handle >= NPH_RESERVED) continue; } else { if (handle >= FL_ID && handle <= SNS_ID) continue; } /* * In older cards with older f/w GET_PORT_DATABASE has been * known to hang. This trick gets around that problem. */ if (IS_2100(isp) || IS_2200(isp)) { uint64_t node_wwn = isp_get_wwn(isp, chan, handle, 1); - if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) - goto fail; + if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) { +abort: + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d FC loop scan done (abort)", chan); + return (-1); + } if (node_wwn == INI_NONE) { continue; } } /* * Get the port database entity for this index. 
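* (isp_getpdb() runs MBOX_GET_PORT_DB through the scratch area and
* copies out the handle, PortID, WWNN/WWPN and PRLI word 3.)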
*/ r = isp_getpdb(isp, chan, handle, &pdb, 1); if (r != 0) { isp_prt(isp, ISP_LOGDEBUG1, - "Chan %d FC scan loop handle %d returned %x", + "Chan %d FC Scan Loop handle %d returned %x", chan, handle, r); if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) - goto fail; + goto abort; continue; } if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) - goto fail; + goto abort; /* * On *very* old 2100 firmware we would end up sometimes * with the firmware returning the port database entry * for something else. We used to restart this, but * now we just punt. */ if (IS_2100(isp) && pdb.handle != handle) { isp_prt(isp, ISP_LOGWARN, "Chan %d getpdb() returned wrong handle %x != %x", chan, pdb.handle, handle); goto fail; } /* * Save the pertinent info locally. */ MAKE_WWN_FROM_NODE_NAME(tmp.node_wwn, pdb.nodename); MAKE_WWN_FROM_NODE_NAME(tmp.port_wwn, pdb.portname); tmp.prli_word3 = pdb.prli_word3; tmp.portid = pdb.portid; tmp.handle = pdb.handle; /* * Check to make sure it's still a valid entry. The 24XX seems * to return a portid but not a WWPN/WWNN or role for devices * which shift on a loop. */ if (tmp.node_wwn == 0 || tmp.port_wwn == 0 || tmp.portid == 0) { int a, b, c; isp_prt(isp, ISP_LOGWARN, "Chan %d bad pdb (WWNN %016jx, WWPN %016jx, PortID %06x, W3 0x%x, H 0x%x) @ handle 0x%x", chan, tmp.node_wwn, tmp.port_wwn, tmp.portid, tmp.prli_word3, tmp.handle, handle); a = (tmp.node_wwn == 0); b = (tmp.port_wwn == 0); c = (tmp.portid == 0); if (a == 0 && b == 0) { tmp.node_wwn = isp_get_wwn(isp, chan, handle, 1); tmp.port_wwn = isp_get_wwn(isp, chan, handle, 0); if (tmp.node_wwn && tmp.port_wwn) { isp_prt(isp, ISP_LOGWARN, "DODGED!"); goto cont; } } isp_dump_portdb(isp, chan); continue; } cont: /* * Now search the entire port database * for the same Port WWN. */ if (isp_find_pdb_by_wwn(isp, chan, tmp.port_wwn, &lp)) { /* * Okay- we've found a non-nil entry that matches. * Check to make sure it's probational or a zombie. */ if (lp->state != FC_PORTDB_STATE_PROBATIONAL && lp->state != FC_PORTDB_STATE_ZOMBIE && lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGERR, "Chan %d [%d] not probational/zombie (0x%x)", chan, FC_PORTDB_TGT(isp, chan, lp), lp->state); isp_dump_portdb(isp, chan); goto fail; } /* * Mark the device as something the f/w logs into * automatically. */ lp->autologin = 1; lp->node_wwn = tmp.node_wwn; /* * Check to make see if really still the same * device. If it is, we mark it pending valid. */ if (lp->portid == tmp.portid && lp->handle == tmp.handle && lp->prli_word3 == tmp.prli_word3) { lp->new_portid = tmp.portid; lp->new_prli_word3 = tmp.prli_word3; lp->state = FC_PORTDB_STATE_PENDING_VALID; - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Loop Port 0x%06x@0x%04x Pending Valid", chan, tmp.portid, tmp.handle); + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d Loop port 0x%06x@0x%04x now pending valid", + chan, tmp.portid, tmp.handle); continue; } /* * We can wipe out the old handle value * here because it's no longer valid. */ lp->handle = tmp.handle; /* * Claim that this has changed and let somebody else * decide what to do. */ - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Loop Port 0x%06x@0x%04x changed", chan, tmp.portid, tmp.handle); + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d Loop port 0x%06x@0x%04x changed", + chan, tmp.portid, tmp.handle); lp->state = FC_PORTDB_STATE_CHANGED; lp->new_portid = tmp.portid; lp->new_prli_word3 = tmp.prli_word3; continue; } /* * Ah. A new device entry. Find an empty slot * for it and save info for later disposition. 
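* (The search below is a plain linear scan for the first NIL slot;
* if the table is full we log an error and drop the device.)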
*/ for (i = 0; i < MAX_FC_TARG; i++) { if (fcp->portdb[i].state == FC_PORTDB_STATE_NIL) { break; } } if (i == MAX_FC_TARG) { isp_prt(isp, ISP_LOGERR, "Chan %d out of portdb entries", chan); continue; } lp = &fcp->portdb[i]; ISP_MEMZERO(lp, sizeof (fcportdb_t)); lp->autologin = 1; lp->state = FC_PORTDB_STATE_NEW; lp->new_portid = tmp.portid; lp->new_prli_word3 = tmp.prli_word3; lp->handle = tmp.handle; lp->port_wwn = tmp.port_wwn; lp->node_wwn = tmp.node_wwn; - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Loop Port 0x%06x@0x%04x is New Entry", chan, tmp.portid, tmp.handle); + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d Loop port 0x%06x@0x%04x is a new entry", + chan, tmp.portid, tmp.handle); } + if (fcp->isp_loopstate < LOOP_SCANNING_LOOP) + goto abort; fcp->isp_loopstate = LOOP_LSCAN_DONE; - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC scan loop DONE", chan); + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC loop scan done", chan); return (0); } /* * Scan the fabric for devices and add them to our port database. * * Use the GID_FT command to get all Port IDs for FC4 SCSI devices it knows. * * For 2100-23XX cards, we can use the SNS mailbox command to pass simple * name server commands to the switch management server via the QLogic f/w. * * For the 24XX card, we have to use CT-Pass through run via the Execute IOCB * mailbox command. * * The net result is to leave the list of Port IDs setting untranslated in * offset IGPOFF of the FC scratch area, whereupon we'll canonicalize it to * host order at OGPOFF. */ /* * Take less than half of our scratch area to store Port IDs */ #define GIDLEN ((ISP_FC_SCRLEN >> 1) - 16 - SNS_GID_FT_REQ_SIZE) #define NGENT ((GIDLEN - 16) >> 2) #define IGPOFF (2 * QENTRY_LEN) #define OGPOFF (ISP_FC_SCRLEN >> 1) #define ZTXOFF (ISP_FC_SCRLEN - (1 * QENTRY_LEN)) #define CTXOFF (ISP_FC_SCRLEN - (2 * QENTRY_LEN)) #define XTXOFF (ISP_FC_SCRLEN - (3 * QENTRY_LEN)) static int isp_gid_ft_sns(ispsoftc_t *isp, int chan) { union { sns_gid_ft_req_t _x; uint8_t _y[SNS_GID_FT_REQ_SIZE]; } un; fcparam *fcp = FCPARAM(isp, chan); sns_gid_ft_req_t *rq = &un._x; mbreg_t mbs; isp_prt(isp, ISP_LOGDEBUG0, "Chan %d scanning fabric (GID_FT) via SNS", chan); ISP_MEMZERO(rq, SNS_GID_FT_REQ_SIZE); rq->snscb_rblen = GIDLEN >> 1; rq->snscb_addr[RQRSP_ADDR0015] = DMA_WD0(fcp->isp_scdma + IGPOFF); rq->snscb_addr[RQRSP_ADDR1631] = DMA_WD1(fcp->isp_scdma + IGPOFF); rq->snscb_addr[RQRSP_ADDR3247] = DMA_WD2(fcp->isp_scdma + IGPOFF); rq->snscb_addr[RQRSP_ADDR4863] = DMA_WD3(fcp->isp_scdma + IGPOFF); rq->snscb_sblen = 6; rq->snscb_cmd = SNS_GID_FT; rq->snscb_mword_div_2 = NGENT; rq->snscb_fc4_type = FC4_SCSI; isp_put_gid_ft_request(isp, rq, fcp->isp_scratch); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, SNS_GID_FT_REQ_SIZE, chan); MBSINIT(&mbs, MBOX_SEND_SNS, MBLOGALL, 10000000); mbs.param[0] = MBOX_SEND_SNS; mbs.param[1] = SNS_GID_FT_REQ_SIZE >> 1; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { if (mbs.param[0] == MBOX_INVALID_COMMAND) { return (1); } else { return (-1); } } return (0); } static int isp_gid_ft_ct_passthru(ispsoftc_t *isp, int chan) { mbreg_t mbs; fcparam *fcp = FCPARAM(isp, chan); union { isp_ct_pt_t plocal; ct_hdr_t clocal; uint8_t q[QENTRY_LEN]; } un; isp_ct_pt_t *pt; ct_hdr_t *ct; uint32_t *rp; uint8_t *scp = fcp->isp_scratch; isp_prt(isp, ISP_LOGDEBUG0, "Chan %d scanning fabric (GID_FT) via CT", chan); if (!IS_24XX(isp)) { return 
(1); } /* * Build a Passthrough IOCB in memory. */ pt = &un.plocal; ISP_MEMZERO(un.q, QENTRY_LEN); pt->ctp_header.rqs_entry_count = 1; pt->ctp_header.rqs_entry_type = RQSTYPE_CT_PASSTHRU; pt->ctp_handle = 0xffffffff; pt->ctp_nphdl = fcp->isp_sns_hdl; pt->ctp_cmd_cnt = 1; pt->ctp_vpidx = ISP_GET_VPIDX(isp, chan); pt->ctp_time = 30; pt->ctp_rsp_cnt = 1; pt->ctp_rsp_bcnt = GIDLEN; pt->ctp_cmd_bcnt = sizeof (*ct) + sizeof (uint32_t); pt->ctp_dataseg[0].ds_base = DMA_LO32(fcp->isp_scdma+XTXOFF); pt->ctp_dataseg[0].ds_basehi = DMA_HI32(fcp->isp_scdma+XTXOFF); pt->ctp_dataseg[0].ds_count = sizeof (*ct) + sizeof (uint32_t); pt->ctp_dataseg[1].ds_base = DMA_LO32(fcp->isp_scdma+IGPOFF); pt->ctp_dataseg[1].ds_basehi = DMA_HI32(fcp->isp_scdma+IGPOFF); pt->ctp_dataseg[1].ds_count = GIDLEN; if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "ct IOCB", QENTRY_LEN, pt); } isp_put_ct_pt(isp, pt, (isp_ct_pt_t *) &scp[CTXOFF]); /* * Build the CT header and command in memory. * * Note that the CT header has to end up as Big Endian format in memory. */ ct = &un.clocal; ISP_MEMZERO(ct, sizeof (*ct)); ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_GID_FT; ct->ct_bcnt_resid = (GIDLEN - 16) >> 2; isp_put_ct_hdr(isp, ct, (ct_hdr_t *) &scp[XTXOFF]); rp = (uint32_t *) &scp[XTXOFF+sizeof (*ct)]; ISP_IOZPUT_32(isp, FC4_SCSI, rp); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "CT HDR + payload after put", sizeof (*ct) + sizeof (uint32_t), &scp[XTXOFF]); } ISP_MEMZERO(&scp[ZTXOFF], QENTRY_LEN); MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 500000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma + CTXOFF); mbs.param[3] = DMA_WD0(fcp->isp_scdma + CTXOFF); mbs.param[6] = DMA_WD3(fcp->isp_scdma + CTXOFF); mbs.param[7] = DMA_WD2(fcp->isp_scdma + CTXOFF); MEMORYBARRIER(isp, SYNC_SFORDEV, XTXOFF, 2 * QENTRY_LEN, chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { return (-1); } MEMORYBARRIER(isp, SYNC_SFORCPU, ZTXOFF, QENTRY_LEN, chan); pt = &un.plocal; isp_get_ct_pt(isp, (isp_ct_pt_t *) &scp[ZTXOFF], pt); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "IOCB response", QENTRY_LEN, pt); } if (pt->ctp_status && pt->ctp_status != RQCS_DATA_UNDERRUN) { isp_prt(isp, ISP_LOGWARN, "Chan %d ISP GID FT CT Passthrough returned 0x%x", chan, pt->ctp_status); return (-1); } MEMORYBARRIER(isp, SYNC_SFORCPU, IGPOFF, GIDLEN + 16, chan); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "CT response", GIDLEN+16, &scp[IGPOFF]); } return (0); } static int isp_scan_fabric(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); uint32_t portid; - uint16_t handle, loopid; + uint16_t nphdl; isp_pdb_t pdb; int portidx, portlim, r; sns_gid_ft_rsp_t *rs0, *rs1; - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC Scan Fabric", chan); - if (fcp->isp_fwstate != FW_READY || fcp->isp_loopstate < LOOP_LSCAN_DONE) { + if (fcp->isp_loopstate < LOOP_LSCAN_DONE) { return (-1); } if (fcp->isp_loopstate > LOOP_SCANNING_FABRIC) { return (0); } + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan", chan); if (fcp->isp_topo != TOPO_FL_PORT && fcp->isp_topo != TOPO_F_PORT) { fcp->isp_loopstate = LOOP_FSCAN_DONE; - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC Scan Fabric Done (no fabric)", chan); + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d FC fabric scan done (no fabric)", chan); return (0); } - fcp->isp_loopstate = LOOP_SCANNING_FABRIC; + if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); - 
ISP_MARK_PORTDB(isp, chan, 1); +fail: + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d FC fabric scan done (bad)", chan); return (-1); } if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { +abort: FC_SCRATCH_RELEASE(isp, chan); - ISP_MARK_PORTDB(isp, chan, 1); + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d FC fabric scan done (abort)", chan); return (-1); } /* * Make sure we still are logged into the fabric controller. */ - if (IS_24XX(isp)) { /* XXX SHOULDN'T THIS BE TRUE FOR 2K F/W? XXX */ - loopid = NPH_FL_ID; - } else { - loopid = FL_ID; - } - r = isp_getpdb(isp, chan, loopid, &pdb, 0); + nphdl = IS_24XX(isp) ? NPH_FL_ID : FL_ID; + r = isp_getpdb(isp, chan, nphdl, &pdb, 0); if ((r & 0xffff) == MBOX_NOT_LOGGED_IN) { isp_dump_chip_portdb(isp, chan, 0); } if (r) { - fcp->isp_loopstate = LOOP_PDB_RCVD; + fcp->isp_loopstate = LOOP_LTEST_DONE; FC_SCRATCH_RELEASE(isp, chan); - ISP_MARK_PORTDB(isp, chan, 1); - return (-1); + goto fail; } if (IS_24XX(isp)) { r = isp_gid_ft_ct_passthru(isp, chan); } else { r = isp_gid_ft_sns(isp, chan); } if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp, chan); - ISP_MARK_PORTDB(isp, chan, 1); - return (-1); + goto abort; } if (r > 0) { fcp->isp_loopstate = LOOP_FSCAN_DONE; FC_SCRATCH_RELEASE(isp, chan); return (0); } else if (r < 0) { - fcp->isp_loopstate = LOOP_PDB_RCVD; /* try again */ + fcp->isp_loopstate = LOOP_LTEST_DONE; /* try again */ FC_SCRATCH_RELEASE(isp, chan); return (0); } MEMORYBARRIER(isp, SYNC_SFORCPU, IGPOFF, GIDLEN, chan); rs0 = (sns_gid_ft_rsp_t *) ((uint8_t *)fcp->isp_scratch+IGPOFF); rs1 = (sns_gid_ft_rsp_t *) ((uint8_t *)fcp->isp_scratch+OGPOFF); isp_get_gid_ft_response(isp, rs0, rs1, NGENT); if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp, chan); - ISP_MARK_PORTDB(isp, chan, 1); - return (-1); + goto abort; } if (rs1->snscb_cthdr.ct_cmd_resp != LS_ACC) { int level; if (rs1->snscb_cthdr.ct_reason == 9 && rs1->snscb_cthdr.ct_explanation == 7) { level = ISP_LOG_SANCFG; } else { level = ISP_LOGWARN; } isp_prt(isp, level, "Chan %d Fabric Nameserver rejected GID_FT" " (Reason=0x%x Expl=0x%x)", chan, rs1->snscb_cthdr.ct_reason, rs1->snscb_cthdr.ct_explanation); FC_SCRATCH_RELEASE(isp, chan); fcp->isp_loopstate = LOOP_FSCAN_DONE; return (0); } - /* - * If we get this far, we certainly still have the fabric controller. - */ - fcp->portdb[FL_ID].state = FC_PORTDB_STATE_PENDING_VALID; - - /* * Go through the list and remove duplicate port ids. */ portlim = 0; portidx = 0; for (portidx = 0; portidx < NGENT-1; portidx++) { if (rs1->snscb_ports[portidx].control & 0x80) { break; } } /* * If we're not at the last entry, our list wasn't big enough. 
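* (The name server sets bit 0x80 of the control byte only on the last
* entry of a GID_FT response, so scanning all NGENT entries without
* seeing it means the response was truncated to fit our scratch area.)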
*/ if ((rs1->snscb_ports[portidx].control & 0x80) == 0) { isp_prt(isp, ISP_LOGWARN, "fabric too big for scratch area: increase ISP_FC_SCRLEN"); } portlim = portidx + 1; isp_prt(isp, ISP_LOG_SANCFG, - "Chan %d got %d ports back from name server", chan, portlim); + "Chan %d Got %d ports back from name server", chan, portlim); for (portidx = 0; portidx < portlim; portidx++) { int npidx; portid = ((rs1->snscb_ports[portidx].portid[0]) << 16) | ((rs1->snscb_ports[portidx].portid[1]) << 8) | ((rs1->snscb_ports[portidx].portid[2])); for (npidx = portidx + 1; npidx < portlim; npidx++) { uint32_t new_portid = ((rs1->snscb_ports[npidx].portid[0]) << 16) | ((rs1->snscb_ports[npidx].portid[1]) << 8) | ((rs1->snscb_ports[npidx].portid[2])); if (new_portid == portid) { break; } } if (npidx < portlim) { rs1->snscb_ports[npidx].portid[0] = 0; rs1->snscb_ports[npidx].portid[1] = 0; rs1->snscb_ports[npidx].portid[2] = 0; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d removing duplicate PortID 0x%06x entry from list", chan, portid); } } /* * We now have a list of Port IDs for all FC4 SCSI devices * that the Fabric Name server knows about. * * For each entry on this list go through our port database looking * for probational entries- if we find one, then an old entry is * maybe still this one. We get some information to find out. * * Otherwise, it's a new fabric device, and we log into it * (unconditionally). After searching the entire database * again to make sure that we never ever ever ever have more * than one entry that has the same PortID or the same * WWNN/WWPN duple, we enter the device into our database. */ for (portidx = 0; portidx < portlim; portidx++) { fcportdb_t *lp; uint64_t wwnn, wwpn; int dbidx, nr; portid = ((rs1->snscb_ports[portidx].portid[0]) << 16) | ((rs1->snscb_ports[portidx].portid[1]) << 8) | ((rs1->snscb_ports[portidx].portid[2])); if (portid == 0) { isp_prt(isp, ISP_LOG_SANCFG, - "Chan %d skipping null PortID at idx %d", + "Chan %d Skipping null PortID at idx %d", chan, portidx); continue; } - /* - * Skip ourselves here and on other channels. If we're - * multi-id, we can't check the portids in other FCPARAM - * arenas because the resolutions here aren't synchronized. - * The best way to do this is to exclude looking at portids - * that have the same domain and area code as our own - * portid. - */ - if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) { - if ((portid >> 8) == (fcp->isp_portid >> 8)) { - isp_prt(isp, ISP_LOG_SANCFG, - "Chan %d skip PortID 0x%06x", - chan, portid); - continue; - } - } else if (portid == fcp->isp_portid) { + if (portid == fcp->isp_portid) { isp_prt(isp, ISP_LOG_SANCFG, - "Chan %d skip ourselves on @ PortID 0x%06x", - chan, portid); + "Chan %d Skipping our PortID 0x%06x", chan, portid); continue; } isp_prt(isp, ISP_LOG_SANCFG, - "Chan %d Checking Fabric Port 0x%06x", chan, portid); + "Chan %d Checking fabric port 0x%06x", chan, portid); /* * We now search our Port Database for any * probational entries with this PortID. We don't * look for zombies here- only probational * entries (we've already logged out of zombies). */ for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &fcp->portdb[dbidx]; if (lp->state != FC_PORTDB_STATE_PROBATIONAL) { continue; } if (lp->portid == portid) { break; } } /* * We found a probational entry with this Port ID. */ if (dbidx < MAX_FC_TARG) { int handle_changed = 0; lp = &fcp->portdb[dbidx]; /* * See if we're still logged into it. 
* * If we aren't, mark it as a dead device and * leave the new portid in the database entry * for somebody further along to decide what to * do (policy choice). * * If we are, check to see if it's the same * device still (it should be). If for some * reason it isn't, mark it as a changed device * and leave the new portid and role in the * database entry for somebody further along to * decide what to do (policy choice). * */ r = isp_getpdb(isp, chan, lp->handle, &pdb, 0); - if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp, chan); - ISP_MARK_PORTDB(isp, chan, 1); - return (-1); + if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { + goto abort; } if (r != 0) { lp->new_portid = portid; lp->state = FC_PORTDB_STATE_DEAD; isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Fabric PortID 0x%06x handle 0x%x is dead (%d)", chan, portid, lp->handle, r); continue; } /* * Check to make sure that handle, portid, WWPN and * WWNN agree. If they don't, then the association * between this PortID and the stated handle has been * broken by the firmware. */ MAKE_WWN_FROM_NODE_NAME(wwnn, pdb.nodename); MAKE_WWN_FROM_NODE_NAME(wwpn, pdb.portname); if (pdb.handle != lp->handle || pdb.portid != portid || wwpn != lp->port_wwn || (lp->node_wwn != 0 && wwnn != lp->node_wwn)) { isp_prt(isp, ISP_LOG_SANCFG, fconf, chan, dbidx, pdb.handle, pdb.portid, (uint32_t) (wwnn >> 32), (uint32_t) wwnn, (uint32_t) (wwpn >> 32), (uint32_t) wwpn, lp->handle, portid, (uint32_t) (lp->node_wwn >> 32), (uint32_t) lp->node_wwn, (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn); /* * Try to re-login to this device using a * new handle. If that fails, mark it dead. * * isp_login_device will check for handle and * portid consistency after re-login. * */ if ((fcp->role & ISP_ROLE_INITIATOR) == 0 || isp_login_device(isp, chan, portid, &pdb, &FCPARAM(isp, 0)->isp_lasthdl)) { lp->new_portid = portid; lp->state = FC_PORTDB_STATE_DEAD; - if (fcp->isp_loopstate != + if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp, chan); - ISP_MARK_PORTDB(isp, chan, 1); - return (-1); + goto abort; } continue; } - if (fcp->isp_loopstate != - LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp, chan); - ISP_MARK_PORTDB(isp, chan, 1); - return (-1); + if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { + goto abort; } MAKE_WWN_FROM_NODE_NAME(wwnn, pdb.nodename); MAKE_WWN_FROM_NODE_NAME(wwpn, pdb.portname); if (wwpn != lp->port_wwn || (lp->node_wwn != 0 && wwnn != lp->node_wwn)) { isp_prt(isp, ISP_LOGWARN, "changed WWN" " after relogin"); lp->new_portid = portid; lp->state = FC_PORTDB_STATE_DEAD; continue; } lp->handle = pdb.handle; handle_changed++; } nr = pdb.prli_word3; /* * Check to see whether the portid and roles have * stayed the same. If they have stayed the same, * we believe that this is the same device and it * hasn't become disconnected and reconnected, so * mark it as pending valid. * * If they aren't the same, mark the device as a * changed device and save the new port id and role * and let somebody else decide. 
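* (The comparison below covers the PortID, PRLI word 3 and whether
* the N-Port handle had to change during relogin.)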
*/ lp->new_portid = portid; lp->new_prli_word3 = nr; if (pdb.portid != lp->portid || nr != lp->prli_word3 || handle_changed) { - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Fabric Port 0x%06x changed", chan, portid); + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d Fabric port 0x%06x changed", + chan, portid); lp->state = FC_PORTDB_STATE_CHANGED; } else { - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Fabric Port 0x%06x Now Pending Valid", chan, portid); + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d Fabric port 0x%06x now pending valid", + chan, portid); lp->state = FC_PORTDB_STATE_PENDING_VALID; } continue; } if ((fcp->role & ISP_ROLE_INITIATOR) == 0) continue; /* * Ah- a new entry. Search the database again for all non-NIL * entries to make sure we never ever make a new database entry * with the same port id. While we're at it, mark where the * last free entry was. */ dbidx = MAX_FC_TARG; for (lp = fcp->portdb; lp < &fcp->portdb[MAX_FC_TARG]; lp++) { - if (lp >= &fcp->portdb[FL_ID] && - lp <= &fcp->portdb[SNS_ID]) { - continue; - } if (lp->state == FC_PORTDB_STATE_NIL) { if (dbidx == MAX_FC_TARG) { dbidx = lp - fcp->portdb; } continue; } if (lp->state == FC_PORTDB_STATE_ZOMBIE) { continue; } if (lp->portid == portid) { break; } } if (lp < &fcp->portdb[MAX_FC_TARG]) { isp_prt(isp, ISP_LOGWARN, "Chan %d PortID 0x%06x " "already at %d handle %d state %d", chan, portid, dbidx, lp->handle, lp->state); continue; } /* * We should have the index of the first free entry seen. */ if (dbidx == MAX_FC_TARG) { isp_prt(isp, ISP_LOGERR, "port database too small to login PortID 0x%06x" "- increase MAX_FC_TARG", portid); continue; } /* * Otherwise, point to our new home. */ lp = &fcp->portdb[dbidx]; /* * Try to see if we are logged into this device, * and maybe log into it. * * isp_login_device will check for handle and * portid consistency after login. 
*/ if (isp_login_device(isp, chan, portid, &pdb, &FCPARAM(isp, 0)->isp_lasthdl)) { - if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp, chan); - ISP_MARK_PORTDB(isp, chan, 1); - return (-1); + if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { + goto abort; } continue; } - if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { - FC_SCRATCH_RELEASE(isp, chan); - ISP_MARK_PORTDB(isp, chan, 1); - return (-1); + if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { + goto abort; } - handle = pdb.handle; + nphdl = pdb.handle; MAKE_WWN_FROM_NODE_NAME(wwnn, pdb.nodename); MAKE_WWN_FROM_NODE_NAME(wwpn, pdb.portname); nr = pdb.prli_word3; /* * And go through the database *one* more time to make sure * that we do not make more than one entry that has the same * WWNN/WWPN duple */ for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { - if (dbidx >= FL_ID && dbidx <= SNS_ID) { - continue; - } if ((fcp->portdb[dbidx].node_wwn == wwnn || fcp->portdb[dbidx].node_wwn == 0) && fcp->portdb[dbidx].port_wwn == wwpn) { break; } } if (dbidx == MAX_FC_TARG) { ISP_MEMZERO(lp, sizeof (fcportdb_t)); - lp->handle = handle; + lp->handle = nphdl; lp->node_wwn = wwnn; lp->port_wwn = wwpn; lp->new_portid = portid; lp->new_prli_word3 = nr; lp->state = FC_PORTDB_STATE_NEW; - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Fabric Port 0x%06x is a New Entry", chan, portid); + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Fabric port 0x%06x is a new entry", chan, portid); continue; } if (fcp->portdb[dbidx].state != FC_PORTDB_STATE_ZOMBIE) { isp_prt(isp, ISP_LOGWARN, "Chan %d PortID 0x%x 0x%08x%08x/0x%08x%08x %ld " "already at idx %d, state 0x%x", chan, portid, (uint32_t) (wwnn >> 32), (uint32_t) wwnn, (uint32_t) (wwpn >> 32), (uint32_t) wwpn, (long) (lp - fcp->portdb), dbidx, fcp->portdb[dbidx].state); continue; } /* * We found a zombie entry that matches us. * Revive it. We know that WWN and WWPN * are the same. For fabric devices, we * don't care that handle is different * as we assign that. If role or portid * are different, it maybe a changed device. */ lp = &fcp->portdb[dbidx]; - lp->handle = handle; + lp->handle = nphdl; lp->node_wwn = wwnn; lp->new_portid = portid; lp->new_prli_word3 = nr; if (lp->portid != portid || lp->prli_word3 != nr) { - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Zombie Fabric Port 0x%06x Now Changed", chan, portid); + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Zombie fabric port 0x%06x now changed", chan, portid); lp->state = FC_PORTDB_STATE_CHANGED; } else { - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Zombie Fabric Port 0x%06x Now Pending Valid", chan, portid); + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Zombie fabric port 0x%06x now pending valid", chan, portid); lp->state = FC_PORTDB_STATE_PENDING_VALID; } } - FC_SCRATCH_RELEASE(isp, chan); - if (fcp->isp_loopstate != LOOP_SCANNING_FABRIC) { - ISP_MARK_PORTDB(isp, chan, 1); - return (-1); + if (fcp->isp_loopstate < LOOP_SCANNING_FABRIC) { + goto abort; } + FC_SCRATCH_RELEASE(isp, chan); fcp->isp_loopstate = LOOP_FSCAN_DONE; - isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC Scan Fabric Done", chan); + isp_prt(isp, ISP_LOG_SANCFG, "Chan %d FC fabric scan done", chan); return (0); } /* * Find an unused handle and try and use to login to a port. 
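* Starting from the next candidate handle, we first check whether the
* handle is already logged into some other PortID (and implicitly log
* it out if so) before attempting PLOGI. MBOX_PORT_ID_USED means the
* firmware already holds a login to this PortID under a different
* handle, which we break before retrying; MBOX_LOOP_ID_USED means the
* handle itself is taken, so we advance to the next one.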
*/ static int isp_login_device(ispsoftc_t *isp, int chan, uint32_t portid, isp_pdb_t *p, uint16_t *ohp) { int lim, i, r; uint16_t handle; if (ISP_CAP_2KLOGIN(isp)) { lim = NPH_MAX_2K; } else { lim = NPH_MAX; } handle = isp_next_handle(isp, ohp); for (i = 0; i < lim; i++) { /* * See if we're still logged into something with * this handle and that something agrees with this * port id. */ r = isp_getpdb(isp, chan, handle, p, 0); if (r == 0 && p->portid != portid) { (void) isp_plogx(isp, chan, handle, portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT | PLOGX_FLG_FREE_NPHDL, 1); } else if (r == 0) { break; } if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) { return (-1); } /* * Now try to log into the device */ r = isp_plogx(isp, chan, handle, portid, PLOGX_FLG_CMD_PLOGI, 1); if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) { return (-1); } if (r == 0) { break; } else if ((r & 0xffff) == MBOX_PORT_ID_USED) { /* * If we get here, then the firmware still thinks we're logged into this device, but with a different * handle. We need to break that association. We used to try to just substitute the handle, but then * failed to get any data via isp_getpdb (below). */ if (isp_plogx(isp, chan, r >> 16, portid, PLOGX_FLG_CMD_LOGO | PLOGX_FLG_IMPLICIT | PLOGX_FLG_FREE_NPHDL, 1)) { isp_prt(isp, ISP_LOGERR, "baw... logout of %x failed", r >> 16); } if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) { return (-1); } r = isp_plogx(isp, chan, handle, portid, PLOGX_FLG_CMD_PLOGI, 1); if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) { return (-1); } if (r != 0) i = lim; break; } else if ((r & 0xffff) == MBOX_LOOP_ID_USED) { - /* Try the next loop id. */ + /* Try the next handle. */ handle = isp_next_handle(isp, ohp); } else { /* Give up.
*/ i = lim; break; } } if (i == lim) { isp_prt(isp, ISP_LOGWARN, "Chan %d PLOGI 0x%06x failed", chan, portid); return (-1); } /* * If we successfully logged into it, get the PDB for it * so we can crosscheck that it is still what we think it * is and that we also have the role it plays */ r = isp_getpdb(isp, chan, handle, p, 0); if (FCPARAM(isp, chan)->isp_loopstate != LOOP_SCANNING_FABRIC) { return (-1); } if (r != 0) { isp_prt(isp, ISP_LOGERR, "Chan %d new device 0x%06x@0x%x disappeared", chan, portid, handle); return (-1); } if (p->handle != handle || p->portid != portid) { isp_prt(isp, ISP_LOGERR, "Chan %d new device 0x%06x@0x%x changed (0x%06x@0x%0x)", chan, portid, handle, p->portid, p->handle); return (-1); } return (0); } static int isp_register_fc4_type(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); uint8_t local[SNS_RFT_ID_REQ_SIZE]; sns_screq_t *reqp = (sns_screq_t *) local; mbreg_t mbs; ISP_MEMZERO((void *) reqp, SNS_RFT_ID_REQ_SIZE); reqp->snscb_rblen = SNS_RFT_ID_RESP_SIZE >> 1; reqp->snscb_addr[RQRSP_ADDR0015] = DMA_WD0(fcp->isp_scdma + 0x100); reqp->snscb_addr[RQRSP_ADDR1631] = DMA_WD1(fcp->isp_scdma + 0x100); reqp->snscb_addr[RQRSP_ADDR3247] = DMA_WD2(fcp->isp_scdma + 0x100); reqp->snscb_addr[RQRSP_ADDR4863] = DMA_WD3(fcp->isp_scdma + 0x100); reqp->snscb_sblen = 22; reqp->snscb_data[0] = SNS_RFT_ID; reqp->snscb_data[4] = fcp->isp_portid & 0xffff; reqp->snscb_data[5] = (fcp->isp_portid >> 16) & 0xff; reqp->snscb_data[6] = (1 << FC4_SCSI); if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } isp_put_sns_request(isp, reqp, (sns_screq_t *) fcp->isp_scratch); MBSINIT(&mbs, MBOX_SEND_SNS, MBLOGALL, 1000000); mbs.param[1] = SNS_RFT_ID_REQ_SIZE >> 1; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, SNS_RFT_ID_REQ_SIZE, chan); isp_mboxcmd(isp, &mbs); FC_SCRATCH_RELEASE(isp, chan); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (0); } else { return (-1); } } static int isp_register_fc4_type_24xx(ispsoftc_t *isp, int chan) { mbreg_t mbs; fcparam *fcp = FCPARAM(isp, chan); union { isp_ct_pt_t plocal; rft_id_t clocal; uint8_t q[QENTRY_LEN]; } un; isp_ct_pt_t *pt; ct_hdr_t *ct; rft_id_t *rp; uint8_t *scp = fcp->isp_scratch; if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); return (-1); } /* * Build a Passthrough IOCB in memory. */ ISP_MEMZERO(un.q, QENTRY_LEN); pt = &un.plocal; pt->ctp_header.rqs_entry_count = 1; pt->ctp_header.rqs_entry_type = RQSTYPE_CT_PASSTHRU; pt->ctp_handle = 0xffffffff; pt->ctp_nphdl = fcp->isp_sns_hdl; pt->ctp_cmd_cnt = 1; pt->ctp_vpidx = ISP_GET_VPIDX(isp, chan); pt->ctp_time = 1; pt->ctp_rsp_cnt = 1; pt->ctp_rsp_bcnt = sizeof (ct_hdr_t); pt->ctp_cmd_bcnt = sizeof (rft_id_t); pt->ctp_dataseg[0].ds_base = DMA_LO32(fcp->isp_scdma+XTXOFF); pt->ctp_dataseg[0].ds_basehi = DMA_HI32(fcp->isp_scdma+XTXOFF); pt->ctp_dataseg[0].ds_count = sizeof (rft_id_t); pt->ctp_dataseg[1].ds_base = DMA_LO32(fcp->isp_scdma+IGPOFF); pt->ctp_dataseg[1].ds_basehi = DMA_HI32(fcp->isp_scdma+IGPOFF); pt->ctp_dataseg[1].ds_count = sizeof (ct_hdr_t); isp_put_ct_pt(isp, pt, (isp_ct_pt_t *) &scp[CTXOFF]); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "IOCB CT Request", QENTRY_LEN, pt); } /* * Build the CT header and command in memory. * * Note that the CT header has to end up as Big Endian format in memory. 
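 *
 * (Editor's note, not part of the original commit: the 24-bit port ID
 * is packed into the CT payload big-endian by explicit byte shifts, as
 * the code just below does, so the layout is independent of host
 * endianness:
 *
 *	rp->rftid_portid[0] = fcp->isp_portid >> 16;
 *	rp->rftid_portid[1] = fcp->isp_portid >> 8;
 *	rp->rftid_portid[2] = fcp->isp_portid;
 * )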
*/ ISP_MEMZERO(&un.clocal, sizeof (un.clocal)); ct = &un.clocal.rftid_hdr; ct->ct_revision = CT_REVISION; ct->ct_fcs_type = CT_FC_TYPE_FC; ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; ct->ct_cmd_resp = SNS_RFT_ID; ct->ct_bcnt_resid = (sizeof (rft_id_t) - sizeof (ct_hdr_t)) >> 2; rp = &un.clocal; rp->rftid_portid[0] = fcp->isp_portid >> 16; rp->rftid_portid[1] = fcp->isp_portid >> 8; rp->rftid_portid[2] = fcp->isp_portid; rp->rftid_fc4types[FC4_SCSI >> 5] = 1 << (FC4_SCSI & 0x1f); isp_put_rft_id(isp, rp, (rft_id_t *) &scp[XTXOFF]); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "CT Header", QENTRY_LEN, &scp[XTXOFF]); } ISP_MEMZERO(&scp[ZTXOFF], sizeof (ct_hdr_t)); MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 1000000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma + CTXOFF); mbs.param[3] = DMA_WD0(fcp->isp_scdma + CTXOFF); mbs.param[6] = DMA_WD3(fcp->isp_scdma + CTXOFF); mbs.param[7] = DMA_WD2(fcp->isp_scdma + CTXOFF); MEMORYBARRIER(isp, SYNC_SFORDEV, XTXOFF, 2 * QENTRY_LEN, chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { FC_SCRATCH_RELEASE(isp, chan); return (-1); } MEMORYBARRIER(isp, SYNC_SFORCPU, ZTXOFF, QENTRY_LEN, chan); pt = &un.plocal; isp_get_ct_pt(isp, (isp_ct_pt_t *) &scp[ZTXOFF], pt); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "IOCB response", QENTRY_LEN, pt); } if (pt->ctp_status) { FC_SCRATCH_RELEASE(isp, chan); isp_prt(isp, ISP_LOGWARN, "Chan %d Register FC4 Type CT Passthrough returned 0x%x", chan, pt->ctp_status); return (1); } isp_get_ct_hdr(isp, (ct_hdr_t *) &scp[IGPOFF], ct); FC_SCRATCH_RELEASE(isp, chan); if (ct->ct_cmd_resp == LS_RJT) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, "Chan %d Register FC4 Type rejected", chan); return (-1); } else if (ct->ct_cmd_resp == LS_ACC) { isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Register FC4 Type accepted", chan); return (0); } else { isp_prt(isp, ISP_LOGWARN, "Chan %d Register FC4 Type: 0x%x", chan, ct->ct_cmd_resp); return (-1); } } +static int +isp_register_fc4_features_24xx(ispsoftc_t *isp, int chan) +{ + mbreg_t mbs; + fcparam *fcp = FCPARAM(isp, chan); + union { + isp_ct_pt_t plocal; + rff_id_t clocal; + uint8_t q[QENTRY_LEN]; + } un; + isp_ct_pt_t *pt; + ct_hdr_t *ct; + rff_id_t *rp; + uint8_t *scp = fcp->isp_scratch; + + if (FC_SCRATCH_ACQUIRE(isp, chan)) { + isp_prt(isp, ISP_LOGERR, sacq); + return (-1); + } + + /* + * Build a Passthrough IOCB in memory. + */ + ISP_MEMZERO(un.q, QENTRY_LEN); + pt = &un.plocal; + pt->ctp_header.rqs_entry_count = 1; + pt->ctp_header.rqs_entry_type = RQSTYPE_CT_PASSTHRU; + pt->ctp_handle = 0xffffffff; + pt->ctp_nphdl = fcp->isp_sns_hdl; + pt->ctp_cmd_cnt = 1; + pt->ctp_vpidx = ISP_GET_VPIDX(isp, chan); + pt->ctp_time = 1; + pt->ctp_rsp_cnt = 1; + pt->ctp_rsp_bcnt = sizeof (ct_hdr_t); + pt->ctp_cmd_bcnt = sizeof (rff_id_t); + pt->ctp_dataseg[0].ds_base = DMA_LO32(fcp->isp_scdma+XTXOFF); + pt->ctp_dataseg[0].ds_basehi = DMA_HI32(fcp->isp_scdma+XTXOFF); + pt->ctp_dataseg[0].ds_count = sizeof (rff_id_t); + pt->ctp_dataseg[1].ds_base = DMA_LO32(fcp->isp_scdma+IGPOFF); + pt->ctp_dataseg[1].ds_basehi = DMA_HI32(fcp->isp_scdma+IGPOFF); + pt->ctp_dataseg[1].ds_count = sizeof (ct_hdr_t); + isp_put_ct_pt(isp, pt, (isp_ct_pt_t *) &scp[CTXOFF]); + if (isp->isp_dblev & ISP_LOGDEBUG1) { + isp_print_bytes(isp, "IOCB CT Request", QENTRY_LEN, pt); + } + + /* + * Build the CT header and command in memory. + * + * Note that the CT header has to end up as Big Endian format in memory. 
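+ *
+ * (Editor's note, not part of the original commit: in the RFF_ID
+ * payload built below, the FC-4 Features field appears to use bit 0
+ * for the "target function" and bit 1 for the "initiator function",
+ * which is why the channel's role bits are translated as
+ *
+ *	if (fcp->role & ISP_ROLE_TARGET)
+ *		rp->rffid_fc4features |= 1;
+ *	if (fcp->role & ISP_ROLE_INITIATOR)
+ *		rp->rffid_fc4features |= 2;
+ * )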
+ */ + ISP_MEMZERO(&un.clocal, sizeof (un.clocal)); + ct = &un.clocal.rffid_hdr; + ct->ct_revision = CT_REVISION; + ct->ct_fcs_type = CT_FC_TYPE_FC; + ct->ct_fcs_subtype = CT_FC_SUBTYPE_NS; + ct->ct_cmd_resp = SNS_RFF_ID; + ct->ct_bcnt_resid = (sizeof (rff_id_t) - sizeof (ct_hdr_t)) >> 2; + rp = &un.clocal; + rp->rffid_portid[0] = fcp->isp_portid >> 16; + rp->rffid_portid[1] = fcp->isp_portid >> 8; + rp->rffid_portid[2] = fcp->isp_portid; + rp->rffid_fc4features = 0; + if (fcp->role & ISP_ROLE_TARGET) + rp->rffid_fc4features |= 1; + if (fcp->role & ISP_ROLE_INITIATOR) + rp->rffid_fc4features |= 2; + rp->rffid_fc4type = FC4_SCSI; + isp_put_rff_id(isp, rp, (rff_id_t *) &scp[XTXOFF]); + if (isp->isp_dblev & ISP_LOGDEBUG1) { + isp_print_bytes(isp, "CT Header", QENTRY_LEN, &scp[XTXOFF]); + } + + ISP_MEMZERO(&scp[ZTXOFF], sizeof (ct_hdr_t)); + + MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 1000000); + mbs.param[1] = QENTRY_LEN; + mbs.param[2] = DMA_WD1(fcp->isp_scdma + CTXOFF); + mbs.param[3] = DMA_WD0(fcp->isp_scdma + CTXOFF); + mbs.param[6] = DMA_WD3(fcp->isp_scdma + CTXOFF); + mbs.param[7] = DMA_WD2(fcp->isp_scdma + CTXOFF); + MEMORYBARRIER(isp, SYNC_SFORDEV, XTXOFF, 2 * QENTRY_LEN, chan); + isp_mboxcmd(isp, &mbs); + if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { + FC_SCRATCH_RELEASE(isp, chan); + return (-1); + } + MEMORYBARRIER(isp, SYNC_SFORCPU, ZTXOFF, QENTRY_LEN, chan); + pt = &un.plocal; + isp_get_ct_pt(isp, (isp_ct_pt_t *) &scp[ZTXOFF], pt); + if (isp->isp_dblev & ISP_LOGDEBUG1) { + isp_print_bytes(isp, "IOCB response", QENTRY_LEN, pt); + } + if (pt->ctp_status) { + FC_SCRATCH_RELEASE(isp, chan); + isp_prt(isp, ISP_LOGWARN, + "Chan %d Register FC4 Features CT Passthrough returned 0x%x", + chan, pt->ctp_status); + return (1); + } + + isp_get_ct_hdr(isp, (ct_hdr_t *) &scp[IGPOFF], ct); + FC_SCRATCH_RELEASE(isp, chan); + + if (ct->ct_cmd_resp == LS_RJT) { + isp_prt(isp, ISP_LOG_SANCFG|ISP_LOG_WARN1, + "Chan %d Register FC4 Features rejected", chan); + return (-1); + } else if (ct->ct_cmd_resp == LS_ACC) { + isp_prt(isp, ISP_LOG_SANCFG, + "Chan %d Register FC4 Features accepted", chan); + return (0); + } else { + isp_prt(isp, ISP_LOGWARN, + "Chan %d Register FC4 Features: 0x%x", chan, ct->ct_cmd_resp); + return (-1); + } +} + static uint16_t isp_next_handle(ispsoftc_t *isp, uint16_t *ohp) { fcparam *fcp; int i, chan, wrap; uint16_t handle, minh, maxh; handle = *ohp; if (ISP_CAP_2KLOGIN(isp)) { minh = 0; maxh = NPH_RESERVED - 1; } else { minh = SNS_ID + 1; maxh = NPH_MAX - 1; } wrap = 0; next: if (handle == NIL_HANDLE) { handle = minh; } else { handle++; if (handle > maxh) { if (++wrap >= 2) { isp_prt(isp, ISP_LOGERR, "Out of port handles!"); return (NIL_HANDLE); } handle = minh; } } for (chan = 0; chan < isp->isp_nchan; chan++) { fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; for (i = 0; i < MAX_FC_TARG; i++) { if (fcp->portdb[i].state != FC_PORTDB_STATE_NIL && fcp->portdb[i].handle == handle) goto next; } } *ohp = handle; return (handle); } /* * Start a command. Locking is assumed done in the caller. */ int isp_start(XS_T *xs) { ispsoftc_t *isp; uint32_t handle, cdblen; uint8_t local[QENTRY_LEN]; ispreq_t *reqp; void *cdbp, *qep; uint16_t *tptr; fcportdb_t *lp; int target, dmaresult; XS_INITERR(xs); isp = XS_ISP(xs); /* * Check command CDB length, etc.. 
We really are limited to 16 bytes * for Fibre Channel, but can do up to 44 bytes in parallel SCSI, * but probably only if we're running fairly new firmware (we'll * let the old f/w choke on an extended command queue entry). */ if (XS_CDBLEN(xs) > (IS_FC(isp)? 16 : 44) || XS_CDBLEN(xs) == 0) { isp_prt(isp, ISP_LOGERR, "unsupported cdb length (%d, CDB[0]=0x%x)", XS_CDBLEN(xs), XS_CDBP(xs)[0] & 0xff); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } /* * Translate the target to device handle as appropriate, checking * for correct device state as well. */ target = XS_TGT(xs); if (IS_FC(isp)) { fcparam *fcp = FCPARAM(isp, XS_CHANNEL(xs)); if ((fcp->role & ISP_ROLE_INITIATOR) == 0) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx I am not an initiator", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGERR, "Adapter not at RUNSTATE"); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } /* * Try again later. */ - if (fcp->isp_fwstate != FW_READY || fcp->isp_loopstate != LOOP_READY) { + if (fcp->isp_loopstate != LOOP_READY) { return (CMD_RQLATER); } isp_prt(isp, ISP_LOGDEBUG2, "XS_TGT(xs)=%d", target); lp = &fcp->portdb[target]; if (target < 0 || target >= MAX_FC_TARG || lp->is_target == 0) { XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } if (lp->state == FC_PORTDB_STATE_ZOMBIE) { isp_prt(isp, ISP_LOGDEBUG1, "%d.%d.%jx target zombie", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); return (CMD_RQLATER); } if (lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGDEBUG1, "%d.%d.%jx bad db port state 0x%x", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs), lp->state); XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } } else { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); if ((sdp->role & ISP_ROLE_INITIATOR) == 0) { isp_prt(isp, ISP_LOGDEBUG1, "%d.%d.%jx I am not an initiator", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_SELTIMEOUT); return (CMD_COMPLETE); } if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGERR, "Adapter not at RUNSTATE"); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } if (sdp->update) { isp_spi_update(isp, XS_CHANNEL(xs)); } lp = NULL; } start_again: qep = isp_getrqentry(isp); if (qep == NULL) { isp_prt(isp, ISP_LOG_WARN1, "Request Queue Overflow"); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } XS_SETERR(xs, HBA_NOERROR); /* * Now see if we need to synchronize the ISP with respect to anything. * We do dual duty here (cough) for synchronizing for busses other * than which we got here to send a command to. */ reqp = (ispreq_t *) local; ISP_MEMZERO(local, QENTRY_LEN); if (ISP_TST_SENDMARKER(isp, XS_CHANNEL(xs))) { if (IS_24XX(isp)) { isp_marker_24xx_t *m = (isp_marker_24xx_t *) reqp; m->mrk_header.rqs_entry_count = 1; m->mrk_header.rqs_entry_type = RQSTYPE_MARKER; m->mrk_modifier = SYNC_ALL; m->mrk_vphdl = XS_CHANNEL(xs); isp_put_marker_24xx(isp, m, qep); } else { isp_marker_t *m = (isp_marker_t *) reqp; m->mrk_header.rqs_entry_count = 1; m->mrk_header.rqs_entry_type = RQSTYPE_MARKER; m->mrk_target = (XS_CHANNEL(xs) << 7); /* bus # */ m->mrk_modifier = SYNC_ALL; isp_put_marker(isp, m, qep); } ISP_SYNC_REQUEST(isp); ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 0); goto start_again; } reqp->req_header.rqs_entry_count = 1; /* * Select and install Header Code. * Note that it might be overridden before going out * if we're on a 64 bit platform. The lower level * code (isp_send_cmd) will select the appropriate * 64 bit variant if it needs to. 
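 *
 * (Editor's note, not part of the original commit: the 64 bit variants
 * referred to here are presumably RQSTYPE_A64 for RQSTYPE_REQUEST and
 * RQSTYPE_T3RQS for RQSTYPE_T2RQS; both show up next to the base types
 * in the completion switch in isp_intr() further down.)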
*/ if (IS_24XX(isp)) { reqp->req_header.rqs_entry_type = RQSTYPE_T7RQS; } else if (IS_FC(isp)) { reqp->req_header.rqs_entry_type = RQSTYPE_T2RQS; } else { if (XS_CDBLEN(xs) > 12) { reqp->req_header.rqs_entry_type = RQSTYPE_CMDONLY; } else { reqp->req_header.rqs_entry_type = RQSTYPE_REQUEST; } } /* * Set task attributes */ if (IS_24XX(isp)) { int ttype; if (XS_TAG_P(xs)) { ttype = XS_TAG_TYPE(xs); } else { if (XS_CDBP(xs)[0] == 0x3) { ttype = REQFLAG_HTAG; } else { ttype = REQFLAG_STAG; } } if (ttype == REQFLAG_OTAG) { ttype = FCP_CMND_TASK_ATTR_ORDERED; } else if (ttype == REQFLAG_HTAG) { ttype = FCP_CMND_TASK_ATTR_HEAD; } else { ttype = FCP_CMND_TASK_ATTR_SIMPLE; } ((ispreqt7_t *)reqp)->req_task_attribute = ttype; } else if (IS_FC(isp)) { /* * See comment in isp_intr */ /* XS_SET_RESID(xs, 0); */ /* * Fibre Channel always requires some kind of tag. * The Qlogic drivers seem to be happy not to use a tag, * but this breaks for some devices (IBM drives). */ if (XS_TAG_P(xs)) { ((ispreqt2_t *)reqp)->req_flags = XS_TAG_TYPE(xs); } else { /* * If we don't know what tag to use, use HEAD OF QUEUE * for Request Sense or Simple. */ if (XS_CDBP(xs)[0] == 0x3) /* REQUEST SENSE */ ((ispreqt2_t *)reqp)->req_flags = REQFLAG_HTAG; else ((ispreqt2_t *)reqp)->req_flags = REQFLAG_STAG; } } else { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); if ((sdp->isp_devparam[target].actv_flags & DPARM_TQING) && XS_TAG_P(xs)) { reqp->req_flags = XS_TAG_TYPE(xs); } } tptr = &reqp->req_time; /* * NB: we do not support long CDBs (yet) */ cdblen = XS_CDBLEN(xs); if (IS_SCSI(isp)) { if (cdblen > sizeof (reqp->req_cdb)) { isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } reqp->req_target = target | (XS_CHANNEL(xs) << 7); reqp->req_lun_trn = XS_LUN(xs); cdbp = reqp->req_cdb; reqp->req_cdblen = cdblen; } else if (IS_24XX(isp)) { ispreqt7_t *t7 = (ispreqt7_t *)local; if (cdblen > sizeof (t7->req_cdb)) { isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } t7->req_nphdl = lp->handle; t7->req_tidlo = lp->portid; t7->req_tidhi = lp->portid >> 16; t7->req_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(xs)); #if __FreeBSD_version >= 1000700 be64enc(t7->req_lun, CAM_EXTLUN_BYTE_SWIZZLE(XS_LUN(xs))); #else if (XS_LUN(xs) >= 256) { t7->req_lun[0] = XS_LUN(xs) >> 8; t7->req_lun[0] |= 0x40; } t7->req_lun[1] = XS_LUN(xs); #endif if (FCPARAM(isp, XS_CHANNEL(xs))->fctape_enabled && (lp->prli_word3 & PRLI_WD3_RETRY)) { if (FCP_NEXT_CRN(isp, &t7->req_crn, xs)) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx cannot generate next CRN", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } } tptr = &t7->req_time; cdbp = t7->req_cdb; } else { ispreqt2_t *t2 = (ispreqt2_t *)local; if (cdblen > sizeof t2->req_cdb) { isp_prt(isp, ISP_LOGERR, "Command Length %u too long for this chip", cdblen); XS_SETERR(xs, HBA_BOTCH); return (CMD_COMPLETE); } if (FCPARAM(isp, XS_CHANNEL(xs))->fctape_enabled && (lp->prli_word3 & PRLI_WD3_RETRY)) { if (FCP_NEXT_CRN(isp, &t2->req_crn, xs)) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx cannot generate next CRN", XS_CHANNEL(xs), target, (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } } if (ISP_CAP_2KLOGIN(isp)) { ispreqt2e_t *t2e = (ispreqt2e_t *)local; t2e->req_target = lp->handle; t2e->req_scclun = XS_LUN(xs); #if __FreeBSD_version < 1000700 if (XS_LUN(xs) >= 256) t2e->req_scclun |= 0x4000; #endif cdbp = t2e->req_cdb; }
else if (ISP_CAP_SCCFW(isp)) { ispreqt2_t *t2 = (ispreqt2_t *)local; t2->req_target = lp->handle; t2->req_scclun = XS_LUN(xs); #if __FreeBSD_version < 1000700 if (XS_LUN(xs) >= 256) t2->req_scclun |= 0x4000; #endif cdbp = t2->req_cdb; } else { t2->req_target = lp->handle; t2->req_lun_trn = XS_LUN(xs); cdbp = t2->req_cdb; } } ISP_MEMCPY(cdbp, XS_CDBP(xs), cdblen); *tptr = XS_TIME(xs) / 1000; if (*tptr == 0 && XS_TIME(xs)) { *tptr = 1; } if (IS_24XX(isp) && *tptr > 0x1999) { *tptr = 0x1999; } if (isp_allocate_xs(isp, xs, &handle)) { isp_prt(isp, ISP_LOG_WARN1, "out of xflist pointers"); XS_SETERR(xs, HBA_BOTCH); return (CMD_EAGAIN); } /* Whew. Thankfully the same for type 7 requests */ reqp->req_handle = handle; /* * Set up DMA and/or do any platform dependent swizzling of the request entry * so that the Qlogic F/W understands what is being asked of it. * * The callee is responsible for adding all requests at this point. */ dmaresult = ISP_DMASETUP(isp, xs, reqp); if (dmaresult != CMD_QUEUED) { isp_destroy_handle(isp, handle); /* * dmasetup sets actual error in packet, and * return what we were given to return. */ return (dmaresult); } isp_xs_prt(isp, xs, ISP_LOGDEBUG0, "START cmd cdb[0]=0x%x datalen %ld", XS_CDBP(xs)[0], (long) XS_XFRLEN(xs)); isp->isp_nactive++; return (CMD_QUEUED); } /* * isp control * Locks (ints blocked) assumed held. */ int isp_control(ispsoftc_t *isp, ispctl_t ctl, ...) { XS_T *xs; mbreg_t *mbr, mbs; int chan, tgt; uint32_t handle; va_list ap; switch (ctl) { case ISPCTL_RESET_BUS: /* * Issue a bus reset. */ if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGERR, "BUS RESET NOT IMPLEMENTED"); break; } else if (IS_FC(isp)) { mbs.param[1] = 10; chan = 0; } else { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); mbs.param[1] = SDPARAM(isp, chan)->isp_bus_reset_delay; if (mbs.param[1] < 2) { mbs.param[1] = 2; } mbs.param[2] = chan; } MBSINIT(&mbs, MBOX_BUS_RESET, MBLOGALL, 0); ISP_SET_SENDMARKER(isp, chan, 1); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } isp_prt(isp, ISP_LOGINFO, "driver initiated bus reset of bus %d", chan); return (0); case ISPCTL_RESET_DEV: va_start(ap, ctl); chan = va_arg(ap, int); tgt = va_arg(ap, int); va_end(ap); if (IS_24XX(isp)) { uint8_t local[QENTRY_LEN]; isp24xx_tmf_t *tmf; isp24xx_statusreq_t *sp; fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; if (tgt < 0 || tgt >= MAX_FC_TARG) { isp_prt(isp, ISP_LOGWARN, "Chan %d trying to reset bad target %d", chan, tgt); break; } lp = &fcp->portdb[tgt]; if (lp->is_target == 0 || lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGWARN, "Chan %d abort of no longer valid target %d", chan, tgt); break; } tmf = (isp24xx_tmf_t *) local; ISP_MEMZERO(tmf, QENTRY_LEN); tmf->tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; tmf->tmf_header.rqs_entry_count = 1; tmf->tmf_nphdl = lp->handle; tmf->tmf_delay = 2; tmf->tmf_timeout = 2; tmf->tmf_flags = ISP24XX_TMF_TARGET_RESET; tmf->tmf_tidlo = lp->portid; tmf->tmf_tidhi = lp->portid >> 16; tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan); isp_prt(isp, ISP_LOGALL, "Chan %d Reset N-Port Handle 0x%04x @ Port 0x%06x", chan, lp->handle, lp->portid); MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); break; } isp_put_24xx_tmf(isp, tmf, fcp->isp_scratch); MEMORYBARRIER(isp, SYNC_SFORDEV, 
0, QENTRY_LEN, chan); fcp->sendmarker = 1; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { FC_SCRATCH_RELEASE(isp, chan); break; } MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan); sp = (isp24xx_statusreq_t *) local; isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)fcp->isp_scratch)[1], sp); FC_SCRATCH_RELEASE(isp, chan); if (sp->req_completion_status == 0) { return (0); } isp_prt(isp, ISP_LOGWARN, "Chan %d reset of target %d returned 0x%x", chan, tgt, sp->req_completion_status); break; } else if (IS_FC(isp)) { if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = tgt; mbs.ibits = (1 << 10); } else { mbs.param[1] = (tgt << 8); } } else { mbs.param[1] = (chan << 15) | (tgt << 8); } MBSINIT(&mbs, MBOX_ABORT_TARGET, MBLOGALL, 0); mbs.param[2] = 3; /* 'delay', in seconds */ isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } isp_prt(isp, ISP_LOGINFO, "Target %d on Bus %d Reset Succeeded", tgt, chan); ISP_SET_SENDMARKER(isp, chan, 1); return (0); case ISPCTL_ABORT_CMD: va_start(ap, ctl); xs = va_arg(ap, XS_T *); va_end(ap); tgt = XS_TGT(xs); chan = XS_CHANNEL(xs); handle = isp_find_handle(isp, xs); if (handle == 0) { isp_prt(isp, ISP_LOGWARN, "cannot find handle for command to abort"); break; } if (IS_24XX(isp)) { isp24xx_abrt_t local, *ab = &local, *ab2; fcparam *fcp; fcportdb_t *lp; fcp = FCPARAM(isp, chan); if (tgt < 0 || tgt >= MAX_FC_TARG) { isp_prt(isp, ISP_LOGWARN, "Chan %d trying to abort bad target %d", chan, tgt); break; } lp = &fcp->portdb[tgt]; if (lp->is_target == 0 || lp->state != FC_PORTDB_STATE_VALID) { isp_prt(isp, ISP_LOGWARN, "Chan %d abort of no longer valid target %d", chan, tgt); break; } isp_prt(isp, ISP_LOGALL, "Chan %d Abort Cmd for N-Port 0x%04x @ Port 0x%06x", chan, lp->handle, lp->portid); ISP_MEMZERO(ab, QENTRY_LEN); ab->abrt_header.rqs_entry_type = RQSTYPE_ABORT_IO; ab->abrt_header.rqs_entry_count = 1; ab->abrt_handle = lp->handle; ab->abrt_cmd_handle = handle; ab->abrt_tidlo = lp->portid; ab->abrt_tidhi = lp->portid >> 16; ab->abrt_vpidx = ISP_GET_VPIDX(isp, chan); ISP_MEMZERO(&mbs, sizeof (mbs)); MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); if (FC_SCRATCH_ACQUIRE(isp, chan)) { isp_prt(isp, ISP_LOGERR, sacq); break; } isp_put_24xx_abrt(isp, ab, fcp->isp_scratch); ab2 = (isp24xx_abrt_t *) &((uint8_t *)fcp->isp_scratch)[QENTRY_LEN]; ab2->abrt_nphdl = 0xdeaf; MEMORYBARRIER(isp, SYNC_SFORDEV, 0, 2 * QENTRY_LEN, chan); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { FC_SCRATCH_RELEASE(isp, chan); break; } MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan); isp_get_24xx_abrt(isp, ab2, ab); FC_SCRATCH_RELEASE(isp, chan); if (ab->abrt_nphdl == ISP24XX_ABRT_OKAY) { return (0); } isp_prt(isp, ISP_LOGWARN, "Chan %d handle %d abort returned 0x%x", chan, tgt, ab->abrt_nphdl); break; } else if (IS_FC(isp)) { if (ISP_CAP_SCCFW(isp)) { if (ISP_CAP_2KLOGIN(isp)) { mbs.param[1] = tgt; } else { mbs.param[1] = tgt << 8; } mbs.param[6] = XS_LUN(xs); } else { mbs.param[1] = tgt << 8 | XS_LUN(xs); } } else { mbs.param[1] = (chan << 15) | (tgt << 8) | XS_LUN(xs); } MBSINIT(&mbs, MBOX_ABORT, MBLOGALL & ~MBLOGMASK(MBOX_COMMAND_ERROR), 0); mbs.param[2] = handle; isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { break; } return (0); case ISPCTL_UPDATE_PARAMS: va_start(ap, ctl); chan = 
va_arg(ap, int); va_end(ap); isp_spi_update(isp, chan); return (0); case ISPCTL_FCLINK_TEST: if (IS_FC(isp)) { int usdelay; va_start(ap, ctl); chan = va_arg(ap, int); usdelay = va_arg(ap, int); va_end(ap); if (usdelay == 0) { usdelay = 250000; } return (isp_fclink_test(isp, chan, usdelay)); } break; case ISPCTL_SCAN_FABRIC: if (IS_FC(isp)) { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); return (isp_scan_fabric(isp, chan)); } break; case ISPCTL_SCAN_LOOP: if (IS_FC(isp)) { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); return (isp_scan_loop(isp, chan)); } break; case ISPCTL_PDB_SYNC: if (IS_FC(isp)) { va_start(ap, ctl); chan = va_arg(ap, int); va_end(ap); return (isp_pdb_sync(isp, chan)); } break; case ISPCTL_SEND_LIP: if (IS_FC(isp) && !IS_24XX(isp)) { MBSINIT(&mbs, MBOX_INIT_LIP, MBLOGALL, 0); if (ISP_CAP_2KLOGIN(isp)) { mbs.ibits = (1 << 10); } isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { return (0); } } break; case ISPCTL_GET_PDB: if (IS_FC(isp)) { isp_pdb_t *pdb; va_start(ap, ctl); chan = va_arg(ap, int); tgt = va_arg(ap, int); pdb = va_arg(ap, isp_pdb_t *); va_end(ap); return (isp_getpdb(isp, chan, tgt, pdb, 1)); } break; case ISPCTL_GET_NAMES: { uint64_t *wwnn, *wwnp; va_start(ap, ctl); chan = va_arg(ap, int); tgt = va_arg(ap, int); wwnn = va_arg(ap, uint64_t *); wwnp = va_arg(ap, uint64_t *); va_end(ap); if (wwnn == NULL && wwnp == NULL) { break; } if (wwnn) { *wwnn = isp_get_wwn(isp, chan, tgt, 1); if (*wwnn == INI_NONE) { break; } } if (wwnp) { *wwnp = isp_get_wwn(isp, chan, tgt, 0); if (*wwnp == INI_NONE) { break; } } return (0); } case ISPCTL_RUN_MBOXCMD: { va_start(ap, ctl); mbr = va_arg(ap, mbreg_t *); va_end(ap); isp_mboxcmd(isp, mbr); return (0); } case ISPCTL_PLOGX: { isp_plcmd_t *p; int r; va_start(ap, ctl); p = va_arg(ap, isp_plcmd_t *); va_end(ap); if ((p->flags & PLOGX_FLG_CMD_MASK) != PLOGX_FLG_CMD_PLOGI || (p->handle != NIL_HANDLE)) { return (isp_plogx(isp, p->channel, p->handle, p->portid, p->flags, 0)); } do { isp_next_handle(isp, &p->handle); r = isp_plogx(isp, p->channel, p->handle, p->portid, p->flags, 0); if ((r & 0xffff) == MBOX_PORT_ID_USED) { p->handle = r >> 16; r = 0; break; } } while ((r & 0xffff) == MBOX_LOOP_ID_USED); return (r); } case ISPCTL_CHANGE_ROLE: { int role, r; va_start(ap, ctl); chan = va_arg(ap, int); role = va_arg(ap, int); va_end(ap); if (IS_FC(isp)) { r = isp_fc_change_role(isp, chan, role); } else { SDPARAM(isp, chan)->role = role; r = 0; } return (r); } default: isp_prt(isp, ISP_LOGERR, "Unknown Control Opcode 0x%x", ctl); break; } return (-1); } /* * Interrupt Service Routine(s). * * External (OS) framework has done the appropriate locking, * and the locking will be held throughout this function. */ /* * Limit our stack depth by sticking with the max likely number * of completions on a request queue at any one time. */ #ifndef MAX_REQUESTQ_COMPLETIONS #define MAX_REQUESTQ_COMPLETIONS 32 #endif void isp_intr(ispsoftc_t *isp, uint16_t isr, uint16_t sema, uint16_t info) { XS_T *complist[MAX_REQUESTQ_COMPLETIONS], *xs; uint32_t iptr, optr, junk; int i, nlooked = 0, ndone = 0, continuations_expected = 0; int etype, last_etype = 0; again: /* * Is this a mailbox related interrupt? * The mailbox semaphore will be nonzero if so. 
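 *
 * (Editor's note, not part of the original commit: on a completed
 * mailbox command only the registers whose bit is set in isp_obits are
 * read back, as in the loop just below:
 *
 *	for (i = 1; i < ISP_NMBOX(isp); i++) {
 *		if ((obits & (1 << i)) == 0)
 *			continue;
 *		isp->isp_mboxtmp[i] = ISP_READ(isp, MBOX_OFF(i));
 *	}
 * )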
*/ if (sema) { fmbox: if (info & MBOX_COMMAND_COMPLETE) { isp->isp_intmboxc++; if (isp->isp_mboxbsy) { int obits = isp->isp_obits; isp->isp_mboxtmp[0] = info; for (i = 1; i < ISP_NMBOX(isp); i++) { if ((obits & (1 << i)) == 0) { continue; } isp->isp_mboxtmp[i] = ISP_READ(isp, MBOX_OFF(i)); } if (isp->isp_mbxwrk0) { if (isp_mbox_continue(isp) == 0) { return; } } MBOX_NOTIFY_COMPLETE(isp); } else { isp_prt(isp, ISP_LOGWARN, "mailbox cmd (0x%x) with no waiters", info); } } else { i = IS_FC(isp)? isp_parse_async_fc(isp, info) : isp_parse_async(isp, info); if (i < 0) { return; } } if ((IS_FC(isp) && info != ASYNC_RIOZIO_STALL) || isp->isp_state != ISP_RUNSTATE) { goto out; } } /* * We can't be getting this now. */ if (isp->isp_state != ISP_RUNSTATE) { /* * This seems to happen to 23XX and 24XX cards- don't know why. */ if (isp->isp_mboxbsy && isp->isp_lastmbxcmd == MBOX_ABOUT_FIRMWARE) { goto fmbox; } isp_prt(isp, ISP_LOGINFO, "interrupt (ISR=%x SEMA=%x INFO=%x) " "when not ready", isr, sema, info); /* * Thank you very much! *Burrrp*! */ isp->isp_residx = ISP_READ(isp, isp->isp_respinrp); isp->isp_resodx = isp->isp_residx; ISP_WRITE(isp, isp->isp_respoutrp, isp->isp_resodx); if (IS_24XX(isp)) { ISP_DISABLE_INTS(isp); } goto out; } #ifdef ISP_TARGET_MODE /* * Check for ATIO Queue entries. */ if (IS_24XX(isp) && (isr == ISPR2HST_ATIO_UPDATE || isr == ISPR2HST_ATIO_RSPQ_UPDATE || isr == ISPR2HST_ATIO_UPDATE2)) { iptr = ISP_READ(isp, BIU2400_ATIO_RSPINP); optr = isp->isp_atioodx; while (optr != iptr) { uint8_t qe[QENTRY_LEN]; isphdr_t *hp; uint32_t oop; void *addr; oop = optr; MEMORYBARRIER(isp, SYNC_ATIOQ, oop, QENTRY_LEN, -1); addr = ISP_QUEUE_ENTRY(isp->isp_atioq, oop); isp_get_hdr(isp, addr, (isphdr_t *)qe); hp = (isphdr_t *)qe; switch (hp->rqs_entry_type) { case RQSTYPE_NOTIFY: case RQSTYPE_ATIO: (void) isp_target_notify(isp, addr, &oop); break; default: isp_print_qentry(isp, "?ATIOQ entry?", oop, addr); break; } optr = ISP_NXT_QENTRY(oop, RESULT_QUEUE_LEN(isp)); } if (isp->isp_atioodx != optr) { ISP_WRITE(isp, BIU2400_ATIO_RSPOUTP, optr); isp->isp_atioodx = optr; } } #endif /* * You *must* read the Response Queue In Pointer * prior to clearing the RISC interrupt. * * Debounce the 2300 if revision less than 2. */ if (IS_2100(isp) || (IS_2300(isp) && isp->isp_revision < 2)) { i = 0; do { iptr = ISP_READ(isp, isp->isp_respinrp); junk = ISP_READ(isp, isp->isp_respinrp); } while (junk != iptr && ++i < 1000); if (iptr != junk) { isp_prt(isp, ISP_LOGWARN, "Response Queue Out Pointer Unstable (%x, %x)", iptr, junk); goto out; } } else { iptr = ISP_READ(isp, isp->isp_respinrp); } optr = isp->isp_resodx; if (optr == iptr && sema == 0) { /* * There are a lot of these- reasons unknown- mostly on * faster Alpha machines. * * I tried delaying after writing HCCR_CMD_CLEAR_RISC_INT to * make sure the old interrupt went away (to avoid 'ringing' * effects), but that didn't stop this from occurring. 
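 *
 * (Editor's note, not part of the original commit: for chips other
 * than the 23XX/24XX the code below re-checks whether a mailbox
 * interrupt snuck in behind us,
 *
 *	sema = ISP_READ(isp, BIU_SEMA);
 *	info = ISP_READ(isp, OUTMAILBOX0);
 *	if ((sema & 0x3) && (info & 0x8000))
 *		goto again;
 *
 * and otherwise just counts the event in isp_intbogus.)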
*/ if (IS_24XX(isp)) { junk = 0; } else if (IS_23XX(isp)) { ISP_DELAY(100); iptr = ISP_READ(isp, isp->isp_respinrp); junk = ISP_READ(isp, BIU_R2HSTSLO); } else { junk = ISP_READ(isp, BIU_ISR); } if (optr == iptr) { if (IS_23XX(isp) || IS_24XX(isp)) { ; } else { sema = ISP_READ(isp, BIU_SEMA); info = ISP_READ(isp, OUTMAILBOX0); if ((sema & 0x3) && (info & 0x8000)) { goto again; } } isp->isp_intbogus++; isp_prt(isp, ISP_LOGDEBUG1, "bogus intr- isr %x (%x) iptr %x optr %x", isr, junk, iptr, optr); } } isp->isp_residx = iptr; while (optr != iptr) { uint8_t qe[QENTRY_LEN]; ispstatusreq_t *sp = (ispstatusreq_t *) qe; isphdr_t *hp; int buddaboom, scsi_status, completion_status; int req_status_flags, req_state_flags; uint8_t *snsp, *resp; uint32_t rlen, slen, totslen; long resid; uint16_t oop; hp = (isphdr_t *) ISP_QUEUE_ENTRY(isp->isp_result, optr); oop = optr; optr = ISP_NXT_QENTRY(optr, RESULT_QUEUE_LEN(isp)); nlooked++; read_again: buddaboom = req_status_flags = req_state_flags = 0; resid = 0L; /* * Synchronize our view of this response queue entry. */ MEMORYBARRIER(isp, SYNC_RESULT, oop, QENTRY_LEN, -1); isp_get_hdr(isp, hp, &sp->req_header); etype = sp->req_header.rqs_entry_type; if (IS_24XX(isp) && etype == RQSTYPE_RESPONSE) { isp24xx_statusreq_t *sp2 = (isp24xx_statusreq_t *)qe; isp_get_24xx_response(isp, (isp24xx_statusreq_t *)hp, sp2); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "Response Queue Entry", QENTRY_LEN, sp2); } scsi_status = sp2->req_scsi_status; completion_status = sp2->req_completion_status; if ((scsi_status & 0xff) != 0) req_state_flags = RQSF_GOT_STATUS; else req_state_flags = 0; resid = sp2->req_resid; } else if (etype == RQSTYPE_RESPONSE) { isp_get_response(isp, (ispstatusreq_t *) hp, sp); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "Response Queue Entry", QENTRY_LEN, sp); } scsi_status = sp->req_scsi_status; completion_status = sp->req_completion_status; req_status_flags = sp->req_status_flags; req_state_flags = sp->req_state_flags; resid = sp->req_resid; } else if (etype == RQSTYPE_RIO1) { isp_rio1_t *rio = (isp_rio1_t *) qe; isp_get_rio1(isp, (isp_rio1_t *) hp, rio); if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "Response Queue Entry", QENTRY_LEN, rio); } for (i = 0; i < rio->req_header.rqs_seqno; i++) { isp_fastpost_complete(isp, rio->req_handles[i]); } if (isp->isp_fpcchiwater < rio->req_header.rqs_seqno) { isp->isp_fpcchiwater = rio->req_header.rqs_seqno; } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } else if (etype == RQSTYPE_RIO2) { isp_prt(isp, ISP_LOGERR, "dropping RIO2 response"); ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } else if (etype == RQSTYPE_STATUS_CONT) { isp_get_cont_response(isp, (ispstatus_cont_t *) hp, (ispstatus_cont_t *) sp); if (last_etype == RQSTYPE_RESPONSE && continuations_expected && ndone > 0 && (xs = complist[ndone-1]) != NULL) { ispstatus_cont_t *scp = (ispstatus_cont_t *) sp; XS_SENSE_APPEND(xs, scp->req_sense_data, sizeof (scp->req_sense_data)); isp_prt(isp, ISP_LOGDEBUG0|ISP_LOG_CWARN, "%d more Status Continuations expected", --continuations_expected); } else { isp_prt(isp, ISP_LOG_WARN1, "Ignored Continuation Response"); } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ continue; } else { /* * Somebody reachable via isp_handle_other_response * may have updated the response queue pointers for * us, so we reload our goal index. 
*/ int r; uint32_t tsto = oop; r = isp_handle_other_response(isp, etype, hp, &tsto); if (r < 0) { goto read_again; } /* * If somebody updated the output pointer, then reset * optr to be one more than the updated amount. */ while (tsto != oop) { optr = ISP_NXT_QENTRY(tsto, RESULT_QUEUE_LEN(isp)); } if (r > 0) { ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } /* * After this point, we'll just look at the header as * we don't know how to deal with the rest of the * response. */ /* * It really has to be a bounced request just copied * from the request queue to the response queue. If * not, something bad has happened. */ if (etype != RQSTYPE_REQUEST) { isp_prt(isp, ISP_LOGERR, notresp, etype, oop, optr, nlooked); isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, sp); ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } buddaboom = 1; scsi_status = sp->req_scsi_status; completion_status = sp->req_completion_status; req_status_flags = sp->req_status_flags; req_state_flags = sp->req_state_flags; resid = sp->req_resid; } if (sp->req_header.rqs_flags & RQSFLAG_MASK) { if (sp->req_header.rqs_flags & RQSFLAG_CONTINUATION) { isp_print_bytes(isp, "unexpected continuation segment", QENTRY_LEN, sp); last_etype = etype; continue; } if (sp->req_header.rqs_flags & RQSFLAG_FULL) { isp_prt(isp, ISP_LOG_WARN1, "internal queues full"); /* * We'll synthesize a QUEUE FULL message below. */ } if (sp->req_header.rqs_flags & RQSFLAG_BADHEADER) { isp_print_bytes(isp, "bad header flag", QENTRY_LEN, sp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADPACKET) { isp_print_bytes(isp, "bad request packet", QENTRY_LEN, sp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADCOUNT) { isp_print_bytes(isp, "invalid entry count", QENTRY_LEN, sp); buddaboom++; } if (sp->req_header.rqs_flags & RQSFLAG_BADORDER) { isp_print_bytes(isp, "invalid IOCB ordering", QENTRY_LEN, sp); last_etype = etype; continue; } } if (!ISP_VALID_HANDLE(isp, sp->req_handle)) { isp_prt(isp, ISP_LOGERR, "bad request handle 0x%x (iocb type 0x%x)", sp->req_handle, etype); ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } xs = isp_find_xs(isp, sp->req_handle); if (xs == NULL) { uint8_t ts = completion_status & 0xff; /* * Only whine if this isn't the expected fallout of * aborting the command or resetting the target. 
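 *
 * (Editor's note, not part of the original commit: "expected fallout"
 * here means a completion status of RQCS_ABORTED or RQCS_RESET_OCCURRED
 * arriving for a handle that has already been torn down; those are
 * dropped silently below, while anything else is logged as an error.)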
*/ if (etype != RQSTYPE_RESPONSE) { isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (type 0x%x)", sp->req_handle, etype); } else if (ts != RQCS_ABORTED && ts != RQCS_RESET_OCCURRED) { isp_prt(isp, ISP_LOGERR, "cannot find handle 0x%x (status 0x%x)", sp->req_handle, ts); } ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; continue; } if (req_status_flags & RQSTF_BUS_RESET) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx bus was reset", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BUSRESET); ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1); } if (buddaboom) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx buddaboom", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs)); XS_SETERR(xs, HBA_BOTCH); } resp = NULL; rlen = 0; snsp = NULL; totslen = slen = 0; if (IS_24XX(isp) && (scsi_status & (RQCS_RV|RQCS_SV)) != 0) { resp = ((isp24xx_statusreq_t *)sp)->req_rsp_sense; rlen = ((isp24xx_statusreq_t *)sp)->req_response_len; } else if (IS_FC(isp) && (scsi_status & RQCS_RV) != 0) { resp = sp->req_response; rlen = sp->req_response_len; } if (IS_FC(isp) && (scsi_status & RQCS_SV) != 0) { /* * Fibre Channel F/W doesn't say we got status * if there's Sense Data instead. I guess they * think it goes w/o saying. */ req_state_flags |= RQSF_GOT_STATUS|RQSF_GOT_SENSE; if (IS_24XX(isp)) { snsp = ((isp24xx_statusreq_t *)sp)->req_rsp_sense; snsp += rlen; totslen = ((isp24xx_statusreq_t *)sp)->req_sense_len; slen = (sizeof (((isp24xx_statusreq_t *)sp)->req_rsp_sense)) - rlen; if (totslen < slen) slen = totslen; } else { snsp = sp->req_sense_data; totslen = sp->req_sense_len; slen = sizeof (sp->req_sense_data); if (totslen < slen) slen = totslen; } } else if (IS_SCSI(isp) && (req_state_flags & RQSF_GOT_SENSE)) { snsp = sp->req_sense_data; totslen = sp->req_sense_len; slen = sizeof (sp->req_sense_data); if (totslen < slen) slen = totslen; } if (req_state_flags & RQSF_GOT_STATUS) { *XS_STSP(xs) = scsi_status & 0xff; } switch (etype) { case RQSTYPE_RESPONSE: if (resp && rlen >= 4 && resp[FCP_RSPNS_CODE_OFFSET] != 0) { const char *ptr; char lb[64]; const char *rnames[10] = { "Task Management function complete", "FCP_DATA length different than FCP_BURST_LEN", "FCP_CMND fields invalid", "FCP_DATA parameter mismatch with FCP_DATA_RO", "Task Management function rejected", "Task Management function failed", NULL, NULL, "Task Management function succeeded", "Task Management function incorrect logical unit number", }; uint8_t code = resp[FCP_RSPNS_CODE_OFFSET]; if (code >= 10 || rnames[code] == NULL) { ISP_SNPRINTF(lb, sizeof(lb), "Unknown FCP Response Code 0x%x", code); ptr = lb; } else { ptr = rnames[code]; } isp_xs_prt(isp, xs, ISP_LOGWARN, "FCP RESPONSE, LENGTH %u: %s CDB0=0x%02x", rlen, ptr, XS_CDBP(xs)[0] & 0xff); if (code != 0 && code != 8) XS_SETERR(xs, HBA_BOTCH); } if (IS_24XX(isp)) { isp_parse_status_24xx(isp, (isp24xx_statusreq_t *)sp, xs, &resid); } else { isp_parse_status(isp, (void *)sp, xs, &resid); } if ((XS_NOERR(xs) || XS_ERR(xs) == HBA_NOERROR) && (*XS_STSP(xs) == SCSI_BUSY)) { XS_SETERR(xs, HBA_TGTBSY); } if (IS_SCSI(isp)) { XS_SET_RESID(xs, resid); /* * A new synchronous rate was negotiated for * this target. Mark state such that we'll go * look up that which has changed later. 
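 *
 * (Editor's note, not part of the original commit: the flags set just
 * below,
 *
 *	sdp->isp_devparam[t].dev_refresh = 1;
 *	sdp->update = 1;
 *
 * are picked up by isp_start(), which calls isp_spi_update() on the
 * next command issued for this bus.)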
*/ if (req_status_flags & RQSTF_NEGOTIATION) { int t = XS_TGT(xs); sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp->isp_devparam[t].dev_refresh = 1; sdp->update = 1; } } else { if (req_status_flags & RQSF_XFER_COMPLETE) { XS_SET_RESID(xs, 0); } else if (scsi_status & RQCS_RESID) { XS_SET_RESID(xs, resid); } else { XS_SET_RESID(xs, 0); } } if (snsp && slen) { if (totslen > slen) { continuations_expected += ((totslen - slen + QENTRY_LEN - 5) / (QENTRY_LEN - 4)); if (ndone > (MAX_REQUESTQ_COMPLETIONS - continuations_expected - 1)) { /* we'll lose some stats, but that's a small price to pay */ for (i = 0; i < ndone; i++) { if (complist[i]) { isp->isp_rsltccmplt++; isp_done(complist[i]); } } ndone = 0; } isp_prt(isp, ISP_LOGDEBUG0|ISP_LOG_CWARN, "Expecting %d more Status Continuations for total sense length of %u", continuations_expected, totslen); } XS_SAVE_SENSE(xs, snsp, totslen, slen); } else if ((req_status_flags & RQSF_GOT_STATUS) && (scsi_status & 0xff) == SCSI_CHECK && IS_FC(isp)) { isp_prt(isp, ISP_LOGWARN, "CHECK CONDITION w/o sense data for CDB=0x%x", XS_CDBP(xs)[0] & 0xff); isp_print_bytes(isp, "CC with no Sense", QENTRY_LEN, qe); } isp_prt(isp, ISP_LOGDEBUG2, "asked for %ld got raw resid %ld settled for %ld", (long) XS_XFRLEN(xs), resid, (long) XS_GET_RESID(xs)); break; case RQSTYPE_REQUEST: case RQSTYPE_A64: case RQSTYPE_T2RQS: case RQSTYPE_T3RQS: case RQSTYPE_T7RQS: if (!IS_24XX(isp) && (sp->req_header.rqs_flags & RQSFLAG_FULL)) { /* * Force Queue Full status. */ *XS_STSP(xs) = SCSI_QFULL; XS_SETERR(xs, HBA_NOERROR); } else if (XS_NOERR(xs)) { isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx badness at %s:%u", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs), __func__, __LINE__); XS_SETERR(xs, HBA_BOTCH); } XS_SET_RESID(xs, XS_XFRLEN(xs)); break; default: isp_print_bytes(isp, "Unhandled Response Type", QENTRY_LEN, qe); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } break; } /* * Free any DMA resources. As a side effect, this may * also do any cache flushing necessary for data coherence. */ if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, sp->req_handle); } isp_destroy_handle(isp, sp->req_handle); if (isp->isp_nactive > 0) { isp->isp_nactive--; } complist[ndone++] = xs; /* defer completion call until later */ ISP_MEMZERO(hp, QENTRY_LEN); /* PERF */ last_etype = etype; if (ndone == MAX_REQUESTQ_COMPLETIONS) { break; } } /* * If we looked at any commands, then it's valid to find out * what the outpointer is. It also is a trigger to update the * ISP's notion of what we've seen so far. */ if (nlooked) { ISP_WRITE(isp, isp->isp_respoutrp, optr); isp->isp_resodx = optr; if (isp->isp_rscchiwater < ndone) isp->isp_rscchiwater = ndone; } out: if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU_SEMA, 0); } for (i = 0; i < ndone; i++) { xs = complist[i]; if (xs) { if (((isp->isp_dblev & (ISP_LOGDEBUG1|ISP_LOGDEBUG2|ISP_LOGDEBUG3))) || ((isp->isp_dblev & (ISP_LOGDEBUG0|ISP_LOG_CWARN) && ((!XS_NOERR(xs)) || (*XS_STSP(xs) != SCSI_GOOD))))) { isp_prt_endcmd(isp, xs); } isp->isp_rsltccmplt++; isp_done(xs); } } } /* * Support routines. */ void isp_prt_endcmd(ispsoftc_t *isp, XS_T *xs) { char cdbstr[16 * 5 + 1]; int i, lim; lim = XS_CDBLEN(xs) > 16? 
16 : XS_CDBLEN(xs); ISP_SNPRINTF(cdbstr, sizeof (cdbstr), "0x%02x ", XS_CDBP(xs)[0]); for (i = 1; i < lim; i++) { ISP_SNPRINTF(cdbstr, sizeof (cdbstr), "%s0x%02x ", cdbstr, XS_CDBP(xs)[i]); } if (XS_SENSE_VALID(xs)) { isp_xs_prt(isp, xs, ISP_LOGALL, "FIN dl%d resid %ld CDB=%s SenseLength=%u/%u KEY/ASC/ASCQ=0x%02x/0x%02x/0x%02x", XS_XFRLEN(xs), (long) XS_GET_RESID(xs), cdbstr, XS_CUR_SNSLEN(xs), XS_TOT_SNSLEN(xs), XS_SNSKEY(xs), XS_SNSASC(xs), XS_SNSASCQ(xs)); } else { isp_xs_prt(isp, xs, ISP_LOGALL, "FIN dl%d resid %ld CDB=%s STS 0x%x XS_ERR=0x%x", XS_XFRLEN(xs), (long) XS_GET_RESID(xs), cdbstr, *XS_STSP(xs), XS_ERR(xs)); } } /* * Parse an ASYNC mailbox complete * * Return non-zero if the event has been acknowledged. */ static int isp_parse_async(ispsoftc_t *isp, uint16_t mbox) { int acked = 0; uint32_t h1 = 0, h2 = 0; uint16_t chan = 0; /* * Pick up the channel, but not if this is an ASYNC_RIO32_2, * where Mailboxes 6/7 have the second handle. */ if (mbox != ASYNC_RIO32_2) { if (IS_DUALBUS(isp)) { chan = ISP_READ(isp, OUTMAILBOX6); } } isp_prt(isp, ISP_LOGDEBUG2, "Async Mbox 0x%x", mbox); switch (mbox) { case ASYNC_BUS_RESET: ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif isp_async(isp, ISPASYNC_BUS_RESET, chan); break; case ASYNC_SYSTEM_ERROR: isp->isp_dead = 1; isp->isp_state = ISP_CRASHED; /* * Were we waiting for a mailbox command to complete? * If so, it's dead, so wake up the waiter. */ if (isp->isp_mboxbsy) { isp->isp_obits = 1; isp->isp_mboxtmp[0] = MBOX_HOST_INTERFACE_ERROR; MBOX_NOTIFY_COMPLETE(isp); } /* * It's up to the handler for isp_async to reinit stuff and * restart the firmware */ isp_async(isp, ISPASYNC_FW_CRASH); acked = 1; break; case ASYNC_RQS_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Request Queue Transfer Error"); break; case ASYNC_RSP_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Response Queue Transfer Error"); break; case ASYNC_QWAKEUP: /* * We've just been notified that the Queue has woken up. * We don't need to be chatty about this- just unlatch things * and move on.
*/ mbox = ISP_READ(isp, isp->isp_rqstoutrp); break; case ASYNC_TIMEOUT_RESET: isp_prt(isp, ISP_LOGWARN, "timeout initiated SCSI bus reset of chan %d", chan); ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif break; case ASYNC_DEVICE_RESET: isp_prt(isp, ISP_LOGINFO, "device reset on chan %d", chan); ISP_SET_SENDMARKER(isp, chan, 1); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif break; case ASYNC_EXTMSG_UNDERRUN: isp_prt(isp, ISP_LOGWARN, "extended message underrun"); break; case ASYNC_SCAM_INT: isp_prt(isp, ISP_LOGINFO, "SCAM interrupt"); break; case ASYNC_HUNG_SCSI: isp_prt(isp, ISP_LOGERR, "stalled SCSI Bus after DATA Overrun"); /* XXX: Need to issue SCSI reset at this point */ break; case ASYNC_KILLED_BUS: isp_prt(isp, ISP_LOGERR, "SCSI Bus reset after DATA Overrun"); break; case ASYNC_BUS_TRANSIT: mbox = ISP_READ(isp, OUTMAILBOX2); switch (mbox & SXP_PINS_MODE_MASK) { case SXP_PINS_LVD_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to LVD mode"); SDPARAM(isp, chan)->isp_diffmode = 0; SDPARAM(isp, chan)->isp_ultramode = 0; SDPARAM(isp, chan)->isp_lvdmode = 1; break; case SXP_PINS_HVD_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to Differential mode"); SDPARAM(isp, chan)->isp_diffmode = 1; SDPARAM(isp, chan)->isp_ultramode = 0; SDPARAM(isp, chan)->isp_lvdmode = 0; break; case SXP_PINS_SE_MODE: isp_prt(isp, ISP_LOGINFO, "Transition to Single Ended mode"); SDPARAM(isp, chan)->isp_diffmode = 0; SDPARAM(isp, chan)->isp_ultramode = 1; SDPARAM(isp, chan)->isp_lvdmode = 0; break; default: isp_prt(isp, ISP_LOGWARN, "Transition to Unknown Mode 0x%x", mbox); break; } /* * XXX: Set up to renegotiate again! */ /* Can only be for a 1080... */ ISP_SET_SENDMARKER(isp, chan, 1); break; case ASYNC_CMD_CMPLT: case ASYNC_RIO32_1: if (!IS_ULTRA3(isp)) { isp_prt(isp, ISP_LOGERR, "unexpected fast posting completion"); break; } /* FALLTHROUGH */ h1 = (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1); break; case ASYNC_RIO32_2: h1 = (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1); h2 = (ISP_READ(isp, OUTMAILBOX7) << 16) | ISP_READ(isp, OUTMAILBOX6); break; case ASYNC_RIO16_5: case ASYNC_RIO16_4: case ASYNC_RIO16_3: case ASYNC_RIO16_2: case ASYNC_RIO16_1: isp_prt(isp, ISP_LOGERR, "unexpected 16 bit RIO handle"); break; default: isp_prt(isp, ISP_LOGWARN, "%s: unhandled async code 0x%x", __func__, mbox); break; } if (h1 || h2) { isp_prt(isp, ISP_LOGDEBUG3, "fast post/rio completion of 0x%08x", h1); isp_fastpost_complete(isp, h1); if (h2) { isp_prt(isp, ISP_LOGDEBUG3, "fast post/rio completion of 0x%08x", h2); isp_fastpost_complete(isp, h2); if (isp->isp_fpcchiwater < 2) { isp->isp_fpcchiwater = 2; } } else { if (isp->isp_fpcchiwater < 1) { isp->isp_fpcchiwater = 1; } } } else { isp->isp_intoasync++; } return (acked); } #define GET_24XX_BUS(isp, chan, msg) \ if (IS_24XX(isp)) { \ chan = ISP_READ(isp, OUTMAILBOX3) & 0xff; \ if (chan >= isp->isp_nchan) { \ isp_prt(isp, ISP_LOGERR, "bogus channel %u for %s at line %d", chan, msg, __LINE__); \ break; \ } \ } static int isp_parse_async_fc(ispsoftc_t *isp, uint16_t mbox) { int acked = 0; uint16_t chan; if (IS_DUALBUS(isp)) { chan = ISP_READ(isp, OUTMAILBOX6); } else { chan = 0; } isp_prt(isp, ISP_LOGDEBUG2, "Async Mbox 0x%x", mbox); switch (mbox) { case ASYNC_SYSTEM_ERROR: isp->isp_dead = 1; isp->isp_state = ISP_CRASHED; FCPARAM(isp, chan)->isp_loopstate = LOOP_NIL; - FCPARAM(isp, chan)->isp_fwstate = FW_CONFIG_WAIT; + 
isp_change_fw_state(isp, chan, FW_CONFIG_WAIT); /* * Were we waiting for a mailbox command to complete? * If so, it's dead, so wake up the waiter. */ if (isp->isp_mboxbsy) { isp->isp_obits = 1; isp->isp_mboxtmp[0] = MBOX_HOST_INTERFACE_ERROR; MBOX_NOTIFY_COMPLETE(isp); } /* * It's up to the handler for isp_async to reinit stuff and * restart the firmware */ isp_async(isp, ISPASYNC_FW_CRASH); acked = 1; break; case ASYNC_RQS_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Request Queue Transfer Error"); break; case ASYNC_RSP_XFER_ERR: isp_prt(isp, ISP_LOGERR, "Response Queue Transfer Error"); break; case ASYNC_QWAKEUP: #ifdef ISP_TARGET_MODE if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGERR, "ATIO Queue Transfer Error"); break; } #endif isp_prt(isp, ISP_LOGERR, "%s: unexpected ASYNC_QWAKEUP code", __func__); break; case ASYNC_CMD_CMPLT: isp_fastpost_complete(isp, (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1)); if (isp->isp_fpcchiwater < 1) { isp->isp_fpcchiwater = 1; } break; case ASYNC_RIOZIO_STALL: break; case ASYNC_CTIO_DONE: #ifdef ISP_TARGET_MODE if (isp_target_async(isp, (ISP_READ(isp, OUTMAILBOX2) << 16) | ISP_READ(isp, OUTMAILBOX1), mbox)) { acked = 1; } else { isp->isp_fphccmplt++; } #else isp_prt(isp, ISP_LOGWARN, "unexpected ASYNC CTIO done"); #endif break; case ASYNC_LIP_ERROR: case ASYNC_LIP_F8: case ASYNC_LIP_OCCURRED: case ASYNC_PTPMODE: /* * These are broadcast events that have to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcparam *fcp = FCPARAM(isp, chan); int topo = fcp->isp_topo; if (fcp->role == ISP_ROLE_NONE) { continue; } - fcp->isp_fwstate = FW_CONFIG_WAIT; - fcp->isp_loopstate = LOOP_LIP_RCVD; + fcp->isp_loopstate = LOOP_NIL; ISP_SET_SENDMARKER(isp, chan, 1); - ISP_MARK_PORTDB(isp, chan, 1); isp_async(isp, ISPASYNC_LIP, chan); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif /* * We've had problems with data corruption occurring on * commands that complete (with no apparent error) after * we receive a LIP. This has been observed mostly on * Local Loop topologies. To be safe, let's just mark * all active initiator commands as dead. */ if (topo == TOPO_NL_PORT || topo == TOPO_FL_PORT) { int i, j; for (i = j = 0; i < isp->isp_maxcmds; i++) { XS_T *xs; isp_hdl_t *hdp; hdp = &isp->isp_xflist[i]; if (ISP_H2HT(hdp->handle) != ISP_HANDLE_INITIATOR) { continue; } xs = hdp->cmd; if (XS_CHANNEL(xs) != chan) { continue; } j++; isp_prt(isp, ISP_LOG_WARN1, "%d.%d.%jx bus reset set at %s:%u", XS_CHANNEL(xs), XS_TGT(xs), (uintmax_t)XS_LUN(xs), __func__, __LINE__); XS_SETERR(xs, HBA_BUSRESET); } if (j) { isp_prt(isp, ISP_LOGERR, lipd, chan, j); } } } break; case ASYNC_LOOP_UP: /* * This is a broadcast event that has to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) { continue; } ISP_SET_SENDMARKER(isp, chan, 1); - - fcp->isp_fwstate = FW_CONFIG_WAIT; - fcp->isp_loopstate = LOOP_LIP_RCVD; - ISP_MARK_PORTDB(isp, chan, 1); isp_async(isp, ISPASYNC_LOOP_UP, chan); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif } break; case ASYNC_LOOP_DOWN: /* * This is a broadcast event that has to be sent across * all active channels.
*/ for (chan = 0; chan < isp->isp_nchan; chan++) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) { continue; } ISP_SET_SENDMARKER(isp, chan, 1); - fcp->isp_fwstate = FW_CONFIG_WAIT; fcp->isp_loopstate = LOOP_NIL; - ISP_MARK_PORTDB(isp, chan, 1); isp_async(isp, ISPASYNC_LOOP_DOWN, chan); + #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif } break; case ASYNC_LOOP_RESET: /* * This is a broadcast event that has to be sent across * all active channels. */ for (chan = 0; chan < isp->isp_nchan; chan++) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) { continue; } ISP_SET_SENDMARKER(isp, chan, 1); - fcp->isp_fwstate = FW_CONFIG_WAIT; fcp->isp_loopstate = LOOP_NIL; - ISP_MARK_PORTDB(isp, chan, 1); isp_async(isp, ISPASYNC_LOOP_RESET, chan); #ifdef ISP_TARGET_MODE if (isp_target_async(isp, chan, mbox)) { acked = 1; } #endif } break; case ASYNC_PDB_CHANGED: { int echan, nphdl, nlstate, reason; if (IS_24XX(isp)) { nphdl = ISP_READ(isp, OUTMAILBOX1); nlstate = ISP_READ(isp, OUTMAILBOX2); reason = ISP_READ(isp, OUTMAILBOX3) >> 8; GET_24XX_BUS(isp, chan, "ASYNC_CHANGE_NOTIFY"); echan = (nphdl == NIL_HANDLE) ? isp->isp_nchan - 1 : chan; } else { nphdl = NIL_HANDLE; nlstate = reason = 0; chan = echan = 0; } for (; chan <= echan; chan++) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) { continue; } - ISP_SET_SENDMARKER(isp, chan, 1); - fcp->isp_loopstate = LOOP_PDB_RCVD; - ISP_MARK_PORTDB(isp, chan, 1); + if (fcp->isp_loopstate > LOOP_LTEST_DONE) + fcp->isp_loopstate = LOOP_LTEST_DONE; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_PDB, nphdl, nlstate, reason); } break; } case ASYNC_CHANGE_NOTIFY: { int lochan, hichan; if (ISP_FW_NEWER_THAN(isp, 4, 0, 25) && ISP_CAP_MULTI_ID(isp)) { GET_24XX_BUS(isp, chan, "ASYNC_CHANGE_NOTIFY"); lochan = chan; hichan = chan + 1; } else { lochan = 0; hichan = isp->isp_nchan; } for (chan = lochan; chan < hichan; chan++) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) { continue; } - if (fcp->isp_topo == TOPO_F_PORT) { + if (fcp->isp_loopstate > LOOP_LSCAN_DONE) fcp->isp_loopstate = LOOP_LSCAN_DONE; - } else { - fcp->isp_loopstate = LOOP_PDB_RCVD; - } - ISP_MARK_PORTDB(isp, chan, 1); isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_SNS); } break; } case ASYNC_CONNMODE: /* * This only applies to 2100 and 2200 cards */ if (!IS_2200(isp) && !IS_2100(isp)) { isp_prt(isp, ISP_LOGWARN, "bad card for ASYNC_CONNMODE event"); break; } chan = 0; mbox = ISP_READ(isp, OUTMAILBOX1); - ISP_MARK_PORTDB(isp, chan, 1); switch (mbox) { case ISP_CONN_LOOP: isp_prt(isp, ISP_LOGINFO, "Point-to-Point -> Loop mode"); break; case ISP_CONN_PTP: isp_prt(isp, ISP_LOGINFO, "Loop -> Point-to-Point mode"); break; case ISP_CONN_BADLIP: isp_prt(isp, ISP_LOGWARN, "Point-to-Point -> Loop mode (BAD LIP)"); break; case ISP_CONN_FATAL: isp->isp_dead = 1; isp->isp_state = ISP_CRASHED; isp_prt(isp, ISP_LOGERR, "FATAL CONNECTION ERROR"); isp_async(isp, ISPASYNC_FW_CRASH); return (-1); case ISP_CONN_LOOPBACK: isp_prt(isp, ISP_LOGWARN, "Looped Back in Point-to-Point mode"); break; default: isp_prt(isp, ISP_LOGWARN, "Unknown connection mode (0x%x)", mbox); break; } + ISP_SET_SENDMARKER(isp, chan, 1); + FCPARAM(isp, chan)->isp_loopstate = LOOP_NIL; isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_OTHER); - FCPARAM(isp, chan)->sendmarker = 1; - FCPARAM(isp, chan)->isp_fwstate = FW_CONFIG_WAIT; - FCPARAM(isp, chan)->isp_loopstate = LOOP_LIP_RCVD; break; case
ASYNC_RCV_ERR: if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGWARN, "Receive Error"); } else { isp_prt(isp, ISP_LOGWARN, "unexpected ASYNC_RCV_ERR"); } break; case ASYNC_RJT_SENT: /* same as ASYNC_QFULL_SENT */ if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGTDEBUG0, "LS_RJT sent"); break; } else if (IS_2200(isp)) { isp_prt(isp, ISP_LOGTDEBUG0, "QFULL sent"); break; } /* FALLTHROUGH */ default: isp_prt(isp, ISP_LOGWARN, "Unknown Async Code 0x%x", mbox); break; } if (mbox != ASYNC_CTIO_DONE && mbox != ASYNC_CMD_CMPLT) { isp->isp_intoasync++; } return (acked); } /* * Handle other response entries. A pointer to the request queue output * index is here in case we want to eat several entries at once, although * this is not used currently. */ static int isp_handle_other_response(ispsoftc_t *isp, int type, isphdr_t *hp, uint32_t *optrp) { isp_ridacq_t rid; int chan, c; switch (type) { case RQSTYPE_STATUS_CONT: isp_prt(isp, ISP_LOG_WARN1, "Ignored Continuation Response"); return (1); case RQSTYPE_MARKER: isp_prt(isp, ISP_LOG_WARN1, "Marker Response"); return (1); case RQSTYPE_RPT_ID_ACQ: isp_get_ridacq(isp, (isp_ridacq_t *)hp, &rid); if (rid.ridacq_format == 0) { for (chan = 0; chan < isp->isp_nchan; chan++) { fcparam *fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) continue; c = (chan == 0) ? 127 : (chan - 1); if (rid.ridacq_map[c / 16] & (1 << (c % 16))) isp_async(isp, ISPASYNC_CHANGE_NOTIFY, chan, ISPASYNC_CHANGE_OTHER); } } else { isp_async(isp, ISPASYNC_CHANGE_NOTIFY, rid.ridacq_vp_index, ISPASYNC_CHANGE_OTHER); } return (1); case RQSTYPE_ATIO: case RQSTYPE_CTIO: case RQSTYPE_ENABLE_LUN: case RQSTYPE_MODIFY_LUN: case RQSTYPE_NOTIFY: case RQSTYPE_NOTIFY_ACK: case RQSTYPE_CTIO1: case RQSTYPE_ATIO2: case RQSTYPE_CTIO2: case RQSTYPE_CTIO3: case RQSTYPE_CTIO7: case RQSTYPE_ABTS_RCVD: case RQSTYPE_ABTS_RSP: isp->isp_rsltccmplt++; /* count as a response completion */ #ifdef ISP_TARGET_MODE if (isp_target_notify(isp, (ispstatusreq_t *) hp, optrp)) { return (1); } #endif /* FALLTHROUGH */ case RQSTYPE_REQUEST: default: ISP_DELAY(100); if (type != isp_get_response_type(isp, hp)) { /* * This is questionable- we're just papering over * something we've seen on SMP linux in target * mode- we don't really know what's happening * here that causes us to think we've gotten * an entry, but that either the entry isn't * filled out yet or our CPU read data is stale. 
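 * Returning -1 here, rather than the 0 used for merely unhandled
 * types, lets the caller distinguish an apparently unstable entry
 * from one we simply do not recognize.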
*/ isp_prt(isp, ISP_LOGINFO, "unstable type in response queue"); return (-1); } isp_prt(isp, ISP_LOGWARN, "Unhandled Response Type 0x%x", isp_get_response_type(isp, hp)); return (0); } } static void isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp) { switch (sp->req_completion_status & 0xff) { case RQCS_COMPLETE: if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_INCOMPLETE: if ((sp->req_state_flags & RQSF_GOT_TARGET) == 0) { isp_xs_prt(isp, xs, ISP_LOG_WARN1, "Selection Timeout @ %s:%d", __func__, __LINE__); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); *rp = XS_XFRLEN(xs); } return; } isp_xs_prt(isp, xs, ISP_LOGERR, "Command Incomplete, state 0x%x", sp->req_state_flags); break; case RQCS_DMA_ERROR: isp_xs_prt(isp, xs, ISP_LOGERR, "DMA Error"); *rp = XS_XFRLEN(xs); break; case RQCS_TRANSPORT_ERROR: { char buf[172]; ISP_SNPRINTF(buf, sizeof (buf), "states=>"); if (sp->req_state_flags & RQSF_GOT_BUS) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_BUS", buf); } if (sp->req_state_flags & RQSF_GOT_TARGET) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_TGT", buf); } if (sp->req_state_flags & RQSF_SENT_CDB) { ISP_SNPRINTF(buf, sizeof (buf), "%s SENT_CDB", buf); } if (sp->req_state_flags & RQSF_XFRD_DATA) { ISP_SNPRINTF(buf, sizeof (buf), "%s XFRD_DATA", buf); } if (sp->req_state_flags & RQSF_GOT_STATUS) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_STS", buf); } if (sp->req_state_flags & RQSF_GOT_SENSE) { ISP_SNPRINTF(buf, sizeof (buf), "%s GOT_SNS", buf); } if (sp->req_state_flags & RQSF_XFER_COMPLETE) { ISP_SNPRINTF(buf, sizeof (buf), "%s XFR_CMPLT", buf); } ISP_SNPRINTF(buf, sizeof (buf), "%s\nstatus=>", buf); if (sp->req_status_flags & RQSTF_DISCONNECT) { ISP_SNPRINTF(buf, sizeof (buf), "%s Disconnect", buf); } if (sp->req_status_flags & RQSTF_SYNCHRONOUS) { ISP_SNPRINTF(buf, sizeof (buf), "%s Sync_xfr", buf); } if (sp->req_status_flags & RQSTF_PARITY_ERROR) { ISP_SNPRINTF(buf, sizeof (buf), "%s Parity", buf); } if (sp->req_status_flags & RQSTF_BUS_RESET) { ISP_SNPRINTF(buf, sizeof (buf), "%s Bus_Reset", buf); } if (sp->req_status_flags & RQSTF_DEVICE_RESET) { ISP_SNPRINTF(buf, sizeof (buf), "%s Device_Reset", buf); } if (sp->req_status_flags & RQSTF_ABORTED) { ISP_SNPRINTF(buf, sizeof (buf), "%s Aborted", buf); } if (sp->req_status_flags & RQSTF_TIMEOUT) { ISP_SNPRINTF(buf, sizeof (buf), "%s Timeout", buf); } if (sp->req_status_flags & RQSTF_NEGOTIATION) { ISP_SNPRINTF(buf, sizeof (buf), "%s Negotiation", buf); } isp_xs_prt(isp, xs, ISP_LOGERR, "Transport Error: %s", buf); *rp = XS_XFRLEN(xs); break; } case RQCS_RESET_OCCURRED: { int chan; isp_xs_prt(isp, xs, ISP_LOGWARN, "Bus Reset destroyed command"); for (chan = 0; chan < isp->isp_nchan; chan++) { FCPARAM(isp, chan)->sendmarker = 1; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BUSRESET); } *rp = XS_XFRLEN(xs); return; } case RQCS_ABORTED: isp_xs_prt(isp, xs, ISP_LOGERR, "Command Aborted"); ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_TIMEOUT: isp_xs_prt(isp, xs, ISP_LOGWARN, "Command timed out"); /* * XXX: Check to see if we logged out of the device. 
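 * For now we just mark the command with HBA_CMDTIMEOUT and leave
 * any logout recovery for later.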
*/ if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_CMDTIMEOUT); } return; case RQCS_DATA_OVERRUN: XS_SET_RESID(xs, sp->req_resid); isp_xs_prt(isp, xs, ISP_LOGERR, "data overrun (%ld)", (long) XS_GET_RESID(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_DATAOVR); } return; case RQCS_COMMAND_OVERRUN: isp_xs_prt(isp, xs, ISP_LOGERR, "command overrun"); break; case RQCS_STATUS_OVERRUN: isp_xs_prt(isp, xs, ISP_LOGERR, "status overrun"); break; case RQCS_BAD_MESSAGE: isp_xs_prt(isp, xs, ISP_LOGERR, "msg not COMMAND COMPLETE after status"); break; case RQCS_NO_MESSAGE_OUT: isp_xs_prt(isp, xs, ISP_LOGERR, "No MESSAGE OUT phase after selection"); break; case RQCS_EXT_ID_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "EXTENDED IDENTIFY failed"); break; case RQCS_IDE_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "INITIATOR DETECTED ERROR rejected"); break; case RQCS_ABORT_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "ABORT OPERATION rejected"); break; case RQCS_REJECT_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "MESSAGE REJECT rejected"); break; case RQCS_NOP_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "NOP rejected"); break; case RQCS_PARITY_ERROR_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "MESSAGE PARITY ERROR rejected"); break; case RQCS_DEVICE_RESET_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGWARN, "BUS DEVICE RESET rejected"); break; case RQCS_ID_MSG_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "IDENTIFY rejected"); break; case RQCS_UNEXP_BUS_FREE: isp_xs_prt(isp, xs, ISP_LOGERR, "Unexpected Bus Free"); break; case RQCS_DATA_UNDERRUN: { if (IS_FC(isp)) { int ru_marked = (sp->req_scsi_status & RQCS_RU) != 0; if (!ru_marked || sp->req_resid > XS_XFRLEN(xs)) { isp_xs_prt(isp, xs, ISP_LOGWARN, bun, XS_XFRLEN(xs), sp->req_resid, (ru_marked)? "marked" : "not marked"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } return; } } XS_SET_RESID(xs, sp->req_resid); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; } case RQCS_XACT_ERR1: isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued transaction with disconnect not set"); break; case RQCS_XACT_ERR2: isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued transaction to target routine %jx", (uintmax_t)XS_LUN(xs)); break; case RQCS_XACT_ERR3: isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued cmd when queueing disabled"); break; case RQCS_BAD_ENTRY: isp_prt(isp, ISP_LOGERR, "Invalid IOCB entry type detected"); break; case RQCS_QUEUE_FULL: isp_xs_prt(isp, xs, ISP_LOG_WARN1, "internal queues full status 0x%x", *XS_STSP(xs)); /* * If QFULL or some other status byte is set, then this * isn't an error, per se. * * Unfortunately, some QLogic f/w writers have, in * some cases, omitted to *set* status to QFULL.
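 * To compensate, we force the SCSI status byte to QFULL ourselves
 * below so that the upper layers still see the condition.
 */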
*/ #if 0 if (*XS_STSP(xs) != SCSI_GOOD && XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); return; } #endif *XS_STSP(xs) = SCSI_QFULL; XS_SETERR(xs, HBA_NOERROR); return; case RQCS_PHASE_SKIPPED: isp_xs_prt(isp, xs, ISP_LOGERR, "SCSI phase skipped"); break; case RQCS_ARQS_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "Auto Request Sense Failed"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ARQFAIL); } return; case RQCS_WIDE_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "Wide Negotiation Failed"); if (IS_SCSI(isp)) { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp->isp_devparam[XS_TGT(xs)].goal_flags &= ~DPARM_WIDE; sdp->isp_devparam[XS_TGT(xs)].dev_update = 1; sdp->update = 1; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_SYNCXFER_FAILED: isp_xs_prt(isp, xs, ISP_LOGERR, "SDTR Message Failed"); if (IS_SCSI(isp)) { sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs)); sdp += XS_CHANNEL(xs); sdp->isp_devparam[XS_TGT(xs)].goal_flags &= ~DPARM_SYNC; sdp->isp_devparam[XS_TGT(xs)].dev_update = 1; sdp->update = 1; } break; case RQCS_LVD_BUSERR: isp_xs_prt(isp, xs, ISP_LOGERR, "Bad LVD condition"); break; case RQCS_PORT_UNAVAILABLE: /* * No such port on the loop. Moral equivalent of SELTIMEO */ case RQCS_PORT_LOGGED_OUT: { const char *reason; uint8_t sts = sp->req_completion_status & 0xff; /* * It was there (maybe)- treat as a selection timeout. */ if (sts == RQCS_PORT_UNAVAILABLE) { reason = "unavailable"; } else { reason = "logout"; } isp_prt(isp, ISP_LOGINFO, "port %s for target %d", reason, XS_TGT(xs)); /* * If we're on a local loop, force a LIP (which is overkill) * to force a re-login of this unit. If we're on fabric, * then we'll have to log in again as a matter of course. */ if (FCPARAM(isp, 0)->isp_topo == TOPO_NL_PORT || FCPARAM(isp, 0)->isp_topo == TOPO_FL_PORT) { mbreg_t mbs; MBSINIT(&mbs, MBOX_INIT_LIP, MBLOGALL, 0); if (ISP_CAP_2KLOGIN(isp)) { mbs.ibits = (1 << 10); } isp_mboxcmd_qnw(isp, &mbs, 1); } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; } case RQCS_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, "port changed for target %d", XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; case RQCS_PORT_BUSY: isp_prt(isp, ISP_LOGWARN, "port busy for target %d", XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_TGTBSY); } return; default: isp_prt(isp, ISP_LOGERR, "Unknown Completion Status 0x%x", sp->req_completion_status); break; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } } static void isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp, XS_T *xs, long *rp) { int ru_marked, sv_marked; int chan = XS_CHANNEL(xs); switch (sp->req_completion_status) { case RQCS_COMPLETE: if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_DMA_ERROR: isp_xs_prt(isp, xs, ISP_LOGERR, "DMA error"); break; case RQCS_TRANSPORT_ERROR: isp_xs_prt(isp, xs, ISP_LOGERR, "Transport Error"); break; case RQCS_RESET_OCCURRED: isp_xs_prt(isp, xs, ISP_LOGWARN, "reset destroyed command"); FCPARAM(isp, chan)->sendmarker = 1; if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BUSRESET); } return; case RQCS_ABORTED: isp_xs_prt(isp, xs, ISP_LOGERR, "Command Aborted"); FCPARAM(isp, chan)->sendmarker = 1; if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_TIMEOUT: isp_xs_prt(isp, xs, ISP_LOGWARN, "Command Timed Out"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_CMDTIMEOUT); } return; case RQCS_DATA_OVERRUN: XS_SET_RESID(xs, sp->req_resid); isp_xs_prt(isp, xs, ISP_LOGERR, "Data Overrun"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_DATAOVR); } return; case RQCS_24XX_DRE: /* data 
reassembly error */ isp_prt(isp, ISP_LOGERR, "Chan %d data reassembly error for target %d", chan, XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } *rp = XS_XFRLEN(xs); return; case RQCS_24XX_TABORT: /* aborted by target */ isp_prt(isp, ISP_LOGERR, "Chan %d target %d sent ABTS", chan, XS_TGT(xs)); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_ABORTED); } return; case RQCS_DATA_UNDERRUN: ru_marked = (sp->req_scsi_status & RQCS_RU) != 0; /* * We can get an underrun w/o things being marked * if we got a non-zero status. */ sv_marked = (sp->req_scsi_status & (RQCS_SV|RQCS_RV)) != 0; if ((ru_marked == 0 && sv_marked == 0) || (sp->req_resid > XS_XFRLEN(xs))) { isp_xs_prt(isp, xs, ISP_LOGWARN, bun, XS_XFRLEN(xs), sp->req_resid, (ru_marked)? "marked" : "not marked"); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } return; } XS_SET_RESID(xs, sp->req_resid); isp_xs_prt(isp, xs, ISP_LOG_WARN1, "Data Underrun (%d) for command 0x%x", sp->req_resid, XS_CDBP(xs)[0] & 0xff); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_NOERROR); } return; case RQCS_PORT_UNAVAILABLE: /* * No such port on the loop. Moral equivalent of SELTIMEO */ case RQCS_PORT_LOGGED_OUT: { const char *reason; uint8_t sts = sp->req_completion_status & 0xff; /* * It was there (maybe)- treat as a selection timeout. */ if (sts == RQCS_PORT_UNAVAILABLE) { reason = "unavailable"; } else { reason = "logout"; } isp_prt(isp, ISP_LOGINFO, "Chan %d port %s for target %d", chan, reason, XS_TGT(xs)); /* * There is no MBOX_INIT_LIP for the 24XX. */ if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; } case RQCS_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, "port changed for target %d chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_SELTIMEOUT); } return; case RQCS_24XX_ENOMEM: /* f/w resource unavailable */ isp_prt(isp, ISP_LOGWARN, "f/w resource unavailable for target %d chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; case RQCS_24XX_TMO: /* task management overrun */ isp_prt(isp, ISP_LOGWARN, "command for target %d overlapped task management for chan %d", XS_TGT(xs), chan); if (XS_NOERR(xs)) { *XS_STSP(xs) = SCSI_BUSY; XS_SETERR(xs, HBA_TGTBSY); } return; default: isp_prt(isp, ISP_LOGERR, "Unknown Completion Status 0x%x on chan %d", sp->req_completion_status, chan); break; } if (XS_NOERR(xs)) { XS_SETERR(xs, HBA_BOTCH); } } static void isp_fastpost_complete(ispsoftc_t *isp, uint32_t fph) { XS_T *xs; if (fph == 0) { return; } xs = isp_find_xs(isp, fph); if (xs == NULL) { isp_prt(isp, ISP_LOGWARN, "Command for fast post handle 0x%x not found", fph); return; } isp_destroy_handle(isp, fph); /* * Since we don't have a result queue entry item, * we must believe that SCSI status is zero and * that all data transferred. */ XS_SET_RESID(xs, 0); *XS_STSP(xs) = SCSI_GOOD; if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, fph); } if (isp->isp_nactive) { isp->isp_nactive--; } isp->isp_fphccmplt++; isp_done(xs); } static int isp_mbox_continue(ispsoftc_t *isp) { mbreg_t mbs; uint16_t *ptr; uint32_t offset; switch (isp->isp_lastmbxcmd) { case MBOX_WRITE_RAM_WORD: case MBOX_READ_RAM_WORD: case MBOX_WRITE_RAM_WORD_EXTENDED: case MBOX_READ_RAM_WORD_EXTENDED: break; default: return (1); } if (isp->isp_mboxtmp[0] != MBOX_COMMAND_COMPLETE) { isp->isp_mbxwrk0 = 0; return (-1); } /* * Clear the previous interrupt. 
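 * A single HCCR write suffices on the 24XX; older chips must also
 * clear the BIU semaphore register.
 */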
*/ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT); ISP_WRITE(isp, BIU_SEMA, 0); } /* * Continue with next word. */ ISP_MEMZERO(&mbs, sizeof (mbs)); ptr = isp->isp_mbxworkp; switch (isp->isp_lastmbxcmd) { case MBOX_WRITE_RAM_WORD: mbs.param[1] = isp->isp_mbxwrk1++; mbs.param[2] = *ptr++; break; case MBOX_READ_RAM_WORD: *ptr++ = isp->isp_mboxtmp[2]; mbs.param[1] = isp->isp_mbxwrk1++; break; case MBOX_WRITE_RAM_WORD_EXTENDED: if (IS_24XX(isp)) { uint32_t *lptr = (uint32_t *)ptr; mbs.param[2] = lptr[0]; mbs.param[3] = lptr[0] >> 16; lptr++; ptr = (uint16_t *)lptr; } else { mbs.param[2] = *ptr++; } offset = isp->isp_mbxwrk1; offset |= isp->isp_mbxwrk8 << 16; mbs.param[1] = offset; mbs.param[8] = offset >> 16; offset++; isp->isp_mbxwrk1 = offset; isp->isp_mbxwrk8 = offset >> 16; break; case MBOX_READ_RAM_WORD_EXTENDED: if (IS_24XX(isp)) { uint32_t *lptr = (uint32_t *)ptr; uint32_t val = isp->isp_mboxtmp[2]; val |= (isp->isp_mboxtmp[3]) << 16; *lptr++ = val; ptr = (uint16_t *)lptr; } else { *ptr++ = isp->isp_mboxtmp[2]; } offset = isp->isp_mbxwrk1; offset |= isp->isp_mbxwrk8 << 16; mbs.param[1] = offset; mbs.param[8] = offset >> 16; offset++; isp->isp_mbxwrk1 = offset; isp->isp_mbxwrk8 = offset >> 16; break; } isp->isp_mbxworkp = ptr; isp->isp_mbxwrk0--; mbs.param[0] = isp->isp_lastmbxcmd; mbs.logval = MBLOGALL; isp_mboxcmd_qnw(isp, &mbs, 0); return (0); } #define ISP_SCSI_IBITS(op) (mbpscsi[((op)<<1)]) #define ISP_SCSI_OBITS(op) (mbpscsi[((op)<<1) + 1]) #define ISP_SCSI_OPMAP(in, out) in, out static const uint8_t mbpscsi[] = { ISP_SCSI_OPMAP(0x01, 0x01), /* 0x00: MBOX_NO_OP */ ISP_SCSI_OPMAP(0x1f, 0x01), /* 0x01: MBOX_LOAD_RAM */ ISP_SCSI_OPMAP(0x03, 0x01), /* 0x02: MBOX_EXEC_FIRMWARE */ ISP_SCSI_OPMAP(0x1f, 0x01), /* 0x03: MBOX_DUMP_RAM */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x04: MBOX_WRITE_RAM_WORD */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x05: MBOX_READ_RAM_WORD */ ISP_SCSI_OPMAP(0x3f, 0x3f), /* 0x06: MBOX_MAILBOX_REG_TEST */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x07: MBOX_VERIFY_CHECKSUM */ ISP_SCSI_OPMAP(0x01, 0x0f), /* 0x08: MBOX_ABOUT_FIRMWARE */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x09: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0a: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0b: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0d: */ ISP_SCSI_OPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x0f: */ ISP_SCSI_OPMAP(0x1f, 0x1f), /* 0x10: MBOX_INIT_REQ_QUEUE */ ISP_SCSI_OPMAP(0x3f, 0x3f), /* 0x11: MBOX_INIT_RES_QUEUE */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x12: MBOX_EXECUTE_IOCB */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x13: MBOX_WAKE_UP */ ISP_SCSI_OPMAP(0x01, 0x3f), /* 0x14: MBOX_STOP_FIRMWARE */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x15: MBOX_ABORT */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x16: MBOX_ABORT_DEVICE */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x17: MBOX_ABORT_TARGET */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x18: MBOX_BUS_RESET */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x19: MBOX_STOP_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x1a: MBOX_START_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x1b: MBOX_SINGLE_STEP_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x1c: MBOX_ABORT_QUEUE */ ISP_SCSI_OPMAP(0x03, 0x4f), /* 0x1d: MBOX_GET_DEV_QUEUE_STATUS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x1e: */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x1f: MBOX_GET_FIRMWARE_STATUS */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x20: MBOX_GET_INIT_SCSI_ID */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x21: MBOX_GET_SELECT_TIMEOUT */ ISP_SCSI_OPMAP(0x01, 0xc7), /* 0x22: MBOX_GET_RETRY_COUNT 
*/ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x23: MBOX_GET_TAG_AGE_LIMIT */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x24: MBOX_GET_CLOCK_RATE */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x25: MBOX_GET_ACT_NEG_STATE */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x26: MBOX_GET_ASYNC_DATA_SETUP_TIME */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x27: MBOX_GET_PCI_PARAMS */ ISP_SCSI_OPMAP(0x03, 0x4f), /* 0x28: MBOX_GET_TARGET_PARAMS */ ISP_SCSI_OPMAP(0x03, 0x0f), /* 0x29: MBOX_GET_DEV_QUEUE_PARAMS */ ISP_SCSI_OPMAP(0x01, 0x07), /* 0x2a: MBOX_GET_RESET_DELAY_PARAMS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2b: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2d: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2e: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x2f: */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x30: MBOX_SET_INIT_SCSI_ID */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x31: MBOX_SET_SELECT_TIMEOUT */ ISP_SCSI_OPMAP(0xc7, 0xc7), /* 0x32: MBOX_SET_RETRY_COUNT */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x33: MBOX_SET_TAG_AGE_LIMIT */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x34: MBOX_SET_CLOCK_RATE */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x35: MBOX_SET_ACT_NEG_STATE */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x36: MBOX_SET_ASYNC_DATA_SETUP_TIME */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x37: MBOX_SET_PCI_CONTROL_PARAMS */ ISP_SCSI_OPMAP(0x4f, 0x4f), /* 0x38: MBOX_SET_TARGET_PARAMS */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x39: MBOX_SET_DEV_QUEUE_PARAMS */ ISP_SCSI_OPMAP(0x07, 0x07), /* 0x3a: MBOX_SET_RESET_DELAY_PARAMS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3b: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3d: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3e: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x3f: */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x40: MBOX_RETURN_BIOS_BLOCK_ADDR */ ISP_SCSI_OPMAP(0x3f, 0x01), /* 0x41: MBOX_WRITE_FOUR_RAM_WORDS */ ISP_SCSI_OPMAP(0x03, 0x07), /* 0x42: MBOX_EXEC_BIOS_IOCB */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x43: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x44: */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x45: SET SYSTEM PARAMETER */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x46: GET SYSTEM PARAMETER */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x47: */ ISP_SCSI_OPMAP(0x01, 0xcf), /* 0x48: GET SCAM CONFIGURATION */ ISP_SCSI_OPMAP(0xcf, 0xcf), /* 0x49: SET SCAM CONFIGURATION */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x4a: MBOX_SET_FIRMWARE_FEATURES */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x4b: MBOX_GET_FIRMWARE_FEATURES */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4c: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4d: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4e: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x4f: */ ISP_SCSI_OPMAP(0xdf, 0xdf), /* 0x50: LOAD RAM A64 */ ISP_SCSI_OPMAP(0xdf, 0xdf), /* 0x51: DUMP RAM A64 */ ISP_SCSI_OPMAP(0xdf, 0xff), /* 0x52: INITIALIZE REQUEST QUEUE A64 */ ISP_SCSI_OPMAP(0xef, 0xff), /* 0x53: INITIALIZE RESPONSE QUEUE A64 */ ISP_SCSI_OPMAP(0xcf, 0x01), /* 0x54: EXECUTE COMMAND IOCB A64 */ ISP_SCSI_OPMAP(0x07, 0x01), /* 0x55: ENABLE TARGET MODE */ ISP_SCSI_OPMAP(0x03, 0x0f), /* 0x56: GET TARGET STATUS */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x57: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x58: */ ISP_SCSI_OPMAP(0x00, 0x00), /* 0x59: */ ISP_SCSI_OPMAP(0x03, 0x03), /* 0x5a: SET DATA OVERRUN RECOVERY MODE */ ISP_SCSI_OPMAP(0x01, 0x03), /* 0x5b: GET DATA OVERRUN RECOVERY MODE */ ISP_SCSI_OPMAP(0x0f, 0x0f), /* 0x5c: SET HOST DATA */ ISP_SCSI_OPMAP(0x01, 0x01) /* 0x5d: GET HOST DATA */ }; #define MAX_SCSI_OPCODE 0x5d static const char *scsi_mbcmd_names[] = { "NO-OP", "LOAD RAM", "EXEC FIRMWARE", "DUMP RAM", "WRITE RAM WORD", "READ RAM WORD", "MAILBOX REG TEST", "VERIFY CHECKSUM", "ABOUT FIRMWARE", NULL, NULL, NULL, NULL, NULL, "CHECK FIRMWARE", NULL,
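/* 0x10 and up; indexed by opcode, in step with mbpscsi above */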
"INIT REQUEST QUEUE", "INIT RESULT QUEUE", "EXECUTE IOCB", "WAKE UP", "STOP FIRMWARE", "ABORT", "ABORT DEVICE", "ABORT TARGET", "BUS RESET", "STOP QUEUE", "START QUEUE", "SINGLE STEP QUEUE", "ABORT QUEUE", "GET DEV QUEUE STATUS", NULL, "GET FIRMWARE STATUS", "GET INIT SCSI ID", "GET SELECT TIMEOUT", "GET RETRY COUNT", "GET TAG AGE LIMIT", "GET CLOCK RATE", "GET ACT NEG STATE", "GET ASYNC DATA SETUP TIME", "GET PCI PARAMS", "GET TARGET PARAMS", "GET DEV QUEUE PARAMS", "GET RESET DELAY PARAMS", NULL, NULL, NULL, NULL, NULL, "SET INIT SCSI ID", "SET SELECT TIMEOUT", "SET RETRY COUNT", "SET TAG AGE LIMIT", "SET CLOCK RATE", "SET ACT NEG STATE", "SET ASYNC DATA SETUP TIME", "SET PCI CONTROL PARAMS", "SET TARGET PARAMS", "SET DEV QUEUE PARAMS", "SET RESET DELAY PARAMS", NULL, NULL, NULL, NULL, NULL, "RETURN BIOS BLOCK ADDR", "WRITE FOUR RAM WORDS", "EXEC BIOS IOCB", NULL, NULL, "SET SYSTEM PARAMETER", "GET SYSTEM PARAMETER", NULL, "GET SCAM CONFIGURATION", "SET SCAM CONFIGURATION", "SET FIRMWARE FEATURES", "GET FIRMWARE FEATURES", NULL, NULL, NULL, NULL, "LOAD RAM A64", "DUMP RAM A64", "INITIALIZE REQUEST QUEUE A64", "INITIALIZE RESPONSE QUEUE A64", "EXECUTE IOCB A64", "ENABLE TARGET MODE", "GET TARGET MODE STATE", NULL, NULL, NULL, "SET DATA OVERRUN RECOVERY MODE", "GET DATA OVERRUN RECOVERY MODE", "SET HOST DATA", "GET NOST DATA", }; #define ISP_FC_IBITS(op) ((mbpfc[((op)<<3) + 0] << 24) | (mbpfc[((op)<<3) + 1] << 16) | (mbpfc[((op)<<3) + 2] << 8) | (mbpfc[((op)<<3) + 3])) #define ISP_FC_OBITS(op) ((mbpfc[((op)<<3) + 4] << 24) | (mbpfc[((op)<<3) + 5] << 16) | (mbpfc[((op)<<3) + 6] << 8) | (mbpfc[((op)<<3) + 7])) #define ISP_FC_OPMAP(in0, out0) 0, 0, 0, in0, 0, 0, 0, out0 #define ISP_FC_OPMAP_HALF(in1, in0, out1, out0) 0, 0, in1, in0, 0, 0, out1, out0 #define ISP_FC_OPMAP_FULL(in3, in2, in1, in0, out3, out2, out1, out0) in3, in2, in1, in0, out3, out2, out1, out0 static const uint32_t mbpfc[] = { ISP_FC_OPMAP(0x01, 0x01), /* 0x00: MBOX_NO_OP */ ISP_FC_OPMAP(0x1f, 0x01), /* 0x01: MBOX_LOAD_RAM */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x02: MBOX_EXEC_FIRMWARE */ ISP_FC_OPMAP(0xdf, 0x01), /* 0x03: MBOX_DUMP_RAM */ ISP_FC_OPMAP(0x07, 0x07), /* 0x04: MBOX_WRITE_RAM_WORD */ ISP_FC_OPMAP(0x03, 0x07), /* 0x05: MBOX_READ_RAM_WORD */ ISP_FC_OPMAP_FULL(0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff), /* 0x06: MBOX_MAILBOX_REG_TEST */ ISP_FC_OPMAP(0x07, 0x07), /* 0x07: MBOX_VERIFY_CHECKSUM */ ISP_FC_OPMAP_FULL(0x0, 0x0, 0x0, 0x01, 0x0, 0x3, 0x80, 0x7f), /* 0x08: MBOX_ABOUT_FIRMWARE */ ISP_FC_OPMAP(0xdf, 0x01), /* 0x09: MBOX_LOAD_RISC_RAM_2100 */ ISP_FC_OPMAP(0xdf, 0x01), /* 0x0a: DUMP RAM */ ISP_FC_OPMAP_HALF(0x1, 0xff, 0x0, 0x01), /* 0x0b: MBOX_LOAD_RISC_RAM */ ISP_FC_OPMAP(0x00, 0x00), /* 0x0c: */ ISP_FC_OPMAP_HALF(0x1, 0x0f, 0x0, 0x01), /* 0x0d: MBOX_WRITE_RAM_WORD_EXTENDED */ ISP_FC_OPMAP(0x01, 0x05), /* 0x0e: MBOX_CHECK_FIRMWARE */ ISP_FC_OPMAP_HALF(0x1, 0x03, 0x0, 0x0d), /* 0x0f: MBOX_READ_RAM_WORD_EXTENDED */ ISP_FC_OPMAP(0x1f, 0x11), /* 0x10: MBOX_INIT_REQ_QUEUE */ ISP_FC_OPMAP(0x2f, 0x21), /* 0x11: MBOX_INIT_RES_QUEUE */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x12: MBOX_EXECUTE_IOCB */ ISP_FC_OPMAP(0x03, 0x03), /* 0x13: MBOX_WAKE_UP */ ISP_FC_OPMAP(0x01, 0xff), /* 0x14: MBOX_STOP_FIRMWARE */ ISP_FC_OPMAP(0x4f, 0x01), /* 0x15: MBOX_ABORT */ ISP_FC_OPMAP(0x07, 0x01), /* 0x16: MBOX_ABORT_DEVICE */ ISP_FC_OPMAP(0x07, 0x01), /* 0x17: MBOX_ABORT_TARGET */ ISP_FC_OPMAP(0x03, 0x03), /* 0x18: MBOX_BUS_RESET */ ISP_FC_OPMAP(0x07, 0x05), /* 0x19: MBOX_STOP_QUEUE */ ISP_FC_OPMAP(0x07, 0x05), /* 0x1a: MBOX_START_QUEUE */ 
ISP_FC_OPMAP(0x07, 0x05), /* 0x1b: MBOX_SINGLE_STEP_QUEUE */ ISP_FC_OPMAP(0x07, 0x05), /* 0x1c: MBOX_ABORT_QUEUE */ ISP_FC_OPMAP(0x07, 0x03), /* 0x1d: MBOX_GET_DEV_QUEUE_STATUS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x1e: */ ISP_FC_OPMAP(0x01, 0x07), /* 0x1f: MBOX_GET_FIRMWARE_STATUS */ ISP_FC_OPMAP_HALF(0x2, 0x01, 0x7e, 0xcf), /* 0x20: MBOX_GET_LOOP_ID */ ISP_FC_OPMAP(0x00, 0x00), /* 0x21: */ ISP_FC_OPMAP(0x01, 0x07), /* 0x22: MBOX_GET_RETRY_COUNT */ ISP_FC_OPMAP(0x00, 0x00), /* 0x23: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x24: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x25: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x26: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x27: */ ISP_FC_OPMAP(0x01, 0x03), /* 0x28: MBOX_GET_FIRMWARE_OPTIONS */ ISP_FC_OPMAP(0x03, 0x07), /* 0x29: MBOX_GET_PORT_QUEUE_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2a: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2b: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2c: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2d: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x2f: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x30: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x31: */ ISP_FC_OPMAP(0x07, 0x07), /* 0x32: MBOX_SET_RETRY_COUNT */ ISP_FC_OPMAP(0x00, 0x00), /* 0x33: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x34: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x35: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x36: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x37: */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x38: MBOX_SET_FIRMWARE_OPTIONS */ ISP_FC_OPMAP(0x0f, 0x07), /* 0x39: MBOX_SET_PORT_QUEUE_PARAMS */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3a: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3b: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3c: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3d: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x3f: */ ISP_FC_OPMAP(0x03, 0x01), /* 0x40: MBOX_LOOP_PORT_BYPASS */ ISP_FC_OPMAP(0x03, 0x01), /* 0x41: MBOX_LOOP_PORT_ENABLE */ ISP_FC_OPMAP_HALF(0x0, 0x01, 0x3, 0xcf), /* 0x42: MBOX_GET_RESOURCE_COUNT */ ISP_FC_OPMAP(0x01, 0x01), /* 0x43: MBOX_REQUEST_OFFLINE_MODE */ ISP_FC_OPMAP(0x00, 0x00), /* 0x44: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x45: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x46: */ ISP_FC_OPMAP(0xcf, 0x03), /* 0x47: GET PORT_DATABASE ENHANCED */ ISP_FC_OPMAP(0xcf, 0x0f), /* 0x48: MBOX_INIT_FIRMWARE_MULTI_ID */ ISP_FC_OPMAP(0xcd, 0x01), /* 0x49: MBOX_GET_VP_DATABASE */ ISP_FC_OPMAP_HALF(0x2, 0xcd, 0x0, 0x01), /* 0x4a: MBOX_GET_VP_DATABASE_ENTRY */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4b: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4c: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4d: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x4f: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x50: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x51: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x52: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x53: */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x54: EXECUTE IOCB A64 */ ISP_FC_OPMAP(0x00, 0x00), /* 0x55: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x56: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x57: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x58: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x59: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x5a: */ ISP_FC_OPMAP(0x03, 0x01), /* 0x5b: MBOX_DRIVER_HEARTBEAT */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x5c: MBOX_FW_HEARTBEAT */ ISP_FC_OPMAP(0x07, 0x03), /* 0x5d: MBOX_GET_SET_DATA_RATE */ ISP_FC_OPMAP(0x00, 0x00), /* 0x5e: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x5f: */ ISP_FC_OPMAP(0xcf, 0x0f), /* 0x60: MBOX_INIT_FIRMWARE */ ISP_FC_OPMAP(0x00, 0x00), /* 0x61: */ ISP_FC_OPMAP(0x01, 0x01), /* 0x62: MBOX_INIT_LIP */ ISP_FC_OPMAP(0xcd, 0x03), /* 0x63: MBOX_GET_FC_AL_POSITION_MAP */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x64: MBOX_GET_PORT_DB */ ISP_FC_OPMAP(0x07, 0x01), /* 0x65: MBOX_CLEAR_ACA */ ISP_FC_OPMAP(0x07, 0x01), /* 
0x66: MBOX_TARGET_RESET */ ISP_FC_OPMAP(0x07, 0x01), /* 0x67: MBOX_CLEAR_TASK_SET */ ISP_FC_OPMAP(0x07, 0x01), /* 0x68: MBOX_ABORT_TASK_SET */ ISP_FC_OPMAP(0x01, 0x07), /* 0x69: MBOX_GET_FW_STATE */ ISP_FC_OPMAP_HALF(0x6, 0x03, 0x0, 0xcf), /* 0x6a: MBOX_GET_PORT_NAME */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x6b: MBOX_GET_LINK_STATUS */ ISP_FC_OPMAP(0x0f, 0x01), /* 0x6c: MBOX_INIT_LIP_RESET */ ISP_FC_OPMAP(0x00, 0x00), /* 0x6d: */ ISP_FC_OPMAP(0xcf, 0x03), /* 0x6e: MBOX_SEND_SNS */ ISP_FC_OPMAP(0x0f, 0x07), /* 0x6f: MBOX_FABRIC_LOGIN */ ISP_FC_OPMAP(0x03, 0x01), /* 0x70: MBOX_SEND_CHANGE_REQUEST */ ISP_FC_OPMAP(0x03, 0x03), /* 0x71: MBOX_FABRIC_LOGOUT */ ISP_FC_OPMAP(0x0f, 0x0f), /* 0x72: MBOX_INIT_LIP_LOGIN */ ISP_FC_OPMAP(0x00, 0x00), /* 0x73: */ ISP_FC_OPMAP(0x07, 0x01), /* 0x74: LOGIN LOOP PORT */ ISP_FC_OPMAP_HALF(0x03, 0xcf, 0x00, 0x07), /* 0x75: GET PORT/NODE NAME LIST */ ISP_FC_OPMAP(0x4f, 0x01), /* 0x76: SET VENDOR ID */ ISP_FC_OPMAP(0xcd, 0x01), /* 0x77: INITIALIZE IP MAILBOX */ ISP_FC_OPMAP(0x00, 0x00), /* 0x78: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x79: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x7a: */ ISP_FC_OPMAP(0x00, 0x00), /* 0x7b: */ ISP_FC_OPMAP_HALF(0x03, 0x4f, 0x00, 0x07), /* 0x7c: Get ID List */ ISP_FC_OPMAP(0xcf, 0x01), /* 0x7d: SEND LFA */ ISP_FC_OPMAP(0x0f, 0x01) /* 0x7e: LUN RESET */ }; #define MAX_FC_OPCODE 0x7e /* * Footnotes * * (1): this sets bits 21..16 in mailbox register #8, which we nominally * do not access at this time in the core driver. The caller is * responsible for setting this register first (Gross!). The assumption * is that we won't overflow. */ static const char *fc_mbcmd_names[] = { "NO-OP", "LOAD RAM", "EXEC FIRMWARE", "DUMP RAM", "WRITE RAM WORD", "READ RAM WORD", "MAILBOX REG TEST", "VERIFY CHECKSUM", "ABOUT FIRMWARE", "LOAD RAM (2100)", "DUMP RAM", "LOAD RISC RAM", NULL, "WRITE RAM WORD EXTENDED", "CHECK FIRMWARE", "READ RAM WORD EXTENDED", "INIT REQUEST QUEUE", "INIT RESULT QUEUE", "EXECUTE IOCB", "WAKE UP", "STOP FIRMWARE", "ABORT", "ABORT DEVICE", "ABORT TARGET", "BUS RESET", "STOP QUEUE", "START QUEUE", "SINGLE STEP QUEUE", "ABORT QUEUE", "GET DEV QUEUE STATUS", NULL, "GET FIRMWARE STATUS", "GET LOOP ID", NULL, "GET RETRY COUNT", NULL, NULL, NULL, NULL, NULL, "GET FIRMWARE OPTIONS", "GET PORT QUEUE PARAMS", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "SET RETRY COUNT", NULL, NULL, NULL, NULL, NULL, "SET FIRMWARE OPTIONS", "SET PORT QUEUE PARAMS", NULL, NULL, NULL, NULL, NULL, NULL, "LOOP PORT BYPASS", "LOOP PORT ENABLE", "GET RESOURCE COUNT", "REQUEST NON PARTICIPATING MODE", NULL, NULL, NULL, "GET PORT DATABASE ENHANCED", "INIT FIRMWARE MULTI ID", "GET VP DATABASE", "GET VP DATABASE ENTRY", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "EXECUTE IOCB A64", NULL, NULL, NULL, NULL, NULL, NULL, "DRIVER HEARTBEAT", NULL, "GET/SET DATA RATE", NULL, NULL, "INIT FIRMWARE", NULL, "INIT LIP", "GET FC-AL POSITION MAP", "GET PORT DATABASE", "CLEAR ACA", "TARGET RESET", "CLEAR TASK SET", "ABORT TASK SET", "GET FW STATE", "GET PORT NAME", "GET LINK STATUS", "INIT LIP RESET", NULL, "SEND SNS", "FABRIC LOGIN", "SEND CHANGE REQUEST", "FABRIC LOGOUT", "INIT LIP LOGIN", NULL, "LOGIN LOOP PORT", "GET PORT/NODE NAME LIST", "SET VENDOR ID", "INITIALIZE IP MAILBOX", NULL, NULL, NULL, NULL, "Get ID List", "SEND LFA", "Lun RESET" }; static void isp_mboxcmd_qnw(ispsoftc_t *isp, mbreg_t *mbp, int nodelay) { unsigned int ibits, obits, box, opcode; opcode = mbp->param[0]; if (IS_FC(isp)) { ibits = ISP_FC_IBITS(opcode); obits = ISP_FC_OBITS(opcode); } else { ibits = 
ISP_SCSI_IBITS(opcode); obits = ISP_SCSI_OBITS(opcode); } ibits |= mbp->ibits; obits |= mbp->obits; for (box = 0; box < ISP_NMBOX(isp); box++) { if (ibits & (1 << box)) { ISP_WRITE(isp, MBOX_OFF(box), mbp->param[box]); } if (nodelay == 0) { isp->isp_mboxtmp[box] = mbp->param[box] = 0; } } if (nodelay == 0) { isp->isp_lastmbxcmd = opcode; isp->isp_obits = obits; isp->isp_mboxbsy = 1; } if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_SET_HOST_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_SET_HOST_INT); } /* * Oddly enough, if we're not delaying for an answer, * delay a bit to give the f/w a chance to pick up the * command. */ if (nodelay) { ISP_DELAY(1000); } } static void isp_mboxcmd(ispsoftc_t *isp, mbreg_t *mbp) { const char *cname, *xname, *sname; char tname[16], mname[16]; unsigned int ibits, obits, box, opcode; opcode = mbp->param[0]; if (IS_FC(isp)) { if (opcode > MAX_FC_OPCODE) { mbp->param[0] = MBOX_INVALID_COMMAND; isp_prt(isp, ISP_LOGERR, "Unknown Command 0x%x", opcode); return; } cname = fc_mbcmd_names[opcode]; ibits = ISP_FC_IBITS(opcode); obits = ISP_FC_OBITS(opcode); } else { if (opcode > MAX_SCSI_OPCODE) { mbp->param[0] = MBOX_INVALID_COMMAND; isp_prt(isp, ISP_LOGERR, "Unknown Command 0x%x", opcode); return; } cname = scsi_mbcmd_names[opcode]; ibits = ISP_SCSI_IBITS(opcode); obits = ISP_SCSI_OBITS(opcode); } if (cname == NULL) { cname = tname; ISP_SNPRINTF(tname, sizeof tname, "opcode %x", opcode); } isp_prt(isp, ISP_LOGDEBUG3, "Mailbox Command '%s'", cname); /* * Pick up any additional bits that the caller might have set. */ ibits |= mbp->ibits; obits |= mbp->obits; /* * Mask any bits that the caller wants us to mask */ ibits &= mbp->ibitm; obits &= mbp->obitm; if (ibits == 0 && obits == 0) { mbp->param[0] = MBOX_COMMAND_PARAM_ERROR; isp_prt(isp, ISP_LOGERR, "no parameters for 0x%x", opcode); return; } /* * Get exclusive usage of mailbox registers. */ if (MBOX_ACQUIRE(isp)) { mbp->param[0] = MBOX_REGS_BUSY; goto out; } for (box = 0; box < ISP_NMBOX(isp); box++) { if (ibits & (1 << box)) { isp_prt(isp, ISP_LOGDEBUG3, "IN mbox %d = 0x%04x", box, mbp->param[box]); ISP_WRITE(isp, MBOX_OFF(box), mbp->param[box]); } isp->isp_mboxtmp[box] = mbp->param[box] = 0; } isp->isp_lastmbxcmd = opcode; /* * We assume that we can't overwrite a previous command. */ isp->isp_obits = obits; isp->isp_mboxbsy = 1; /* * Set Host Interrupt condition so that RISC will pick up mailbox regs. */ if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_SET_HOST_INT); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_SET_HOST_INT); } /* * While we haven't finished the command, spin our wheels here. */ MBOX_WAIT_COMPLETE(isp, mbp); /* * Did the command time out? */ if (mbp->param[0] == MBOX_TIMEOUT) { isp->isp_mboxbsy = 0; MBOX_RELEASE(isp); goto out; } /* * Copy back output registers. 
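 * Only mailboxes named in the obits mask are copied out; the rest
 * of mbp->param[] keeps the zeros written when the command was set up.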
*/ for (box = 0; box < ISP_NMBOX(isp); box++) { if (obits & (1 << box)) { mbp->param[box] = isp->isp_mboxtmp[box]; isp_prt(isp, ISP_LOGDEBUG3, "OUT mbox %d = 0x%04x", box, mbp->param[box]); } } isp->isp_mboxbsy = 0; MBOX_RELEASE(isp); out: if (mbp->logval == 0 || mbp->param[0] == MBOX_COMMAND_COMPLETE) return; if ((mbp->param[0] & 0xbfe0) == 0 && (mbp->logval & MBLOGMASK(mbp->param[0])) == 0) return; xname = NULL; sname = ""; switch (mbp->param[0]) { case MBOX_INVALID_COMMAND: xname = "INVALID COMMAND"; break; case MBOX_HOST_INTERFACE_ERROR: xname = "HOST INTERFACE ERROR"; break; case MBOX_TEST_FAILED: xname = "TEST FAILED"; break; case MBOX_COMMAND_ERROR: xname = "COMMAND ERROR"; ISP_SNPRINTF(mname, sizeof(mname), " subcode 0x%x", mbp->param[1]); sname = mname; break; case MBOX_COMMAND_PARAM_ERROR: xname = "COMMAND PARAMETER ERROR"; break; case MBOX_PORT_ID_USED: xname = "PORT ID ALREADY IN USE"; break; case MBOX_LOOP_ID_USED: xname = "LOOP ID ALREADY IN USE"; break; case MBOX_ALL_IDS_USED: xname = "ALL LOOP IDS IN USE"; break; case MBOX_NOT_LOGGED_IN: xname = "NOT LOGGED IN"; break; case MBOX_LINK_DOWN_ERROR: xname = "LINK DOWN ERROR"; break; case MBOX_LOOPBACK_ERROR: xname = "LOOPBACK ERROR"; break; case MBOX_CHECKSUM_ERROR: xname = "CHECKSUM ERROR"; break; case MBOX_INVALID_PRODUCT_KEY: xname = "INVALID PRODUCT KEY"; break; case MBOX_REGS_BUSY: xname = "REGISTERS BUSY"; break; case MBOX_TIMEOUT: xname = "TIMEOUT"; break; default: ISP_SNPRINTF(mname, sizeof mname, "error 0x%x", mbp->param[0]); xname = mname; break; } if (xname) { isp_prt(isp, ISP_LOGALL, "Mailbox Command '%s' failed (%s%s)", cname, xname, sname); } } -static void +static int isp_fw_state(ispsoftc_t *isp, int chan) { if (IS_FC(isp)) { mbreg_t mbs; - fcparam *fcp = FCPARAM(isp, chan); MBSINIT(&mbs, MBOX_GET_FW_STATE, MBLOGALL, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] == MBOX_COMMAND_COMPLETE) { - fcp->isp_fwstate = mbs.param[1]; + return (mbs.param[1]); } } + return (FW_ERROR); } static void isp_spi_update(ispsoftc_t *isp, int chan) { int tgt; mbreg_t mbs; sdparam *sdp; if (IS_FC(isp)) { /* * There are no 'per-bus' settings for Fibre Channel. */ return; } sdp = SDPARAM(isp, chan); sdp->update = 0; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { uint16_t flags, period, offset; int get; if (sdp->isp_devparam[tgt].dev_enable == 0) { sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 0; isp_prt(isp, ISP_LOGDEBUG0, "skipping target %d bus %d update", tgt, chan); continue; } /* * If the goal is to update the status of the device, * take what's in goal_flags and try and set the device * toward that. Otherwise, if we're just refreshing the * current device state, get the current parameters. */ MBSINIT(&mbs, 0, MBLOGALL, 0); /* * Refresh overrides set */ if (sdp->isp_devparam[tgt].dev_refresh) { mbs.param[0] = MBOX_GET_TARGET_PARAMS; get = 1; } else if (sdp->isp_devparam[tgt].dev_update) { mbs.param[0] = MBOX_SET_TARGET_PARAMS; /* * Make sure goal_flags has "Renegotiate on Error" * on and "Freeze Queue on Error" off. */ sdp->isp_devparam[tgt].goal_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].goal_flags &= ~DPARM_QFRZ; mbs.param[2] = sdp->isp_devparam[tgt].goal_flags; /* * Insist that PARITY must be enabled * if SYNC or WIDE is enabled. 
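 * (Only the value sent to the firmware is adjusted here; the stored
 * goal_flags themselves are left alone.)
 */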
*/ if ((mbs.param[2] & (DPARM_SYNC|DPARM_WIDE)) != 0) { mbs.param[2] |= DPARM_PARITY; } if (mbs.param[2] & DPARM_SYNC) { mbs.param[3] = (sdp->isp_devparam[tgt].goal_offset << 8) | (sdp->isp_devparam[tgt].goal_period); } /* * A command completion later that has * RQSTF_NEGOTIATION set can cause * the dev_refresh/announce cycle also. * * Note: It is really important to update our current * flags with at least the state of TAG capabilities- * otherwise we might try and send a tagged command * when we have it all turned off. So change it here * to say that current already matches goal. */ sdp->isp_devparam[tgt].actv_flags &= ~DPARM_TQING; sdp->isp_devparam[tgt].actv_flags |= (sdp->isp_devparam[tgt].goal_flags & DPARM_TQING); isp_prt(isp, ISP_LOGDEBUG0, "bus %d set tgt %d flags 0x%x off 0x%x period 0x%x", chan, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff); get = 0; } else { continue; } mbs.param[1] = (chan << 15) | (tgt << 8); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { continue; } if (get == 0) { sdp->sendmarker = 1; sdp->isp_devparam[tgt].dev_update = 0; sdp->isp_devparam[tgt].dev_refresh = 1; } else { sdp->isp_devparam[tgt].dev_refresh = 0; flags = mbs.param[2]; period = mbs.param[3] & 0xff; offset = mbs.param[3] >> 8; sdp->isp_devparam[tgt].actv_flags = flags; sdp->isp_devparam[tgt].actv_period = period; sdp->isp_devparam[tgt].actv_offset = offset; isp_async(isp, ISPASYNC_NEW_TGT_PARAMS, chan, tgt); } } for (tgt = 0; tgt < MAX_TARGETS; tgt++) { if (sdp->isp_devparam[tgt].dev_update || sdp->isp_devparam[tgt].dev_refresh) { sdp->update = 1; break; } } } static void isp_setdfltsdparm(ispsoftc_t *isp) { int tgt; sdparam *sdp, *sdp1; sdp = SDPARAM(isp, 0); sdp->role = GET_DEFAULT_ROLE(isp, 0); if (IS_DUALBUS(isp)) { sdp1 = sdp + 1; sdp1->role = GET_DEFAULT_ROLE(isp, 1); } else { sdp1 = NULL; } /* * Establish some default parameters. */ sdp->isp_cmd_dma_burst_enable = 0; sdp->isp_data_dma_burst_enabl = 1; sdp->isp_fifo_threshold = 0; sdp->isp_initiator_id = DEFAULT_IID(isp, 0); if (isp->isp_type >= ISP_HA_SCSI_1040) { sdp->isp_async_data_setup = 9; } else { sdp->isp_async_data_setup = 6; } sdp->isp_selection_timeout = 250; sdp->isp_max_queue_depth = MAXISPREQUEST(isp); sdp->isp_tag_aging = 8; sdp->isp_bus_reset_delay = 5; /* * Don't retry selection, busy or queue full automatically- reflect * these back to us. */ sdp->isp_retry_count = 0; sdp->isp_retry_delay = 0; for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].exc_throttle = ISP_EXEC_THROTTLE; sdp->isp_devparam[tgt].dev_enable = 1; } /* * The trick here is to establish a default for the default (honk!) * state (goal_flags). Then try and get the current status from * the card to fill in the current state. We don't, in fact, set * the default to the SAFE default state- that's not the goal state. */ for (tgt = 0; tgt < MAX_TARGETS; tgt++) { uint8_t off, per; sdp->isp_devparam[tgt].actv_offset = 0; sdp->isp_devparam[tgt].actv_period = 0; sdp->isp_devparam[tgt].actv_flags = 0; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags = DPARM_DEFAULT; /* * We default to Wide/Fast for versions less than a 1040 * (unless it's SBus). 
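 * The ladder below picks the fastest sync offset/period pair the
 * chip generation at hand can support.
 */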
*/ if (IS_ULTRA3(isp)) { off = ISP_80M_SYNCPARMS >> 8; per = ISP_80M_SYNCPARMS & 0xff; } else if (IS_ULTRA2(isp)) { off = ISP_40M_SYNCPARMS >> 8; per = ISP_40M_SYNCPARMS & 0xff; } else if (IS_1240(isp)) { off = ISP_20M_SYNCPARMS >> 8; per = ISP_20M_SYNCPARMS & 0xff; } else if ((isp->isp_bustype == ISP_BT_SBUS && isp->isp_type < ISP_HA_SCSI_1020A) || (isp->isp_bustype == ISP_BT_PCI && isp->isp_type < ISP_HA_SCSI_1040) || (isp->isp_clock && isp->isp_clock < 60) || (sdp->isp_ultramode == 0)) { off = ISP_10M_SYNCPARMS >> 8; per = ISP_10M_SYNCPARMS & 0xff; } else { off = ISP_20M_SYNCPARMS_1040 >> 8; per = ISP_20M_SYNCPARMS_1040 & 0xff; } sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset = off; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period = per; } /* * If we're a dual bus card, just copy the data over */ if (sdp1) { *sdp1 = *sdp; sdp1->isp_initiator_id = DEFAULT_IID(isp, 1); } /* * If we've not been told to avoid reading NVRAM, try and read it. * If we're successful reading it, we can then return because NVRAM * will tell us what the desired settings are. Otherwise, we establish * some reasonable 'fake' nvram and goal defaults. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { mbreg_t mbs; if (isp_read_nvram(isp, 0) == 0) { if (IS_DUALBUS(isp)) { if (isp_read_nvram(isp, 1) == 0) { return; } } } MBSINIT(&mbs, MBOX_GET_ACT_NEG_STATE, MBLOGNONE, 0); isp_mboxcmd(isp, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { sdp->isp_req_ack_active_neg = 1; sdp->isp_data_line_active_neg = 1; if (sdp1) { sdp1->isp_req_ack_active_neg = 1; sdp1->isp_data_line_active_neg = 1; } } else { sdp->isp_req_ack_active_neg = (mbs.param[1] >> 4) & 0x1; sdp->isp_data_line_active_neg = (mbs.param[1] >> 5) & 0x1; if (sdp1) { sdp1->isp_req_ack_active_neg = (mbs.param[2] >> 4) & 0x1; sdp1->isp_data_line_active_neg = (mbs.param[2] >> 5) & 0x1; } } } } static void isp_setdfltfcparm(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); /* * Establish some default parameters. */ fcp->role = GET_DEFAULT_ROLE(isp, chan); fcp->isp_maxalloc = ICB_DFLT_ALLOC; fcp->isp_retry_delay = ICB_DFLT_RDELAY; fcp->isp_retry_count = ICB_DFLT_RCOUNT; fcp->isp_loopid = DEFAULT_LOOPID(isp, chan); fcp->isp_wwnn_nvram = DEFAULT_NODEWWN(isp, chan); fcp->isp_wwpn_nvram = DEFAULT_PORTWWN(isp, chan); fcp->isp_fwoptions = 0; fcp->isp_lasthdl = NIL_HANDLE; if (IS_24XX(isp)) { fcp->isp_fwoptions |= ICB2400_OPT1_FAIRNESS; fcp->isp_fwoptions |= ICB2400_OPT1_HARD_ADDRESS; if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) { fcp->isp_fwoptions |= ICB2400_OPT1_FULL_DUPLEX; } fcp->isp_fwoptions |= ICB2400_OPT1_BOTH_WWNS; } else { fcp->isp_fwoptions |= ICBOPT_FAIRNESS; fcp->isp_fwoptions |= ICBOPT_PDBCHANGE_AE; fcp->isp_fwoptions |= ICBOPT_HARD_ADDRESS; if (isp->isp_confopts & ISP_CFG_FULL_DUPLEX) { fcp->isp_fwoptions |= ICBOPT_FULL_DUPLEX; } /* * Make sure this is turned off now until we get * extended options from NVRAM */ fcp->isp_fwoptions &= ~ICBOPT_EXTENDED; } /* * Now try and read NVRAM unless told to not do so. * This will set fcparam's isp_wwnn_nvram && isp_wwpn_nvram. */ if ((isp->isp_confopts & ISP_CFG_NONVRAM) == 0) { int i, j = 0; /* * Give a couple of tries at reading NVRAM. 
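 * If both attempts fail, ISP_CFG_NONVRAM is latched so that later
 * passes don't keep retrying a part we can't read.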
*/ for (i = 0; i < 2; i++) { j = isp_read_nvram(isp, chan); if (j == 0) { break; } } if (j) { isp->isp_confopts |= ISP_CFG_NONVRAM; } } fcp->isp_wwnn = ACTIVE_NODEWWN(isp, chan); fcp->isp_wwpn = ACTIVE_PORTWWN(isp, chan); isp_prt(isp, ISP_LOGCONFIG, "Chan %d 0x%08x%08x/0x%08x%08x Role %s", chan, (uint32_t) (fcp->isp_wwnn >> 32), (uint32_t) (fcp->isp_wwnn), (uint32_t) (fcp->isp_wwpn >> 32), (uint32_t) (fcp->isp_wwpn), isp_class3_roles[fcp->role]); } /* * Re-initialize the ISP and complete all orphaned commands * with a 'botched' notice. The reset/init routines should * not disturb an already active list of commands. */ int isp_reinit(ispsoftc_t *isp, int do_load_defaults) { int i, res = 0; if (isp->isp_state != ISP_RESETSTATE) isp_reset(isp, do_load_defaults); if (isp->isp_state != ISP_RESETSTATE) { res = EIO; isp_prt(isp, ISP_LOGERR, "%s: cannot reset card", __func__); ISP_DISABLE_INTS(isp); goto cleanup; } isp_init(isp); if (isp->isp_state > ISP_RESETSTATE && isp->isp_state != ISP_RUNSTATE) { res = EIO; isp_prt(isp, ISP_LOGERR, "%s: cannot init card", __func__); ISP_DISABLE_INTS(isp); if (IS_FC(isp)) { /* * If we're in ISP_ROLE_NONE, turn off the lasers. */ if (!IS_24XX(isp)) { ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } } cleanup: isp->isp_nactive = 0; isp_clear_commands(isp); if (IS_FC(isp)) { for (i = 0; i < isp->isp_nchan; i++) - ISP_MARK_PORTDB(isp, i, -1); + isp_mark_portdb(isp, i, -1); } return (res); } /* * NVRAM Routines */ static int isp_read_nvram(ispsoftc_t *isp, int bus) { int i, amt, retval; uint8_t csum, minversion; union { uint8_t _x[ISP2400_NVRAM_SIZE]; uint16_t _s[ISP2400_NVRAM_SIZE>>1]; } _n; #define nvram_data _n._x #define nvram_words _n._s if (IS_24XX(isp)) { return (isp_read_nvram_2400(isp, nvram_data)); } else if (IS_FC(isp)) { amt = ISP2100_NVRAM_SIZE; minversion = 1; } else if (IS_ULTRA2(isp)) { amt = ISP1080_NVRAM_SIZE; minversion = 0; } else { amt = ISP_NVRAM_SIZE; minversion = 2; } for (i = 0; i < amt>>1; i++) { isp_rdnvram_word(isp, i, &nvram_words[i]); } if (nvram_data[0] != 'I' || nvram_data[1] != 'S' || nvram_data[2] != 'P') { if (isp->isp_bustype != ISP_BT_SBUS) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header"); isp_prt(isp, ISP_LOGDEBUG0, "%x %x %x", nvram_data[0], nvram_data[1], nvram_data[2]); } retval = -1; goto out; } for (csum = 0, i = 0; i < amt; i++) { csum += nvram_data[i]; } if (csum != 0) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM checksum"); retval = -1; goto out; } if (ISP_NVRAM_VERSION(nvram_data) < minversion) { isp_prt(isp, ISP_LOGWARN, "version %d NVRAM not understood", ISP_NVRAM_VERSION(nvram_data)); retval = -1; goto out; } if (IS_ULTRA3(isp)) { isp_parse_nvram_12160(isp, bus, nvram_data); } else if (IS_1080(isp)) { isp_parse_nvram_1080(isp, bus, nvram_data); } else if (IS_1280(isp) || IS_1240(isp)) { isp_parse_nvram_1080(isp, bus, nvram_data); } else if (IS_SCSI(isp)) { isp_parse_nvram_1020(isp, nvram_data); } else { isp_parse_nvram_2100(isp, nvram_data); } retval = 0; out: return (retval); #undef nvram_data #undef nvram_words } static int isp_read_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data) { int retval = 0; uint32_t addr, csum, lwrds, *dptr; if (isp->isp_port) { addr = ISP2400_NVRAM_PORT1_ADDR; } else { addr = ISP2400_NVRAM_PORT0_ADDR; } dptr = (uint32_t *) nvram_data; for (lwrds = 0; lwrds < ISP2400_NVRAM_SIZE >> 2; lwrds++) { 
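/* fetch NVRAM one 32-bit word at a time via the flash interface */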
isp_rd_2400_nvram(isp, addr++, dptr++); } if (nvram_data[0] != 'I' || nvram_data[1] != 'S' || nvram_data[2] != 'P') { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header (%x %x %x)", nvram_data[0], nvram_data[1], nvram_data[2]); retval = -1; goto out; } dptr = (uint32_t *) nvram_data; for (csum = 0, lwrds = 0; lwrds < ISP2400_NVRAM_SIZE >> 2; lwrds++) { uint32_t tmp; ISP_IOXGET_32(isp, &dptr[lwrds], tmp); csum += tmp; } if (csum != 0) { isp_prt(isp, ISP_LOGWARN, "invalid NVRAM checksum"); retval = -1; goto out; } isp_parse_nvram_2400(isp, nvram_data); out: return (retval); } static void isp_rdnvram_word(ispsoftc_t *isp, int wo, uint16_t *rp) { int i, cbits; uint16_t bit, rqst, junk; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); ISP_DELAY(10); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); ISP_DELAY(10); if (IS_FC(isp)) { wo &= ((ISP2100_NVRAM_SIZE >> 1) - 1); if (IS_2312(isp) && isp->isp_port) { wo += 128; } rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else if (IS_ULTRA2(isp)) { wo &= ((ISP1080_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 8) | wo; cbits = 10; } else { wo &= ((ISP_NVRAM_SIZE >> 1) - 1); rqst = (ISP_NVRAM_READ << 6) | wo; cbits = 8; } /* * Clock the word select request out... */ for (i = cbits; i >= 0; i--) { if ((rqst >> i) & 1) { bit = BIU_NVRAM_SELECT | BIU_NVRAM_DATAOUT; } else { bit = BIU_NVRAM_SELECT; } ISP_WRITE(isp, BIU_NVRAM, bit); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_WRITE(isp, BIU_NVRAM, bit | BIU_NVRAM_CLOCK); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_WRITE(isp, BIU_NVRAM, bit); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ } /* * Now read the result back in (bits come back in MSB format). */ *rp = 0; for (i = 0; i < 16; i++) { uint16_t rv; *rp <<= 1; ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT|BIU_NVRAM_CLOCK); ISP_DELAY(10); rv = ISP_READ(isp, BIU_NVRAM); if (rv & BIU_NVRAM_DATAIN) { *rp |= 1; } ISP_DELAY(10); ISP_WRITE(isp, BIU_NVRAM, BIU_NVRAM_SELECT); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ } ISP_WRITE(isp, BIU_NVRAM, 0); ISP_DELAY(10); junk = ISP_READ(isp, BIU_NVRAM); /* force PCI flush */ ISP_SWIZZLE_NVRAM_WORD(isp, rp); } static void isp_rd_2400_nvram(ispsoftc_t *isp, uint32_t addr, uint32_t *rp) { int loops = 0; uint32_t base = 0x7ffe0000; uint32_t tmp = 0; if (IS_25XX(isp)) { base = 0x7ff00000 | 0x48000; } ISP_WRITE(isp, BIU2400_FLASH_ADDR, base | addr); for (loops = 0; loops < 5000; loops++) { ISP_DELAY(10); tmp = ISP_READ(isp, BIU2400_FLASH_ADDR); if ((tmp & (1U << 31)) != 0) { break; } } if (tmp & (1U << 31)) { *rp = ISP_READ(isp, BIU2400_FLASH_DATA); ISP_SWIZZLE_NVRAM_LONG(isp, rp); } else { *rp = 0xffffffff; } } static void isp_parse_nvram_1020(ispsoftc_t *isp, uint8_t *nvram_data) { sdparam *sdp = SDPARAM(isp, 0); int tgt; sdp->isp_fifo_threshold = ISP_NVRAM_FIFO_THRESHOLD(nvram_data) | (ISP_NVRAM_FIFO_THRESHOLD_128(nvram_data) << 2); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP_NVRAM_INITIATOR_ID(nvram_data); sdp->isp_bus_reset_delay = ISP_NVRAM_BUS_RESET_DELAY(nvram_data); sdp->isp_retry_count = ISP_NVRAM_BUS_RETRY_COUNT(nvram_data); sdp->isp_retry_delay = ISP_NVRAM_BUS_RETRY_DELAY(nvram_data); sdp->isp_async_data_setup = ISP_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data); if (isp->isp_type >= ISP_HA_SCSI_1040) { if (sdp->isp_async_data_setup < 9) { sdp->isp_async_data_setup = 9; } } else { if (sdp->isp_async_data_setup != 6) { sdp->isp_async_data_setup = 6; } } 
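/* the remaining per-bus settings are taken from NVRAM as-is */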
sdp->isp_req_ack_active_neg = ISP_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data); sdp->isp_data_line_active_neg = ISP_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data); sdp->isp_data_dma_burst_enabl = ISP_NVRAM_DATA_DMA_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP_NVRAM_CMD_DMA_BURST_ENABLE(nvram_data); sdp->isp_tag_aging = ISP_NVRAM_TAG_AGE_LIMIT(nvram_data); sdp->isp_selection_timeout = ISP_NVRAM_SELECTION_TIMEOUT(nvram_data); sdp->isp_max_queue_depth = ISP_NVRAM_MAX_QUEUE_DEPTH(nvram_data); sdp->isp_fast_mttr = ISP_NVRAM_FAST_MTTR_ENABLE(nvram_data); for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt); sdp->isp_devparam[tgt].exc_throttle = ISP_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt); sdp->isp_devparam[tgt].nvrm_offset = ISP_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt); sdp->isp_devparam[tgt].nvrm_period = ISP_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt); /* * We probably shouldn't lie about this, but * it makes it much safer if we limit NVRAM values * to sanity. */ if (isp->isp_type < ISP_HA_SCSI_1040) { /* * If we're not ultra, we can't possibly * be a shorter period than this. */ if (sdp->isp_devparam[tgt].nvrm_period < 0x19) { sdp->isp_devparam[tgt].nvrm_period = 0x19; } if (sdp->isp_devparam[tgt].nvrm_offset > 0xc) { sdp->isp_devparam[tgt].nvrm_offset = 0x0c; } } else { if (sdp->isp_devparam[tgt].nvrm_offset > 0x8) { sdp->isp_devparam[tgt].nvrm_offset = 0x8; } } sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP_NVRAM_TGT_RENEG(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP_NVRAM_TGT_TQING(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP_NVRAM_TGT_SYNC(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP_NVRAM_TGT_WIDE(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP_NVRAM_TGT_PARITY(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP_NVRAM_TGT_DISC(nvram_data, tgt)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; /* we don't know */ sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_1080(ispsoftc_t *isp, int bus, uint8_t *nvram_data) { sdparam *sdp = SDPARAM(isp, bus); int tgt; sdp->isp_fifo_threshold = ISP1080_NVRAM_FIFO_THRESHOLD(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP1080_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = ISP1080_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP1080_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = ISP1080_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = ISP1080_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP1080_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP1080_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP1080_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP1080_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP1080_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable =
ISP1080_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].exc_throttle = ISP1080_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_offset = ISP1080_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_period = ISP1080_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP1080_NVRAM_TGT_RENEG(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP1080_NVRAM_TGT_TQING(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP1080_NVRAM_TGT_SYNC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP1080_NVRAM_TGT_WIDE(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP1080_NVRAM_TGT_PARITY(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP1080_NVRAM_TGT_DISC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; sdp->isp_devparam[tgt].goal_offset = sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_12160(ispsoftc_t *isp, int bus, uint8_t *nvram_data) { sdparam *sdp = SDPARAM(isp, bus); int tgt; sdp->isp_fifo_threshold = ISP12160_NVRAM_FIFO_THRESHOLD(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) sdp->isp_initiator_id = ISP12160_NVRAM_INITIATOR_ID(nvram_data, bus); sdp->isp_bus_reset_delay = ISP12160_NVRAM_BUS_RESET_DELAY(nvram_data, bus); sdp->isp_retry_count = ISP12160_NVRAM_BUS_RETRY_COUNT(nvram_data, bus); sdp->isp_retry_delay = ISP12160_NVRAM_BUS_RETRY_DELAY(nvram_data, bus); sdp->isp_async_data_setup = ISP12160_NVRAM_ASYNC_DATA_SETUP_TIME(nvram_data, bus); sdp->isp_req_ack_active_neg = ISP12160_NVRAM_REQ_ACK_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_line_active_neg = ISP12160_NVRAM_DATA_LINE_ACTIVE_NEGATION(nvram_data, bus); sdp->isp_data_dma_burst_enabl = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_cmd_dma_burst_enable = ISP12160_NVRAM_BURST_ENABLE(nvram_data); sdp->isp_selection_timeout = ISP12160_NVRAM_SELECTION_TIMEOUT(nvram_data, bus); sdp->isp_max_queue_depth = ISP12160_NVRAM_MAX_QUEUE_DEPTH(nvram_data, bus); for (tgt = 0; tgt < MAX_TARGETS; tgt++) { sdp->isp_devparam[tgt].dev_enable = ISP12160_NVRAM_TGT_DEVICE_ENABLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].exc_throttle = ISP12160_NVRAM_TGT_EXEC_THROTTLE(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_offset = ISP12160_NVRAM_TGT_SYNC_OFFSET(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_period = ISP12160_NVRAM_TGT_SYNC_PERIOD(nvram_data, tgt, bus); sdp->isp_devparam[tgt].nvrm_flags = 0; if (ISP12160_NVRAM_TGT_RENEG(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_RENEG; sdp->isp_devparam[tgt].nvrm_flags |= DPARM_ARQ; if (ISP12160_NVRAM_TGT_TQING(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_TQING; if (ISP12160_NVRAM_TGT_SYNC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_SYNC; if (ISP12160_NVRAM_TGT_WIDE(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_WIDE; if (ISP12160_NVRAM_TGT_PARITY(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_PARITY; if (ISP12160_NVRAM_TGT_DISC(nvram_data, tgt, bus)) sdp->isp_devparam[tgt].nvrm_flags |= DPARM_DISC; sdp->isp_devparam[tgt].actv_flags = 0; sdp->isp_devparam[tgt].goal_offset 
= sdp->isp_devparam[tgt].nvrm_offset; sdp->isp_devparam[tgt].goal_period = sdp->isp_devparam[tgt].nvrm_period; sdp->isp_devparam[tgt].goal_flags = sdp->isp_devparam[tgt].nvrm_flags; } } static void isp_parse_nvram_2100(ispsoftc_t *isp, uint8_t *nvram_data) { fcparam *fcp = FCPARAM(isp, 0); uint64_t wwn; /* * There is NVRAM storage for both Port and Node entities- * but the Node entity appears to be unused on all the cards * I can find. However, we should account for this being set * at some point in the future. * * Qlogic WWNs have an NAA of 2, but usually nothing shows up in * bits 48..60. In the case of the 2202, it appears that they do * use bit 48 to distinguish between the two instances on the card. * The 2204, which I've never seen, *probably* extends this method. */ wwn = ISP2100_NVRAM_PORT_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Port WWN 0x%08x%08x", (uint32_t) (wwn >> 32), (uint32_t) (wwn)); if ((wwn >> 60) == 0) { wwn |= (((uint64_t) 2)<< 60); } } fcp->isp_wwpn_nvram = wwn; if (IS_2200(isp) || IS_23XX(isp)) { wwn = ISP2100_NVRAM_NODE_NAME(nvram_data); if (wwn) { isp_prt(isp, ISP_LOGCONFIG, "NVRAM Node WWN 0x%08x%08x", (uint32_t) (wwn >> 32), (uint32_t) (wwn)); if ((wwn >> 60) == 0) { wwn |= (((uint64_t) 2)<< 60); } } else { wwn = fcp->isp_wwpn_nvram & ~((uint64_t) 0xfff << 48); } } else { wwn &= ~((uint64_t) 0xfff << 48); } fcp->isp_wwnn_nvram = wwn; fcp->isp_maxalloc = ISP2100_NVRAM_MAXIOCBALLOCATION(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNFSZ) == 0) { DEFAULT_FRAMESIZE(isp) = ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data); } fcp->isp_retry_delay = ISP2100_NVRAM_RETRY_DELAY(nvram_data); fcp->isp_retry_count = ISP2100_NVRAM_RETRY_COUNT(nvram_data); if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) { fcp->isp_loopid = ISP2100_NVRAM_HARDLOOPID(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNEXCTHROTTLE) == 0) { DEFAULT_EXEC_THROTTLE(isp) = ISP2100_NVRAM_EXECUTION_THROTTLE(nvram_data); } fcp->isp_fwoptions = ISP2100_NVRAM_OPTIONS(nvram_data); isp_prt(isp, ISP_LOGDEBUG0, "NVRAM 0x%08x%08x 0x%08x%08x maxalloc %d maxframelen %d", (uint32_t) (fcp->isp_wwnn_nvram >> 32), (uint32_t) fcp->isp_wwnn_nvram, (uint32_t) (fcp->isp_wwpn_nvram >> 32), (uint32_t) fcp->isp_wwpn_nvram, ISP2100_NVRAM_MAXIOCBALLOCATION(nvram_data), ISP2100_NVRAM_MAXFRAMELENGTH(nvram_data)); isp_prt(isp, ISP_LOGDEBUG0, "execthrottle %d fwoptions 0x%x hardloop %d tov %d", ISP2100_NVRAM_EXECUTION_THROTTLE(nvram_data), ISP2100_NVRAM_OPTIONS(nvram_data), ISP2100_NVRAM_HARDLOOPID(nvram_data), ISP2100_NVRAM_TOV(nvram_data)); fcp->isp_xfwoptions = ISP2100_XFW_OPTIONS(nvram_data); fcp->isp_zfwoptions = ISP2100_ZFW_OPTIONS(nvram_data); isp_prt(isp, ISP_LOGDEBUG0, "xfwoptions 0x%x zfw options 0x%x", ISP2100_XFW_OPTIONS(nvram_data), ISP2100_ZFW_OPTIONS(nvram_data)); } static void isp_parse_nvram_2400(ispsoftc_t *isp, uint8_t *nvram_data) { fcparam *fcp = FCPARAM(isp, 0); uint64_t wwn; isp_prt(isp, ISP_LOGDEBUG0, "NVRAM 0x%08x%08x 0x%08x%08x exchg_cnt %d maxframelen %d", (uint32_t) (ISP2400_NVRAM_NODE_NAME(nvram_data) >> 32), (uint32_t) (ISP2400_NVRAM_NODE_NAME(nvram_data)), (uint32_t) (ISP2400_NVRAM_PORT_NAME(nvram_data) >> 32), (uint32_t) (ISP2400_NVRAM_PORT_NAME(nvram_data)), ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data), ISP2400_NVRAM_MAXFRAMELENGTH(nvram_data)); isp_prt(isp, ISP_LOGDEBUG0, "NVRAM execthr %d loopid %d fwopt1 0x%x fwopt2 0x%x fwopt3 0x%x", ISP2400_NVRAM_EXECUTION_THROTTLE(nvram_data), ISP2400_NVRAM_HARDLOOPID(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS1(nvram_data), 
ISP2400_NVRAM_FIRMWARE_OPTIONS2(nvram_data), ISP2400_NVRAM_FIRMWARE_OPTIONS3(nvram_data)); wwn = ISP2400_NVRAM_PORT_NAME(nvram_data); fcp->isp_wwpn_nvram = wwn; wwn = ISP2400_NVRAM_NODE_NAME(nvram_data); if (wwn) { if ((wwn >> 60) != 2 && (wwn >> 60) != 5) { wwn = 0; } } if (wwn == 0 && (fcp->isp_wwpn_nvram >> 60) == 2) { wwn = fcp->isp_wwpn_nvram; wwn &= ~((uint64_t) 0xfff << 48); } fcp->isp_wwnn_nvram = wwn; if (ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data)) { fcp->isp_maxalloc = ISP2400_NVRAM_EXCHANGE_COUNT(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNFSZ) == 0) { DEFAULT_FRAMESIZE(isp) = ISP2400_NVRAM_MAXFRAMELENGTH(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNLOOPID) == 0) { fcp->isp_loopid = ISP2400_NVRAM_HARDLOOPID(nvram_data); } if ((isp->isp_confopts & ISP_CFG_OWNEXCTHROTTLE) == 0) { DEFAULT_EXEC_THROTTLE(isp) = ISP2400_NVRAM_EXECUTION_THROTTLE(nvram_data); } fcp->isp_fwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS1(nvram_data); fcp->isp_xfwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS2(nvram_data); fcp->isp_zfwoptions = ISP2400_NVRAM_FIRMWARE_OPTIONS3(nvram_data); } Index: projects/powernv/dev/isp/isp_freebsd.c =================================================================== --- projects/powernv/dev/isp/isp_freebsd.c (revision 291050) +++ projects/powernv/dev/isp/isp_freebsd.c (revision 291051) @@ -1,5580 +1,5580 @@ /*- * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Platform (FreeBSD) dependent common attachment code for Qlogic adapters. 
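 * It provides the CAM SIM registration, the isp ioctl interface, the
 * optional target mode support, and the loop-state timers used by the
 * Fibre Channel variants.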
*/ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <dev/isp/isp_freebsd.h> #include <sys/unistd.h> #include <sys/kthread.h> #include <sys/conf.h> #include <sys/module.h> #include <sys/ioccom.h> #include <dev/isp/isp_ioctl.h> #include <sys/devicestat.h> #include <cam/cam_periph.h> #include <cam/cam_xpt_periph.h> #if __FreeBSD_version < 800002 #define THREAD_CREATE kthread_create #else #define THREAD_CREATE kproc_create #endif MODULE_VERSION(isp, 1); MODULE_DEPEND(isp, cam, 1, 1, 1); int isp_announced = 0; int isp_fabric_hysteresis = 5; int isp_loop_down_limit = 60; /* default loop down limit */ int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */ int isp_gone_device_time = 30; /* grace time before reporting device lost */ static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s"; static void isp_freeze_loopdown(ispsoftc_t *, int, char *); static d_ioctl_t ispioctl; static void isp_intr_enable(void *); static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); static void isp_poll(struct cam_sim *); static timeout_t isp_watchdog; static timeout_t isp_gdt; static task_fn_t isp_gdt_task; static timeout_t isp_ldt; static task_fn_t isp_ldt_task; static void isp_kthread(void *); static void isp_action(struct cam_sim *, union ccb *); static int isp_timer_count; static void isp_timer(void *); static struct cdevsw isp_cdevsw = { .d_version = D_VERSION, .d_ioctl = ispioctl, .d_name = "isp", }; static int isp_role_sysctl(SYSCTL_HANDLER_ARGS) { ispsoftc_t *isp = (ispsoftc_t *)arg1; int chan = arg2; int error, old, value; value = FCPARAM(isp, chan)->role; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH) return (EINVAL); ISP_LOCK(isp); old = FCPARAM(isp, chan)->role; /* We don't allow target mode switch from here. */ value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR); /* If nothing has changed -- we are done. */ if (value == old) { ISP_UNLOCK(isp); return (0); } /* Actually change the role. */ error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value); ISP_UNLOCK(isp); return (error); } static int isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan) { struct ccb_setasync csa; struct cam_sim *sim; struct cam_path *path; /* * Construct our SIM entry.
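 * One SIM and one wildcard path get created per channel.  Both opening
 * counts passed to cam_sim_alloc() below are isp_maxcmds, since every
 * channel shares the single device queue allocated in isp_attach().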
*/ sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, device_get_unit(isp->isp_dev), &isp->isp_osinfo.lock, isp->isp_maxcmds, isp->isp_maxcmds, devq); if (sim == NULL) { return (ENOMEM); } ISP_LOCK(isp); if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) { ISP_UNLOCK(isp); cam_sim_free(sim, FALSE); return (EIO); } ISP_UNLOCK(isp); if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(sim)); ISP_UNLOCK(isp); cam_sim_free(sim, FALSE); return (ENXIO); } xpt_setup_ccb(&csa.ccb_h, path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = isp_cam_async; csa.callback_arg = sim; ISP_LOCK(isp); xpt_action((union ccb *)&csa); ISP_UNLOCK(isp); if (IS_SCSI(isp)) { struct isp_spi *spi = ISP_SPI_PC(isp, chan); spi->sim = sim; spi->path = path; } else { fcparam *fcp = FCPARAM(isp, chan); struct isp_fc *fc = ISP_FC_PC(isp, chan); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev); struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev); char name[16]; ISP_LOCK(isp); fc->sim = sim; fc->path = path; fc->isp = isp; fc->ready = 1; callout_init_mtx(&fc->ldt, &isp->isp_osinfo.lock, 0); callout_init_mtx(&fc->gdt, &isp->isp_osinfo.lock, 0); TASK_INIT(&fc->ltask, 1, isp_ldt_task, fc); TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc); /* * We start by being "loop down" if we have an initiator role */ if (fcp->role & ISP_ROLE_INITIATOR) { isp_freeze_loopdown(isp, chan, "isp_attach"); callout_reset(&fc->ldt, isp_quickboot_time * hz, isp_ldt, fc); isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Starting Initial Loop Down Timer @ %lu", (unsigned long) time_uptime); } ISP_UNLOCK(isp); if (THREAD_CREATE(isp_kthread, fc, &fc->kproc, 0, 0, "%s: fc_thrd%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) { xpt_free_path(fc->path); ISP_LOCK(isp); if (callout_active(&fc->ldt)) callout_stop(&fc->ldt); xpt_bus_deregister(cam_sim_path(fc->sim)); ISP_UNLOCK(isp); cam_sim_free(fc->sim, FALSE); return (ENOMEM); } fc->num_threads += 1; if (chan > 0) { snprintf(name, sizeof(name), "chan%d", chan); tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, name, CTLFLAG_RW, 0, "Virtual channel"); } SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", CTLFLAG_RD, &fcp->isp_wwnn, "World Wide Node Name"); SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLFLAG_RD, &fcp->isp_wwpn, "World Wide Port Name"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0, "Loop Down Limit"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0, "Gone Device Time"); #if defined(ISP_TARGET_MODE) && defined(DEBUG) SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0, "Cause a Lost Frame on a Read"); #endif SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "role", CTLTYPE_INT | CTLFLAG_RW, isp, chan, isp_role_sysctl, "I", "Current role"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0, "Connection speed in gigabits"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0, "Link state"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0, "Firmware state"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "loopstate", 
CTLFLAG_RD, &fcp->isp_loopstate, 0, "Loop state"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "topo", CTLFLAG_RD, &fcp->isp_topo, 0, "Connection topology"); } return (0); } static void isp_detach_chan(ispsoftc_t *isp, int chan) { struct cam_sim *sim; struct cam_path *path; struct ccb_setasync csa; int *num_threads; ISP_GET_PC(isp, chan, sim, sim); ISP_GET_PC(isp, chan, path, path); ISP_GET_PC_ADDR(isp, chan, num_threads, num_threads); xpt_setup_ccb(&csa.ccb_h, path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = isp_cam_async; csa.callback_arg = sim; xpt_action((union ccb *)&csa); xpt_free_path(path); xpt_bus_deregister(cam_sim_path(sim)); cam_sim_free(sim, FALSE); /* Wait for the channel's spawned threads to exit. */ wakeup(isp->isp_osinfo.pc.ptr); while (*num_threads != 0) mtx_sleep(isp, &isp->isp_osinfo.lock, PRIBIO, "isp_reap", 100); } int isp_attach(ispsoftc_t *isp) { const char *nu = device_get_nameunit(isp->isp_osinfo.dev); int du = device_get_unit(isp->isp_dev); int chan; isp->isp_osinfo.ehook.ich_func = isp_intr_enable; isp->isp_osinfo.ehook.ich_arg = isp; /* * Haha. Set this first, because if we're loaded as a module isp_intr_enable * will be called right away, which will clear isp_osinfo.ehook_active, * which would be unwise to then set again later. */ isp->isp_osinfo.ehook_active = 1; if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { isp_prt(isp, ISP_LOGERR, "could not establish interrupt enable hook"); return (EIO); } /* * Create the device queue for our SIM(s). */ isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds); if (isp->isp_osinfo.devq == NULL) { config_intrhook_disestablish(&isp->isp_osinfo.ehook); return (EIO); } for (chan = 0; chan < isp->isp_nchan; chan++) { if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) { goto unwind; } } callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_osinfo.lock, 0); isp_timer_count = hz >> 2; callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); isp->isp_osinfo.timer_active = 1; isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu); if (isp->isp_osinfo.cdev) { isp->isp_osinfo.cdev->si_drv1 = isp; } return (0); unwind: while (--chan >= 0) { struct cam_sim *sim; struct cam_path *path; ISP_GET_PC(isp, chan, sim, sim); ISP_GET_PC(isp, chan, path, path); xpt_free_path(path); ISP_LOCK(isp); xpt_bus_deregister(cam_sim_path(sim)); ISP_UNLOCK(isp); cam_sim_free(sim, FALSE); } if (isp->isp_osinfo.ehook_active) { config_intrhook_disestablish(&isp->isp_osinfo.ehook); isp->isp_osinfo.ehook_active = 0; } if (isp->isp_osinfo.cdev) { destroy_dev(isp->isp_osinfo.cdev); isp->isp_osinfo.cdev = NULL; } cam_simq_free(isp->isp_osinfo.devq); isp->isp_osinfo.devq = NULL; return (-1); } int isp_detach(ispsoftc_t *isp) { struct cam_sim *sim; int chan; ISP_LOCK(isp); for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) { ISP_GET_PC(isp, chan, sim, sim); if (sim->refcount > 2) { ISP_UNLOCK(isp); return (EBUSY); } } /* Tell spawned threads that we're exiting.
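 * The per-channel worker threads are expected to re-check
 * isp_osinfo.is_exiting once woken and then exit, dropping their
 * channel's num_threads count, which isp_detach_chan() sleeps on.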
*/ isp->isp_osinfo.is_exiting = 1; if (isp->isp_osinfo.timer_active) { callout_stop(&isp->isp_osinfo.tmo); isp->isp_osinfo.timer_active = 0; } for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) isp_detach_chan(isp, chan); ISP_UNLOCK(isp); if (isp->isp_osinfo.cdev) { destroy_dev(isp->isp_osinfo.cdev); isp->isp_osinfo.cdev = NULL; } if (isp->isp_osinfo.ehook_active) { config_intrhook_disestablish(&isp->isp_osinfo.ehook); isp->isp_osinfo.ehook_active = 0; } if (isp->isp_osinfo.devq != NULL) { cam_simq_free(isp->isp_osinfo.devq); isp->isp_osinfo.devq = NULL; } return (0); } static void isp_freeze_loopdown(ispsoftc_t *isp, int chan, char *msg) { if (IS_FC(isp)) { struct isp_fc *fc = ISP_FC_PC(isp, chan); if (fc->simqfrozen == 0) { isp_prt(isp, ISP_LOGDEBUG0, "Chan %d %s -- freeze simq (loopdown)", chan, msg); fc->simqfrozen = SIMQFRZ_LOOPDOWN; #if __FreeBSD_version >= 1000039 xpt_hold_boot(); #endif xpt_freeze_simq(fc->sim, 1); } else { isp_prt(isp, ISP_LOGDEBUG0, "Chan %d %s -- mark frozen (loopdown)", chan, msg); fc->simqfrozen |= SIMQFRZ_LOOPDOWN; } } } static void isp_unfreeze_loopdown(ispsoftc_t *isp, int chan) { if (IS_FC(isp)) { struct isp_fc *fc = ISP_FC_PC(isp, chan); int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN; fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN; if (wasfrozen && fc->simqfrozen == 0) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d releasing simq", __func__, chan); xpt_release_simq(fc->sim, 1); #if __FreeBSD_version >= 1000039 xpt_release_boot(); #endif } } } static int ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td) { ispsoftc_t *isp; int nr, chan, retval = ENOTTY; isp = dev->si_drv1; switch (c) { case ISP_SDBLEV: { int olddblev = isp->isp_dblev; isp->isp_dblev = *(int *)addr; *(int *)addr = olddblev; retval = 0; break; } case ISP_GETROLE: chan = *(int *)addr; if (chan < 0 || chan >= isp->isp_nchan) { retval = ENXIO; break; } if (IS_FC(isp)) { *(int *)addr = FCPARAM(isp, chan)->role; } else { *(int *)addr = SDPARAM(isp, chan)->role; } retval = 0; break; case ISP_SETROLE: nr = *(int *)addr; chan = nr >> 8; if (chan < 0 || chan >= isp->isp_nchan) { retval = ENXIO; break; } nr &= 0xff; if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { retval = EINVAL; break; } ISP_LOCK(isp); if (IS_FC(isp)) *(int *)addr = FCPARAM(isp, chan)->role; else *(int *)addr = SDPARAM(isp, chan)->role; retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr); ISP_UNLOCK(isp); break; case ISP_RESETHBA: ISP_LOCK(isp); isp_reinit(isp, 0); ISP_UNLOCK(isp); retval = 0; break; case ISP_RESCAN: if (IS_FC(isp)) { chan = *(int *)addr; if (chan < 0 || chan >= isp->isp_nchan) { retval = ENXIO; break; } ISP_LOCK(isp); if (isp_fc_runstate(isp, chan, 5 * 1000000)) { retval = EIO; } else { retval = 0; } ISP_UNLOCK(isp); } break; case ISP_FC_LIP: if (IS_FC(isp)) { chan = *(int *)addr; if (chan < 0 || chan >= isp->isp_nchan) { retval = ENXIO; break; } ISP_LOCK(isp); if (isp_control(isp, ISPCTL_SEND_LIP, chan)) { retval = EIO; } else { retval = 0; } ISP_UNLOCK(isp); } break; case ISP_FC_GETDINFO: { struct isp_fc_device *ifc = (struct isp_fc_device *) addr; fcportdb_t *lp; if (IS_SCSI(isp)) { break; } if (ifc->loopid >= MAX_FC_TARG) { retval = EINVAL; break; } lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid]; if (lp->state != FC_PORTDB_STATE_NIL) { ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT; ifc->loopid = lp->handle; ifc->portid = lp->portid; ifc->node_wwn = lp->node_wwn; ifc->port_wwn = lp->port_wwn; retval = 0; } else { retval = ENODEV; }
break; } case ISP_GET_STATS: { isp_stats_t *sp = (isp_stats_t *) addr; ISP_MEMZERO(sp, sizeof (*sp)); sp->isp_stat_version = ISP_STATS_VERSION; sp->isp_type = isp->isp_type; sp->isp_revision = isp->isp_revision; ISP_LOCK(isp); sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; ISP_UNLOCK(isp); retval = 0; break; } case ISP_CLR_STATS: ISP_LOCK(isp); isp->isp_intcnt = 0; isp->isp_intbogus = 0; isp->isp_intmboxc = 0; isp->isp_intoasync = 0; isp->isp_rsltccmplt = 0; isp->isp_fphccmplt = 0; isp->isp_rscchiwater = 0; isp->isp_fpcchiwater = 0; ISP_UNLOCK(isp); retval = 0; break; case ISP_FC_GETHINFO: { struct isp_hba_device *hba = (struct isp_hba_device *) addr; int chan = hba->fc_channel; if (chan < 0 || chan >= isp->isp_nchan) { retval = ENXIO; break; } hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); hba->fc_nchannels = isp->isp_nchan; if (IS_FC(isp)) { hba->fc_nports = MAX_FC_TARG; hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed; hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1; hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid; hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram; hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram; hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn; hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn; } else { hba->fc_nports = MAX_TARGETS; hba->fc_speed = 0; hba->fc_topology = 0; hba->nvram_node_wwn = 0ull; hba->nvram_port_wwn = 0ull; hba->active_node_wwn = 0ull; hba->active_port_wwn = 0ull; } retval = 0; break; } case ISP_TSK_MGMT: { int needmarker; struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; - uint16_t loopid; + uint16_t nphdl; mbreg_t mbs; if (IS_SCSI(isp)) { break; } chan = fct->chan; if (chan < 0 || chan >= isp->isp_nchan) { retval = ENXIO; break; } needmarker = retval = 0; - loopid = fct->loopid; + nphdl = fct->loopid; ISP_LOCK(isp); if (IS_24XX(isp)) { uint8_t local[QENTRY_LEN]; isp24xx_tmf_t *tmf; isp24xx_statusreq_t *sp; fcparam *fcp = FCPARAM(isp, chan); fcportdb_t *lp; int i; for (i = 0; i < MAX_FC_TARG; i++) { lp = &fcp->portdb[i]; - if (lp->handle == loopid) { + if (lp->handle == nphdl) { break; } } if (i == MAX_FC_TARG) { retval = ENXIO; ISP_UNLOCK(isp); break; } /* XXX VALIDATE LP XXX */ tmf = (isp24xx_tmf_t *) local; ISP_MEMZERO(tmf, QENTRY_LEN); tmf->tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT; tmf->tmf_header.rqs_entry_count = 1; tmf->tmf_nphdl = lp->handle; tmf->tmf_delay = 2; tmf->tmf_timeout = 2; tmf->tmf_tidlo = lp->portid; tmf->tmf_tidhi = lp->portid >> 16; tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan); tmf->tmf_lun[1] = fct->lun & 0xff; if (fct->lun >= 256) { tmf->tmf_lun[0] = 0x40 | (fct->lun >> 8); } switch (fct->action) { case IPT_CLEAR_ACA: tmf->tmf_flags = ISP24XX_TMF_CLEAR_ACA; break; case IPT_TARGET_RESET: tmf->tmf_flags = ISP24XX_TMF_TARGET_RESET; needmarker = 1; break; case IPT_LUN_RESET: tmf->tmf_flags = ISP24XX_TMF_LUN_RESET; needmarker = 1; break; case IPT_CLEAR_TASK_SET: tmf->tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET; needmarker = 1; break; case IPT_ABORT_TASK_SET: tmf->tmf_flags = ISP24XX_TMF_ABORT_TASK_SET; needmarker = 1; break;
default: retval = EINVAL; break; } if (retval) { ISP_UNLOCK(isp); break; } MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); if (FC_SCRATCH_ACQUIRE(isp, chan)) { ISP_UNLOCK(isp); retval = ENOMEM; break; } isp_put_24xx_tmf(isp, tmf, fcp->isp_scratch); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, QENTRY_LEN, chan); sp = (isp24xx_statusreq_t *) local; sp->req_completion_status = 1; retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan); isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)fcp->isp_scratch)[1], sp); FC_SCRATCH_RELEASE(isp, chan); if (retval || sp->req_completion_status != 0) { retval = EIO; } if (retval == 0) { if (needmarker) { fcp->sendmarker = 1; } } } else { MBSINIT(&mbs, 0, MBLOGALL, 0); if (ISP_CAP_2KLOGIN(isp) == 0) { - loopid <<= 8; + nphdl <<= 8; } switch (fct->action) { case IPT_CLEAR_ACA: mbs.param[0] = MBOX_CLEAR_ACA; - mbs.param[1] = loopid; + mbs.param[1] = nphdl; mbs.param[2] = fct->lun; break; case IPT_TARGET_RESET: mbs.param[0] = MBOX_TARGET_RESET; - mbs.param[1] = loopid; + mbs.param[1] = nphdl; needmarker = 1; break; case IPT_LUN_RESET: mbs.param[0] = MBOX_LUN_RESET; - mbs.param[1] = loopid; + mbs.param[1] = nphdl; mbs.param[2] = fct->lun; needmarker = 1; break; case IPT_CLEAR_TASK_SET: mbs.param[0] = MBOX_CLEAR_TASK_SET; - mbs.param[1] = loopid; + mbs.param[1] = nphdl; mbs.param[2] = fct->lun; needmarker = 1; break; case IPT_ABORT_TASK_SET: mbs.param[0] = MBOX_ABORT_TASK_SET; - mbs.param[1] = loopid; + mbs.param[1] = nphdl; mbs.param[2] = fct->lun; needmarker = 1; break; default: retval = EINVAL; break; } if (retval == 0) { if (needmarker) { FCPARAM(isp, chan)->sendmarker = 1; } retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); if (retval) { retval = EIO; } } } ISP_UNLOCK(isp); break; } default: break; } return (retval); } static void isp_intr_enable(void *arg) { int chan; ispsoftc_t *isp = arg; ISP_LOCK(isp); for (chan = 0; chan < isp->isp_nchan; chan++) { if (IS_FC(isp)) { if (FCPARAM(isp, chan)->role != ISP_ROLE_NONE) { ISP_ENABLE_INTS(isp); break; } } else { if (SDPARAM(isp, chan)->role != ISP_ROLE_NONE) { ISP_ENABLE_INTS(isp); break; } } } isp->isp_osinfo.ehook_active = 0; ISP_UNLOCK(isp); /* Release our hook so that the boot can continue.
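 * By now interrupts are enabled (or no channel wanted them), so the
 * intrhook that was holding up the boot can be torn down.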
*/ config_intrhook_disestablish(&isp->isp_osinfo.ehook); } /* * Local Inlines */ static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *); static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *); static ISP_INLINE int isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb) { ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free; if (ISP_PCMD(ccb) == NULL) { return (-1); } isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next; return (0); } static ISP_INLINE void isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb) { if (ISP_PCMD(ccb)) { #ifdef ISP_TARGET_MODE PISP_PCMD(ccb)->datalen = 0; PISP_PCMD(ccb)->totslen = 0; PISP_PCMD(ccb)->cumslen = 0; PISP_PCMD(ccb)->crn = 0; #endif PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free; isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb); ISP_PCMD(ccb) = NULL; } } /* * Put the target mode functions here, because some are inlines */ #ifdef ISP_TARGET_MODE static ISP_INLINE void isp_tmlock(ispsoftc_t *, const char *); static ISP_INLINE void isp_tmunlk(ispsoftc_t *); static ISP_INLINE int is_any_lun_enabled(ispsoftc_t *, int); static ISP_INLINE int is_lun_enabled(ispsoftc_t *, int, lun_id_t); static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); static ISP_INLINE tstate_t *get_lun_statep_from_tag(ispsoftc_t *, int, uint32_t); static ISP_INLINE void rls_lun_statep(ispsoftc_t *, tstate_t *); static ISP_INLINE inot_private_data_t *get_ntp_from_tagdata(ispsoftc_t *, uint32_t, uint32_t, tstate_t **); static ISP_INLINE atio_private_data_t *isp_get_atpd(ispsoftc_t *, tstate_t *, uint32_t); static ISP_INLINE atio_private_data_t *isp_find_atpd(ispsoftc_t *, tstate_t *, uint32_t); static ISP_INLINE void isp_put_atpd(ispsoftc_t *, tstate_t *, atio_private_data_t *); static ISP_INLINE inot_private_data_t *isp_get_ntpd(ispsoftc_t *, tstate_t *); static ISP_INLINE inot_private_data_t *isp_find_ntpd(ispsoftc_t *, tstate_t *, uint32_t, uint32_t); static ISP_INLINE void isp_put_ntpd(ispsoftc_t *, tstate_t *, inot_private_data_t *); static cam_status create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); static void destroy_lun_state(ispsoftc_t *, tstate_t *); static void isp_enable_lun(ispsoftc_t *, union ccb *); static cam_status isp_enable_deferred_luns(ispsoftc_t *, int); static cam_status isp_enable_deferred(ispsoftc_t *, int, lun_id_t); static void isp_disable_lun(ispsoftc_t *, union ccb *); static int isp_enable_target_mode(ispsoftc_t *, int); static int isp_disable_target_mode(ispsoftc_t *, int); static void isp_ledone(ispsoftc_t *, lun_entry_t *); static timeout_t isp_refire_putback_atio; static timeout_t isp_refire_notify_ack; static void isp_complete_ctio(union ccb *); static void isp_target_putback_atio(union ccb *); enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE }; static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How); static void isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); static void isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *); static void isp_handle_platform_ctio(ispsoftc_t *, void *); static void isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); static void isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); static void isp_handle_platform_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *); static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *); static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *); static void 
isp_target_mark_aborted(ispsoftc_t *, union ccb *); static void isp_target_mark_aborted_early(ispsoftc_t *, tstate_t *, uint32_t); static ISP_INLINE void isp_tmlock(ispsoftc_t *isp, const char *msg) { while (isp->isp_osinfo.tmbusy) { isp->isp_osinfo.tmwanted = 1; mtx_sleep(isp, &isp->isp_lock, PRIBIO, msg, 0); } isp->isp_osinfo.tmbusy = 1; } static ISP_INLINE void isp_tmunlk(ispsoftc_t *isp) { isp->isp_osinfo.tmbusy = 0; if (isp->isp_osinfo.tmwanted) { isp->isp_osinfo.tmwanted = 0; wakeup(isp); } } static ISP_INLINE int is_any_lun_enabled(ispsoftc_t *isp, int bus) { struct tslist *lhp; int i; for (i = 0; i < LUN_HASH_SIZE; i++) { ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); if (SLIST_FIRST(lhp)) return (1); } return (0); } static ISP_INLINE int is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun) { tstate_t *tptr; struct tslist *lhp; ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); SLIST_FOREACH(tptr, lhp, next) { if (tptr->ts_lun == lun) { return (1); } } return (0); } static void dump_tstates(ispsoftc_t *isp, int bus) { int i, j; struct tslist *lhp; tstate_t *tptr = NULL; if (bus >= isp->isp_nchan) { return; } for (i = 0; i < LUN_HASH_SIZE; i++) { ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); j = 0; SLIST_FOREACH(tptr, lhp, next) { xpt_print(tptr->owner, "[%d, %d] atio_cnt=%d inot_cnt=%d\n", i, j, tptr->atio_count, tptr->inot_count); j++; } } } static ISP_INLINE tstate_t * get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) { tstate_t *tptr = NULL; struct tslist *lhp; if (bus < isp->isp_nchan) { ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); SLIST_FOREACH(tptr, lhp, next) { if (tptr->ts_lun == lun) { tptr->hold++; return (tptr); } } } return (NULL); } static ISP_INLINE tstate_t * get_lun_statep_from_tag(ispsoftc_t *isp, int bus, uint32_t tagval) { tstate_t *tptr = NULL; atio_private_data_t *atp; struct tslist *lhp; int i; if (bus < isp->isp_nchan && tagval != 0) { for (i = 0; i < LUN_HASH_SIZE; i++) { ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); SLIST_FOREACH(tptr, lhp, next) { atp = isp_find_atpd(isp, tptr, tagval); if (atp) { tptr->hold++; return (tptr); } } } } return (NULL); } static ISP_INLINE inot_private_data_t * get_ntp_from_tagdata(ispsoftc_t *isp, uint32_t tag_id, uint32_t seq_id, tstate_t **rslt) { inot_private_data_t *ntp; tstate_t *tptr; struct tslist *lhp; int bus, i; for (bus = 0; bus < isp->isp_nchan; bus++) { for (i = 0; i < LUN_HASH_SIZE; i++) { ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); SLIST_FOREACH(tptr, lhp, next) { ntp = isp_find_ntpd(isp, tptr, tag_id, seq_id); if (ntp) { *rslt = tptr; tptr->hold++; return (ntp); } } } } return (NULL); } static ISP_INLINE void rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr) { KASSERT((tptr->hold), ("tptr not held")); tptr->hold--; } static void isp_tmcmd_restart(ispsoftc_t *isp) { inot_private_data_t *ntp; inot_private_data_t *restart_queue; tstate_t *tptr; union ccb *ccb; struct tslist *lhp; int bus, i; for (bus = 0; bus < isp->isp_nchan; bus++) { for (i = 0; i < LUN_HASH_SIZE; i++) { ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); SLIST_FOREACH(tptr, lhp, next) { if ((restart_queue = tptr->restart_queue) != NULL) tptr->restart_queue = NULL; while (restart_queue) { ntp = restart_queue; restart_queue = ntp->rd.nt.nt_hba; if (IS_24XX(isp)) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at7_entry_t *)ntp->rd.data)->at_rxid); isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->rd.data); } else { isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, 
((at2_entry_t *)ntp->rd.data)->at_rxid); isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->rd.data); } isp_put_ntpd(isp, tptr, ntp); if (tptr->restart_queue && restart_queue != NULL) { ntp = tptr->restart_queue; tptr->restart_queue = restart_queue; while (restart_queue->rd.nt.nt_hba) { restart_queue = restart_queue->rd.nt.nt_hba; } restart_queue->rd.nt.nt_hba = ntp; break; } } /* * We only need to do this once per tptr */ if (!TAILQ_EMPTY(&tptr->waitq)) { ccb = (union ccb *)TAILQ_LAST(&tptr->waitq, isp_ccbq); TAILQ_REMOVE(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); isp_target_start_ctio(isp, ccb, FROM_TIMER); } } } } } static ISP_INLINE atio_private_data_t * isp_get_atpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag) { atio_private_data_t *atp; atp = LIST_FIRST(&tptr->atfree); if (atp) { LIST_REMOVE(atp, next); atp->tag = tag; LIST_INSERT_HEAD(&tptr->atused[ATPDPHASH(tag)], atp, next); } return (atp); } static ISP_INLINE atio_private_data_t * isp_find_atpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag) { atio_private_data_t *atp; LIST_FOREACH(atp, &tptr->atused[ATPDPHASH(tag)], next) { if (atp->tag == tag) return (atp); } return (NULL); } static ISP_INLINE void isp_put_atpd(ispsoftc_t *isp, tstate_t *tptr, atio_private_data_t *atp) { if (atp->ests) { isp_put_ecmd(isp, atp->ests); } LIST_REMOVE(atp, next); memset(atp, 0, sizeof (*atp)); LIST_INSERT_HEAD(&tptr->atfree, atp, next); } static void isp_dump_atpd(ispsoftc_t *isp, tstate_t *tptr) { atio_private_data_t *atp; const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" }; for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) { - xpt_print(tptr->owner, "ATP: [0x%x] origdlen %u bytes_xfrd %u lun %u nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s\n", + xpt_print(tptr->owner, "ATP: [0x%x] origdlen %u bytes_xfrd %u lun %x nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s\n", atp->tag, atp->orig_datalen, atp->bytes_xfered, atp->lun, atp->nphdl, atp->sid, atp->portid, atp->oxid, states[atp->state & 0x7]); } } static ISP_INLINE inot_private_data_t * isp_get_ntpd(ispsoftc_t *isp, tstate_t *tptr) { inot_private_data_t *ntp; ntp = tptr->ntfree; if (ntp) { tptr->ntfree = ntp->next; } return (ntp); } static ISP_INLINE inot_private_data_t * isp_find_ntpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag_id, uint32_t seq_id) { inot_private_data_t *ntp; for (ntp = tptr->ntpool; ntp < &tptr->ntpool[ATPDPSIZE]; ntp++) { if (ntp->rd.tag_id == tag_id && ntp->rd.seq_id == seq_id) { return (ntp); } } return (NULL); } static ISP_INLINE void isp_put_ntpd(ispsoftc_t *isp, tstate_t *tptr, inot_private_data_t *ntp) { ntp->rd.tag_id = ntp->rd.seq_id = 0; ntp->next = tptr->ntfree; tptr->ntfree = ntp; } static cam_status create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path, tstate_t **rslt) { cam_status status; lun_id_t lun; struct tslist *lhp; tstate_t *tptr; int i; lun = xpt_path_lun_id(path); if (lun != CAM_LUN_WILDCARD) { if (ISP_MAX_LUNS(isp) > 0 && lun >= ISP_MAX_LUNS(isp)) { return (CAM_LUN_INVALID); } } if (is_lun_enabled(isp, bus, lun)) { return (CAM_LUN_ALRDY_ENA); } tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (tptr == NULL) { return (CAM_RESRC_UNAVAIL); } tptr->ts_lun = lun; status = xpt_create_path(&tptr->owner, NULL, xpt_path_path_id(path), xpt_path_target_id(path), lun); if (status != CAM_REQ_CMP) { free(tptr, M_DEVBUF); return (status); } SLIST_INIT(&tptr->atios); SLIST_INIT(&tptr->inots); TAILQ_INIT(&tptr->waitq); LIST_INIT(&tptr->atfree); for (i = ATPDPSIZE-1; 
i >= 0; i--) LIST_INSERT_HEAD(&tptr->atfree, &tptr->atpool[i], next); for (i = 0; i < ATPDPHASHSIZE; i++) LIST_INIT(&tptr->atused[i]); for (i = 0; i < ATPDPSIZE-1; i++) tptr->ntpool[i].next = &tptr->ntpool[i+1]; tptr->ntfree = tptr->ntpool; tptr->hold = 1; ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp); SLIST_INSERT_HEAD(lhp, tptr, next); *rslt = tptr; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n"); return (CAM_REQ_CMP); } static ISP_INLINE void destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr) { union ccb *ccb; struct tslist *lhp; KASSERT((tptr->hold != 0), ("tptr is not held")); KASSERT((tptr->hold == 1), ("tptr still held (%d)", tptr->hold)); do { ccb = (union ccb *)SLIST_FIRST(&tptr->atios); if (ccb) { SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); } } while (ccb); do { ccb = (union ccb *)SLIST_FIRST(&tptr->inots); if (ccb) { SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); } } while (ccb); ISP_GET_PC_ADDR(isp, cam_sim_bus(xpt_path_sim(tptr->owner)), lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], lhp); SLIST_REMOVE(lhp, tptr, tstate, next); ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "destroyed tstate\n"); xpt_free_path(tptr->owner); free(tptr, M_DEVBUF); } /* * Enable a lun. */ static void isp_enable_lun(ispsoftc_t *isp, union ccb *ccb) { tstate_t *tptr = NULL; int bus, tm_enabled, target_role; target_id_t target; lun_id_t lun; /* * We only support either a wildcard target/lun or a target ID of zero and a non-wildcard lun */ bus = XS_CHANNEL(ccb); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, "enabling lun %jx\n", (uintmax_t)lun); if (target == CAM_TARGET_WILDCARD && lun != CAM_LUN_WILDCARD) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } if (target != CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } if (isp->isp_dblev & ISP_LOGTDEBUG0) { xpt_print(ccb->ccb_h.path, "enabling lun 0x%jx on channel %d\n", (uintmax_t)lun, bus); } /* * Wait until we're not busy with the lun enables subsystem */ isp_tmlock(isp, "isp_enable_lun"); /* * This is as good a place as any to check f/w capabilities. */ if (IS_FC(isp)) { if (ISP_CAP_TMODE(isp) == 0) { xpt_print(ccb->ccb_h.path, "firmware does not support target mode\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; goto done; } /* * We *could* handle non-SCCLUN f/w, but we'd have to * dork with our already fragile enable/disable code. */ if (ISP_CAP_SCCFW(isp) == 0) { xpt_print(ccb->ccb_h.path, "firmware not SCCLUN capable\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; goto done; } target_role = (FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) != 0; } else { target_role = (SDPARAM(isp, bus)->role & ISP_ROLE_TARGET) != 0; } /* * Create the state pointer. * It should not already exist. */ tptr = get_lun_statep(isp, bus, lun); if (tptr) { ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; goto done; } ccb->ccb_h.status = create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); if (ccb->ccb_h.status != CAM_REQ_CMP) { goto done; } /* * We have a tricky maneuver to perform here. * * If target mode isn't already enabled here, * *and* our current role includes target mode, * we enable target mode here.
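 * If the current role does not include target mode, the enable is
 * instead recorded via tm_enable_defer and finished later by
 * isp_enable_deferred_luns() when a role change turns target mode on.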
* */ ISP_GET_PC(isp, bus, tm_enabled, tm_enabled); if (tm_enabled == 0 && target_role != 0) { if (isp_enable_target_mode(isp, bus)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; destroy_lun_state(isp, tptr); tptr = NULL; goto done; } tm_enabled = 1; } /* * Now check to see whether this bus is in target mode already. * * If not, a later role change into target mode will finish the job. */ if (tm_enabled == 0) { ISP_SET_PC(isp, bus, tm_enable_defer, 1); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print(ccb->ccb_h.path, "Target Mode not enabled yet- lun enable deferred\n"); goto done1; } /* * Enable the lun. */ ccb->ccb_h.status = isp_enable_deferred(isp, bus, lun); done: if (ccb->ccb_h.status != CAM_REQ_CMP) { if (tptr) { destroy_lun_state(isp, tptr); tptr = NULL; } } else { tptr->enabled = 1; } done1: if (tptr) { rls_lun_statep(isp, tptr); } /* * And we're outta here.... */ isp_tmunlk(isp); xpt_done(ccb); } static cam_status isp_enable_deferred_luns(ispsoftc_t *isp, int bus) { tstate_t *tptr = NULL; struct tslist *lhp; int i, n; ISP_GET_PC(isp, bus, tm_enabled, i); if (i == 1) { return (CAM_REQ_CMP); } ISP_GET_PC(isp, bus, tm_enable_defer, i); if (i == 0) { return (CAM_REQ_CMP); } /* * If this succeeds, it will set tm_enable */ if (isp_enable_target_mode(isp, bus)) { return (CAM_REQ_CMP_ERR); } isp_tmlock(isp, "isp_enable_deferred_luns"); for (n = i = 0; i < LUN_HASH_SIZE; i++) { ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp); SLIST_FOREACH(tptr, lhp, next) { tptr->hold++; if (tptr->enabled == 0) { if (isp_enable_deferred(isp, bus, tptr->ts_lun) == CAM_REQ_CMP) { tptr->enabled = 1; n++; } } else { n++; } tptr->hold--; } } isp_tmunlk(isp); if (n == 0) { return (CAM_REQ_CMP_ERR); } ISP_SET_PC(isp, bus, tm_enable_defer, 0); return (CAM_REQ_CMP); } static cam_status isp_enable_deferred(ispsoftc_t *isp, int bus, lun_id_t lun) { cam_status status; int luns_already_enabled; ISP_GET_PC(isp, bus, tm_luns_enabled, luns_already_enabled); isp_prt(isp, ISP_LOGTINFO, "%s: bus %d lun %jx luns_enabled %d", __func__, bus, (uintmax_t)lun, luns_already_enabled); if (IS_23XX(isp) || IS_24XX(isp) || (IS_FC(isp) && luns_already_enabled)) { status = CAM_REQ_CMP; } else { int cmd_cnt, not_cnt; if (IS_23XX(isp)) { cmd_cnt = DFLT_CMND_CNT; not_cnt = DFLT_INOT_CNT; } else { cmd_cnt = 64; not_cnt = 8; } status = CAM_REQ_INPROG; isp->isp_osinfo.rptr = &status; if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, lun == CAM_LUN_WILDCARD? 0 : lun, cmd_cnt, not_cnt)) { status = CAM_RESRC_UNAVAIL; } else { mtx_sleep(&status, &isp->isp_lock, PRIBIO, "isp_enable_deferred", 0); } isp->isp_osinfo.rptr = NULL; } if (status == CAM_REQ_CMP) { ISP_SET_PC(isp, bus, tm_luns_enabled, 1); isp_prt(isp, ISP_LOGCONFIG|ISP_LOGTINFO, "bus %d lun %jx now enabled for target mode", bus, (uintmax_t)lun); } return (status); } static void isp_disable_lun(ispsoftc_t *isp, union ccb *ccb) { tstate_t *tptr = NULL; int bus; cam_status status; target_id_t target; lun_id_t lun; bus = XS_CHANNEL(ccb); target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, "disabling lun %jx\n", (uintmax_t)lun); if (target == CAM_TARGET_WILDCARD && lun != CAM_LUN_WILDCARD) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } if (target != CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return; } /* * See if we're busy disabling a lun now. */ isp_tmlock(isp, "isp_disable_lun"); status = CAM_REQ_INPROG; /* * Find the state pointer. 
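 * get_lun_statep() takes a hold reference on the tstate; every path out
 * of this function has to balance it with rls_lun_statep() or hand it
 * off to destroy_lun_state().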
*/ if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) { status = CAM_PATH_INVALID; goto done; } /* * If we're a 23XX or 24XX card, we're done. */ if (IS_23XX(isp) || IS_24XX(isp)) { status = CAM_REQ_CMP; goto done; } /* * For SCC FW, we only deal with lun zero. */ if (IS_FC(isp) && lun > 0) { status = CAM_REQ_CMP; goto done; } isp->isp_osinfo.rptr = &status; if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, lun, 0, 0)) { status = CAM_RESRC_UNAVAIL; } else { mtx_sleep(&status, &isp->isp_lock, PRIBIO, "isp_disable_lun", 0); } isp->isp_osinfo.rptr = NULL; done: if (status == CAM_REQ_CMP) { tptr->enabled = 0; if (is_any_lun_enabled(isp, bus) == 0) { if (isp_disable_target_mode(isp, bus)) { status = CAM_REQ_CMP_ERR; } } } ccb->ccb_h.status = status; if (status == CAM_REQ_CMP) { destroy_lun_state(isp, tptr); xpt_print(ccb->ccb_h.path, "lun now disabled for target mode\n"); } else { if (tptr) rls_lun_statep(isp, tptr); } isp_tmunlk(isp); xpt_done(ccb); } static int isp_enable_target_mode(ispsoftc_t *isp, int bus) { int tm_enabled; ISP_GET_PC(isp, bus, tm_enabled, tm_enabled); if (tm_enabled != 0) { return (0); } if (IS_SCSI(isp)) { mbreg_t mbs; MBSINIT(&mbs, MBOX_ENABLE_TARGET_MODE, MBLOGALL, 0); mbs.param[0] = MBOX_ENABLE_TARGET_MODE; mbs.param[1] = ENABLE_TARGET_FLAG|ENABLE_TQING_FLAG; mbs.param[2] = bus << 7; if (isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs) < 0 || mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "Unable to enable Target Role on Bus %d", bus); return (EIO); } } ISP_SET_PC(isp, bus, tm_enabled, 1); isp_prt(isp, ISP_LOGINFO, "Target Role enabled on Bus %d", bus); return (0); } static int isp_disable_target_mode(ispsoftc_t *isp, int bus) { int tm_enabled; ISP_GET_PC(isp, bus, tm_enabled, tm_enabled); if (tm_enabled == 0) { return (0); } if (IS_SCSI(isp)) { mbreg_t mbs; MBSINIT(&mbs, MBOX_ENABLE_TARGET_MODE, MBLOGALL, 0); mbs.param[2] = bus << 7; if (isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs) < 0 || mbs.param[0] != MBOX_COMMAND_COMPLETE) { isp_prt(isp, ISP_LOGERR, "Unable to disable Target Role on Bus %d", bus); return (EIO); } } ISP_SET_PC(isp, bus, tm_enabled, 0); isp_prt(isp, ISP_LOGINFO, "Target Role disabled on Bus %d", bus); return (0); } static void isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) { uint32_t *rptr; rptr = isp->isp_osinfo.rptr; if (lep->le_status != LUN_OK) { isp_prt(isp, ISP_LOGERR, "ENABLE/MODIFY LUN returned 0x%x", lep->le_status); if (rptr) { *rptr = CAM_REQ_CMP_ERR; wakeup_one(rptr); } } else { if (rptr) { *rptr = CAM_REQ_CMP; wakeup_one(rptr); } } } static void isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how) { int fctape, sendstatus, resid; tstate_t *tptr; fcparam *fcp; atio_private_data_t *atp; struct ccb_scsiio *cso; uint32_t dmaresult, handle, xfrlen, sense_length, tmp; uint8_t local[QENTRY_LEN]; tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); if (tptr == NULL) { tptr = get_lun_statep(isp, XS_CHANNEL(ccb), CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find tstate pointer", __func__, ccb->csio.tag_id); ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } } isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)?
ccb->csio.sense_len : 0)); switch (how) { case FROM_TIMER: case FROM_CAM: /* * Insert at the tail of the list, if any, waiting CTIO CCBs */ TAILQ_INSERT_TAIL(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); break; case FROM_SRR: case FROM_CTIO_DONE: TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); break; } while (TAILQ_FIRST(&tptr->waitq) != NULL) { ccb = (union ccb *) TAILQ_FIRST(&tptr->waitq); TAILQ_REMOVE(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); cso = &ccb->csio; xfrlen = cso->dxfer_len; if (xfrlen == 0) { if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); continue; } } atp = isp_find_atpd(isp, tptr, cso->tag_id); if (atp == NULL) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__); isp_dump_atpd(isp, tptr); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); continue; } /* * Is this command a dead duck? */ if (atp->dead) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id); ccb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(ccb); continue; } /* * Check to make sure we're still in target mode. */ fcp = FCPARAM(isp, XS_CHANNEL(ccb)); if ((fcp->role & ISP_ROLE_TARGET) == 0) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id); ccb->ccb_h.status = CAM_PROVIDE_FAIL; xpt_done(ccb); continue; } /* * We're only handling ATPD_CCB_OUTSTANDING outstanding CCB at a time (one of which * could be split into two CTIOs to split data and status). */ if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) { isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags); TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); break; } /* * Does the initiator expect FC-Tape style responses? */ if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) { fctape = 1; } else { fctape = 0; } /* * If we already did the data xfer portion of a CTIO that sends data * and status, don't do it again and do the status portion now. */ if (atp->sendst) { isp_prt(isp, ISP_LOGTINFO, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u", cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit); xfrlen = 0; /* we already did the data transfer */ atp->sendst = 0; } if (ccb->ccb_h.flags & CAM_SEND_STATUS) { sendstatus = 1; } else { sendstatus = 0; } if (ccb->ccb_h.flags & CAM_SEND_SENSE) { KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?")); /* * Sense length is not the entire sense data structure size. Periph * drivers don't seem to be setting sense_len to reflect the actual * size. We'll peek inside to get the right amount. 
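 * Whatever we find is clamped below to XCMD_SIZE - MIN_FCP_RESPONSE_SIZE,
 * the most sense data that can ride along with an FCP response IU in a
 * single extended-command (ecmd) buffer.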
*/ sense_length = cso->sense_len; /* * This 'cannot' happen */ if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) { sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE; } } else { sense_length = 0; } memset(local, 0, QENTRY_LEN); /* * Check for overflow */ tmp = atp->bytes_xfered + atp->bytes_in_transit + xfrlen; if (tmp > atp->orig_datalen) { isp_prt(isp, ISP_LOGERR, "%s: [0x%x] data overflow by %u bytes", __func__, cso->tag_id, tmp - atp->orig_datalen); ccb->ccb_h.status = CAM_DATA_RUN_ERR; xpt_done(ccb); continue; } if (IS_24XX(isp)) { ct7_entry_t *cto = (ct7_entry_t *) local; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; ATPD_SET_SEQNO(cto, atp); cto->ct_nphdl = atp->nphdl; cto->ct_rxid = atp->tag; cto->ct_iid_lo = atp->portid; cto->ct_iid_hi = atp->portid >> 16; cto->ct_oxid = atp->oxid; cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb)); cto->ct_timeout = 120; cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT; /* * Mode 1, status, no data. Only possible when we are sending status, have * no data to transfer, and any sense data can fit into a ct7_entry_t. * * Mode 2, status, no data. We have to use this in the case that * the sense data won't fit into a ct7_entry_t. * */ if (sendstatus && xfrlen == 0) { cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA; resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; if (sense_length <= MAXRESPLEN_24XX) { if (resid < 0) { cto->ct_resid = -resid; } else if (resid > 0) { cto->ct_resid = resid; } cto->ct_flags |= CT7_FLAG_MODE1; cto->ct_scsi_status = cso->scsi_status; if (resid < 0) { cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8); } else if (resid > 0) { cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8); } if (fctape) { cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; } if (sense_length) { cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length; memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); } } else { bus_addr_t addr; char buf[XCMD_SIZE]; fcp_rsp_iu_t *rp; if (atp->ests == NULL) { atp->ests = isp_get_ecmd(isp); if (atp->ests == NULL) { TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); break; } } memset(buf, 0, sizeof (buf)); rp = (fcp_rsp_iu_t *)buf; if (fctape) { cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; rp->fcp_rsp_bits |= FCP_CONF_REQ; } cto->ct_flags |= CT7_FLAG_MODE2; rp->fcp_rsp_scsi_status = cso->scsi_status; if (resid < 0) { rp->fcp_rsp_resid = -resid; rp->fcp_rsp_bits |= FCP_RESID_OVERFLOW; } else if (resid > 0) { rp->fcp_rsp_resid = resid; rp->fcp_rsp_bits |= FCP_RESID_UNDERFLOW; } if (sense_length) { rp->fcp_rsp_snslen = sense_length; cto->ct_senselen = sense_length; rp->fcp_rsp_bits |= FCP_SNSLEN_VALID; isp_put_fcp_rsp_iu(isp, rp, atp->ests); memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); } else { isp_put_fcp_rsp_iu(isp, rp, atp->ests); } if (isp->isp_dblev & ISP_LOGTDEBUG1) { isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); } addr = isp->isp_osinfo.ecmd_dma; addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; cto->rsp.m2.ct_fcp_rsp_iudata.ds_base 
= DMA_LO32(addr); cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr); cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; } if (sense_length) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length, cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); } else { isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid); } atp->state = ATPD_STATE_LAST_CTIO; } /* * Mode 0 data transfers, *possibly* with status. */ if (xfrlen != 0) { cto->ct_flags |= CT7_FLAG_MODE0; if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT7_DATA_IN; } else { cto->ct_flags |= CT7_DATA_OUT; } cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit; cto->rsp.m0.ct_xfrlen = xfrlen; #ifdef DEBUG if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) { isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2)); ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0; cto->rsp.m0.ct_xfrlen -= xfrlen >> 2; } #endif if (sendstatus) { resid = atp->orig_datalen - atp->bytes_xfered - xfrlen; if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) { cto->ct_flags |= CT7_SENDSTATUS; atp->state = ATPD_STATE_LAST_CTIO; if (fctape) { cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF; } } else { atp->sendst = 1; /* send status later */ cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; atp->state = ATPD_STATE_CTIO; } } else { atp->state = ATPD_STATE_CTIO; } isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered); } } else if (IS_FC(isp)) { ct2_entry_t *cto = (ct2_entry_t *) local; if (isp->isp_osinfo.sixtyfourbit) cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; else cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; ATPD_SET_SEQNO(cto, atp); if (ISP_CAP_2KLOGIN(isp)) { ((ct2e_entry_t *)cto)->ct_iid = atp->nphdl; } else { cto->ct_iid = atp->nphdl; if (ISP_CAP_SCCFW(isp) == 0) { cto->ct_lun = ccb->ccb_h.target_lun; } } cto->ct_timeout = 10; cto->ct_rxid = cso->tag_id; /* * Mode 1, status, no data. Only possible when we are sending status, have * no data to transfer, and the sense length can fit in the ct2_entry_t. * * Mode 2, status, no data. We have to use this in the case the response * length won't fit into a ct2_entry_t. * * We'll fill out this structure with information as if this were a * Mode 1. The hardware layer will create the Mode 2 FCP RSP IU as * needed based upon this.
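 * As in the 24XX path above, the split is decided purely by length:
 * sense data up to MAXRESPLEN travels inline in the Mode 1 entry, while
 * anything larger is staged in an external ecmd buffer that the Mode 2
 * FCP RSP IU descriptor points at.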
*/ if (sendstatus && xfrlen == 0) { cto->ct_flags |= CT2_SENDSTATUS | CT2_NO_DATA; resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; if (sense_length <= MAXRESPLEN) { if (resid < 0) { cto->ct_resid = -resid; } else if (resid > 0) { cto->ct_resid = resid; } cto->ct_flags |= CT2_FLAG_MODE1; cto->rsp.m1.ct_scsi_status = cso->scsi_status; if (resid < 0) { cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER; } else if (resid > 0) { cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; } if (fctape) { cto->ct_flags |= CT2_CONFIRM; } if (sense_length) { cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; cto->rsp.m1.ct_resplen = cto->rsp.m1.ct_senselen = sense_length; memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length); } } else { bus_addr_t addr; char buf[XCMD_SIZE]; fcp_rsp_iu_t *rp; if (atp->ests == NULL) { atp->ests = isp_get_ecmd(isp); if (atp->ests == NULL) { TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); break; } } memset(buf, 0, sizeof (buf)); rp = (fcp_rsp_iu_t *)buf; if (fctape) { cto->ct_flags |= CT2_CONFIRM; rp->fcp_rsp_bits |= FCP_CONF_REQ; } cto->ct_flags |= CT2_FLAG_MODE2; rp->fcp_rsp_scsi_status = cso->scsi_status; if (resid < 0) { rp->fcp_rsp_resid = -resid; rp->fcp_rsp_bits |= FCP_RESID_OVERFLOW; } else if (resid > 0) { rp->fcp_rsp_resid = resid; rp->fcp_rsp_bits |= FCP_RESID_UNDERFLOW; } if (sense_length) { rp->fcp_rsp_snslen = sense_length; rp->fcp_rsp_bits |= FCP_SNSLEN_VALID; isp_put_fcp_rsp_iu(isp, rp, atp->ests); memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length); } else { isp_put_fcp_rsp_iu(isp, rp, atp->ests); } if (isp->isp_dblev & ISP_LOGTDEBUG1) { isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests); } addr = isp->isp_osinfo.ecmd_dma; addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE); isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests, (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length); cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length; if (isp->isp_osinfo.sixtyfourbit) { cto->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base = DMA_LO32(addr); cto->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi = DMA_HI32(addr); cto->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; } else { cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base = DMA_LO32(addr); cto->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length; } } if (sense_length) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d sense: %x %x/%x/%x", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid, cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]); } else { isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid); } atp->state = ATPD_STATE_LAST_CTIO; } if (xfrlen != 0) { cto->ct_flags |= CT2_FLAG_MODE0; if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT2_DATA_IN; } else { cto->ct_flags |= CT2_DATA_OUT; } cto->ct_reloff = atp->bytes_xfered + atp->bytes_in_transit; cto->rsp.m0.ct_xfrlen = xfrlen; if (sendstatus) { resid = atp->orig_datalen - 
atp->bytes_xfered - xfrlen; if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /*&& fctape == 0*/) { cto->ct_flags |= CT2_SENDSTATUS; atp->state = ATPD_STATE_LAST_CTIO; if (fctape) { cto->ct_flags |= CT2_CONFIRM; } } else { atp->sendst = 1; /* send status later */ cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM; atp->state = ATPD_STATE_CTIO; } } else { atp->state = ATPD_STATE_CTIO; } } isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO2[%x] seq %u nc %d CDB0=%x scsi status %x flags %x resid %d xfrlen %u offset %u", __func__, cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cso->scsi_status, cto->ct_flags, cto->ct_resid, cso->dxfer_len, atp->bytes_xfered); } else { ct_entry_t *cto = (ct_entry_t *) local; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; cto->ct_header.rqs_entry_count = 1; cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM; ATPD_SET_SEQNO(cto, atp); cto->ct_iid = cso->init_id; cto->ct_iid |= XS_CHANNEL(ccb) << 7; cto->ct_tgt = ccb->ccb_h.target_id; cto->ct_lun = ccb->ccb_h.target_lun; cto->ct_fwhandle = cso->tag_id; if (atp->rxid) { cto->ct_tag_val = atp->rxid; cto->ct_flags |= CT_TQAE; } if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { cto->ct_flags |= CT_NODISC; } if (cso->dxfer_len == 0) { cto->ct_flags |= CT_NO_DATA; } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cto->ct_flags |= CT_DATA_IN; } else { cto->ct_flags |= CT_DATA_OUT; } if (ccb->ccb_h.flags & CAM_SEND_STATUS) { cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR; cto->ct_scsi_status = cso->scsi_status; cto->ct_resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit - xfrlen; isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO[%x] seq %u nc %d scsi status %x resid %d tag_id %x", __func__, cto->ct_fwhandle, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), cso->scsi_status, cso->resid, cso->tag_id); } ccb->ccb_h.flags &= ~CAM_SEND_SENSE; cto->ct_timeout = 10; } if (isp_get_pcmd(isp, ccb)) { ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n"); TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); break; } if (isp_allocate_xs_tgt(isp, ccb, &handle)) { ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__); TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); isp_free_pcmd(isp, ccb); break; } atp->bytes_in_transit += xfrlen; PISP_PCMD(ccb)->datalen = xfrlen; /* * Call the dma setup routines for this entry (and any subsequent * CTIOs) if there's data to move, and then tell the f/w it's got * new things to play with. As with isp_start's usage of DMA setup, * any swizzling is done in the machine dependent layer. Because * of this, we put the request onto the queue area first in native * format. 
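* The ct_syshandle stored just below is what lets the completion path (isp_handle_platform_ctio) find this CCB again via isp_find_xs_tgt() when the firmware returns the entry.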
*/ if (IS_24XX(isp)) { ct7_entry_t *cto = (ct7_entry_t *) local; cto->ct_syshandle = handle; } else if (IS_FC(isp)) { ct2_entry_t *cto = (ct2_entry_t *) local; cto->ct_syshandle = handle; } else { ct_entry_t *cto = (ct_entry_t *) local; cto->ct_syshandle = handle; } dmaresult = ISP_DMASETUP(isp, cso, (ispreq_t *) local); if (dmaresult != CMD_QUEUED) { isp_destroy_tgt_handle(isp, handle); isp_free_pcmd(isp, ccb); if (dmaresult == CMD_EAGAIN) { TAILQ_INSERT_HEAD(&tptr->waitq, &ccb->ccb_h, periph_links.tqe); break; } ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); continue; } isp->isp_nactive++; ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED; if (xfrlen) { ccb->ccb_h.spriv_field0 = atp->bytes_xfered; } else { ccb->ccb_h.spriv_field0 = ~0; } atp->ctcnt++; atp->seqno++; } rls_lun_statep(isp, tptr); } static void isp_refire_putback_atio(void *arg) { union ccb *ccb = arg; ISP_ASSERT_LOCKED((ispsoftc_t *)XS_ISP(ccb)); isp_target_putback_atio(ccb); } static void isp_refire_notify_ack(void *arg) { isp_tna_t *tp = arg; ispsoftc_t *isp = tp->isp; ISP_ASSERT_LOCKED(isp); if (isp_notify_ack(isp, tp->not)) { callout_schedule(&tp->timer, 5); } else { free(tp, M_DEVBUF); } } static void isp_target_putback_atio(union ccb *ccb) { ispsoftc_t *isp; struct ccb_scsiio *cso; void *qe; isp = XS_ISP(ccb); qe = isp_getrqentry(isp); if (qe == NULL) { xpt_print(ccb->ccb_h.path, "%s: Request Queue Overflow\n", __func__); callout_reset(&PISP_PCMD(ccb)->wdog, 10, isp_refire_putback_atio, ccb); return; } memset(qe, 0, QENTRY_LEN); cso = &ccb->csio; if (IS_FC(isp)) { at2_entry_t local, *at = &local; ISP_MEMZERO(at, sizeof (at2_entry_t)); at->at_header.rqs_entry_type = RQSTYPE_ATIO2; at->at_header.rqs_entry_count = 1; if (ISP_CAP_SCCFW(isp)) { at->at_scclun = (uint16_t) ccb->ccb_h.target_lun; #if __FreeBSD_version < 1000700 if (at->at_scclun >= 256) at->at_scclun |= 0x4000; #endif } else { at->at_lun = (uint8_t) ccb->ccb_h.target_lun; } at->at_status = CT_OK; at->at_rxid = cso->tag_id; at->at_iid = cso->ccb_h.target_id; isp_put_atio2(isp, at, qe); } else { at_entry_t local, *at = &local; ISP_MEMZERO(at, sizeof (at_entry_t)); at->at_header.rqs_entry_type = RQSTYPE_ATIO; at->at_header.rqs_entry_count = 1; at->at_iid = cso->init_id; at->at_iid |= XS_CHANNEL(ccb) << 7; at->at_tgt = cso->ccb_h.target_id; at->at_lun = cso->ccb_h.target_lun; at->at_status = CT_OK; at->at_tag_val = AT_GET_TAG(cso->tag_id); at->at_handle = AT_GET_HANDLE(cso->tag_id); isp_put_atio(isp, at, qe); } ISP_TDQE(isp, "isp_target_putback_atio", isp->isp_reqidx, qe); ISP_SYNC_REQUEST(isp); isp_complete_ctio(ccb); } static void isp_complete_ctio(union ccb *ccb) { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); } } /* * Handle ATIO stuff that the generic code can't. * This means handling CDBs. */ static void isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep) { tstate_t *tptr; int status, bus; struct ccb_accept_tio *atiop; atio_private_data_t *atp; /* * The firmware status (except for the QLTM_SVALID bit) * indicates why this ATIO was sent to us. * * If QLTM_SVALID is set, the firmware has recommended Sense Data. * * If the DISCONNECTS DISABLED bit is set in the flags field, * we're still connected on the SCSI bus. */ status = aep->at_status; if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) { /* * Bus Phase Sequence error. We should have sense data * suggested by the f/w. I'm not sure quite yet what * to do about this for CAM. 
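* For now we log it and return SCSI BUSY, dropping the suggested sense data.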
*/ isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return; } if ((status & ~QLTM_SVALID) != AT_CDB) { isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", status); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return; } bus = GET_BUS_VAL(aep->at_iid); tptr = get_lun_statep(isp, bus, aep->at_lun); if (tptr == NULL) { tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); if (tptr == NULL) { /* * Because we can't autofeed sense data back with * a command for parallel SCSI, we can't give back * a CHECK CONDITION. We'll give back a BUSY status * instead. This works out okay because the only * time we should, in fact, get this, is in the * case that somebody configured us without the * blackhole driver, so they get what they deserve. */ isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return; } } atp = isp_get_atpd(isp, tptr, aep->at_handle); atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); if (atiop == NULL || atp == NULL) { /* * Because we can't autofeed sense data back with * a command for parallel SCSI, we can't give back * a CHECK CONDITION. We'll give back a QUEUE FULL status * instead. This works out okay because the only time we * should, in fact, get this, is in the case that we've * run out of ATIOS. */ - xpt_print(tptr->owner, "no %s for lun %d from initiator %d\n", (atp == NULL && atiop == NULL)? "ATIOs *or* ATPS" : + xpt_print(tptr->owner, "no %s for lun %x from initiator %d\n", (atp == NULL && atiop == NULL)? "ATIOs *or* ATPS" : ((atp == NULL)? "ATPs" : "ATIOs"), aep->at_lun, aep->at_iid); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); if (atp) { isp_put_atpd(isp, tptr, atp); } rls_lun_statep(isp, tptr); return; } atp->rxid = aep->at_tag_val; atp->state = ATPD_STATE_ATIO; SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); tptr->atio_count--; ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count); atiop->ccb_h.target_id = aep->at_tgt; atiop->ccb_h.target_lun = aep->at_lun; if (aep->at_flags & AT_NODISC) { atiop->ccb_h.flags |= CAM_DIS_DISCONNECT; } else { atiop->ccb_h.flags &= ~CAM_DIS_DISCONNECT; } if (status & QLTM_SVALID) { size_t amt = ISP_MIN(QLTM_SENSELEN, sizeof (atiop->sense_data)); atiop->sense_len = amt; ISP_MEMCPY(&atiop->sense_data, aep->at_sense, amt); } else { atiop->sense_len = 0; } atiop->init_id = GET_IID_VAL(aep->at_iid); atiop->cdb_len = aep->at_cdblen; ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); atiop->ccb_h.status = CAM_CDB_RECVD; /* * Construct a tag 'id' based upon tag value (which may be 0..255) * and the handle (which we have to preserve). 
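* The reverse mapping can be seen in isp_target_putback_atio() above, which splits the composite tag back apart with AT_GET_TAG() and AT_GET_HANDLE() before returning the ATIO to the firmware.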
*/ atiop->tag_id = atp->tag; if (aep->at_flags & AT_TQAE) { atiop->tag_action = aep->at_tag_type; atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; } atp->orig_datalen = 0; atp->bytes_xfered = 0; atp->lun = aep->at_lun; atp->nphdl = aep->at_iid; atp->portid = PORT_NONE; atp->oxid = 0; atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; atp->tattr = aep->at_tag_type; atp->state = ATPD_STATE_CAM; - isp_prt(isp, ISP_LOGTDEBUG0, "ATIO[0x%x] CDB=0x%x lun %d", aep->at_tag_val, atp->cdb0, atp->lun); + isp_prt(isp, ISP_LOGTDEBUG0, "ATIO[0x%x] CDB=0x%x lun %x", aep->at_tag_val, atp->cdb0, atp->lun); rls_lun_statep(isp, tptr); } static void isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) { lun_id_t lun; fcportdb_t *lp; tstate_t *tptr; struct ccb_accept_tio *atiop; uint16_t nphdl; atio_private_data_t *atp; inot_private_data_t *ntp; /* * The firmware status (except for the QLTM_SVALID bit) * indicates why this ATIO was sent to us. * * If QLTM_SVALID is set, the firmware has recommended Sense Data. */ if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { isp_prt(isp, ISP_LOGWARN, "bogus atio (0x%x) leaked to platform", aep->at_status); isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); return; } if (ISP_CAP_SCCFW(isp)) { lun = aep->at_scclun; #if __FreeBSD_version < 1000700 lun &= 0x3fff; #endif } else { lun = aep->at_lun; } if (ISP_CAP_2KLOGIN(isp)) { nphdl = ((at2e_entry_t *)aep)->at_iid; } else { nphdl = aep->at_iid; } tptr = get_lun_statep(isp, 0, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: [0x%x] no state pointer for lun %jx or wildcard", __func__, aep->at_rxid, (uintmax_t)lun); if (lun == 0) { isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); } else { isp_endcmd(isp, aep, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); } return; } } /* * Start any commands pending resources first. */ if (tptr->restart_queue) { inot_private_data_t *restart_queue = tptr->restart_queue; tptr->restart_queue = NULL; while (restart_queue) { ntp = restart_queue; restart_queue = ntp->rd.nt.nt_hba; isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at2_entry_t *)ntp->rd.data)->at_rxid); isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->rd.data); isp_put_ntpd(isp, tptr, ntp); /* * If a recursion caused the restart queue to start to fill again, * stop and splice the new list on top of the old list and restore * it and go to noresrc. */ if (tptr->restart_queue) { ntp = tptr->restart_queue; tptr->restart_queue = restart_queue; while (restart_queue->rd.nt.nt_hba) { restart_queue = restart_queue->rd.nt.nt_hba; } restart_queue->rd.nt.nt_hba = ntp; goto noresrc; } } } atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); if (atiop == NULL) { goto noresrc; } atp = isp_get_atpd(isp, tptr, aep->at_rxid); if (atp == NULL) { goto noresrc; } atp->state = ATPD_STATE_ATIO; SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); tptr->atio_count--; isp_prt(isp, ISP_LOGTDEBUG2, "Take FREE ATIO count now %d", tptr->atio_count); atiop->ccb_h.target_id = FCPARAM(isp, 0)->isp_loopid; atiop->ccb_h.target_lun = lun; /* * We don't get 'suggested' sense data as we do with SCSI cards. */ atiop->sense_len = 0; /* * If we're not in the port database, add ourselves. 
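* On a 2100 the N-port handle is used directly; otherwise a missing or zombie port database entry is recreated from the WWPN carried in the ATIO2 itself before resolving the CAM target id.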
*/ if (IS_2100(isp)) atiop->init_id = nphdl; else { if ((isp_find_pdb_by_handle(isp, 0, nphdl, &lp) == 0 || lp->state == FC_PORTDB_STATE_ZOMBIE)) { uint64_t wwpn = (((uint64_t) aep->at_wwpn[0]) << 48) | (((uint64_t) aep->at_wwpn[1]) << 32) | (((uint64_t) aep->at_wwpn[2]) << 16) | (((uint64_t) aep->at_wwpn[3]) << 0); isp_add_wwn_entry(isp, 0, wwpn, INI_NONE, nphdl, PORT_ANY, 0); isp_find_pdb_by_handle(isp, 0, nphdl, &lp); } atiop->init_id = FC_PORTDB_TGT(isp, 0, lp); } atiop->cdb_len = ATIO2_CDBLEN; ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); atiop->ccb_h.status = CAM_CDB_RECVD; atiop->tag_id = atp->tag; switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { case ATIO2_TC_ATTR_SIMPLEQ: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_SIMPLE_Q_TAG; break; case ATIO2_TC_ATTR_HEADOFQ: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_HEAD_OF_Q_TAG; break; case ATIO2_TC_ATTR_ORDERED: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_ORDERED_Q_TAG; break; case ATIO2_TC_ATTR_ACAQ: /* ?? */ case ATIO2_TC_ATTR_UNTAGGED: default: atiop->tag_action = 0; break; } atp->orig_datalen = aep->at_datalen; atp->bytes_xfered = 0; atp->lun = lun; atp->nphdl = nphdl; atp->sid = PORT_ANY; atp->oxid = aep->at_oxid; atp->cdb0 = aep->at_cdb[0]; atp->tattr = aep->at_taskflags & ATIO2_TC_ATTR_MASK; atp->state = ATPD_STATE_CAM; xpt_done((union ccb *)atiop); isp_prt(isp, ISP_LOGTDEBUG0, "ATIO2[0x%x] CDB=0x%x lun %jx datalen %u", aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen); rls_lun_statep(isp, tptr); return; noresrc: ntp = isp_get_ntpd(isp, tptr); if (ntp == NULL) { rls_lun_statep(isp, tptr); isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_BUSY, 0); return; } memcpy(ntp->rd.data, aep, QENTRY_LEN); ntp->rd.nt.nt_hba = tptr->restart_queue; tptr->restart_queue = ntp; rls_lun_statep(isp, tptr); } static void isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep) { int cdbxlen; lun_id_t lun; uint16_t chan, nphdl = NIL_HANDLE; uint32_t did, sid; fcportdb_t *lp; tstate_t *tptr; struct ccb_accept_tio *atiop; atio_private_data_t *atp = NULL; atio_private_data_t *oatp; inot_private_data_t *ntp; did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2]; sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; #if __FreeBSD_version >= 1000700 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun)); #else lun = ((aep->at_cmnd.fcp_cmnd_lun[0] & 0x3f) << 8) | aep->at_cmnd.fcp_cmnd_lun[1]; #endif /* * Find the N-port handle, and Virtual Port Index for this command. * * If we can't, we're somewhat in trouble because we can't actually respond w/o that information. * We also, as a matter of course, need to know the WWN of the initiator too. */ if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) { /* * Find the right channel based upon D_ID */ isp_find_chan_by_did(isp, did, &chan); if (chan == ISP_NOCHAN) { NANOTIME_T now; /* * If we don't recognize our own D_ID, terminate the exchange, unless we're within 2 seconds of startup. * It's a bit tricky here as we need to stash this command *somewhere*.
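* Within that window the command is parked on a lun state's restart queue via the noresrc path below, presumably because the virtual port owning this D_ID may simply not have finished attaching yet.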
*/ GET_NANOTIME(&now); if (NANOTIME_SUB(&isp->isp_init_time, &now) > 2000000000ULL) { isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel- dropping", __func__, aep->at_rxid, did); isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0); return; } tptr = get_lun_statep(isp, 0, 0); if (tptr == NULL) { tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel and no tptr- dropping", __func__, aep->at_rxid, did); isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0); return; } } isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel- deferring", __func__, aep->at_rxid, did); goto noresrc; } isp_prt(isp, ISP_LOGTDEBUG0, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x", __func__, aep->at_rxid, did, chan, sid); } else { chan = 0; } /* * Find the PDB entry for this initiator */ if (isp_find_pdb_by_sid(isp, chan, sid, &lp) == 0) { /* * If we're not in the port database terminate the exchange. */ isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already", __func__, aep->at_rxid, did, chan, sid); isp_dump_portdb(isp, chan); isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0); return; } nphdl = lp->handle; /* * Get the tstate pointer */ tptr = get_lun_statep(isp, chan, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: [0x%x] no state pointer for lun %jx or wildcard", __func__, aep->at_rxid, (uintmax_t)lun); if (lun == 0) { isp_endcmd(isp, aep, nphdl, SCSI_STATUS_BUSY, 0); } else { isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0); } return; } } /* * Start any commands pending resources first. */ if (tptr->restart_queue) { inot_private_data_t *restart_queue = tptr->restart_queue; tptr->restart_queue = NULL; while (restart_queue) { ntp = restart_queue; restart_queue = ntp->rd.nt.nt_hba; isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at7_entry_t *)ntp->rd.data)->at_rxid); isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->rd.data); isp_put_ntpd(isp, tptr, ntp); /* * If a recursion caused the restart queue to start to fill again, * stop and splice the new list on top of the old list and restore * it and go to noresrc. */ if (tptr->restart_queue) { isp_prt(isp, ISP_LOGTDEBUG0, "%s: restart queue refilling", __func__); if (restart_queue) { ntp = tptr->restart_queue; tptr->restart_queue = restart_queue; while (restart_queue->rd.nt.nt_hba) { restart_queue = restart_queue->rd.nt.nt_hba; } restart_queue->rd.nt.nt_hba = ntp; } goto noresrc; } } } /* * If the f/w is out of resources, just send a BUSY status back. */ if (aep->at_rxid == AT7_NORESRC_RXID) { rls_lun_statep(isp, tptr); isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0); return; } /* * If we're out of resources, just send a BUSY status back. 
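* (More precisely: the noresrc path below first tries to defer the command on the restart queue; SCSI BUSY goes out only if even that bookkeeping allocation fails.)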
*/ atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); if (atiop == NULL) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid); goto noresrc; } oatp = isp_find_atpd(isp, tptr, aep->at_rxid); if (oatp) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] tag wraparound in isp_handle_platform_atio7 (N-Port Handle 0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d", aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state); /* * It's not a "no resource" condition- but we can treat it like one */ goto noresrc; } atp = isp_get_atpd(isp, tptr, aep->at_rxid); if (atp == NULL) { isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid); goto noresrc; } atp->word3 = lp->prli_word3; atp->state = ATPD_STATE_ATIO; SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); tptr->atio_count--; ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count); atiop->init_id = FC_PORTDB_TGT(isp, chan, lp); atiop->ccb_h.target_id = FCPARAM(isp, chan)->isp_loopid; atiop->ccb_h.target_lun = lun; atiop->sense_len = 0; cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT; if (cdbxlen) { isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored"); } cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb); ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen); atiop->cdb_len = cdbxlen; atiop->ccb_h.status = CAM_CDB_RECVD; atiop->tag_id = atp->tag; switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) { case FCP_CMND_TASK_ATTR_SIMPLE: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_SIMPLE_Q_TAG; break; case FCP_CMND_TASK_ATTR_HEAD: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_HEAD_OF_Q_TAG; break; case FCP_CMND_TASK_ATTR_ORDERED: atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; atiop->tag_action = MSG_ORDERED_Q_TAG; break; default: /* FALLTHROUGH */ case FCP_CMND_TASK_ATTR_ACA: case FCP_CMND_TASK_ATTR_UNTAGGED: atiop->tag_action = 0; break; } atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl; atp->bytes_xfered = 0; atp->lun = lun; atp->nphdl = nphdl; atp->portid = sid; atp->oxid = aep->at_hdr.ox_id; atp->rxid = aep->at_hdr.rx_id; atp->cdb0 = atiop->cdb_io.cdb_bytes[0]; atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK; atp->state = ATPD_STATE_CAM; isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u", aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen); xpt_done((union ccb *)atiop); rls_lun_statep(isp, tptr); return; noresrc: if (atp) { isp_put_atpd(isp, tptr, atp); } ntp = isp_get_ntpd(isp, tptr); if (ntp == NULL) { rls_lun_statep(isp, tptr); isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0); return; } memcpy(ntp->rd.data, aep, QENTRY_LEN); ntp->rd.nt.nt_hba = tptr->restart_queue; tptr->restart_queue = ntp; rls_lun_statep(isp, tptr); } /* * Handle starting an SRR (sequence retransmit request) * We get here when we've gotten the immediate notify * and the return of all outstanding CTIOs for this * transaction.
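* The notify entry names the IU type being retransmitted (in_srr_iu) and a relative offset (in_srr_reloff_lo/hi); for data-out we rewind bytes_xfered to that offset, while an FCP RSP SRR just arranges for status to be resent.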
*/ static void isp_handle_srr_start(ispsoftc_t *isp, tstate_t *tptr, atio_private_data_t *atp) { in_fcentry_24xx_t *inot; uint32_t srr_off, ccb_off, ccb_len, ccb_end; union ccb *ccb; inot = (in_fcentry_24xx_t *)atp->srr; srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16); ccb = atp->srr_ccb; atp->srr_ccb = NULL; atp->nsrr++; if (ccb == NULL) { isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag); goto fail; } ccb_off = ccb->ccb_h.spriv_field0; ccb_len = ccb->csio.dxfer_len; ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len; switch (inot->in_srr_iu) { case R_CTL_INFO_SOLICITED_DATA: /* * We have to restart a FCP_DATA data out transaction */ atp->sendst = 0; atp->bytes_xfered = srr_off; if (ccb_len == 0) { isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off); goto mdp; } if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) { isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); goto mdp; } isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end); break; case R_CTL_INFO_COMMAND_STATUS: isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag); atp->sendst = 1; /* * We have to restart a FCP_RSP IU transaction */ break; case R_CTL_INFO_DATA_DESCRIPTOR: /* * We have to restart an FCP DATA in transaction */ isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping"); goto fail; default: isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu); goto fail; } /* * We can't do anything until this is acked, so we might as well start it now. * We aren't going to do the usual asynchronous ack issue because we need * to make sure this gets on the wire first. 
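* Hence the synchronous isp_notify_ack() below, whose failure is treated as fatal for the SRR, instead of the deferred ISPASYNC_TARGET_NOTIFY_ACK used on the fail path.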
*/ if (isp_notify_ack(isp, inot)) { isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); goto fail; } isp_target_start_ctio(isp, ccb, FROM_SRR); return; fail: inot->in_reserved = 1; isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP_ERR; isp_complete_ctio(ccb); return; mdp: if (isp_notify_ack(isp, inot)) { isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose"); goto fail; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status = CAM_MESSAGE_RECV; /* * This is not a strict interpretation of MDP, but it's close */ ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16]; ccb->csio.msg_len = 7; ccb->csio.msg_ptr[0] = MSG_EXTENDED; ccb->csio.msg_ptr[1] = 5; ccb->csio.msg_ptr[2] = 0; /* modify data pointer */ ccb->csio.msg_ptr[3] = srr_off >> 24; ccb->csio.msg_ptr[4] = srr_off >> 16; ccb->csio.msg_ptr[5] = srr_off >> 8; ccb->csio.msg_ptr[6] = srr_off; isp_complete_ctio(ccb); } static void isp_handle_srr_notify(ispsoftc_t *isp, void *inot_raw) { tstate_t *tptr; in_fcentry_24xx_t *inot = inot_raw; atio_private_data_t *atp; uint32_t tag = inot->in_rxid; uint32_t bus = inot->in_vpidx; if (!IS_24XX(isp)) { isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot_raw); return; } tptr = get_lun_statep_from_tag(isp, bus, tag); if (tptr == NULL) { isp_prt(isp, ISP_LOGERR, "%s: cannot find tptr for tag %x in SRR Notify", __func__, tag); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); return; } atp = isp_find_atpd(isp, tptr, tag); if (atp == NULL) { rls_lun_statep(isp, tptr); isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify", __func__, tag); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); return; } atp->srr_notify_rcvd = 1; memcpy(atp->srr, inot, sizeof (atp->srr)); isp_prt(isp, ISP_LOGTINFO /* ISP_LOGTDEBUG0 */, "SRR[0x%x] inot->in_rxid flags 0x%x srr_iu=%x reloff 0x%x", inot->in_rxid, inot->in_flags, inot->in_srr_iu, inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16)); if (atp->srr_ccb) isp_handle_srr_start(isp, tptr, atp); rls_lun_statep(isp, tptr); } static void isp_handle_platform_ctio(ispsoftc_t *isp, void *arg) { union ccb *ccb; int sentstatus = 0, ok = 0, notify_cam = 0, resid = 0, failure = 0; tstate_t *tptr = NULL; atio_private_data_t *atp = NULL; int bus; uint32_t handle, moved_data = 0, data_requested; /* * CTIO handles are 16 bits. * CTIO2 and CTIO7 are 32 bits. */ if (IS_SCSI(isp)) { handle = ((ct_entry_t *)arg)->ct_syshandle; } else { handle = ((ct2_entry_t *)arg)->ct_syshandle; } ccb = isp_find_xs_tgt(isp, handle); if (ccb == NULL) { isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, arg); return; } isp_destroy_tgt_handle(isp, handle); data_requested = PISP_PCMD(ccb)->datalen; isp_free_pcmd(isp, ccb); if (isp->isp_nactive) { isp->isp_nactive--; } bus = XS_CHANNEL(ccb); tptr = get_lun_statep(isp, bus, XS_LUN(ccb)); if (tptr == NULL) { tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); } if (tptr == NULL) { isp_prt(isp, ISP_LOGERR, "%s: cannot find tptr for tag %x after I/O", __func__, ccb->csio.tag_id); return; } if (IS_24XX(isp)) { atp = isp_find_atpd(isp, tptr, ((ct7_entry_t *)arg)->ct_rxid); } else if (IS_FC(isp)) { atp = isp_find_atpd(isp, tptr, ((ct2_entry_t *)arg)->ct_rxid); } else { atp = isp_find_atpd(isp, tptr, ((ct_entry_t *)arg)->ct_fwhandle); } if (atp == NULL) { /* * XXX: isp_clear_commands() generates fake CTIO with zero * ct_rxid value, filling only ct_syshandle. 
Workaround * that using tag_id from the CCB, pointed by ct_syshandle. */ atp = isp_find_atpd(isp, tptr, ccb->csio.tag_id); } if (atp == NULL) { rls_lun_statep(isp, tptr); isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id); return; } KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero")); atp->bytes_in_transit -= data_requested; atp->ctcnt -= 1; ccb->ccb_h.status &= ~CAM_STATUS_MASK; if (IS_24XX(isp)) { ct7_entry_t *ct = arg; if (ct->ct_nphdl == CT7_SRR) { atp->srr_ccb = ccb; if (atp->srr_notify_rcvd) isp_handle_srr_start(isp, tptr, atp); rls_lun_statep(isp, tptr); return; } if (ct->ct_nphdl == CT_HBA_RESET) { failure = CAM_UNREC_HBA_ERROR; } else { sentstatus = ct->ct_flags & CT7_SENDSTATUS; ok = (ct->ct_nphdl == CT7_OK); notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) { resid = ct->ct_resid; moved_data = data_requested - resid; } } isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); } else if (IS_FC(isp)) { ct2_entry_t *ct = arg; if (ct->ct_status == CT_SRR) { atp->srr_ccb = ccb; if (atp->srr_notify_rcvd) isp_handle_srr_start(isp, tptr, atp); rls_lun_statep(isp, tptr); isp_target_putback_atio(ccb); return; } if (ct->ct_status == CT_HBA_RESET) { failure = CAM_UNREC_HBA_ERROR; } else { sentstatus = ct->ct_flags & CT2_SENDSTATUS; ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { resid = ct->ct_resid; moved_data = data_requested - resid; } } isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO2[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct), notify_cam, ct->ct_status, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID"); } else { ct_entry_t *ct = arg; if (ct->ct_status == (CT_HBA_RESET & 0xff)) { failure = CAM_UNREC_HBA_ERROR; } else { sentstatus = ct->ct_flags & CT_SENDSTATUS; ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK; notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0; } if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { resid = ct->ct_resid; moved_data = data_requested - resid; } isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO[%x] seq %u nc %d tag %x S_ID 0x%x lun %x sts %x flg %x resid %d %s", __func__, ct->ct_fwhandle, ATPD_GET_SEQNO(ct), notify_cam, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, ct->ct_status, ct->ct_flags, resid, sentstatus? "FIN" : "MID"); } if (ok) { if (moved_data) { atp->bytes_xfered += moved_data; ccb->csio.resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit; } if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) { ccb->ccb_h.status |= CAM_SENT_SENSE; } ccb->ccb_h.status |= CAM_REQ_CMP; } else { notify_cam = 1; if (failure == CAM_UNREC_HBA_ERROR) ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR; else ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } atp->state = ATPD_STATE_PDON; rls_lun_statep(isp, tptr); /* * We never *not* notify CAM when there has been any error (ok == 0), * so we never need to do an ATIO putback if we're not notifying CAM. */ isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)", (sentstatus)? 
" FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0); if (notify_cam == 0) { if (atp->sendst) { isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE); } return; } /* * We're telling CAM we're done with this CTIO transaction. * * 24XX cards never need an ATIO put back. * * Other cards need one put back only on error. * In the latter case, a timeout will re-fire * and try again in case we didn't have * queue resources to do so at first. In any case, * once the putback is done we do the completion * call. */ if (ok || IS_24XX(isp)) { isp_complete_ctio(ccb); } else { isp_target_putback_atio(ccb); } } static void isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inot) { isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); } static void isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp) { int needack = 1; switch (inp->in_status) { case IN_PORT_LOGOUT: /* * XXX: Need to delete this initiator's WWN from the database * XXX: Need to send this LOGOUT upstream */ isp_prt(isp, ISP_LOGWARN, "port logout of S_ID 0x%x", inp->in_iid); break; case IN_PORT_CHANGED: isp_prt(isp, ISP_LOGWARN, "port changed for S_ID 0x%x", inp->in_iid); break; case IN_GLOBAL_LOGO: isp_del_all_wwn_entries(isp, 0); isp_prt(isp, ISP_LOGINFO, "all ports logged out"); break; case IN_ABORT_TASK: { tstate_t *tptr; - uint16_t lun; - uint32_t loopid, sid; + uint16_t nphdl, lun; + uint32_t sid; uint64_t wwn; atio_private_data_t *atp; fcportdb_t *lp; struct ccb_immediate_notify *inot = NULL; if (ISP_CAP_SCCFW(isp)) { lun = inp->in_scclun; #if __FreeBSD_version < 1000700 lun &= 0x3fff; #endif } else { lun = inp->in_lun; } if (ISP_CAP_2KLOGIN(isp)) { - loopid = ((in_fcentry_e_t *)inp)->in_iid; + nphdl = ((in_fcentry_e_t *)inp)->in_iid; } else { - loopid = inp->in_iid; + nphdl = inp->in_iid; } - if (isp_find_pdb_by_handle(isp, 0, loopid, &lp)) { + if (isp_find_pdb_by_handle(isp, 0, nphdl, &lp)) { wwn = lp->port_wwn; sid = lp->portid; } else { wwn = INI_ANY; sid = PORT_ANY; } tptr = get_lun_statep(isp, 0, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); if (tptr == NULL) { - isp_prt(isp, ISP_LOGWARN, "ABORT TASK for lun %u- but no tstate", lun); + isp_prt(isp, ISP_LOGWARN, "ABORT TASK for lun %x, but no tstate", lun); return; } } atp = isp_find_atpd(isp, tptr, inp->in_seqid); if (atp) { inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); isp_prt(isp, ISP_LOGTDEBUG0, "ABORT TASK RX_ID %x WWN 0x%016llx state %d", inp->in_seqid, (unsigned long long) wwn, atp->state); if (inot) { tptr->inot_count--; SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "%s: Take FREE INOT count now %d\n", __func__, tptr->inot_count); } else { ISP_PATH_PRT(isp, ISP_LOGWARN, tptr->owner, "out of INOT structures\n"); } } else { ISP_PATH_PRT(isp, ISP_LOGWARN, tptr->owner, "abort task RX_ID %x from wwn 0x%016llx, state unknown\n", inp->in_seqid, wwn); } if (inot) { isp_notify_t tmp, *nt = &tmp; ISP_MEMZERO(nt, sizeof (isp_notify_t)); nt->nt_hba = isp; nt->nt_tgt = FCPARAM(isp, 0)->isp_wwpn; nt->nt_wwn = wwn; - nt->nt_nphdl = loopid; + nt->nt_nphdl = nphdl; nt->nt_sid = sid; nt->nt_did = PORT_ANY; nt->nt_lun = lun; nt->nt_need_ack = 1; nt->nt_channel = 0; nt->nt_ncode = NT_ABORT_TASK; nt->nt_lreserved = inot; isp_handle_platform_target_tmf(isp, nt); needack = 0; } rls_lun_statep(isp, tptr); break; } default: break; } if (needack) { isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inp); } } static void 
isp_handle_platform_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *inot) { uint16_t nphdl; uint16_t prli_options = 0; uint32_t portid; fcportdb_t *lp; char *msg = NULL; uint8_t *ptr = (uint8_t *)inot; uint64_t wwpn = INI_NONE, wwnn = INI_NONE; nphdl = inot->in_nphdl; if (nphdl != NIL_HANDLE) { portid = inot->in_portid_hi << 16 | inot->in_portid_lo; } else { portid = PORT_ANY; } switch (inot->in_status) { case IN24XX_ELS_RCVD: { char buf[16]; int chan = ISP_GET_VPIDX(isp, inot->in_vpidx); /* * Note that we're just getting notification that an ELS was received * (possibly with some associated information sent upstream). This is * *not* the same as being given the ELS frame to accept or reject. */ switch (inot->in_status_subcode) { case LOGO: msg = "LOGO"; wwpn = be64dec(&ptr[IN24XX_PLOGI_WWPN_OFF]); isp_del_wwn_entry(isp, chan, wwpn, nphdl, portid); break; case PRLO: msg = "PRLO"; break; case PLOGI: msg = "PLOGI"; wwnn = be64dec(&ptr[IN24XX_PLOGI_WWNN_OFF]); wwpn = be64dec(&ptr[IN24XX_PLOGI_WWPN_OFF]); isp_add_wwn_entry(isp, chan, wwpn, wwnn, nphdl, portid, prli_options); break; case PRLI: msg = "PRLI"; prli_options = inot->in_prli_options; if (inot->in_flags & IN24XX_FLAG_PN_NN_VALID) wwnn = be64dec(&ptr[IN24XX_PRLI_WWNN_OFF]); wwpn = be64dec(&ptr[IN24XX_PRLI_WWPN_OFF]); isp_add_wwn_entry(isp, chan, wwpn, wwnn, nphdl, portid, prli_options); break; case PDISC: msg = "PDISC"; break; case ADISC: msg = "ADISC"; break; default: ISP_SNPRINTF(buf, sizeof (buf), "ELS 0x%x", inot->in_status_subcode); msg = buf; break; } if (inot->in_flags & IN24XX_FLAG_PUREX_IOCB) { isp_prt(isp, ISP_LOGERR, "%s Chan %d ELS N-port handle %x PortID 0x%06x marked as needing a PUREX response", msg, chan, nphdl, portid); break; } isp_prt(isp, ISP_LOGTDEBUG0, "%s Chan %d ELS N-port handle %x PortID 0x%06x RX_ID 0x%x OX_ID 0x%x", msg, chan, nphdl, portid, inot->in_rxid, inot->in_oxid); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); break; } case IN24XX_PORT_LOGOUT: msg = "PORT LOGOUT"; if (isp_find_pdb_by_handle(isp, ISP_GET_VPIDX(isp, inot->in_vpidx), nphdl, &lp)) { isp_del_wwn_entry(isp, ISP_GET_VPIDX(isp, inot->in_vpidx), lp->port_wwn, nphdl, lp->portid); } /* FALLTHROUGH */ case IN24XX_PORT_CHANGED: if (msg == NULL) msg = "PORT CHANGED"; /* FALLTHROUGH */ case IN24XX_LIP_RESET: if (msg == NULL) msg = "LIP RESET"; isp_prt(isp, ISP_LOGINFO, "Chan %d %s (sub-status 0x%x) for N-port handle 0x%x", ISP_GET_VPIDX(isp, inot->in_vpidx), msg, inot->in_status_subcode, nphdl); /* * All subcodes here are irrelevant. What is relevant * is that we need to terminate all active commands from * this initiator (known by N-port handle). */ /* XXX IMPLEMENT XXX */ isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); break; case IN24XX_SRR_RCVD: #ifdef ISP_TARGET_MODE isp_handle_srr_notify(isp, inot); break; #else if (msg == NULL) msg = "SRR RCVD"; /* FALLTHROUGH */ #endif case IN24XX_LINK_RESET: if (msg == NULL) msg = "LINK RESET"; case IN24XX_LINK_FAILED: if (msg == NULL) msg = "LINK FAILED"; default: isp_prt(isp, ISP_LOGWARN, "Chan %d %s", ISP_GET_VPIDX(isp, inot->in_vpidx), msg); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot); break; } } static int isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp) { if (isp->isp_state != ISP_RUNSTATE) { isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL); return (0); } /* * This case is for a Task Management Function, which shows up as an ATIO7 entry. 
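* The ack is synthesized as a Mode 1 CTIO7 (CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA) addressed back at the initiator's S_ID, with the N-port handle looked up from the port database when possible.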
*/ if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) { ct7_entry_t local, *cto = &local; at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved; fcportdb_t *lp; uint32_t sid; uint16_t nphdl; sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; if (isp_find_pdb_by_sid(isp, mp->nt_channel, sid, &lp)) { nphdl = lp->handle; } else { nphdl = NIL_HANDLE; } ISP_MEMZERO(&local, sizeof (local)); cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_nphdl = nphdl; cto->ct_rxid = aep->at_rxid; cto->ct_vpidx = mp->nt_channel; cto->ct_iid_lo = sid; cto->ct_iid_hi = sid >> 16; cto->ct_oxid = aep->at_hdr.ox_id; cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1; cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT; return (isp_target_put_entry(isp, &local)); } /* * This case is for a responding to an ABTS frame */ if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { /* * Overload nt_need_ack here to mark whether we've terminated the associated command. */ if (mp->nt_need_ack) { uint8_t storage[QENTRY_LEN]; ct7_entry_t *cto = (ct7_entry_t *) storage; abts_t *abts = (abts_t *)mp->nt_lreserved; ISP_MEMZERO(cto, sizeof (ct7_entry_t)); isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task); cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_nphdl = mp->nt_nphdl; cto->ct_rxid = abts->abts_rxid_task; cto->ct_iid_lo = mp->nt_sid; cto->ct_iid_hi = mp->nt_sid >> 16; cto->ct_oxid = abts->abts_ox_id; cto->ct_vpidx = mp->nt_channel; cto->ct_flags = CT7_NOACK|CT7_TERMINATE; if (isp_target_put_entry(isp, cto)) { return (ENOMEM); } mp->nt_need_ack = 0; } if (isp_acknak_abts(isp, mp->nt_lreserved, 0) == ENOMEM) { return (ENOMEM); } else { return (0); } } /* * Handle logout cases here */ if (mp->nt_ncode == NT_GLOBAL_LOGOUT) { isp_del_all_wwn_entries(isp, mp->nt_channel); } if (mp->nt_ncode == NT_LOGOUT) { if (!IS_2100(isp) && IS_FC(isp)) { isp_del_wwn_entries(isp, mp); } } /* * General purpose acknowledgement */ if (mp->nt_need_ack) { isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL); /* * Don't need to use the guaranteed send because the caller can retry */ return (isp_notify_ack(isp, mp->nt_lreserved)); } return (0); } /* * Handle task management functions. * * We show up here with a notify structure filled out. * * The nt_lreserved tag points to the original queue entry */ static void isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify) { tstate_t *tptr; fcportdb_t *lp; struct ccb_immediate_notify *inot; inot_private_data_t *ntp = NULL; lun_id_t lun; isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun 0x%x", __func__, notify->nt_ncode, notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun); /* * NB: This assignment is necessary because of tricky type conversion. * XXX: This is tricky and I need to check this. If the lun isn't known * XXX: for the task management function, it does not of necessity follow * XXX: that it should go up stream to the wildcard listener. 
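* XXX: As coded, LUN_ANY maps to CAM_LUN_WILDCARD below, and a lun with no specific listener falls back to the wildcard tstate.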
*/ if (notify->nt_lun == LUN_ANY) { lun = CAM_LUN_WILDCARD; } else { lun = notify->nt_lun; } tptr = get_lun_statep(isp, notify->nt_channel, lun); if (tptr == NULL) { tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD); if (tptr == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); goto bad; } } inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots); if (inot == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun); goto bad; } if (isp_find_pdb_by_sid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 && isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) { inot->initiator_id = CAM_TARGET_WILDCARD; } else { inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp); } inot->seq_id = notify->nt_tagval; inot->tag_id = notify->nt_tagval >> 32; switch (notify->nt_ncode) { case NT_ABORT_TASK: isp_target_mark_aborted_early(isp, tptr, inot->tag_id); inot->arg = MSG_ABORT_TASK; break; case NT_ABORT_TASK_SET: isp_target_mark_aborted_early(isp, tptr, TAG_ANY); inot->arg = MSG_ABORT_TASK_SET; break; case NT_CLEAR_ACA: inot->arg = MSG_CLEAR_ACA; break; case NT_CLEAR_TASK_SET: inot->arg = MSG_CLEAR_TASK_SET; break; case NT_LUN_RESET: inot->arg = MSG_LOGICAL_UNIT_RESET; break; case NT_TARGET_RESET: inot->arg = MSG_TARGET_RESET; break; case NT_QUERY_TASK_SET: inot->arg = MSG_QUERY_TASK_SET; break; case NT_QUERY_ASYNC_EVENT: inot->arg = MSG_QUERY_ASYNC_EVENT; break; default: isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun); goto bad; } ntp = isp_get_ntpd(isp, tptr); if (ntp == NULL) { isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__); goto bad; } ISP_MEMCPY(&ntp->rd.nt, notify, sizeof (isp_notify_t)); if (notify->nt_lreserved) { ISP_MEMCPY(&ntp->rd.data, notify->nt_lreserved, QENTRY_LEN); ntp->rd.nt.nt_lreserved = &ntp->rd.data; } ntp->rd.seq_id = notify->nt_tagval; ntp->rd.tag_id = notify->nt_tagval >> 32; tptr->inot_count--; SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle); rls_lun_statep(isp, tptr); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "%s: Take FREE INOT count now %d\n", __func__, tptr->inot_count); inot->ccb_h.status = CAM_MESSAGE_RECV; xpt_done((union ccb *)inot); return; bad: if (tptr) { rls_lun_statep(isp, tptr); } if (notify->nt_need_ack && notify->nt_lreserved) { if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) { if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) { isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK"); } } else { isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved); } } } /* * Find the associated private data and mark it as dead so * we don't try to work on it any further. 
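* An aborted command can be in either of two places, so isp_target_mark_aborted_early() below sweeps both the pending restart queue and the live atpool entries.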
*/ static void isp_target_mark_aborted(ispsoftc_t *isp, union ccb *ccb) { tstate_t *tptr; atio_private_data_t *atp; union ccb *accb = ccb->cab.abort_ccb; tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb)); if (tptr == NULL) { tptr = get_lun_statep(isp, XS_CHANNEL(accb), CAM_LUN_WILDCARD); if (tptr == NULL) { ccb->ccb_h.status = CAM_REQ_INVALID; return; } } atp = isp_find_atpd(isp, tptr, accb->atio.tag_id); if (atp == NULL) { ccb->ccb_h.status = CAM_REQ_INVALID; } else { atp->dead = 1; ccb->ccb_h.status = CAM_REQ_CMP; } rls_lun_statep(isp, tptr); } static void isp_target_mark_aborted_early(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag_id) { atio_private_data_t *atp; inot_private_data_t *restart_queue = tptr->restart_queue; /* * First, clean any commands pending restart */ tptr->restart_queue = NULL; while (restart_queue) { uint32_t this_tag_id; inot_private_data_t *ntp = restart_queue; restart_queue = ntp->rd.nt.nt_hba; if (IS_24XX(isp)) { this_tag_id = ((at7_entry_t *)ntp->rd.data)->at_rxid; } else { this_tag_id = ((at2_entry_t *)ntp->rd.data)->at_rxid; } if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) { isp_put_ntpd(isp, tptr, ntp); } else { ntp->rd.nt.nt_hba = tptr->restart_queue; tptr->restart_queue = ntp; } } /* * Now mark other ones dead as well. */ for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) { if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id) { atp->dead = 1; } } } #endif static void isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) { struct cam_sim *sim; int bus, tgt; ispsoftc_t *isp; sim = (struct cam_sim *)cbarg; isp = (ispsoftc_t *) cam_sim_softc(sim); bus = cam_sim_bus(sim); tgt = xpt_path_target_id(path); switch (code) { case AC_LOST_DEVICE: if (IS_SCSI(isp)) { uint16_t oflags, nflags; sdparam *sdp = SDPARAM(isp, bus); if (tgt >= 0) { nflags = sdp->isp_devparam[tgt].nvrm_flags; #ifndef ISP_TARGET_MODE nflags &= DPARM_SAFE_DFLT; if (isp->isp_loaded_fw) { nflags |= DPARM_NARROW | DPARM_ASYNC; } #else nflags = DPARM_DEFAULT; #endif oflags = sdp->isp_devparam[tgt].goal_flags; sdp->isp_devparam[tgt].goal_flags = nflags; sdp->isp_devparam[tgt].dev_update = 1; sdp->update = 1; (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); sdp->isp_devparam[tgt].goal_flags = oflags; } } break; default: isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code); break; } } static void isp_poll(struct cam_sim *sim) { ispsoftc_t *isp = cam_sim_softc(sim); uint16_t isr, sema, info; if (ISP_READ_ISR(isp, &isr, &sema, &info)) isp_intr(isp, isr, sema, info); } static void isp_watchdog(void *arg) { struct ccb_scsiio *xs = arg; ispsoftc_t *isp; uint32_t ohandle = ISP_HANDLE_FREE, handle; isp = XS_ISP(xs); handle = isp_find_handle(isp, xs); /* * Hand crank the interrupt code just to be sure the command isn't stuck somewhere. */ if (handle != ISP_HANDLE_FREE) { uint16_t isr, sema, info; if (ISP_READ_ISR(isp, &isr, &sema, &info) != 0) isp_intr(isp, isr, sema, info); ohandle = handle; handle = isp_find_handle(isp, xs); } if (handle != ISP_HANDLE_FREE) { /* * Try and make sure the command is really dead before * we release the handle (and DMA resources) for reuse. * * If we are successful in aborting the command then * we're done here because we'll get the command returned * back separately. */ if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) { return; } /* * Note that after calling the above, the command may in * fact have been completed. 
*/ xs = isp_find_xs(isp, handle); /* * If the command no longer exists, then we won't * be able to find the xs again with this handle. */ if (xs == NULL) { return; } /* * After this point, the command is really dead. */ if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, handle); } isp_destroy_handle(isp, handle); isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle); xs->ccb_h.status &= ~CAM_STATUS_MASK; xs->ccb_h.status |= CAM_CMD_TIMEOUT; isp_prt_endcmd(isp, xs); isp_done(xs); } else { if (ohandle != ISP_HANDLE_FREE) { isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle); } else { isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__); } } } static void isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) { union ccb *ccb; struct isp_fc *fc = ISP_FC_PC(isp, chan); /* * Allocate a CCB, create a wildcard path for this target and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt) { struct cam_path *tp; struct isp_fc *fc = ISP_FC_PC(isp, chan); if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) { xpt_async(AC_LOST_DEVICE, tp, NULL); xpt_free_path(tp); } } /* * Gone Device Timer Function- when we have decided that a device has gone * away, we wait a specific period of time prior to telling the OS it has * gone away. * * This timer function fires once a second and then scans the port database * for devices that are marked dead but still have a virtual target assigned. * We decrement a counter for that port database entry, and when it hits zero, * we tell the OS the device has gone away. 
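* 'Telling the OS' takes two forms in isp_gdt_task() below: a lost-device notification (isp_make_gone) if the entry was a target, and an AC_CONTRACT device-changed notification if it was an initiator; the slot then drops back to FC_PORTDB_STATE_NIL.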
*/ static void isp_gdt(void *arg) { struct isp_fc *fc = arg; taskqueue_enqueue(taskqueue_thread, &fc->gtask); } static void isp_gdt_task(void *arg, int pending) { struct isp_fc *fc = arg; ispsoftc_t *isp = fc->isp; int chan = fc - isp->isp_osinfo.pc.fc; fcportdb_t *lp; struct ac_contract ac; struct ac_device_changed *adc; int dbidx, more_to_do = 0; ISP_LOCK(isp); isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan); for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &FCPARAM(isp, chan)->portdb[dbidx]; if (lp->state != FC_PORTDB_STATE_ZOMBIE) { continue; } if (lp->gone_timer != 0) { lp->gone_timer -= 1; more_to_do++; continue; } isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout"); if (lp->is_target) { lp->is_target = 0; isp_make_gone(isp, lp, chan, dbidx); } if (lp->is_initiator) { lp->is_initiator = 0; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = dbidx; adc->arrived = 0; xpt_async(AC_CONTRACT, fc->path, &ac); } lp->state = FC_PORTDB_STATE_NIL; } if (fc->ready) { if (more_to_do) { callout_reset(&fc->gdt, hz, isp_gdt, fc); } else { callout_deactivate(&fc->gdt); isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime); } } ISP_UNLOCK(isp); } /* * Loop Down Timer Function- when the loop goes down, a timer is started, * and after it expires we come here and take all probational devices that * the OS knows about and then tell the OS that they've gone away. * * We don't clear the devices out of our port database because, when the loop * comes back up, we have to do some actual cleanup with the chip at that * point (implicit PLOGO, e.g., to get the chip's port database state right). */ static void isp_ldt(void *arg) { struct isp_fc *fc = arg; taskqueue_enqueue(taskqueue_thread, &fc->ltask); } static void isp_ldt_task(void *arg, int pending) { struct isp_fc *fc = arg; ispsoftc_t *isp = fc->isp; int chan = fc - isp->isp_osinfo.pc.fc; fcportdb_t *lp; struct ac_contract ac; struct ac_device_changed *adc; int dbidx, i; ISP_LOCK(isp); isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop Down Timer expired @ %lu", chan, (unsigned long) time_uptime); callout_deactivate(&fc->ldt); /* * Notify the OS about all targets that we now consider to have departed. */ for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) { lp = &FCPARAM(isp, chan)->portdb[dbidx]; if (lp->state == FC_PORTDB_STATE_NIL) continue; /* * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST! */ for (i = 0; i < isp->isp_maxcmds; i++) { struct ccb_scsiio *xs; if (!ISP_VALID_HANDLE(isp, isp->isp_xflist[i].handle)) { continue; } if ((xs = isp->isp_xflist[i].cmd) == NULL) { continue; } if (dbidx != XS_TGT(xs)) { continue; } isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout", isp->isp_xflist[i].handle, chan, XS_TGT(xs), (uintmax_t)XS_LUN(xs)); } isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout"); if (lp->is_target) { lp->is_target = 0; isp_make_gone(isp, lp, chan, dbidx); } if (lp->is_initiator) { lp->is_initiator = 0; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = dbidx; adc->arrived = 0; xpt_async(AC_CONTRACT, fc->path, &ac); } } isp_unfreeze_loopdown(isp, chan); /* * The loop down timer has expired. Wake up the kthread * to notice that fact (or make it false).
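* Pushing loop_down_time past loop_down_limit below makes the limit test in isp_kthread() (and the CMD_RQLATER handling in isp_action()) fire immediately, so held commands are failed with selection timeout rather than requeued forever.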
*/ fc->loop_dead = 1; fc->loop_down_time = fc->loop_down_limit+1; wakeup(fc); ISP_UNLOCK(isp); } static void isp_kthread(void *arg) { struct isp_fc *fc = arg; ispsoftc_t *isp = fc->isp; int chan = fc - isp->isp_osinfo.pc.fc; int slp = 0; mtx_lock(&isp->isp_osinfo.lock); while (isp->isp_osinfo.is_exiting == 0) { int lb, lim; isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d checking FC state", __func__, chan); lb = isp_fc_runstate(isp, chan, 250000); /* * Our action is different based upon whether we're supporting * Initiator mode or not. If we are, we might freeze the simq * when loop is down and set all sorts of different delays to * check again. * * If not, we simply just wait for loop to come up. */ if (lb && (FCPARAM(isp, chan)->role & ISP_ROLE_INITIATOR)) { /* * Increment loop down time by the last sleep interval */ fc->loop_down_time += slp; if (lb < 0) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC loop not up (down count %d)", __func__, chan, fc->loop_down_time); } else { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC got to %d (down count %d)", __func__, chan, lb, fc->loop_down_time); } /* * If we've never seen loop up and we've waited longer * than quickboot time, or we've seen loop up but we've * waited longer than loop_down_limit, give up and go * to sleep until loop comes up. */ if (FCPARAM(isp, chan)->loop_seen_once == 0) { lim = isp_quickboot_time; } else { lim = fc->loop_down_limit; } if (fc->loop_down_time >= lim) { isp_freeze_loopdown(isp, chan, "loop limit hit"); slp = 0; } else if (fc->loop_down_time < 10) { slp = 1; } else if (fc->loop_down_time < 30) { slp = 5; } else if (fc->loop_down_time < 60) { slp = 10; } else if (fc->loop_down_time < 120) { slp = 20; } else { slp = 30; } } else if (lb) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC Loop Down", __func__, chan); fc->loop_down_time += slp; if (fc->loop_down_time > 300) slp = 0; else slp = 60; } else { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC state OK", __func__, chan); fc->loop_down_time = 0; slp = 0; } /* * If this is past the first loop up or the loop is dead and if we'd frozen the simq, unfreeze it * now so that CAM can start sending us commands. * * If the FC state isn't okay yet, they'll hit that in isp_start which will freeze the queue again * or kill the commands, as appropriate. */ if (FCPARAM(isp, chan)->loop_seen_once || fc->loop_dead) { isp_unfreeze_loopdown(isp, chan); } isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d sleep time %d", __func__, chan, slp); msleep(fc, &isp->isp_osinfo.lock, PRIBIO, "ispf", slp * hz); /* * If slp is zero, we're waking up for the first time after * things have been okay. In this case, we set a deferral state * for all commands and delay hysteresis seconds before starting * the FC state evaluation. This gives the loop/fabric a chance * to settle. 
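* The sleep interval itself backs off with accumulated downtime (1s under 10s of downtime, then 5s, 10s, 20s and finally 30s steps), so the polling gets cheaper the longer the loop stays down.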
*/ if (slp == 0 && fc->hysteresis) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "%s: Chan %d sleep hysteresis ticks %d", __func__, chan, fc->hysteresis * hz); mtx_unlock(&isp->isp_osinfo.lock); pause("ispt", fc->hysteresis * hz); mtx_lock(&isp->isp_osinfo.lock); } } fc->num_threads -= 1; mtx_unlock(&isp->isp_osinfo.lock); kthread_exit(); } static void isp_action(struct cam_sim *sim, union ccb *ccb) { int bus, tgt, ts, error, lim; ispsoftc_t *isp; struct ccb_trans_settings *cts; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n")); isp = (ispsoftc_t *)cam_sim_softc(sim); mtx_assert(&isp->isp_lock, MA_OWNED); isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); ISP_PCMD(ccb) = NULL; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ bus = XS_CHANNEL(ccb); /* * Do a couple of preliminary checks... */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status = CAM_REQ_INVALID; isp_done((struct ccb_scsiio *) ccb); break; } } ccb->csio.req_map = NULL; #ifdef DIAGNOSTIC if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) { xpt_print(ccb->ccb_h.path, "invalid target\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } else if (ISP_MAX_LUNS(isp) > 0 && ccb->ccb_h.target_lun >= ISP_MAX_LUNS(isp)) { xpt_print(ccb->ccb_h.path, "invalid lun\n"); ccb->ccb_h.status = CAM_PATH_INVALID; } if (ccb->ccb_h.status == CAM_PATH_INVALID) { xpt_done(ccb); break; } #endif ccb->csio.scsi_status = SCSI_STATUS_OK; if (isp_get_pcmd(isp, ccb)) { isp_prt(isp, ISP_LOGWARN, "out of PCMDs"); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); break; } error = isp_start((XS_T *) ccb); switch (error) { case CMD_QUEUED: ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) { break; } ts = ccb->ccb_h.timeout; if (ts == CAM_TIME_DEFAULT) { ts = 60*1000; } ts = isp_mstohz(ts); callout_reset(&PISP_PCMD(ccb)->wdog, ts, isp_watchdog, ccb); break; case CMD_RQLATER: /* * We get this result for FC devices if the loop state isn't ready yet * or if the device in question has gone zombie on us. * * If we've never seen Loop UP at all, we requeue this request and wait * for the initial loop up delay to expire. */ lim = ISP_FC_PC(isp, bus)->loop_down_limit; if (FCPARAM(isp, bus)->loop_seen_once == 0 || ISP_FC_PC(isp, bus)->loop_down_time >= lim) { if (FCPARAM(isp, bus)->loop_seen_once == 0) { isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx loop not seen yet @ %lu", XS_TGT(ccb), (uintmax_t)XS_LUN(ccb), (unsigned long) time_uptime); } else { isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx downtime (%d) > lim (%d)", XS_TGT(ccb), (uintmax_t)XS_LUN(ccb), ISP_FC_PC(isp, bus)->loop_down_time, lim); } ccb->ccb_h.status = CAM_SEL_TIMEOUT; isp_done((struct ccb_scsiio *) ccb); break; } isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later", XS_TGT(ccb), (uintmax_t)XS_LUN(ccb)); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; isp_free_pcmd(isp, ccb); xpt_done(ccb); break; case CMD_EAGAIN: isp_free_pcmd(isp, ccb); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0); ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_done(ccb); break; case CMD_COMPLETE: isp_done((struct ccb_scsiio *) ccb); break; default: isp_prt(isp, ISP_LOGERR, "What's this? 
0x%x at %d in file %s", error, __LINE__, __FILE__); ccb->ccb_h.status = CAM_REQUEUE_REQ; isp_free_pcmd(isp, ccb); xpt_done(ccb); } break; #ifdef ISP_TARGET_MODE case XPT_EN_LUN: /* Enable/Disable LUN as a target */ if (ccb->cel.enable) { isp_enable_lun(isp, ccb); } else { isp_disable_lun(isp, ccb); } break; case XPT_IMMED_NOTIFY: case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); if (tptr == NULL) { tptr = get_lun_statep(isp, XS_CHANNEL(ccb), CAM_LUN_WILDCARD); } if (tptr == NULL) { const char *str; uint32_t tag; if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { str = "XPT_IMMEDIATE_NOTIFY"; tag = ccb->cin1.seq_id; } else { tag = ccb->atio.tag_id; str = "XPT_ACCEPT_TARGET_IO"; } ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] no state pointer found for %s\n", __func__, tag, str); dump_tstates(isp, XS_CHANNEL(ccb)); ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; } ccb->ccb_h.spriv_field0 = 0; ccb->ccb_h.spriv_ptr1 = isp; if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { if (ccb->atio.tag_id) { atio_private_data_t *atp = isp_find_atpd(isp, tptr, ccb->atio.tag_id); if (atp) { isp_put_atpd(isp, tptr, atp); } } tptr->atio_count++; SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE ATIO (tag id 0x%x), count now %d\n", ccb->atio.tag_id, tptr->atio_count); ccb->atio.tag_id = 0; } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { if (ccb->cin1.tag_id) { inot_private_data_t *ntp = isp_find_ntpd(isp, tptr, ccb->cin1.tag_id, ccb->cin1.seq_id); if (ntp) { isp_put_ntpd(isp, tptr, ntp); } } tptr->inot_count++; SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE INOT, (seq id 0x%x) count now %d\n", ccb->cin1.seq_id, tptr->inot_count); ccb->cin1.seq_id = 0; } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { tptr->inot_count++; SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle); ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path, "Put FREE INOT, (seq id 0x%x) count now %d\n", ccb->cin1.seq_id, tptr->inot_count); ccb->cin1.seq_id = 0; } rls_lun_statep(isp, tptr); ccb->ccb_h.status = CAM_REQ_INPROG; break; } case XPT_NOTIFY_ACK: ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */ { tstate_t *tptr; inot_private_data_t *ntp; /* * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb * XXX: matches that for the immediate notify, we have to *search* for the notify structure */ /* * All the relevant path information is in the associated immediate notify */ ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); ntp = get_ntp_from_tagdata(isp, ccb->cna2.tag_id, ccb->cna2.seq_id, &tptr); if (ntp == NULL) { ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); break; } if (isp_handle_platform_target_notify_ack(isp, &ntp->rd.nt)) { rls_lun_statep(isp, tptr); cam_freeze_devq(ccb->ccb_h.path); cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; } 
isp_put_ntpd(isp, tptr, ntp); rls_lun_statep(isp, tptr); ccb->ccb_h.status = CAM_REQ_CMP; ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id); xpt_done(ccb); break; } case XPT_CONT_TARGET_IO: isp_target_start_ctio(isp, ccb, FROM_CAM); break; #endif case XPT_RESET_DEV: /* BDR the specified SCSI device */ bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); tgt = ccb->ccb_h.target_id; tgt |= (bus << 16); error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt); if (error) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else { /* * If we have a FC device, reset the Command * Reference Number, because the target will expect * that we re-start the CRN at 1 after a reset. */ if (IS_FC(isp)) isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); break; case XPT_ABORT: /* Abort the specified CCB */ { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { #ifdef ISP_TARGET_MODE case XPT_ACCEPT_TARGET_IO: isp_target_mark_aborted(isp, ccb); break; #endif case XPT_SCSI_IO: error = isp_control(isp, ISPCTL_ABORT_CMD, accb); if (error) { ccb->ccb_h.status = CAM_UA_ABORT; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } /* * This is not a queued CCB, so the caller expects it to be * complete when control is returned. */ break; } #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ cts = &ccb->cts; if (!IS_CURRENT_SETTINGS(cts)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } tgt = cts->ccb_h.target_id; bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); if (IS_SCSI(isp)) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; sdparam *sdp = SDPARAM(isp, bus); uint16_t *dptr; if (spi->valid == 0 && scsi->valid == 0) { ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } /* * We always update (internally) from goal_flags * so any request to change settings just gets * vectored to that location. 
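*/

/*
 * Editor's note: every settable parameter below follows the same shape:
 * a valid bit gates the request, and a flag bit selects set versus clear
 * on the per-target goal_flags word. A hedged restatement of that shape
 * (isp_vector_goal_bit_sketch() is a hypothetical helper):
 */
static void
isp_vector_goal_bit_sketch(uint16_t *dptr, uint16_t dparm_bit, int enable)
{

	if (enable)
		*dptr |= dparm_bit;	/* e.g. DPARM_DISC, DPARM_TQING */
	else
		*dptr &= ~dparm_bit;
}

/*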
*/ dptr = &sdp->isp_devparam[tgt].goal_flags; if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) *dptr |= DPARM_DISC; else *dptr &= ~DPARM_DISC; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) *dptr |= DPARM_TQING; else *dptr &= ~DPARM_TQING; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) *dptr |= DPARM_WIDE; else *dptr &= ~DPARM_WIDE; } /* * XXX: FIX ME */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && (spi->valid & CTS_SPI_VALID_SYNC_RATE) && (spi->sync_period && spi->sync_offset)) { *dptr |= DPARM_SYNC; /* * XXX: CHECK FOR LEGALITY */ sdp->isp_devparam[tgt].goal_period = spi->sync_period; sdp->isp_devparam[tgt].goal_offset = spi->sync_offset; } else { *dptr &= ~DPARM_SYNC; } isp_prt(isp, ISP_LOGDEBUG0, "SET (%d.%d.%jx) to flags %x off %x per %x", bus, tgt, (uintmax_t)cts->ccb_h.target_lun, sdp->isp_devparam[tgt].goal_flags, sdp->isp_devparam[tgt].goal_offset, sdp->isp_devparam[tgt].goal_period); sdp->isp_devparam[tgt].dev_update = 1; sdp->update = 1; } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; tgt = cts->ccb_h.target_id; bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); if (IS_FC(isp)) { fcparam *fcp = FCPARAM(isp, bus); struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_FC; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 100000; fc->bitrate *= fcp->isp_gbspeed; if (tgt < MAX_FC_TARG) { fcportdb_t *lp = &fcp->portdb[tgt]; fc->wwnn = lp->node_wwn; fc->wwpn = lp->port_wwn; fc->port = lp->portid; fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; } } else { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; sdparam *sdp = SDPARAM(isp, bus); uint16_t dval, pval, oval; if (IS_CURRENT_SETTINGS(cts)) { sdp->isp_devparam[tgt].dev_refresh = 1; sdp->update = 1; (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus); dval = sdp->isp_devparam[tgt].actv_flags; oval = sdp->isp_devparam[tgt].actv_offset; pval = sdp->isp_devparam[tgt].actv_period; } else { dval = sdp->isp_devparam[tgt].nvrm_flags; oval = sdp->isp_devparam[tgt].nvrm_offset; pval = sdp->isp_devparam[tgt].nvrm_period; } cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = 0; scsi->valid = 0; spi->flags = 0; scsi->flags = 0; if (dval & DPARM_DISC) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } if ((dval & DPARM_SYNC) && oval && pval) { spi->sync_offset = oval; spi->sync_period = pval; } else { spi->sync_offset = 0; spi->sync_period = 0; } spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if (dval & DPARM_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; if (dval & DPARM_TQING) { scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->valid |= CTS_SPI_VALID_DISC; } isp_prt(isp, ISP_LOGDEBUG0, "GET %s (%d.%d.%jx) to flags %x off %x per %x", IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM", bus, tgt, (uintmax_t)cts->ccb_h.target_lun, dval, oval, pval); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_RESET_BUS: /* Reset the specified bus */ bus = cam_sim_bus(sim); error = isp_control(isp, ISPCTL_RESET_BUS, bus); if (error) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); break; } if (bootverbose) { xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus); } if (IS_FC(isp)) { xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0); } else { xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, 0); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; case XPT_TERM_IO: /* Terminate the I/O process */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_SET_SIM_KNOB: /* Set SIM knobs */ { struct ccb_sim_knob *kp = &ccb->knob; fcparam *fcp; if (!IS_FC(isp)) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } bus = cam_sim_bus(xpt_path_sim(kp->ccb_h.path)); fcp = FCPARAM(isp, bus); if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) { fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn; fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn; isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn); } ccb->ccb_h.status = CAM_REQ_CMP; if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) { int rchange = 0; int newrole = 0; switch (kp->xport_specific.fc.role) { case KNOB_ROLE_NONE: if (fcp->role != ISP_ROLE_NONE) { rchange = 1; newrole = ISP_ROLE_NONE; } break; case KNOB_ROLE_TARGET: if (fcp->role != ISP_ROLE_TARGET) { rchange = 1; newrole = ISP_ROLE_TARGET; } break; case KNOB_ROLE_INITIATOR: if (fcp->role != ISP_ROLE_INITIATOR) { rchange = 1; newrole = ISP_ROLE_INITIATOR; } break; case KNOB_ROLE_BOTH: if (fcp->role != ISP_ROLE_BOTH) { rchange = 1; newrole = ISP_ROLE_BOTH; } break; } if (rchange) { ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role on from %d to %d\n", fcp->role, newrole); #ifdef ISP_TARGET_MODE ISP_SET_PC(isp, bus, tm_enabled, 0); ISP_SET_PC(isp, bus, tm_luns_enabled, 0); #endif if (isp_control(isp, ISPCTL_CHANGE_ROLE, bus, newrole) != 0) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); break; } #ifdef ISP_TARGET_MODE if (newrole == ISP_ROLE_TARGET || newrole == ISP_ROLE_BOTH) { /* * Give the new role a chance to complain and settle */ msleep(isp, &isp->isp_lock, PRIBIO, "taking a breather", 2); ccb->ccb_h.status = isp_enable_deferred_luns(isp, bus); } #endif } } xpt_done(ccb); break; } case XPT_GET_SIM_KNOB: /* Get SIM knobs */ { struct ccb_sim_knob *kp = &ccb->knob; if (IS_FC(isp)) { fcparam *fcp; bus = cam_sim_bus(xpt_path_sim(kp->ccb_h.path)); fcp = FCPARAM(isp, bus); kp->xport_specific.fc.wwnn = fcp->isp_wwnn; kp->xport_specific.fc.wwpn = fcp->isp_wwpn; switch (fcp->role) { case ISP_ROLE_NONE: kp->xport_specific.fc.role = KNOB_ROLE_NONE; break; case ISP_ROLE_TARGET: kp->xport_specific.fc.role = KNOB_ROLE_TARGET; break; case ISP_ROLE_INITIATOR: kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; break; case ISP_ROLE_BOTH: kp->xport_specific.fc.role = KNOB_ROLE_BOTH; break; } kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_REQ_INVALID; } xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; #ifdef ISP_TARGET_MODE cpi->target_sprt = PIT_PROCESSOR | 
PIT_DISCONNECT | PIT_TERM_IO; #else cpi->target_sprt = 0; #endif cpi->hba_eng_cnt = 0; cpi->max_target = ISP_MAX_TARGETS(isp) - 1; cpi->max_lun = ISP_MAX_LUNS(isp) == 0 ? 255 : ISP_MAX_LUNS(isp) - 1; cpi->bus_id = cam_sim_bus(sim); if (isp->isp_osinfo.sixtyfourbit) cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE; else cpi->maxio = (ISP_NSEG_MAX - 1) * PAGE_SIZE; bus = cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); if (IS_FC(isp)) { fcparam *fcp = FCPARAM(isp, bus); cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; #if __FreeBSD_version >= 1000700 cpi->hba_misc |= PIM_EXTLUNS; #endif #if __FreeBSD_version >= 1000039 cpi->hba_misc |= PIM_NOSCAN; #endif /* * Because our loop ID can shift from time to time, * make our initiator ID out of range of our bus. */ cpi->initiator_id = cpi->max_target + 1; /* * Set base transfer capabilities for Fibre Channel, for this HBA. */ if (IS_25XX(isp)) { cpi->base_transfer_speed = 8000000; } else if (IS_24XX(isp)) { cpi->base_transfer_speed = 4000000; } else if (IS_23XX(isp)) { cpi->base_transfer_speed = 2000000; } else { cpi->base_transfer_speed = 1000000; } cpi->hba_inquiry = PI_TAG_ABLE; cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->xport_specific.fc.wwnn = fcp->isp_wwnn; cpi->xport_specific.fc.wwpn = fcp->isp_wwpn; cpi->xport_specific.fc.port = fcp->isp_portid; cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000; } else { sdparam *sdp = SDPARAM(isp, bus); cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->hba_misc = PIM_UNMAPPED; cpi->initiator_id = sdp->isp_initiator_id; cpi->base_transfer_speed = 3300; cpi->transport = XPORT_SPI; cpi->transport_version = 2; } cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) void isp_done(XS_T *sccb) { ispsoftc_t *isp = XS_ISP(sccb); uint32_t status; if (XS_NOERR(sccb)) XS_SETERR(sccb, CAM_REQ_CMP); if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) { sccb->ccb_h.status &= ~CAM_STATUS_MASK; if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; } else { sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; } } sccb->ccb_h.status &= ~CAM_SIM_QUEUED; status = sccb->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP) { if (status != CAM_SEL_TIMEOUT) isp_prt(isp, ISP_LOGDEBUG0, "target %d lun %jx CAM status 0x%x SCSI status 0x%x", XS_TGT(sccb), (uintmax_t)XS_LUN(sccb), sccb->ccb_h.status, sccb->scsi_status); else if ((IS_FC(isp)) && (XS_TGT(sccb) < MAX_FC_TARG)) { fcparam *fcp; fcp = FCPARAM(isp, XS_CHANNEL(sccb)); fcp->portdb[XS_TGT(sccb)].is_target = 0; } if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { sccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(sccb->ccb_h.path, 1); } } if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { xpt_print(sccb->ccb_h.path, "cam completion status 0x%x\n", sccb->ccb_h.status); } if (ISP_PCMD(sccb)) { if (callout_active(&PISP_PCMD(sccb)->wdog)) callout_stop(&PISP_PCMD(sccb)->wdog); isp_free_pcmd(isp, (union ccb *) sccb); } xpt_done((union ccb *) sccb); } void isp_async(ispsoftc_t *isp, ispasync_t cmd, 
...) { int bus; static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s"; char buf[64]; char *msg = NULL; target_id_t tgt; fcportdb_t *lp; struct isp_fc *fc; struct cam_path *tmppath; struct ac_contract ac; struct ac_device_changed *adc; va_list ap; switch (cmd) { case ISPASYNC_NEW_TGT_PARAMS: { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; int flags, tgt; sdparam *sdp; struct ccb_trans_settings cts; memset(&cts, 0, sizeof (struct ccb_trans_settings)); va_start(ap, cmd); bus = va_arg(ap, int); tgt = va_arg(ap, int); va_end(ap); sdp = SDPARAM(isp, bus); if (xpt_create_path(&tmppath, NULL, cam_sim_path(ISP_SPI_PC(isp, bus)->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { isp_prt(isp, ISP_LOGWARN, "isp_async cannot make temp path for %d.%d", tgt, bus); break; } flags = sdp->isp_devparam[tgt].actv_flags; cts.type = CTS_TYPE_CURRENT_SETTINGS; cts.protocol = PROTO_SCSI; cts.transport = XPORT_SPI; scsi = &cts.proto_specific.scsi; spi = &cts.xport_specific.spi; if (flags & DPARM_TQING) { scsi->valid |= CTS_SCSI_VALID_TQ; scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } if (flags & DPARM_DISC) { spi->valid |= CTS_SPI_VALID_DISC; spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } spi->flags |= CTS_SPI_VALID_BUS_WIDTH; if (flags & DPARM_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (flags & DPARM_SYNC) { spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->sync_period = sdp->isp_devparam[tgt].actv_period; spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; } isp_prt(isp, ISP_LOGDEBUG2, "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", bus, tgt, sdp->isp_devparam[tgt].actv_period, sdp->isp_devparam[tgt].actv_offset, flags); xpt_setup_ccb(&cts.ccb_h, tmppath, 1); xpt_async(AC_TRANSFER_NEG, tmppath, &cts); xpt_free_path(tmppath); break; } case ISPASYNC_BUS_RESET: { va_start(ap, cmd); bus = va_arg(ap, int); va_end(ap); isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", bus); if (IS_FC(isp)) { xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, NULL); } else { xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, NULL); } break; } case ISPASYNC_LIP: if (msg == NULL) msg = "LIP Received"; /* FALLTHROUGH */ case ISPASYNC_LOOP_RESET: if (msg == NULL) msg = "LOOP Reset"; /* FALLTHROUGH */ case ISPASYNC_LOOP_DOWN: { if (msg == NULL) msg = "LOOP Down"; va_start(ap, cmd); bus = va_arg(ap, int); va_end(ap); FCPARAM(isp, bus)->isp_linkstate = 0; fc = ISP_FC_PC(isp, bus); if (cmd == ISPASYNC_LOOP_DOWN && fc->ready) { /* * We don't do any simq freezing if we are only in target mode */ if (FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) { if (fc->path) { isp_freeze_loopdown(isp, bus, msg); } } if (!callout_active(&fc->ldt)) { callout_reset(&fc->ldt, fc->loop_down_limit * hz, isp_ldt, fc); isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Starting Loop Down Timer @ %lu", (unsigned long) time_uptime); } } isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0); isp_prt(isp, ISP_LOGINFO, "Chan %d: %s", bus, msg); break; } case ISPASYNC_LOOP_UP: va_start(ap, cmd); bus = va_arg(ap, int); va_end(ap); fc = ISP_FC_PC(isp, bus); /* * Now we just note that Loop has come up. We don't * actually do anything because we're waiting for a * Change Notify before activating the FC cleanup * thread to look at the state of the loop again. 
*/ FCPARAM(isp, bus)->isp_linkstate = 1; fc->loop_dead = 0; fc->loop_down_time = 0; isp_prt(isp, ISP_LOGINFO, "Chan %d Loop UP", bus); break; case ISPASYNC_DEV_ARRIVED: va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived"); if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) { lp->is_target = 1; isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); isp_make_here(isp, lp, bus, tgt); } if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) { lp->is_initiator = 1; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = tgt; adc->arrived = 1; xpt_async(AC_CONTRACT, fc->path, &ac); } break; case ISPASYNC_DEV_CHANGED: va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3); isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed"); changed: if (lp->is_target != ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) { lp->is_target = !lp->is_target; if (lp->is_target) { isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); isp_make_here(isp, lp, bus, tgt); } else { isp_make_gone(isp, lp, bus, tgt); isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1); } } if (lp->is_initiator != ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) && (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) { lp->is_initiator = !lp->is_initiator; ac.contract_number = AC_CONTRACT_DEV_CHG; adc = (struct ac_device_changed *) ac.contract_data; adc->wwpn = lp->port_wwn; adc->port = lp->portid; adc->target = tgt; adc->arrived = lp->is_initiator; xpt_async(AC_CONTRACT, fc->path, &ac); } break; case ISPASYNC_DEV_STAYED: va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed"); goto changed; case ISPASYNC_DEV_GONE: va_start(ap, cmd); bus = va_arg(ap, int); lp = va_arg(ap, fcportdb_t *); va_end(ap); fc = ISP_FC_PC(isp, bus); tgt = FC_PORTDB_TGT(isp, bus, lp); /* * If this has a virtual target or initiator set the isp_gdt * timer running on it to delay its departure. 
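*/

/*
 * Editor's note: the zombie handling below amounts to a countdown
 * reaper: mark the port ZOMBIE, load gone_timer, and let the periodic
 * isp_gdt callout reap entries whose countdown expires. A simplified
 * sketch of that sweep (hypothetical helper; the real isp_gdt callout
 * is more involved):
 */
static void
isp_gdt_sweep_sketch(fcportdb_t *pdb, int nports)
{
	int i;

	for (i = 0; i < nports; i++) {
		if (pdb[i].state != FC_PORTDB_STATE_ZOMBIE)
			continue;
		if (pdb[i].gone_timer > 0 && --pdb[i].gone_timer == 0)
			pdb[i].state = FC_PORTDB_STATE_NIL;	/* now really gone */
	}
}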
*/ isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); if (lp->is_target || lp->is_initiator) { lp->state = FC_PORTDB_STATE_ZOMBIE; lp->gone_timer = fc->gone_device_time; isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie"); if (fc->ready && !callout_active(&fc->gdt)) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime); callout_reset(&fc->gdt, hz, isp_gdt, fc); } break; } isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone"); break; case ISPASYNC_CHANGE_NOTIFY: { char *msg; int evt, nphdl, nlstate, reason; va_start(ap, cmd); bus = va_arg(ap, int); evt = va_arg(ap, int); if (IS_24XX(isp) && evt == ISPASYNC_CHANGE_PDB) { nphdl = va_arg(ap, int); nlstate = va_arg(ap, int); reason = va_arg(ap, int); } else { nphdl = NIL_HANDLE; nlstate = reason = 0; } va_end(ap); fc = ISP_FC_PC(isp, bus); if (evt == ISPASYNC_CHANGE_PDB) { msg = "Port Database Changed"; } else if (evt == ISPASYNC_CHANGE_SNS) { msg = "Name Server Database Changed"; } else { msg = "Other Change Notify"; } /* * If the loop down timer is running, cancel it. */ if (fc->ready && callout_active(&fc->ldt)) { isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Stopping Loop Down Timer @ %lu", (unsigned long) time_uptime); callout_stop(&fc->ldt); } isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg); if (FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) { isp_freeze_loopdown(isp, bus, msg); } wakeup(fc); break; } #ifdef ISP_TARGET_MODE case ISPASYNC_TARGET_NOTIFY: { isp_notify_t *notify; va_start(ap, cmd); notify = va_arg(ap, isp_notify_t *); va_end(ap); switch (notify->nt_ncode) { case NT_ABORT_TASK: case NT_ABORT_TASK_SET: case NT_CLEAR_ACA: case NT_CLEAR_TASK_SET: case NT_LUN_RESET: case NT_TARGET_RESET: case NT_QUERY_TASK_SET: case NT_QUERY_ASYNC_EVENT: /* * These are task management functions. */ isp_handle_platform_target_tmf(isp, notify); break; case NT_BUS_RESET: case NT_LIP_RESET: case NT_LINK_UP: case NT_LINK_DOWN: case NT_HBA_RESET: /* * No action need be taken here. 
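*/

/*
 * Editor's note: the dispatch above falls into three classes: task
 * management functions routed to the platform TMF handler, purely
 * informational link events that need no action here, and the logout
 * notifications below, which are pushed through the notify-ack path as
 * device departure events.
 */

/*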
*/ break; case NT_GLOBAL_LOGOUT: case NT_LOGOUT: /* * This is device arrival/departure notification */ isp_handle_platform_target_notify_ack(isp, notify); break; default: isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode); isp_handle_platform_target_notify_ack(isp, notify); break; } break; } case ISPASYNC_TARGET_NOTIFY_ACK: { void *inot; va_start(ap, cmd); inot = va_arg(ap, void *); va_end(ap); if (isp_notify_ack(isp, inot)) { isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT); if (tp) { tp->isp = isp; if (inot) { memcpy(tp->data, inot, sizeof (tp->data)); tp->not = tp->data; } else { tp->not = NULL; } callout_init_mtx(&tp->timer, &isp->isp_lock, 0); callout_reset(&tp->timer, 5, isp_refire_notify_ack, tp); } else { isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire"); } } break; } case ISPASYNC_TARGET_ACTION: { isphdr_t *hp; va_start(ap, cmd); hp = va_arg(ap, isphdr_t *); va_end(ap); switch (hp->rqs_entry_type) { default: isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x", __func__, hp->rqs_entry_type); break; case RQSTYPE_NOTIFY: if (IS_SCSI(isp)) { isp_handle_platform_notify_scsi(isp, (in_entry_t *) hp); } else if (IS_24XX(isp)) { isp_handle_platform_notify_24xx(isp, (in_fcentry_24xx_t *) hp); } else { isp_handle_platform_notify_fc(isp, (in_fcentry_t *) hp); } break; case RQSTYPE_ATIO: if (IS_24XX(isp)) { isp_handle_platform_atio7(isp, (at7_entry_t *) hp); } else { isp_handle_platform_atio(isp, (at_entry_t *) hp); } break; case RQSTYPE_ATIO2: isp_handle_platform_atio2(isp, (at2_entry_t *) hp); break; case RQSTYPE_CTIO7: case RQSTYPE_CTIO3: case RQSTYPE_CTIO2: case RQSTYPE_CTIO: isp_handle_platform_ctio(isp, hp); break; case RQSTYPE_ABTS_RCVD: { abts_t *abts = (abts_t *)hp; isp_notify_t notify, *nt = ¬ify; tstate_t *tptr; fcportdb_t *lp; uint16_t chan; uint32_t sid, did; did = (abts->abts_did_hi << 16) | abts->abts_did_lo; sid = (abts->abts_sid_hi << 16) | abts->abts_sid_lo; ISP_MEMZERO(nt, sizeof (isp_notify_t)); nt->nt_hba = isp; nt->nt_did = did; nt->nt_nphdl = abts->abts_nphdl; nt->nt_sid = sid; isp_find_chan_by_did(isp, did, &chan); if (chan == ISP_NOCHAN) { nt->nt_tgt = TGT_ANY; } else { nt->nt_tgt = FCPARAM(isp, chan)->isp_wwpn; if (isp_find_pdb_by_handle(isp, chan, abts->abts_nphdl, &lp)) { nt->nt_wwn = lp->port_wwn; } else { nt->nt_wwn = INI_ANY; } } /* * Try hard to find the lun for this command. 
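*/

/*
 * Editor's note: a few lines below, the handler packs both exchange
 * identifiers into the single 64-bit nt_tagval so that either half can
 * be recovered later. The packing reduces to (hypothetical helper):
 */
static uint64_t
isp_abts_tagval_sketch(uint32_t rxid_task, uint32_t rxid_abts)
{

	/* low 32 bits: task RX_ID; high 32 bits: ABTS exchange RX_ID */
	return ((uint64_t)rxid_task | ((uint64_t)rxid_abts << 32));
}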
*/ tptr = get_lun_statep_from_tag(isp, chan, abts->abts_rxid_task); if (tptr) { nt->nt_lun = tptr->ts_lun; rls_lun_statep(isp, tptr); } else { nt->nt_lun = LUN_ANY; } nt->nt_need_ack = 1; nt->nt_tagval = abts->abts_rxid_task; nt->nt_tagval |= (((uint64_t) abts->abts_rxid_abts) << 32); if (abts->abts_rxid_task == ISP24XX_NO_TASK) { isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS from N-Port handle 0x%x Port 0x%06x has no task id (rx_id 0x%04x ox_id 0x%04x)", abts->abts_rxid_abts, abts->abts_nphdl, sid, abts->abts_rx_id, abts->abts_ox_id); } else { isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS from N-Port handle 0x%x Port 0x%06x for task 0x%x (rx_id 0x%04x ox_id 0x%04x)", abts->abts_rxid_abts, abts->abts_nphdl, sid, abts->abts_rxid_task, abts->abts_rx_id, abts->abts_ox_id); } nt->nt_channel = chan; nt->nt_ncode = NT_ABORT_TASK; nt->nt_lreserved = hp; isp_handle_platform_target_tmf(isp, nt); break; } case RQSTYPE_ENABLE_LUN: case RQSTYPE_MODIFY_LUN: isp_ledone(isp, (lun_entry_t *) hp); break; } break; } #endif case ISPASYNC_FW_CRASH: { uint16_t mbox1, mbox6; mbox1 = ISP_READ(isp, OUTMAILBOX1); if (IS_DUALBUS(isp)) { mbox6 = ISP_READ(isp, OUTMAILBOX6); } else { mbox6 = 0; } isp_prt(isp, ISP_LOGERR, "Internal Firmware Error on bus %d @ RISC Address 0x%x", mbox6, mbox1); mbox1 = isp->isp_osinfo.mbox_sleep_ok; isp->isp_osinfo.mbox_sleep_ok = 0; isp_reinit(isp, 1); isp->isp_osinfo.mbox_sleep_ok = mbox1; isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); break; } default: isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); break; } } /* * Locks are held before coming here. */ void isp_uninit(ispsoftc_t *isp) { if (IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET); } else { ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); } ISP_DISABLE_INTS(isp); } /* * When we want to get the 'default' WWNs (when lacking NVRAM), we pick them * up from our platform default (defww{p|n}n) and morph them based upon * channel. * * When we want to get the 'active' WWNs, we get NVRAM WWNs and then morph them * based upon channel. */ uint64_t isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn) { uint64_t seed; struct isp_fc *fc = ISP_FC_PC(isp, chan); /* * If we're asking for a active WWN, the default overrides get * returned, otherwise the NVRAM value is picked. * * If we're asking for a default WWN, we just pick the default override. */ if (isactive) { seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; if (seed) { return (seed); } seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram : FCPARAM(isp, chan)->isp_wwpn_nvram; if (seed) { return (seed); } return (0x400000007F000009ull); } seed = iswwnn ? fc->def_wwnn : fc->def_wwpn; /* * For channel zero just return what we have. For either ACTIVE or * DEFAULT cases, we depend on default override of NVRAM values for * channel zero. */ if (chan == 0) { return (seed); } /* * For other channels, we are doing one of three things: * * 1. If what we have now is non-zero, return it. Otherwise we morph * values from channel 0. 2. If we're here for a WWPN we synthesize * it if Channel 0's wwpn has a type 2 NAA. 3. If we're here for a * WWNN we synthesize it if Channel 0's wwnn has a type 2 NAA. */ if (seed) { return (seed); } seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn : ISP_FC_PC(isp, 0)->def_wwpn; if (seed == 0) seed = iswwnn ? 
FCPARAM(isp, 0)->isp_wwnn_nvram : FCPARAM(isp, 0)->isp_wwpn_nvram; if (((seed >> 60) & 0xf) == 2) {
/*
 * The type 2 NAA fields for QLogic cards appear to be laid out as
 * follows:
 *
 *	bits 63..60	NAA == 2
 *	bits 59..57	unused/zero
 *	bit 56		port (1) or node (0) WWN distinguisher
 *	bit 48		physical port on dual-port chips (23XX/24XX)
 *
 * This is somewhat nutty, particularly since bit 48 is irrelevant as
 * they assign separate serial numbers to different physical ports
 * anyway.
 *
 * We'll stick our channel number plus one first into bits 57..59 and
 * thence into bits 52..55, which allows for 8 bits of channel,
 * comfortably more than our maximum (126) now.
 */
seed &= ~0x0FF0000000000000ULL; if (iswwnn == 0) { seed |= ((uint64_t) (chan + 1) & 0xf) << 56; seed |= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52; } } else { seed = 0; } return (seed); } void isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) { int loc; char lbuf[200]; va_list ap; if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { return; } snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev)); loc = strlen(lbuf); va_start(ap, fmt); vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap); va_end(ap); printf("%s\n", lbuf); } void isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...) { va_list ap; if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { return; } xpt_print_path(xs->ccb_h.path); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } uint64_t isp_nanotime_sub(struct timespec *b, struct timespec *a) { uint64_t elapsed; struct timespec x = *b; timespecsub(&x, a); elapsed = GET_NANOSEC(&x); if (elapsed == 0) elapsed++; return (elapsed); } int isp_mbox_acquire(ispsoftc_t *isp) { if (isp->isp_osinfo.mboxbsy) { return (1); } else { isp->isp_osinfo.mboxcmd_done = 0; isp->isp_osinfo.mboxbsy = 1; return (0); } } void isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp) { unsigned int usecs = mbp->timeout; unsigned int max, olim, ilim; if (usecs == 0) { usecs = MBCMD_DEFAULT_TIMEOUT; } max = isp->isp_mbxwrk0 + 1; if (isp->isp_osinfo.mbox_sleep_ok) { unsigned int ms = (usecs + 999) / 1000; isp->isp_osinfo.mbox_sleep_ok = 0; isp->isp_osinfo.mbox_sleeping = 1; for (olim = 0; olim < max; olim++) { msleep(&isp->isp_mbxworkp, &isp->isp_osinfo.lock, PRIBIO, "ispmbx_sleep", isp_mstohz(ms)); if (isp->isp_osinfo.mboxcmd_done) { break; } } isp->isp_osinfo.mbox_sleep_ok = 1; isp->isp_osinfo.mbox_sleeping = 0; } else { for (olim = 0; olim < max; olim++) { for (ilim = 0; ilim < usecs; ilim += 100) { uint16_t isr, sema, info; if (isp->isp_osinfo.mboxcmd_done) { break; } if (ISP_READ_ISR(isp, &isr, &sema, &info)) { isp_intr(isp, isr, sema, info); if (isp->isp_osinfo.mboxcmd_done) { break; } } ISP_DELAY(100); } if (isp->isp_osinfo.mboxcmd_done) { break; } } } if (isp->isp_osinfo.mboxcmd_done == 0) { isp_prt(isp, ISP_LOGWARN, "%s Mailbox Command (0x%x) Timeout (%uus) (started @ %s:%d)", isp->isp_osinfo.mbox_sleep_ok?
"Interrupting" : "Polled", isp->isp_lastmbxcmd, usecs, mbp->func, mbp->lineno); mbp->param[0] = MBOX_TIMEOUT; isp->isp_osinfo.mboxcmd_done = 1; } } void isp_mbox_notify_done(ispsoftc_t *isp) { if (isp->isp_osinfo.mbox_sleeping) { wakeup(&isp->isp_mbxworkp); } isp->isp_osinfo.mboxcmd_done = 1; } void isp_mbox_release(ispsoftc_t *isp) { isp->isp_osinfo.mboxbsy = 0; } int isp_fc_scratch_acquire(ispsoftc_t *isp, int chan) { int ret = 0; if (isp->isp_osinfo.pc.fc[chan].fcbsy) { ret = -1; } else { isp->isp_osinfo.pc.fc[chan].fcbsy = 1; } return (ret); } int isp_mstohz(int ms) { int hz; struct timeval t; t.tv_sec = ms / 1000; t.tv_usec = (ms % 1000) * 1000; hz = tvtohz(&t); if (hz < 0) { hz = 0x7fffffff; } if (hz == 0) { hz = 1; } return (hz); } void isp_platform_intr(void *arg) { ispsoftc_t *isp = arg; uint16_t isr, sema, info; ISP_LOCK(isp); isp->isp_intcnt++; if (ISP_READ_ISR(isp, &isr, &sema, &info)) isp_intr(isp, isr, sema, info); else isp->isp_intbogus++; ISP_UNLOCK(isp); } void isp_common_dmateardown(ispsoftc_t *isp, struct ccb_scsiio *csio, uint32_t hdl) { if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE); } bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); } /* * Reset the command reference number for all LUNs on a specific target * (needed when a target arrives again) or for all targets on a port * (needed for events like a LIP). */ void isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set) { struct isp_fc *fc = ISP_FC_PC(isp, chan); struct isp_nexus *nxp; int i; if (tgt_set == 0) isp_prt(isp, ISP_LOGDEBUG0, "Chan %d resetting CRN on all targets", chan); else isp_prt(isp, ISP_LOGDEBUG0, "Chan %d resetting CRN on target %u", chan, tgt); for (i = 0; i < NEXUS_HASH_WIDTH; i++) { for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) { if (tgt_set == 0 || tgt == nxp->tgt) nxp->crnseed = 0; } } } int isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd) { lun_id_t lun; uint32_t chan, tgt; struct isp_fc *fc; struct isp_nexus *nxp; int idx; if (isp->isp_type < ISP_HA_FC_2300) return (0); chan = XS_CHANNEL(cmd); tgt = XS_TGT(cmd); lun = XS_LUN(cmd); fc = &isp->isp_osinfo.pc.fc[chan]; idx = NEXUS_HASH(tgt, lun); nxp = fc->nexus_hash[idx]; while (nxp) { if (nxp->tgt == tgt && nxp->lun == lun) break; nxp = nxp->next; } if (nxp == NULL) { nxp = fc->nexus_free_list; if (nxp == NULL) { nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT); if (nxp == NULL) { return (-1); } } else { fc->nexus_free_list = nxp->next; } nxp->tgt = tgt; nxp->lun = lun; nxp->next = fc->nexus_hash[idx]; fc->nexus_hash[idx] = nxp; } if (nxp->crnseed == 0) nxp->crnseed = 1; PISP_PCMD(cmd)->crn = nxp->crnseed; *crnp = nxp->crnseed++; return (0); } /* * We enter with the lock held */ void isp_timer(void *arg) { ispsoftc_t *isp = arg; #ifdef ISP_TARGET_MODE isp_tmcmd_restart(isp); #endif callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp); } isp_ecmd_t * isp_get_ecmd(ispsoftc_t *isp) { isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free; if (ecmd) { isp->isp_osinfo.ecmd_free = ecmd->next; } return (ecmd); } void isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd) { ecmd->next = isp->isp_osinfo.ecmd_free; isp->isp_osinfo.ecmd_free = ecmd; } Index: projects/powernv/dev/isp/isp_library.c =================================================================== --- 
projects/powernv/dev/isp/isp_library.c (revision 291050) +++ projects/powernv/dev/isp/isp_library.c (revision 291051) @@ -1,4010 +1,4018 @@ /*- * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Qlogic Host Adapter Internal Library Functions */ #ifdef __NetBSD__ #include __KERNEL_RCSID(0, "$NetBSD$"); #include #endif #ifdef __FreeBSD__ #include __FBSDID("$FreeBSD$"); #include #endif #ifdef __OpenBSD__ #include #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef __svr4__ #include "isp_solaris.h" #endif const char *isp_class3_roles[4] = { "None", "Target", "Initiator", "Target/Initiator" }; /* * Command shipping- finish off first queue entry and do dma mapping and additional segments as needed. * * Called with the first queue entry at least partially filled out. */ int isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, ispds64_t *ecmd) { uint8_t storage[QENTRY_LEN]; uint8_t type, nqe; uint32_t seg, curseg, seglim, nxt, nxtnxt, ddf; ispds_t *dsp = NULL; ispds64_t *dsp64 = NULL; void *qe0, *qe1; qe0 = isp_getrqentry(isp); if (qe0 == NULL) { return (CMD_EAGAIN); } nxt = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)); type = ((isphdr_t *)fqe)->rqs_entry_type; nqe = 1; /* * If we have no data to transmit, just copy the first IOCB and start it up. */ if (ddir == ISP_NOXFR) { if (type == RQSTYPE_T2RQS || type == RQSTYPE_T3RQS) { ddf = CT2_NO_DATA; } else { ddf = 0; } goto copy_and_sync; } /* * First figure out how many pieces of data to transfer and what kind and how many we can put into the first queue entry. */ switch (type) { case RQSTYPE_REQUEST: ddf = (ddir == ISP_TO_DEVICE)? REQFLAG_DATA_OUT : REQFLAG_DATA_IN; dsp = ((ispreq_t *)fqe)->req_dataseg; seglim = ISP_RQDSEG; break; case RQSTYPE_CMDONLY: ddf = (ddir == ISP_TO_DEVICE)? REQFLAG_DATA_OUT : REQFLAG_DATA_IN; seglim = 0; break; case RQSTYPE_T2RQS: ddf = (ddir == ISP_TO_DEVICE)? REQFLAG_DATA_OUT : REQFLAG_DATA_IN; dsp = ((ispreqt2_t *)fqe)->req_dataseg; seglim = ISP_RQDSEG_T2; break; case RQSTYPE_A64: ddf = (ddir == ISP_TO_DEVICE)? REQFLAG_DATA_OUT : REQFLAG_DATA_IN; dsp64 = ((ispreqt3_t *)fqe)->req_dataseg; seglim = ISP_RQDSEG_T3; break; case RQSTYPE_T3RQS: ddf = (ddir == ISP_TO_DEVICE)? 
REQFLAG_DATA_OUT : REQFLAG_DATA_IN; dsp64 = ((ispreqt3_t *)fqe)->req_dataseg; seglim = ISP_RQDSEG_T3; break; case RQSTYPE_T7RQS: ddf = (ddir == ISP_TO_DEVICE)? FCP_CMND_DATA_WRITE : FCP_CMND_DATA_READ; dsp64 = &((ispreqt7_t *)fqe)->req_dataseg; seglim = 1; break; default: return (CMD_COMPLETE); } if (seglim > nsegs) { seglim = nsegs; } for (seg = curseg = 0; curseg < seglim; curseg++) { if (dsp64) { XS_GET_DMA64_SEG(dsp64++, segp, seg++); } else { XS_GET_DMA_SEG(dsp++, segp, seg++); } } /* * Second, start building additional continuation segments as needed. */ while (seg < nsegs) { nxtnxt = ISP_NXT_QENTRY(nxt, RQUEST_QUEUE_LEN(isp)); if (nxtnxt == isp->isp_reqodx) { isp->isp_reqodx = ISP_READ(isp, isp->isp_rqstoutrp); if (nxtnxt == isp->isp_reqodx) return (CMD_EAGAIN); } ISP_MEMZERO(storage, QENTRY_LEN); qe1 = ISP_QUEUE_ENTRY(isp->isp_rquest, nxt); nxt = nxtnxt; if (dsp64) { ispcontreq64_t *crq = (ispcontreq64_t *) storage; seglim = ISP_CDSEG64; crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; crq->req_header.rqs_entry_count = 1; dsp64 = crq->req_dataseg; } else { ispcontreq_t *crq = (ispcontreq_t *) storage; seglim = ISP_CDSEG; crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; crq->req_header.rqs_entry_count = 1; dsp = crq->req_dataseg; } if (seg + seglim > nsegs) { seglim = nsegs - seg; } for (curseg = 0; curseg < seglim; curseg++) { if (dsp64) { XS_GET_DMA64_SEG(dsp64++, segp, seg++); } else { XS_GET_DMA_SEG(dsp++, segp, seg++); } } if (dsp64) { isp_put_cont64_req(isp, (ispcontreq64_t *)storage, qe1); } else { isp_put_cont_req(isp, (ispcontreq_t *)storage, qe1); } if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "additional queue entry", QENTRY_LEN, storage); } nqe++; } copy_and_sync: ((isphdr_t *)fqe)->rqs_entry_count = nqe; switch (type) { case RQSTYPE_REQUEST: ((ispreq_t *)fqe)->req_flags |= ddf; /* * This is historical and not clear whether really needed. */ if (nsegs == 0) { nsegs = 1; } ((ispreq_t *)fqe)->req_seg_count = nsegs; isp_put_request(isp, fqe, qe0); break; case RQSTYPE_CMDONLY: ((ispreq_t *)fqe)->req_flags |= ddf; /* * This is historical and not clear whether really needed. 
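*/

/*
 * Editor's note: the continuation loop above implies a simple entry
 * count: the first IOCB carries up to its type-specific segment limit,
 * and each continuation entry carries ISP_CDSEG (or ISP_CDSEG64) more.
 * The resulting queue-entry count is a ceiling division (hypothetical
 * helper):
 */
static uint32_t
isp_iocb_count_sketch(uint32_t nsegs, uint32_t seglim0, uint32_t seglim_cont)
{
	uint32_t nqe = 1;		/* the first queue entry */

	if (nsegs > seglim0)
		nqe += (nsegs - seglim0 + seglim_cont - 1) / seglim_cont;
	return (nqe);
}

/*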
*/ if (nsegs == 0) { nsegs = 1; } ((ispextreq_t *)fqe)->req_seg_count = nsegs; isp_put_extended_request(isp, fqe, qe0); break; case RQSTYPE_T2RQS: ((ispreqt2_t *)fqe)->req_flags |= ddf; ((ispreqt2_t *)fqe)->req_seg_count = nsegs; ((ispreqt2_t *)fqe)->req_totalcnt = totalcnt; if (ISP_CAP_2KLOGIN(isp)) { isp_put_request_t2e(isp, fqe, qe0); } else { isp_put_request_t2(isp, fqe, qe0); } break; case RQSTYPE_A64: case RQSTYPE_T3RQS: ((ispreqt3_t *)fqe)->req_flags |= ddf; ((ispreqt3_t *)fqe)->req_seg_count = nsegs; ((ispreqt3_t *)fqe)->req_totalcnt = totalcnt; if (ISP_CAP_2KLOGIN(isp)) { isp_put_request_t3e(isp, fqe, qe0); } else { isp_put_request_t3(isp, fqe, qe0); } break; case RQSTYPE_T7RQS: ((ispreqt7_t *)fqe)->req_alen_datadir = ddf; ((ispreqt7_t *)fqe)->req_seg_count = nsegs; ((ispreqt7_t *)fqe)->req_dl = totalcnt; isp_put_request_t7(isp, fqe, qe0); break; default: return (CMD_COMPLETE); } if (isp->isp_dblev & ISP_LOGDEBUG1) { isp_print_bytes(isp, "first queue entry", QENTRY_LEN, fqe); } ISP_ADD_REQUEST(isp, nxt); return (CMD_QUEUED); } int isp_allocate_xs(ispsoftc_t *isp, XS_T *xs, uint32_t *handlep) { isp_hdl_t *hdp; hdp = isp->isp_xffree; if (hdp == NULL) { return (-1); } isp->isp_xffree = hdp->cmd; hdp->cmd = xs; hdp->handle = (hdp - isp->isp_xflist); hdp->handle |= (ISP_HANDLE_INITIATOR << ISP_HANDLE_USAGE_SHIFT); hdp->handle |= (isp->isp_seqno++ << ISP_HANDLE_SEQ_SHIFT); *handlep = hdp->handle; return (0); } XS_T * isp_find_xs(ispsoftc_t *isp, uint32_t handle) { if (!ISP_VALID_INI_HANDLE(isp, handle)) { isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle); return (NULL); } return (isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)].cmd); } uint32_t isp_find_handle(ispsoftc_t *isp, XS_T *xs) { uint32_t i, foundhdl = ISP_HANDLE_FREE; if (xs != NULL) { for (i = 0; i < isp->isp_maxcmds; i++) { if (isp->isp_xflist[i].cmd != xs) { continue; } foundhdl = isp->isp_xflist[i].handle; break; } } return (foundhdl); } uint32_t isp_handle_index(ispsoftc_t *isp, uint32_t handle) { if (!ISP_VALID_HANDLE(isp, handle)) { isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle); return (ISP_BAD_HANDLE_INDEX); } else { return (handle & ISP_HANDLE_CMD_MASK); } } void isp_destroy_handle(ispsoftc_t *isp, uint32_t handle) { if (!ISP_VALID_INI_HANDLE(isp, handle)) { isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle); } else { isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)].handle = ISP_HANDLE_FREE; isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)].cmd = isp->isp_xffree; isp->isp_xffree = &isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)]; } } /* * Make sure we have space to put something on the request queue. * Return a pointer to that entry if we do. A side effect of this * function is to update the output index. The input index * stays the same. 
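*/

/*
 * Editor's note: the request ring below is a classic producer/consumer
 * index pair in which one slot is deliberately left unused, so that
 * next(in) == out unambiguously means "full". A minimal restatement of
 * the room check (hypothetical helper; nentries is the ring size):
 */
static int
isp_ring_full_sketch(uint32_t in, uint32_t out, uint32_t nentries)
{

	return (((in + 1) % nentries) == out);
}

/*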
*/ void * isp_getrqentry(ispsoftc_t *isp) { uint32_t next; next = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)); if (next == isp->isp_reqodx) { isp->isp_reqodx = ISP_READ(isp, isp->isp_rqstoutrp); if (next == isp->isp_reqodx) return (NULL); } return (ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx)); } #define TBA (4 * (((QENTRY_LEN >> 2) * 3) + 1) + 1) void isp_print_qentry(ispsoftc_t *isp, const char *msg, int idx, void *arg) { char buf[TBA]; int amt, i, j; uint8_t *ptr = arg; isp_prt(isp, ISP_LOGALL, "%s index %d=>", msg, idx); for (buf[0] = 0, amt = i = 0; i < 4; i++) { buf[0] = 0; ISP_SNPRINTF(buf, TBA, " "); for (j = 0; j < (QENTRY_LEN >> 2); j++) { ISP_SNPRINTF(buf, TBA, "%s %02x", buf, ptr[amt++] & 0xff); } isp_prt(isp, ISP_LOGALL, "%s", buf); } } void isp_print_bytes(ispsoftc_t *isp, const char *msg, int amt, void *arg) { char buf[128]; uint8_t *ptr = arg; int off; if (msg) isp_prt(isp, ISP_LOGALL, "%s:", msg); off = 0; buf[0] = 0; while (off < amt) { int j, to; to = off; for (j = 0; j < 16; j++) { ISP_SNPRINTF(buf, 128, "%s %02x", buf, ptr[off++] & 0xff); if (off == amt) { break; } } isp_prt(isp, ISP_LOGALL, "0x%08x:%s", to, buf); buf[0] = 0; } } /* * Do the common path to try and ensure that link is up, we've scanned * the fabric (if we're on a fabric), and that we've synchronized this * all with our own database and done the appropriate logins. * * We repeatedly check for firmware state and loop state after each * action because things may have changed while we were doing this. * Any failure or change of state causes us to return a nonzero value. * * We assume we enter here with any locks held. */ int isp_fc_runstate(ispsoftc_t *isp, int chan, int tval) { fcparam *fcp; fcp = FCPARAM(isp, chan); if (fcp->role == ISP_ROLE_NONE) { return (0); } - if (fcp->isp_fwstate < FW_READY || fcp->isp_loopstate < LOOP_PDB_RCVD) { - if (isp_control(isp, ISPCTL_FCLINK_TEST, chan, tval) != 0) { - isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: linktest failed for channel %d", chan); - return (-1); - } - if (fcp->isp_fwstate != FW_READY || fcp->isp_loopstate < LOOP_PDB_RCVD) { - isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: f/w not ready for channel %d", chan); - return (-1); - } + if (isp_control(isp, ISPCTL_FCLINK_TEST, chan, tval) != 0) { + isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: linktest failed for channel %d", chan); + return (-1); } - if (isp_control(isp, ISPCTL_SCAN_LOOP, chan) != 0) { - isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: scan loop fails on channel %d", chan); - return (LOOP_PDB_RCVD); + isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: scan loop failed on channel %d", chan); + return (LOOP_LTEST_DONE); } if (isp_control(isp, ISPCTL_SCAN_FABRIC, chan) != 0) { - isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: scan fabric fails on channel %d", chan); + isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: scan fabric failed on channel %d", chan); return (LOOP_LSCAN_DONE); } if (isp_control(isp, ISPCTL_PDB_SYNC, chan) != 0) { - isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: pdb_sync fails on channel %d", chan); + isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: pdb_sync failed on channel %d", chan); return (LOOP_FSCAN_DONE); } - if (fcp->isp_fwstate != FW_READY || fcp->isp_loopstate != LOOP_READY) { - isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: f/w not ready again on channel %d", chan); + if (fcp->isp_loopstate != LOOP_READY) { + isp_prt(isp, ISP_LOG_SANCFG, "isp_fc_runstate: not ready again on channel %d", chan); return (-1); } return (0); } /* * Fibre Channel Support 
routines */ void isp_dump_portdb(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); int i; for (i = 0; i < MAX_FC_TARG; i++) { char buf1[64], buf2[64]; const char *dbs[8] = { "NIL ", "PROB", "DEAD", "CHGD", "NEW ", "PVLD", "ZOMB", "VLD " }; fcportdb_t *lp = &fcp->portdb[i]; if (lp->state == FC_PORTDB_STATE_NIL) { continue; } isp_gen_role_str(buf1, sizeof (buf1), lp->prli_word3); isp_gen_role_str(buf2, sizeof (buf2), lp->new_prli_word3); isp_prt(isp, ISP_LOGALL, "Chan %d [%d]: hdl 0x%x %s al%d %s 0x%06x =>%s 0x%06x; WWNN 0x%08x%08x WWPN 0x%08x%08x", chan, i, lp->handle, dbs[lp->state], lp->autologin, buf1, lp->portid, buf2, lp->new_portid, (uint32_t) (lp->node_wwn >> 32), (uint32_t) (lp->node_wwn), (uint32_t) (lp->port_wwn >> 32), (uint32_t) (lp->port_wwn)); } } void isp_gen_role_str(char *buf, size_t len, uint16_t p3) { int nd = 0; buf[0] = '('; buf[1] = 0; if (p3 & PRLI_WD3_ENHANCED_DISCOVERY) { nd++; strlcat(buf, "EDisc", len); } if (p3 & PRLI_WD3_REC_SUPPORT) { if (nd++) { strlcat(buf, ",", len); } strlcat(buf, "REC", len); } if (p3 & PRLI_WD3_TASK_RETRY_IDENTIFICATION_REQUESTED) { if (nd++) { strlcat(buf, ",", len); } strlcat(buf, "RetryID", len); } if (p3 & PRLI_WD3_RETRY) { if (nd++) { strlcat(buf, ",", len); } strlcat(buf, "Retry", len); } if (p3 & PRLI_WD3_CONFIRMED_COMPLETION_ALLOWED) { if (nd++) { strlcat(buf, ",", len); } strlcat(buf, "CNFRM", len); } if (p3 & PRLI_WD3_DATA_OVERLAY_ALLOWED) { if (nd++) { strlcat(buf, ",", len); } strlcat(buf, "DOver", len); } if (p3 & PRLI_WD3_INITIATOR_FUNCTION) { if (nd++) { strlcat(buf, ",", len); } strlcat(buf, "INI", len); } if (p3 & PRLI_WD3_TARGET_FUNCTION) { if (nd++) { strlcat(buf, ",", len); } strlcat(buf, "TGT", len); } if (p3 & PRLI_WD3_READ_FCP_XFER_RDY_DISABLED) { if (nd++) { strlcat(buf, ",", len); } strlcat(buf, "RdXfrDis", len); } if (p3 & PRLI_WD3_WRITE_FCP_XFER_RDY_DISABLED) { if (nd++) { strlcat(buf, ",", len); } strlcat(buf, "XfrDis", len); } strlcat(buf, ")", len); } const char * isp_fc_fw_statename(int state) { switch (state) { case FW_CONFIG_WAIT: return "Config Wait"; - case FW_WAIT_AL_PA: return "Waiting for AL_PA"; + case FW_WAIT_LINK: return "Wait Link"; case FW_WAIT_LOGIN: return "Wait Login"; case FW_READY: return "Ready"; case FW_LOSS_OF_SYNC: return "Loss Of Sync"; case FW_ERROR: return "Error"; case FW_REINIT: return "Re-Init"; case FW_NON_PART: return "Nonparticipating"; default: return "?????"; } } const char * isp_fc_loop_statename(int state) { switch (state) { case LOOP_NIL: return "NIL"; - case LOOP_LIP_RCVD: return "LIP Received"; - case LOOP_PDB_RCVD: return "PDB Received"; - case LOOP_SCANNING_LOOP: return "Scanning"; + case LOOP_TESTING_LINK: return "Testing Link"; + case LOOP_LTEST_DONE: return "Link Test Done"; + case LOOP_SCANNING_LOOP: return "Scanning Loop"; case LOOP_LSCAN_DONE: return "Loop Scan Done"; case LOOP_SCANNING_FABRIC: return "Scanning Fabric"; case LOOP_FSCAN_DONE: return "Fabric Scan Done"; case LOOP_SYNCING_PDB: return "Syncing PDB"; case LOOP_READY: return "Ready"; default: return "?????"; } } const char * isp_fc_toponame(fcparam *fcp) { - if (fcp->isp_fwstate != FW_READY) { + if (fcp->isp_loopstate < LOOP_LTEST_DONE) { return "Unavailable"; } switch (fcp->isp_topo) { case TOPO_NL_PORT: return "Private Loop"; case TOPO_FL_PORT: return "FL Port"; case TOPO_N_PORT: return "N-Port to N-Port"; case TOPO_F_PORT: return "F Port"; case TOPO_PTP_STUB: return "F Port (no FLOGI_ACC response)"; default: return "?????"; } } static int isp_fc_enable_vp(ispsoftc_t *isp, int chan) { 
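	/*
	 * Editor's note (flow summary): the VP MODIFY IOCB is built in a
	 * stack buffer, copied into the per-channel scratch DMA area, and
	 * executed indirectly through a MBOX_EXEC_COMMAND_IOCB_A64 mailbox
	 * command that points at that scratch memory; the chip writes its
	 * reply IOCB back at offset QENTRY_LEN in the same scratch area,
	 * where vp_mod_status is checked for VP_STS_OK.
	 */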
fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; vp_modify_t *vp; uint8_t qe[QENTRY_LEN], *scp; ISP_MEMZERO(qe, QENTRY_LEN); if (FC_SCRATCH_ACQUIRE(isp, chan)) { return (EBUSY); } scp = fcp->isp_scratch; /* * Build a VP MODIFY command in memory */ vp = (vp_modify_t *) qe; vp->vp_mod_hdr.rqs_entry_type = RQSTYPE_VP_MODIFY; vp->vp_mod_hdr.rqs_entry_count = 1; vp->vp_mod_cnt = 1; vp->vp_mod_idx0 = chan; vp->vp_mod_cmd = VP_MODIFY_ENA; vp->vp_mod_ports[0].options = ICB2400_VPOPT_ENABLED | ICB2400_VPOPT_ENA_SNSLOGIN; if (fcp->role & ISP_ROLE_INITIATOR) { vp->vp_mod_ports[0].options |= ICB2400_VPOPT_INI_ENABLE; } if ((fcp->role & ISP_ROLE_TARGET) == 0) { vp->vp_mod_ports[0].options |= ICB2400_VPOPT_TGT_DISABLE; } if (fcp->isp_loopid < LOCAL_LOOP_LIM) { vp->vp_mod_ports[0].loopid = fcp->isp_loopid; if (isp->isp_confopts & ISP_CFG_OWNLOOPID) vp->vp_mod_ports[0].options |= ICB2400_VPOPT_HARD_ADDRESS; else vp->vp_mod_ports[0].options |= ICB2400_VPOPT_PREV_ADDRESS; } MAKE_NODE_NAME_FROM_WWN(vp->vp_mod_ports[0].wwpn, fcp->isp_wwpn); MAKE_NODE_NAME_FROM_WWN(vp->vp_mod_ports[0].wwnn, fcp->isp_wwnn); isp_put_vp_modify(isp, vp, (vp_modify_t *) scp); /* * Build a EXEC IOCB A64 command that points to the VP MODIFY command */ MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 0); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, 2 * QENTRY_LEN, chan); isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { FC_SCRATCH_RELEASE(isp, chan); return (EIO); } MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan); isp_get_vp_modify(isp, (vp_modify_t *)&scp[QENTRY_LEN], vp); FC_SCRATCH_RELEASE(isp, chan); if (vp->vp_mod_status != VP_STS_OK) { isp_prt(isp, ISP_LOGERR, "%s: VP_MODIFY of Chan %d failed with status %d", __func__, chan, vp->vp_mod_status); return (EIO); } return (0); } static int isp_fc_disable_vp(ispsoftc_t *isp, int chan) { fcparam *fcp = FCPARAM(isp, chan); mbreg_t mbs; vp_ctrl_info_t *vp; uint8_t qe[QENTRY_LEN], *scp; ISP_MEMZERO(qe, QENTRY_LEN); if (FC_SCRATCH_ACQUIRE(isp, chan)) { return (EBUSY); } scp = fcp->isp_scratch; /* * Build a VP CTRL command in memory */ vp = (vp_ctrl_info_t *) qe; vp->vp_ctrl_hdr.rqs_entry_type = RQSTYPE_VP_CTRL; vp->vp_ctrl_hdr.rqs_entry_count = 1; if (ISP_CAP_VP0(isp)) { vp->vp_ctrl_status = 1; } else { vp->vp_ctrl_status = 0; chan--; /* VP0 can not be controlled in this case. 
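*/

/*
 * Editor's note: vp_ctrl_idmap below is a bitmap built from 16-bit
 * words; virtual port n lands in word n / 16, bit n % 16. A hedged
 * restatement (hypothetical helper):
 */
static void
isp_idmap_set_sketch(uint16_t *idmap, int vp)
{

	idmap[vp / 16] |= (1 << (vp % 16));
}

/*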
*/ } vp->vp_ctrl_command = VP_CTRL_CMD_DISABLE_VP_LOGO_ALL; vp->vp_ctrl_vp_count = 1; vp->vp_ctrl_idmap[chan / 16] |= (1 << chan % 16); isp_put_vp_ctrl_info(isp, vp, (vp_ctrl_info_t *) scp); /* * Build a EXEC IOCB A64 command that points to the VP CTRL command */ MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 0); mbs.param[1] = QENTRY_LEN; mbs.param[2] = DMA_WD1(fcp->isp_scdma); mbs.param[3] = DMA_WD0(fcp->isp_scdma); mbs.param[6] = DMA_WD3(fcp->isp_scdma); mbs.param[7] = DMA_WD2(fcp->isp_scdma); MEMORYBARRIER(isp, SYNC_SFORDEV, 0, 2 * QENTRY_LEN, chan); isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); if (mbs.param[0] != MBOX_COMMAND_COMPLETE) { FC_SCRATCH_RELEASE(isp, chan); return (EIO); } MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan); isp_get_vp_ctrl_info(isp, (vp_ctrl_info_t *)&scp[QENTRY_LEN], vp); FC_SCRATCH_RELEASE(isp, chan); if (vp->vp_ctrl_status != 0) { isp_prt(isp, ISP_LOGERR, "%s: VP_CTRL of Chan %d failed with status %d %d", __func__, chan, vp->vp_ctrl_status, vp->vp_ctrl_index_fail); return (EIO); } return (0); } /* * Change Roles */ int isp_fc_change_role(ispsoftc_t *isp, int chan, int new_role) { fcparam *fcp = FCPARAM(isp, chan); int i, was, res = 0; if (chan >= isp->isp_nchan) { isp_prt(isp, ISP_LOGWARN, "%s: bad channel %d", __func__, chan); return (ENXIO); } if (fcp->role == new_role) return (0); for (was = 0, i = 0; i < isp->isp_nchan; i++) { if (FCPARAM(isp, i)->role != ISP_ROLE_NONE) was++; } if (was == 0 || (was == 1 && fcp->role != ISP_ROLE_NONE)) { fcp->role = new_role; return (isp_reinit(isp, 0)); } if (fcp->role != ISP_ROLE_NONE) res = isp_fc_disable_vp(isp, chan); fcp->role = new_role; if (fcp->role != ISP_ROLE_NONE) res = isp_fc_enable_vp(isp, chan); return (res); } void isp_clear_commands(ispsoftc_t *isp) { uint32_t tmp; isp_hdl_t *hdp; #ifdef ISP_TARGET_MODE isp_notify_t notify; #endif for (tmp = 0; isp->isp_xflist && tmp < isp->isp_maxcmds; tmp++) { XS_T *xs; hdp = &isp->isp_xflist[tmp]; if (hdp->handle == ISP_HANDLE_FREE) { continue; } xs = hdp->cmd; if (XS_XFRLEN(xs)) { ISP_DMAFREE(isp, xs, hdp->handle); XS_SET_RESID(xs, XS_XFRLEN(xs)); } else { XS_SET_RESID(xs, 0); } hdp->handle = 0; hdp->cmd = NULL; XS_SETERR(xs, HBA_BUSRESET); isp_done(xs); } #ifdef ISP_TARGET_MODE for (tmp = 0; isp->isp_tgtlist && tmp < isp->isp_maxcmds; tmp++) { uint8_t local[QENTRY_LEN]; hdp = &isp->isp_tgtlist[tmp]; if (hdp->handle == ISP_HANDLE_FREE) { continue; } ISP_DMAFREE(isp, hdp->cmd, hdp->handle); ISP_MEMZERO(local, QENTRY_LEN); if (IS_24XX(isp)) { ct7_entry_t *ctio = (ct7_entry_t *) local; ctio->ct_syshandle = hdp->handle; ctio->ct_nphdl = CT_HBA_RESET; ctio->ct_header.rqs_entry_type = RQSTYPE_CTIO7; } else if (IS_FC(isp)) { ct2_entry_t *ctio = (ct2_entry_t *) local; ctio->ct_syshandle = hdp->handle; ctio->ct_status = CT_HBA_RESET; ctio->ct_header.rqs_entry_type = RQSTYPE_CTIO2; } else { ct_entry_t *ctio = (ct_entry_t *) local; ctio->ct_syshandle = hdp->handle; ctio->ct_status = CT_HBA_RESET & 0xff; ctio->ct_header.rqs_entry_type = RQSTYPE_CTIO; } isp_async(isp, ISPASYNC_TARGET_ACTION, local); } for (tmp = 0; tmp < isp->isp_nchan; tmp++) { ISP_MEMZERO(¬ify, sizeof (isp_notify_t)); notify.nt_ncode = NT_HBA_RESET; notify.nt_hba = isp; notify.nt_wwn = INI_ANY; notify.nt_nphdl = NIL_HANDLE; notify.nt_sid = PORT_ANY; notify.nt_did = PORT_ANY; notify.nt_tgt = TGT_ANY; notify.nt_channel = tmp; notify.nt_lun = LUN_ANY; notify.nt_tagval = TAG_ANY; isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); } #endif } void isp_shutdown(ispsoftc_t *isp) { if (IS_FC(isp)) { if 
(IS_24XX(isp)) { ISP_WRITE(isp, BIU2400_ICR, 0); ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_PAUSE); } else { ISP_WRITE(isp, BIU_ICR, 0); ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FPM0_REGS); ISP_WRITE(isp, FPM_DIAG_CONFIG, FPM_SOFT_RESET); ISP_WRITE(isp, BIU2100_CSR, BIU2100_FB_REGS); ISP_WRITE(isp, FBM_CMD, FBMCMD_FIFO_RESET_ALL); ISP_WRITE(isp, BIU2100_CSR, BIU2100_RISC_REGS); } } else { ISP_WRITE(isp, BIU_ICR, 0); ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); } } /* * Functions to move stuff to a form that the QLogic RISC engine understands * and functions to move stuff back to a form the processor understands. * * Each platform is required to provide the 8, 16 and 32 bit * swizzle and unswizzle macros (ISP_IOX{PUT|GET}_{8,16,32}) * * The assumption is that swizzling and unswizzling is mostly done 'in place' * (with a few exceptions for efficiency). */ #define ISP_IS_SBUS(isp) (ISP_SBUS_SUPPORTED && (isp)->isp_bustype == ISP_BT_SBUS) #define ASIZE(x) (sizeof (x) / sizeof (x[0])) /* * Swizzle/Copy Functions */ void isp_put_hdr(ispsoftc_t *isp, isphdr_t *hpsrc, isphdr_t *hpdst) { if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, hpsrc->rqs_entry_type, &hpdst->rqs_entry_count); ISP_IOXPUT_8(isp, hpsrc->rqs_entry_count, &hpdst->rqs_entry_type); ISP_IOXPUT_8(isp, hpsrc->rqs_seqno, &hpdst->rqs_flags); ISP_IOXPUT_8(isp, hpsrc->rqs_flags, &hpdst->rqs_seqno); } else { ISP_IOXPUT_8(isp, hpsrc->rqs_entry_type, &hpdst->rqs_entry_type); ISP_IOXPUT_8(isp, hpsrc->rqs_entry_count, &hpdst->rqs_entry_count); ISP_IOXPUT_8(isp, hpsrc->rqs_seqno, &hpdst->rqs_seqno); ISP_IOXPUT_8(isp, hpsrc->rqs_flags, &hpdst->rqs_flags); } } void isp_get_hdr(ispsoftc_t *isp, isphdr_t *hpsrc, isphdr_t *hpdst) { if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &hpsrc->rqs_entry_type, hpdst->rqs_entry_count); ISP_IOXGET_8(isp, &hpsrc->rqs_entry_count, hpdst->rqs_entry_type); ISP_IOXGET_8(isp, &hpsrc->rqs_seqno, hpdst->rqs_flags); ISP_IOXGET_8(isp, &hpsrc->rqs_flags, hpdst->rqs_seqno); } else { ISP_IOXGET_8(isp, &hpsrc->rqs_entry_type, hpdst->rqs_entry_type); ISP_IOXGET_8(isp, &hpsrc->rqs_entry_count, hpdst->rqs_entry_count); ISP_IOXGET_8(isp, &hpsrc->rqs_seqno, hpdst->rqs_seqno); ISP_IOXGET_8(isp, &hpsrc->rqs_flags, hpdst->rqs_flags); } } int isp_get_response_type(ispsoftc_t *isp, isphdr_t *hp) { uint8_t type; if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &hp->rqs_entry_count, type); } else { ISP_IOXGET_8(isp, &hp->rqs_entry_type, type); } return ((int)type); } void isp_put_request(ispsoftc_t *isp, ispreq_t *rqsrc, ispreq_t *rqdst) { int i; isp_put_hdr(isp, &rqsrc->req_header, &rqdst->req_header); ISP_IOXPUT_32(isp, rqsrc->req_handle, &rqdst->req_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, rqsrc->req_lun_trn, &rqdst->req_target); ISP_IOXPUT_8(isp, rqsrc->req_target, &rqdst->req_lun_trn); } else { ISP_IOXPUT_8(isp, rqsrc->req_lun_trn, &rqdst->req_lun_trn); ISP_IOXPUT_8(isp, rqsrc->req_target, &rqdst->req_target); } ISP_IOXPUT_16(isp, rqsrc->req_cdblen, &rqdst->req_cdblen); ISP_IOXPUT_16(isp, rqsrc->req_flags, &rqdst->req_flags); ISP_IOXPUT_16(isp, rqsrc->req_time, &rqdst->req_time); ISP_IOXPUT_16(isp, rqsrc->req_seg_count, &rqdst->req_seg_count); for (i = 0; i < ASIZE(rqsrc->req_cdb); i++) { ISP_IOXPUT_8(isp, rqsrc->req_cdb[i], &rqdst->req_cdb[i]); } for (i = 0; i < ISP_RQDSEG; i++) { ISP_IOXPUT_32(isp, rqsrc->req_dataseg[i].ds_base, &rqdst->req_dataseg[i].ds_base); ISP_IOXPUT_32(isp, rqsrc->req_dataseg[i].ds_count, &rqdst->req_dataseg[i].ds_count); } } void isp_put_marker(ispsoftc_t *isp, isp_marker_t *src, 
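/*
 * A minimal sketch of the contract the comment above describes
 * (illustrative only; the real definitions live in each platform's port
 * header and may differ): on a little-endian host driving the RISC in its
 * native byte order the "swizzle" degenerates to a plain store, e.g.
 *
 *	#define	ISP_IOXPUT_16(isp, s, d)	(*(d) = htole16(s))
 *	#define	ISP_IOXGET_16(isp, s, d)	((d) = le16toh(*(s)))
 *
 * while the SBus case additionally swaps adjacent 8-bit fields that share
 * a 16-bit word, which is why isp_put_hdr() above crosses rqs_entry_type
 * with rqs_entry_count when ISP_IS_SBUS() is true.
 */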
isp_marker_t *dst) { int i; isp_put_hdr(isp, &src->mrk_header, &dst->mrk_header); ISP_IOXPUT_32(isp, src->mrk_handle, &dst->mrk_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->mrk_reserved0, &dst->mrk_target); ISP_IOXPUT_8(isp, src->mrk_target, &dst->mrk_reserved0); } else { ISP_IOXPUT_8(isp, src->mrk_reserved0, &dst->mrk_reserved0); ISP_IOXPUT_8(isp, src->mrk_target, &dst->mrk_target); } ISP_IOXPUT_16(isp, src->mrk_modifier, &dst->mrk_modifier); ISP_IOXPUT_16(isp, src->mrk_flags, &dst->mrk_flags); ISP_IOXPUT_16(isp, src->mrk_lun, &dst->mrk_lun); for (i = 0; i < ASIZE(src->mrk_reserved1); i++) { ISP_IOXPUT_8(isp, src->mrk_reserved1[i], &dst->mrk_reserved1[i]); } } void isp_put_marker_24xx(ispsoftc_t *isp, isp_marker_24xx_t *src, isp_marker_24xx_t *dst) { int i; isp_put_hdr(isp, &src->mrk_header, &dst->mrk_header); ISP_IOXPUT_32(isp, src->mrk_handle, &dst->mrk_handle); ISP_IOXPUT_16(isp, src->mrk_nphdl, &dst->mrk_nphdl); ISP_IOXPUT_8(isp, src->mrk_modifier, &dst->mrk_modifier); ISP_IOXPUT_8(isp, src->mrk_reserved0, &dst->mrk_reserved0); ISP_IOXPUT_8(isp, src->mrk_reserved1, &dst->mrk_reserved1); ISP_IOXPUT_8(isp, src->mrk_vphdl, &dst->mrk_vphdl); ISP_IOXPUT_8(isp, src->mrk_reserved2, &dst->mrk_reserved2); for (i = 0; i < ASIZE(src->mrk_lun); i++) { ISP_IOXPUT_8(isp, src->mrk_lun[i], &dst->mrk_lun[i]); } for (i = 0; i < ASIZE(src->mrk_reserved3); i++) { ISP_IOXPUT_8(isp, src->mrk_reserved3[i], &dst->mrk_reserved3[i]); } } void isp_put_request_t2(ispsoftc_t *isp, ispreqt2_t *src, ispreqt2_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); ISP_IOXPUT_8(isp, src->req_lun_trn, &dst->req_lun_trn); ISP_IOXPUT_8(isp, src->req_target, &dst->req_target); ISP_IOXPUT_16(isp, src->req_scclun, &dst->req_scclun); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_16(isp, src->req_reserved, &dst->req_reserved); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, src->req_cdb[i], &dst->req_cdb[i]); } ISP_IOXPUT_32(isp, src->req_totalcnt, &dst->req_totalcnt); for (i = 0; i < ISP_RQDSEG_T2; i++) { ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_put_request_t2e(ispsoftc_t *isp, ispreqt2e_t *src, ispreqt2e_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); ISP_IOXPUT_16(isp, src->req_target, &dst->req_target); ISP_IOXPUT_16(isp, src->req_scclun, &dst->req_scclun); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_16(isp, src->req_reserved, &dst->req_reserved); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, src->req_cdb[i], &dst->req_cdb[i]); } ISP_IOXPUT_32(isp, src->req_totalcnt, &dst->req_totalcnt); for (i = 0; i < ISP_RQDSEG_T2; i++) { ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_put_request_t3(ispsoftc_t *isp, ispreqt3_t *src, ispreqt3_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); ISP_IOXPUT_8(isp, src->req_lun_trn, &dst->req_lun_trn); ISP_IOXPUT_8(isp, 
src->req_target, &dst->req_target); ISP_IOXPUT_16(isp, src->req_scclun, &dst->req_scclun); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_8(isp, src->req_crn, &dst->req_crn); ISP_IOXPUT_8(isp, src->req_reserved, &dst->req_reserved); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, src->req_cdb[i], &dst->req_cdb[i]); } ISP_IOXPUT_32(isp, src->req_totalcnt, &dst->req_totalcnt); for (i = 0; i < ISP_RQDSEG_T3; i++) { ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_basehi, &dst->req_dataseg[i].ds_basehi); ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_put_request_t3e(ispsoftc_t *isp, ispreqt3e_t *src, ispreqt3e_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); ISP_IOXPUT_16(isp, src->req_target, &dst->req_target); ISP_IOXPUT_16(isp, src->req_scclun, &dst->req_scclun); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_8(isp, src->req_crn, &dst->req_crn); ISP_IOXPUT_8(isp, src->req_reserved, &dst->req_reserved); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, src->req_cdb[i], &dst->req_cdb[i]); } ISP_IOXPUT_32(isp, src->req_totalcnt, &dst->req_totalcnt); for (i = 0; i < ISP_RQDSEG_T3; i++) { ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_basehi, &dst->req_dataseg[i].ds_basehi); ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_put_extended_request(ispsoftc_t *isp, ispextreq_t *src, ispextreq_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->req_lun_trn, &dst->req_target); ISP_IOXPUT_8(isp, src->req_target, &dst->req_lun_trn); } else { ISP_IOXPUT_8(isp, src->req_lun_trn, &dst->req_lun_trn); ISP_IOXPUT_8(isp, src->req_target, &dst->req_target); } ISP_IOXPUT_16(isp, src->req_cdblen, &dst->req_cdblen); ISP_IOXPUT_16(isp, src->req_flags, &dst->req_flags); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); for (i = 0; i < ASIZE(src->req_cdb); i++) { ISP_IOXPUT_8(isp, src->req_cdb[i], &dst->req_cdb[i]); } } void isp_put_request_t7(ispsoftc_t *isp, ispreqt7_t *src, ispreqt7_t *dst) { int i; uint32_t *a, *b; isp_put_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXPUT_32(isp, src->req_handle, &dst->req_handle); ISP_IOXPUT_16(isp, src->req_nphdl, &dst->req_nphdl); ISP_IOXPUT_16(isp, src->req_time, &dst->req_time); ISP_IOXPUT_16(isp, src->req_seg_count, &dst->req_seg_count); ISP_IOXPUT_16(isp, src->req_reserved, &dst->req_reserved); a = (uint32_t *) src->req_lun; b = (uint32_t *) dst->req_lun; for (i = 0; i < (ASIZE(src->req_lun) >> 2); i++ ) { *b++ = ISP_SWAP32(isp, *a++); } ISP_IOXPUT_8(isp, src->req_alen_datadir, &dst->req_alen_datadir); ISP_IOXPUT_8(isp, src->req_task_management, &dst->req_task_management); ISP_IOXPUT_8(isp, src->req_task_attribute, &dst->req_task_attribute); ISP_IOXPUT_8(isp, src->req_crn, &dst->req_crn); a = (uint32_t *) src->req_cdb; b = (uint32_t *) dst->req_cdb; for (i = 0; i < (ASIZE(src->req_cdb) >> 2); i++) { 
*b++ = ISP_SWAP32(isp, *a++); } ISP_IOXPUT_32(isp, src->req_dl, &dst->req_dl); ISP_IOXPUT_16(isp, src->req_tidlo, &dst->req_tidlo); ISP_IOXPUT_8(isp, src->req_tidhi, &dst->req_tidhi); ISP_IOXPUT_8(isp, src->req_vpidx, &dst->req_vpidx); ISP_IOXPUT_32(isp, src->req_dataseg.ds_base, &dst->req_dataseg.ds_base); ISP_IOXPUT_32(isp, src->req_dataseg.ds_basehi, &dst->req_dataseg.ds_basehi); ISP_IOXPUT_32(isp, src->req_dataseg.ds_count, &dst->req_dataseg.ds_count); } void isp_put_24xx_tmf(ispsoftc_t *isp, isp24xx_tmf_t *src, isp24xx_tmf_t *dst) { int i; uint32_t *a, *b; isp_put_hdr(isp, &src->tmf_header, &dst->tmf_header); ISP_IOXPUT_32(isp, src->tmf_handle, &dst->tmf_handle); ISP_IOXPUT_16(isp, src->tmf_nphdl, &dst->tmf_nphdl); ISP_IOXPUT_16(isp, src->tmf_delay, &dst->tmf_delay); ISP_IOXPUT_16(isp, src->tmf_timeout, &dst->tmf_timeout); for (i = 0; i < ASIZE(src->tmf_reserved0); i++) { ISP_IOXPUT_8(isp, src->tmf_reserved0[i], &dst->tmf_reserved0[i]); } a = (uint32_t *) src->tmf_lun; b = (uint32_t *) dst->tmf_lun; for (i = 0; i < (ASIZE(src->tmf_lun) >> 2); i++ ) { *b++ = ISP_SWAP32(isp, *a++); } ISP_IOXPUT_32(isp, src->tmf_flags, &dst->tmf_flags); for (i = 0; i < ASIZE(src->tmf_reserved1); i++) { ISP_IOXPUT_8(isp, src->tmf_reserved1[i], &dst->tmf_reserved1[i]); } ISP_IOXPUT_16(isp, src->tmf_tidlo, &dst->tmf_tidlo); ISP_IOXPUT_8(isp, src->tmf_tidhi, &dst->tmf_tidhi); ISP_IOXPUT_8(isp, src->tmf_vpidx, &dst->tmf_vpidx); for (i = 0; i < ASIZE(src->tmf_reserved2); i++) { ISP_IOXPUT_8(isp, src->tmf_reserved2[i], &dst->tmf_reserved2[i]); } } void isp_put_24xx_abrt(ispsoftc_t *isp, isp24xx_abrt_t *src, isp24xx_abrt_t *dst) { int i; isp_put_hdr(isp, &src->abrt_header, &dst->abrt_header); ISP_IOXPUT_32(isp, src->abrt_handle, &dst->abrt_handle); ISP_IOXPUT_16(isp, src->abrt_nphdl, &dst->abrt_nphdl); ISP_IOXPUT_16(isp, src->abrt_options, &dst->abrt_options); ISP_IOXPUT_32(isp, src->abrt_cmd_handle, &dst->abrt_cmd_handle); ISP_IOXPUT_16(isp, src->abrt_queue_number, &dst->abrt_queue_number); for (i = 0; i < ASIZE(src->abrt_reserved); i++) { ISP_IOXPUT_8(isp, src->abrt_reserved[i], &dst->abrt_reserved[i]); } ISP_IOXPUT_16(isp, src->abrt_tidlo, &dst->abrt_tidlo); ISP_IOXPUT_8(isp, src->abrt_tidhi, &dst->abrt_tidhi); ISP_IOXPUT_8(isp, src->abrt_vpidx, &dst->abrt_vpidx); for (i = 0; i < ASIZE(src->abrt_reserved1); i++) { ISP_IOXPUT_8(isp, src->abrt_reserved1[i], &dst->abrt_reserved1[i]); } } void isp_put_cont_req(ispsoftc_t *isp, ispcontreq_t *src, ispcontreq_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); for (i = 0; i < ISP_CDSEG; i++) { ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_put_cont64_req(ispsoftc_t *isp, ispcontreq64_t *src, ispcontreq64_t *dst) { int i; isp_put_hdr(isp, &src->req_header, &dst->req_header); for (i = 0; i < ISP_CDSEG64; i++) { ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_base, &dst->req_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_basehi, &dst->req_dataseg[i].ds_basehi); ISP_IOXPUT_32(isp, src->req_dataseg[i].ds_count, &dst->req_dataseg[i].ds_count); } } void isp_get_response(ispsoftc_t *isp, ispstatusreq_t *src, ispstatusreq_t *dst) { int i; isp_get_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXGET_32(isp, &src->req_handle, dst->req_handle); ISP_IOXGET_16(isp, &src->req_scsi_status, dst->req_scsi_status); ISP_IOXGET_16(isp, &src->req_completion_status, dst->req_completion_status); ISP_IOXGET_16(isp, 
&src->req_state_flags, dst->req_state_flags); ISP_IOXGET_16(isp, &src->req_status_flags, dst->req_status_flags); ISP_IOXGET_16(isp, &src->req_time, dst->req_time); ISP_IOXGET_16(isp, &src->req_sense_len, dst->req_sense_len); ISP_IOXGET_32(isp, &src->req_resid, dst->req_resid); for (i = 0; i < sizeof (src->req_response); i++) { ISP_IOXGET_8(isp, &src->req_response[i], dst->req_response[i]); } for (i = 0; i < sizeof (src->req_sense_data); i++) { ISP_IOXGET_8(isp, &src->req_sense_data[i], dst->req_sense_data[i]); } } void isp_get_cont_response(ispsoftc_t *isp, ispstatus_cont_t *src, ispstatus_cont_t *dst) { int i; isp_get_hdr(isp, &src->req_header, &dst->req_header); if (IS_24XX(isp)) { uint32_t *a, *b; a = (uint32_t *) src->req_sense_data; b = (uint32_t *) dst->req_sense_data; for (i = 0; i < (sizeof (src->req_sense_data) / sizeof (uint32_t)); i++) { ISP_IOZGET_32(isp, a++, *b++); } } else { for (i = 0; i < sizeof (src->req_sense_data); i++) { ISP_IOXGET_8(isp, &src->req_sense_data[i], dst->req_sense_data[i]); } } } void isp_get_24xx_response(ispsoftc_t *isp, isp24xx_statusreq_t *src, isp24xx_statusreq_t *dst) { int i; uint32_t *s, *d; isp_get_hdr(isp, &src->req_header, &dst->req_header); ISP_IOXGET_32(isp, &src->req_handle, dst->req_handle); ISP_IOXGET_16(isp, &src->req_completion_status, dst->req_completion_status); ISP_IOXGET_16(isp, &src->req_oxid, dst->req_oxid); ISP_IOXGET_32(isp, &src->req_resid, dst->req_resid); ISP_IOXGET_16(isp, &src->req_reserved0, dst->req_reserved0); ISP_IOXGET_16(isp, &src->req_state_flags, dst->req_state_flags); ISP_IOXGET_16(isp, &src->req_retry_delay, dst->req_retry_delay); ISP_IOXGET_16(isp, &src->req_scsi_status, dst->req_scsi_status); ISP_IOXGET_32(isp, &src->req_fcp_residual, dst->req_fcp_residual); ISP_IOXGET_32(isp, &src->req_sense_len, dst->req_sense_len); ISP_IOXGET_32(isp, &src->req_response_len, dst->req_response_len); s = (uint32_t *)src->req_rsp_sense; d = (uint32_t *)dst->req_rsp_sense; for (i = 0; i < (ASIZE(src->req_rsp_sense) >> 2); i++) { d[i] = ISP_SWAP32(isp, s[i]); } } void isp_get_24xx_abrt(ispsoftc_t *isp, isp24xx_abrt_t *src, isp24xx_abrt_t *dst) { int i; isp_get_hdr(isp, &src->abrt_header, &dst->abrt_header); ISP_IOXGET_32(isp, &src->abrt_handle, dst->abrt_handle); ISP_IOXGET_16(isp, &src->abrt_nphdl, dst->abrt_nphdl); ISP_IOXGET_16(isp, &src->abrt_options, dst->abrt_options); ISP_IOXGET_32(isp, &src->abrt_cmd_handle, dst->abrt_cmd_handle); ISP_IOXGET_16(isp, &src->abrt_queue_number, dst->abrt_queue_number); for (i = 0; i < ASIZE(src->abrt_reserved); i++) { ISP_IOXGET_8(isp, &src->abrt_reserved[i], dst->abrt_reserved[i]); } ISP_IOXGET_16(isp, &src->abrt_tidlo, dst->abrt_tidlo); ISP_IOXGET_8(isp, &src->abrt_tidhi, dst->abrt_tidhi); ISP_IOXGET_8(isp, &src->abrt_vpidx, dst->abrt_vpidx); for (i = 0; i < ASIZE(src->abrt_reserved1); i++) { ISP_IOXGET_8(isp, &src->abrt_reserved1[i], dst->abrt_reserved1[i]); } } void isp_get_rio1(ispsoftc_t *isp, isp_rio1_t *r1src, isp_rio1_t *r1dst) { const int lim = sizeof (r1dst->req_handles) / sizeof (r1dst->req_handles[0]); int i; isp_get_hdr(isp, &r1src->req_header, &r1dst->req_header); if (r1dst->req_header.rqs_seqno > lim) { r1dst->req_header.rqs_seqno = lim; } for (i = 0; i < r1dst->req_header.rqs_seqno; i++) { ISP_IOXGET_32(isp, &r1src->req_handles[i], r1dst->req_handles[i]); } while (i < lim) { r1dst->req_handles[i++] = 0; } } void isp_get_rio2(ispsoftc_t *isp, isp_rio2_t *r2src, isp_rio2_t *r2dst) { const int lim = sizeof (r2dst->req_handles) / sizeof (r2dst->req_handles[0]); int i; 
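	/*
	 * As in isp_get_rio1() above, the firmware reuses rqs_seqno as the
	 * count of valid entries in req_handles[], so the count is clamped
	 * to the array bound before copying and the unused tail is zeroed.
	 * A consumer can therefore stop at the first zero handle; a sketch
	 * (isp_fastpost_complete() here is an assumed completion helper):
	 *
	 *	for (i = 0; i < lim && r2dst->req_handles[i] != 0; i++)
	 *		isp_fastpost_complete(isp, r2dst->req_handles[i]);
	 */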
isp_get_hdr(isp, &r2src->req_header, &r2dst->req_header); if (r2dst->req_header.rqs_seqno > lim) { r2dst->req_header.rqs_seqno = lim; } for (i = 0; i < r2dst->req_header.rqs_seqno; i++) { ISP_IOXGET_16(isp, &r2src->req_handles[i], r2dst->req_handles[i]); } while (i < lim) { r2dst->req_handles[i++] = 0; } } void isp_put_icb(ispsoftc_t *isp, isp_icb_t *src, isp_icb_t *dst) { int i; if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_version, &dst->icb_reserved0); ISP_IOXPUT_8(isp, src->icb_reserved0, &dst->icb_version); } else { ISP_IOXPUT_8(isp, src->icb_version, &dst->icb_version); ISP_IOXPUT_8(isp, src->icb_reserved0, &dst->icb_reserved0); } ISP_IOXPUT_16(isp, src->icb_fwoptions, &dst->icb_fwoptions); ISP_IOXPUT_16(isp, src->icb_maxfrmlen, &dst->icb_maxfrmlen); ISP_IOXPUT_16(isp, src->icb_maxalloc, &dst->icb_maxalloc); ISP_IOXPUT_16(isp, src->icb_execthrottle, &dst->icb_execthrottle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_retry_count, &dst->icb_retry_delay); ISP_IOXPUT_8(isp, src->icb_retry_delay, &dst->icb_retry_count); } else { ISP_IOXPUT_8(isp, src->icb_retry_count, &dst->icb_retry_count); ISP_IOXPUT_8(isp, src->icb_retry_delay, &dst->icb_retry_delay); } for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, src->icb_portname[i], &dst->icb_portname[i]); } ISP_IOXPUT_16(isp, src->icb_hardaddr, &dst->icb_hardaddr); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_iqdevtype, &dst->icb_logintime); ISP_IOXPUT_8(isp, src->icb_logintime, &dst->icb_iqdevtype); } else { ISP_IOXPUT_8(isp, src->icb_iqdevtype, &dst->icb_iqdevtype); ISP_IOXPUT_8(isp, src->icb_logintime, &dst->icb_logintime); } for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, src->icb_nodename[i], &dst->icb_nodename[i]); } ISP_IOXPUT_16(isp, src->icb_rqstout, &dst->icb_rqstout); ISP_IOXPUT_16(isp, src->icb_rspnsin, &dst->icb_rspnsin); ISP_IOXPUT_16(isp, src->icb_rqstqlen, &dst->icb_rqstqlen); ISP_IOXPUT_16(isp, src->icb_rsltqlen, &dst->icb_rsltqlen); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_rqstaddr[i], &dst->icb_rqstaddr[i]); } for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_respaddr[i], &dst->icb_respaddr[i]); } ISP_IOXPUT_16(isp, src->icb_lunenables, &dst->icb_lunenables); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_ccnt, &dst->icb_icnt); ISP_IOXPUT_8(isp, src->icb_icnt, &dst->icb_ccnt); } else { ISP_IOXPUT_8(isp, src->icb_ccnt, &dst->icb_ccnt); ISP_IOXPUT_8(isp, src->icb_icnt, &dst->icb_icnt); } ISP_IOXPUT_16(isp, src->icb_lunetimeout, &dst->icb_lunetimeout); ISP_IOXPUT_16(isp, src->icb_reserved1, &dst->icb_reserved1); ISP_IOXPUT_16(isp, src->icb_xfwoptions, &dst->icb_xfwoptions); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->icb_racctimer, &dst->icb_idelaytimer); ISP_IOXPUT_8(isp, src->icb_idelaytimer, &dst->icb_racctimer); } else { ISP_IOXPUT_8(isp, src->icb_racctimer, &dst->icb_racctimer); ISP_IOXPUT_8(isp, src->icb_idelaytimer, &dst->icb_idelaytimer); } ISP_IOXPUT_16(isp, src->icb_zfwoptions, &dst->icb_zfwoptions); } void isp_put_icb_2400(ispsoftc_t *isp, isp_icb_2400_t *src, isp_icb_2400_t *dst) { int i; ISP_IOXPUT_16(isp, src->icb_version, &dst->icb_version); ISP_IOXPUT_16(isp, src->icb_reserved0, &dst->icb_reserved0); ISP_IOXPUT_16(isp, src->icb_maxfrmlen, &dst->icb_maxfrmlen); ISP_IOXPUT_16(isp, src->icb_execthrottle, &dst->icb_execthrottle); ISP_IOXPUT_16(isp, src->icb_xchgcnt, &dst->icb_xchgcnt); ISP_IOXPUT_16(isp, src->icb_hardaddr, &dst->icb_hardaddr); for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, src->icb_portname[i], &dst->icb_portname[i]); } for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, 
src->icb_nodename[i], &dst->icb_nodename[i]); } ISP_IOXPUT_16(isp, src->icb_rspnsin, &dst->icb_rspnsin); ISP_IOXPUT_16(isp, src->icb_rqstout, &dst->icb_rqstout); ISP_IOXPUT_16(isp, src->icb_retry_count, &dst->icb_retry_count); ISP_IOXPUT_16(isp, src->icb_priout, &dst->icb_priout); ISP_IOXPUT_16(isp, src->icb_rsltqlen, &dst->icb_rsltqlen); ISP_IOXPUT_16(isp, src->icb_rqstqlen, &dst->icb_rqstqlen); ISP_IOXPUT_16(isp, src->icb_ldn_nols, &dst->icb_ldn_nols); ISP_IOXPUT_16(isp, src->icb_prqstqlen, &dst->icb_prqstqlen); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_rqstaddr[i], &dst->icb_rqstaddr[i]); } for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_respaddr[i], &dst->icb_respaddr[i]); } for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_priaddr[i], &dst->icb_priaddr[i]); } for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_reserved1[i], &dst->icb_reserved1[i]); } ISP_IOXPUT_16(isp, src->icb_atio_in, &dst->icb_atio_in); ISP_IOXPUT_16(isp, src->icb_atioqlen, &dst->icb_atioqlen); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->icb_atioqaddr[i], &dst->icb_atioqaddr[i]); } ISP_IOXPUT_16(isp, src->icb_idelaytimer, &dst->icb_idelaytimer); ISP_IOXPUT_16(isp, src->icb_logintime, &dst->icb_logintime); ISP_IOXPUT_32(isp, src->icb_fwoptions1, &dst->icb_fwoptions1); ISP_IOXPUT_32(isp, src->icb_fwoptions2, &dst->icb_fwoptions2); ISP_IOXPUT_32(isp, src->icb_fwoptions3, &dst->icb_fwoptions3); for (i = 0; i < 12; i++) { ISP_IOXPUT_16(isp, src->icb_reserved2[i], &dst->icb_reserved2[i]); } } void isp_put_icb_2400_vpinfo(ispsoftc_t *isp, isp_icb_2400_vpinfo_t *src, isp_icb_2400_vpinfo_t *dst) { ISP_IOXPUT_16(isp, src->vp_count, &dst->vp_count); ISP_IOXPUT_16(isp, src->vp_global_options, &dst->vp_global_options); } void isp_put_vp_port_info(ispsoftc_t *isp, vp_port_info_t *src, vp_port_info_t *dst) { int i; ISP_IOXPUT_16(isp, src->vp_port_status, &dst->vp_port_status); ISP_IOXPUT_8(isp, src->vp_port_options, &dst->vp_port_options); ISP_IOXPUT_8(isp, src->vp_port_loopid, &dst->vp_port_loopid); for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, src->vp_port_portname[i], &dst->vp_port_portname[i]); } for (i = 0; i < 8; i++) { ISP_IOXPUT_8(isp, src->vp_port_nodename[i], &dst->vp_port_nodename[i]); } /* we never *put* portid_lo/portid_hi */ } void isp_get_vp_port_info(ispsoftc_t *isp, vp_port_info_t *src, vp_port_info_t *dst) { int i; ISP_IOXGET_16(isp, &src->vp_port_status, dst->vp_port_status); ISP_IOXGET_8(isp, &src->vp_port_options, dst->vp_port_options); ISP_IOXGET_8(isp, &src->vp_port_loopid, dst->vp_port_loopid); for (i = 0; i < ASIZE(src->vp_port_portname); i++) { ISP_IOXGET_8(isp, &src->vp_port_portname[i], dst->vp_port_portname[i]); } for (i = 0; i < ASIZE(src->vp_port_nodename); i++) { ISP_IOXGET_8(isp, &src->vp_port_nodename[i], dst->vp_port_nodename[i]); } ISP_IOXGET_16(isp, &src->vp_port_portid_lo, dst->vp_port_portid_lo); ISP_IOXGET_16(isp, &src->vp_port_portid_hi, dst->vp_port_portid_hi); } void isp_put_vp_ctrl_info(ispsoftc_t *isp, vp_ctrl_info_t *src, vp_ctrl_info_t *dst) { int i; isp_put_hdr(isp, &src->vp_ctrl_hdr, &dst->vp_ctrl_hdr); ISP_IOXPUT_32(isp, src->vp_ctrl_handle, &dst->vp_ctrl_handle); ISP_IOXPUT_16(isp, src->vp_ctrl_index_fail, &dst->vp_ctrl_index_fail); ISP_IOXPUT_16(isp, src->vp_ctrl_status, &dst->vp_ctrl_status); ISP_IOXPUT_16(isp, src->vp_ctrl_command, &dst->vp_ctrl_command); ISP_IOXPUT_16(isp, src->vp_ctrl_vp_count, &dst->vp_ctrl_vp_count); for (i = 0; i < ASIZE(src->vp_ctrl_idmap); i++) { ISP_IOXPUT_16(isp, src->vp_ctrl_idmap[i], &dst->vp_ctrl_idmap[i]); } for (i = 0; 
i < ASIZE(src->vp_ctrl_reserved); i++) { ISP_IOXPUT_16(isp, src->vp_ctrl_reserved[i], &dst->vp_ctrl_reserved[i]); } ISP_IOXPUT_16(isp, src->vp_ctrl_fcf_index, &dst->vp_ctrl_fcf_index); } void isp_get_vp_ctrl_info(ispsoftc_t *isp, vp_ctrl_info_t *src, vp_ctrl_info_t *dst) { int i; isp_get_hdr(isp, &src->vp_ctrl_hdr, &dst->vp_ctrl_hdr); ISP_IOXGET_32(isp, &src->vp_ctrl_handle, dst->vp_ctrl_handle); ISP_IOXGET_16(isp, &src->vp_ctrl_index_fail, dst->vp_ctrl_index_fail); ISP_IOXGET_16(isp, &src->vp_ctrl_status, dst->vp_ctrl_status); ISP_IOXGET_16(isp, &src->vp_ctrl_command, dst->vp_ctrl_command); ISP_IOXGET_16(isp, &src->vp_ctrl_vp_count, dst->vp_ctrl_vp_count); for (i = 0; i < ASIZE(src->vp_ctrl_idmap); i++) { ISP_IOXGET_16(isp, &src->vp_ctrl_idmap[i], dst->vp_ctrl_idmap[i]); } for (i = 0; i < ASIZE(src->vp_ctrl_reserved); i++) { ISP_IOXGET_16(isp, &src->vp_ctrl_reserved[i], dst->vp_ctrl_reserved[i]); } ISP_IOXGET_16(isp, &src->vp_ctrl_fcf_index, dst->vp_ctrl_fcf_index); } void isp_put_vp_modify(ispsoftc_t *isp, vp_modify_t *src, vp_modify_t *dst) { int i, j; isp_put_hdr(isp, &src->vp_mod_hdr, &dst->vp_mod_hdr); ISP_IOXPUT_32(isp, src->vp_mod_hdl, &dst->vp_mod_hdl); ISP_IOXPUT_16(isp, src->vp_mod_reserved0, &dst->vp_mod_reserved0); ISP_IOXPUT_16(isp, src->vp_mod_status, &dst->vp_mod_status); ISP_IOXPUT_8(isp, src->vp_mod_cmd, &dst->vp_mod_cmd); ISP_IOXPUT_8(isp, src->vp_mod_cnt, &dst->vp_mod_cnt); ISP_IOXPUT_8(isp, src->vp_mod_idx0, &dst->vp_mod_idx0); ISP_IOXPUT_8(isp, src->vp_mod_idx1, &dst->vp_mod_idx1); for (i = 0; i < ASIZE(src->vp_mod_ports); i++) { ISP_IOXPUT_8(isp, src->vp_mod_ports[i].options, &dst->vp_mod_ports[i].options); ISP_IOXPUT_8(isp, src->vp_mod_ports[i].loopid, &dst->vp_mod_ports[i].loopid); ISP_IOXPUT_16(isp, src->vp_mod_ports[i].reserved1, &dst->vp_mod_ports[i].reserved1); for (j = 0; j < ASIZE(src->vp_mod_ports[i].wwpn); j++) { ISP_IOXPUT_8(isp, src->vp_mod_ports[i].wwpn[j], &dst->vp_mod_ports[i].wwpn[j]); } for (j = 0; j < ASIZE(src->vp_mod_ports[i].wwnn); j++) { ISP_IOXPUT_8(isp, src->vp_mod_ports[i].wwnn[j], &dst->vp_mod_ports[i].wwnn[j]); } } for (i = 0; i < ASIZE(src->vp_mod_reserved2); i++) { ISP_IOXPUT_8(isp, src->vp_mod_reserved2[i], &dst->vp_mod_reserved2[i]); } } void isp_get_vp_modify(ispsoftc_t *isp, vp_modify_t *src, vp_modify_t *dst) { int i, j; isp_get_hdr(isp, &src->vp_mod_hdr, &dst->vp_mod_hdr); ISP_IOXGET_32(isp, &src->vp_mod_hdl, dst->vp_mod_hdl); ISP_IOXGET_16(isp, &src->vp_mod_reserved0, dst->vp_mod_reserved0); ISP_IOXGET_16(isp, &src->vp_mod_status, dst->vp_mod_status); ISP_IOXGET_8(isp, &src->vp_mod_cmd, dst->vp_mod_cmd); ISP_IOXGET_8(isp, &src->vp_mod_cnt, dst->vp_mod_cnt); ISP_IOXGET_8(isp, &src->vp_mod_idx0, dst->vp_mod_idx0); ISP_IOXGET_8(isp, &src->vp_mod_idx1, dst->vp_mod_idx1); for (i = 0; i < ASIZE(src->vp_mod_ports); i++) { ISP_IOXGET_8(isp, &src->vp_mod_ports[i].options, dst->vp_mod_ports[i].options); ISP_IOXGET_8(isp, &src->vp_mod_ports[i].loopid, dst->vp_mod_ports[i].loopid); ISP_IOXGET_16(isp, &src->vp_mod_ports[i].reserved1, dst->vp_mod_ports[i].reserved1); for (j = 0; j < ASIZE(src->vp_mod_ports[i].wwpn); j++) { ISP_IOXGET_8(isp, &src->vp_mod_ports[i].wwpn[j], dst->vp_mod_ports[i].wwpn[j]); } for (j = 0; j < ASIZE(src->vp_mod_ports[i].wwnn); j++) { ISP_IOXGET_8(isp, &src->vp_mod_ports[i].wwnn[j], dst->vp_mod_ports[i].wwnn[j]); } } for (i = 0; i < ASIZE(src->vp_mod_reserved2); i++) { ISP_IOXGET_8(isp, &src->vp_mod_reserved2[i], dst->vp_mod_reserved2[i]); } } void isp_get_pdb_21xx(ispsoftc_t *isp, isp_pdb_21xx_t *src, isp_pdb_21xx_t 
*dst) { int i; ISP_IOXGET_16(isp, &src->pdb_options, dst->pdb_options); ISP_IOXGET_8(isp, &src->pdb_mstate, dst->pdb_mstate); ISP_IOXGET_8(isp, &src->pdb_sstate, dst->pdb_sstate); for (i = 0; i < 4; i++) { ISP_IOXGET_8(isp, &src->pdb_hardaddr_bits[i], dst->pdb_hardaddr_bits[i]); } for (i = 0; i < 4; i++) { ISP_IOXGET_8(isp, &src->pdb_portid_bits[i], dst->pdb_portid_bits[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->pdb_nodename[i], dst->pdb_nodename[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->pdb_portname[i], dst->pdb_portname[i]); } ISP_IOXGET_16(isp, &src->pdb_execthrottle, dst->pdb_execthrottle); ISP_IOXGET_16(isp, &src->pdb_exec_count, dst->pdb_exec_count); ISP_IOXGET_8(isp, &src->pdb_retry_count, dst->pdb_retry_count); ISP_IOXGET_8(isp, &src->pdb_retry_delay, dst->pdb_retry_delay); ISP_IOXGET_16(isp, &src->pdb_resalloc, dst->pdb_resalloc); ISP_IOXGET_16(isp, &src->pdb_curalloc, dst->pdb_curalloc); ISP_IOXGET_16(isp, &src->pdb_qhead, dst->pdb_qhead); ISP_IOXGET_16(isp, &src->pdb_qtail, dst->pdb_qtail); ISP_IOXGET_16(isp, &src->pdb_tl_next, dst->pdb_tl_next); ISP_IOXGET_16(isp, &src->pdb_tl_last, dst->pdb_tl_last); ISP_IOXGET_16(isp, &src->pdb_features, dst->pdb_features); ISP_IOXGET_16(isp, &src->pdb_pconcurrnt, dst->pdb_pconcurrnt); ISP_IOXGET_16(isp, &src->pdb_roi, dst->pdb_roi); ISP_IOXGET_8(isp, &src->pdb_target, dst->pdb_target); ISP_IOXGET_8(isp, &src->pdb_initiator, dst->pdb_initiator); ISP_IOXGET_16(isp, &src->pdb_rdsiz, dst->pdb_rdsiz); ISP_IOXGET_16(isp, &src->pdb_ncseq, dst->pdb_ncseq); ISP_IOXGET_16(isp, &src->pdb_noseq, dst->pdb_noseq); ISP_IOXGET_16(isp, &src->pdb_labrtflg, dst->pdb_labrtflg); ISP_IOXGET_16(isp, &src->pdb_lstopflg, dst->pdb_lstopflg); ISP_IOXGET_16(isp, &src->pdb_sqhead, dst->pdb_sqhead); ISP_IOXGET_16(isp, &src->pdb_sqtail, dst->pdb_sqtail); ISP_IOXGET_16(isp, &src->pdb_ptimer, dst->pdb_ptimer); ISP_IOXGET_16(isp, &src->pdb_nxt_seqid, dst->pdb_nxt_seqid); ISP_IOXGET_16(isp, &src->pdb_fcount, dst->pdb_fcount); ISP_IOXGET_16(isp, &src->pdb_prli_len, dst->pdb_prli_len); ISP_IOXGET_16(isp, &src->pdb_prli_svc0, dst->pdb_prli_svc0); ISP_IOXGET_16(isp, &src->pdb_prli_svc3, dst->pdb_prli_svc3); ISP_IOXGET_16(isp, &src->pdb_loopid, dst->pdb_loopid); ISP_IOXGET_16(isp, &src->pdb_il_ptr, dst->pdb_il_ptr); ISP_IOXGET_16(isp, &src->pdb_sl_ptr, dst->pdb_sl_ptr); } void isp_get_pdb_24xx(ispsoftc_t *isp, isp_pdb_24xx_t *src, isp_pdb_24xx_t *dst) { int i; ISP_IOXGET_16(isp, &src->pdb_flags, dst->pdb_flags); ISP_IOXGET_8(isp, &src->pdb_curstate, dst->pdb_curstate); ISP_IOXGET_8(isp, &src->pdb_laststate, dst->pdb_laststate); for (i = 0; i < 4; i++) { ISP_IOXGET_8(isp, &src->pdb_hardaddr_bits[i], dst->pdb_hardaddr_bits[i]); } for (i = 0; i < 4; i++) { ISP_IOXGET_8(isp, &src->pdb_portid_bits[i], dst->pdb_portid_bits[i]); } ISP_IOXGET_16(isp, &src->pdb_retry_timer, dst->pdb_retry_timer); ISP_IOXGET_16(isp, &src->pdb_handle, dst->pdb_handle); ISP_IOXGET_16(isp, &src->pdb_rcv_dsize, dst->pdb_rcv_dsize); ISP_IOXGET_16(isp, &src->pdb_reserved0, dst->pdb_reserved0); ISP_IOXGET_16(isp, &src->pdb_prli_svc0, dst->pdb_prli_svc0); ISP_IOXGET_16(isp, &src->pdb_prli_svc3, dst->pdb_prli_svc3); for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->pdb_nodename[i], dst->pdb_nodename[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->pdb_portname[i], dst->pdb_portname[i]); } for (i = 0; i < 24; i++) { ISP_IOXGET_8(isp, &src->pdb_reserved1[i], dst->pdb_reserved1[i]); } } void isp_get_pnhle_21xx(ispsoftc_t *isp, isp_pnhle_21xx_t *src, isp_pnhle_21xx_t *dst) { 
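	/*
	 * The 2100-era name/handle list entry packs the 8-bit loop handle
	 * together with the high byte of the 24-bit port ID into a single
	 * 16-bit word; the 23xx and 24xx variants below carry them in
	 * separate fields.  One plausible unpacking (a sketch, not taken
	 * from the chip documentation -- verify the byte order there):
	 *
	 *	portid = ((dst->pnhle_port_id_hi_handle & 0xff) << 16) |
	 *	    dst->pnhle_port_id_lo;
	 *	handle = dst->pnhle_port_id_hi_handle >> 8;
	 */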
ISP_IOXGET_16(isp, &src->pnhle_port_id_lo, dst->pnhle_port_id_lo); ISP_IOXGET_16(isp, &src->pnhle_port_id_hi_handle, dst->pnhle_port_id_hi_handle); } void isp_get_pnhle_23xx(ispsoftc_t *isp, isp_pnhle_23xx_t *src, isp_pnhle_23xx_t *dst) { ISP_IOXGET_16(isp, &src->pnhle_port_id_lo, dst->pnhle_port_id_lo); ISP_IOXGET_16(isp, &src->pnhle_port_id_hi, dst->pnhle_port_id_hi); ISP_IOXGET_16(isp, &src->pnhle_handle, dst->pnhle_handle); } void isp_get_pnhle_24xx(ispsoftc_t *isp, isp_pnhle_24xx_t *src, isp_pnhle_24xx_t *dst) { ISP_IOXGET_16(isp, &src->pnhle_port_id_lo, dst->pnhle_port_id_lo); ISP_IOXGET_16(isp, &src->pnhle_port_id_hi, dst->pnhle_port_id_hi); ISP_IOXGET_16(isp, &src->pnhle_handle, dst->pnhle_handle); ISP_IOXGET_16(isp, &src->pnhle_reserved, dst->pnhle_reserved); } void isp_get_pnnle(ispsoftc_t *isp, isp_pnnle_t *src, isp_pnnle_t *dst) { int i; for (i = 0; i < 8; i++) ISP_IOXGET_8(isp, &src->pnnle_name[i], dst->pnnle_name[i]); ISP_IOXGET_16(isp, &src->pnnle_handle, dst->pnnle_handle); ISP_IOXGET_16(isp, &src->pnnle_reserved, dst->pnnle_reserved); } /* * PLOGI/LOGO IOCB canonicalization */ void isp_get_plogx(ispsoftc_t *isp, isp_plogx_t *src, isp_plogx_t *dst) { int i; isp_get_hdr(isp, &src->plogx_header, &dst->plogx_header); ISP_IOXGET_32(isp, &src->plogx_handle, dst->plogx_handle); ISP_IOXGET_16(isp, &src->plogx_status, dst->plogx_status); ISP_IOXGET_16(isp, &src->plogx_nphdl, dst->plogx_nphdl); ISP_IOXGET_16(isp, &src->plogx_flags, dst->plogx_flags); ISP_IOXGET_16(isp, &src->plogx_vphdl, dst->plogx_vphdl); ISP_IOXGET_16(isp, &src->plogx_portlo, dst->plogx_portlo); ISP_IOXGET_16(isp, &src->plogx_rspsz_porthi, dst->plogx_rspsz_porthi); for (i = 0; i < 11; i++) { ISP_IOXGET_16(isp, &src->plogx_ioparm[i].lo16, dst->plogx_ioparm[i].lo16); ISP_IOXGET_16(isp, &src->plogx_ioparm[i].hi16, dst->plogx_ioparm[i].hi16); } } void isp_put_plogx(ispsoftc_t *isp, isp_plogx_t *src, isp_plogx_t *dst) { int i; isp_put_hdr(isp, &src->plogx_header, &dst->plogx_header); ISP_IOXPUT_32(isp, src->plogx_handle, &dst->plogx_handle); ISP_IOXPUT_16(isp, src->plogx_status, &dst->plogx_status); ISP_IOXPUT_16(isp, src->plogx_nphdl, &dst->plogx_nphdl); ISP_IOXPUT_16(isp, src->plogx_flags, &dst->plogx_flags); ISP_IOXPUT_16(isp, src->plogx_vphdl, &dst->plogx_vphdl); ISP_IOXPUT_16(isp, src->plogx_portlo, &dst->plogx_portlo); ISP_IOXPUT_16(isp, src->plogx_rspsz_porthi, &dst->plogx_rspsz_porthi); for (i = 0; i < 11; i++) { ISP_IOXPUT_16(isp, src->plogx_ioparm[i].lo16, &dst->plogx_ioparm[i].lo16); ISP_IOXPUT_16(isp, src->plogx_ioparm[i].hi16, &dst->plogx_ioparm[i].hi16); } } /* * Report ID canonicalization */ void isp_get_ridacq(ispsoftc_t *isp, isp_ridacq_t *src, isp_ridacq_t *dst) { int i; isp_get_hdr(isp, &src->ridacq_hdr, &dst->ridacq_hdr); ISP_IOXGET_32(isp, &src->ridacq_handle, dst->ridacq_handle); ISP_IOXGET_8(isp, &src->ridacq_vp_acquired, dst->ridacq_vp_acquired); ISP_IOXGET_8(isp, &src->ridacq_vp_setup, dst->ridacq_vp_setup); ISP_IOXGET_8(isp, &src->ridacq_vp_index, dst->ridacq_vp_index); ISP_IOXGET_8(isp, &src->ridacq_vp_status, dst->ridacq_vp_status); ISP_IOXGET_16(isp, &src->ridacq_vp_port_lo, dst->ridacq_vp_port_lo); ISP_IOXGET_8(isp, &src->ridacq_vp_port_hi, dst->ridacq_vp_port_hi); ISP_IOXGET_8(isp, &src->ridacq_format, dst->ridacq_format); for (i = 0; i < sizeof (src->ridacq_map) / sizeof (src->ridacq_map[0]); i++) { ISP_IOXGET_16(isp, &src->ridacq_map[i], dst->ridacq_map[i]); } for (i = 0; i < sizeof (src->ridacq_reserved1) / sizeof (src->ridacq_reserved1[0]); i++) { ISP_IOXGET_16(isp, 
&src->ridacq_reserved1[i], dst->ridacq_reserved1[i]); } } /* * CT Passthru canonicalization */ void isp_get_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *src, isp_ct_pt_t *dst) { int i; isp_get_hdr(isp, &src->ctp_header, &dst->ctp_header); ISP_IOXGET_32(isp, &src->ctp_handle, dst->ctp_handle); ISP_IOXGET_16(isp, &src->ctp_status, dst->ctp_status); ISP_IOXGET_16(isp, &src->ctp_nphdl, dst->ctp_nphdl); ISP_IOXGET_16(isp, &src->ctp_cmd_cnt, dst->ctp_cmd_cnt); ISP_IOXGET_8(isp, &src->ctp_vpidx, dst->ctp_vpidx); ISP_IOXGET_8(isp, &src->ctp_reserved0, dst->ctp_reserved0); ISP_IOXGET_16(isp, &src->ctp_time, dst->ctp_time); ISP_IOXGET_16(isp, &src->ctp_reserved1, dst->ctp_reserved1); ISP_IOXGET_16(isp, &src->ctp_rsp_cnt, dst->ctp_rsp_cnt); for (i = 0; i < 5; i++) { ISP_IOXGET_16(isp, &src->ctp_reserved2[i], dst->ctp_reserved2[i]); } ISP_IOXGET_32(isp, &src->ctp_rsp_bcnt, dst->ctp_rsp_bcnt); ISP_IOXGET_32(isp, &src->ctp_cmd_bcnt, dst->ctp_cmd_bcnt); for (i = 0; i < 2; i++) { ISP_IOXGET_32(isp, &src->ctp_dataseg[i].ds_base, dst->ctp_dataseg[i].ds_base); ISP_IOXGET_32(isp, &src->ctp_dataseg[i].ds_basehi, dst->ctp_dataseg[i].ds_basehi); ISP_IOXGET_32(isp, &src->ctp_dataseg[i].ds_count, dst->ctp_dataseg[i].ds_count); } } void isp_get_ms(ispsoftc_t *isp, isp_ms_t *src, isp_ms_t *dst) { int i; isp_get_hdr(isp, &src->ms_header, &dst->ms_header); ISP_IOXGET_32(isp, &src->ms_handle, dst->ms_handle); ISP_IOXGET_16(isp, &src->ms_nphdl, dst->ms_nphdl); ISP_IOXGET_16(isp, &src->ms_status, dst->ms_status); ISP_IOXGET_16(isp, &src->ms_flags, dst->ms_flags); ISP_IOXGET_16(isp, &src->ms_reserved1, dst->ms_reserved1); ISP_IOXGET_16(isp, &src->ms_time, dst->ms_time); ISP_IOXGET_16(isp, &src->ms_cmd_cnt, dst->ms_cmd_cnt); ISP_IOXGET_16(isp, &src->ms_tot_cnt, dst->ms_tot_cnt); ISP_IOXGET_8(isp, &src->ms_type, dst->ms_type); ISP_IOXGET_8(isp, &src->ms_r_ctl, dst->ms_r_ctl); ISP_IOXGET_16(isp, &src->ms_rxid, dst->ms_rxid); ISP_IOXGET_16(isp, &src->ms_reserved2, dst->ms_reserved2); ISP_IOXGET_32(isp, &src->ms_rsp_bcnt, dst->ms_rsp_bcnt); ISP_IOXGET_32(isp, &src->ms_cmd_bcnt, dst->ms_cmd_bcnt); for (i = 0; i < 2; i++) { ISP_IOXGET_32(isp, &src->ms_dataseg[i].ds_base, dst->ms_dataseg[i].ds_base); ISP_IOXGET_32(isp, &src->ms_dataseg[i].ds_basehi, dst->ms_dataseg[i].ds_basehi); ISP_IOXGET_32(isp, &src->ms_dataseg[i].ds_count, dst->ms_dataseg[i].ds_count); } } void isp_put_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *src, isp_ct_pt_t *dst) { int i; isp_put_hdr(isp, &src->ctp_header, &dst->ctp_header); ISP_IOXPUT_32(isp, src->ctp_handle, &dst->ctp_handle); ISP_IOXPUT_16(isp, src->ctp_status, &dst->ctp_status); ISP_IOXPUT_16(isp, src->ctp_nphdl, &dst->ctp_nphdl); ISP_IOXPUT_16(isp, src->ctp_cmd_cnt, &dst->ctp_cmd_cnt); ISP_IOXPUT_8(isp, src->ctp_vpidx, &dst->ctp_vpidx); ISP_IOXPUT_8(isp, src->ctp_reserved0, &dst->ctp_reserved0); ISP_IOXPUT_16(isp, src->ctp_time, &dst->ctp_time); ISP_IOXPUT_16(isp, src->ctp_reserved1, &dst->ctp_reserved1); ISP_IOXPUT_16(isp, src->ctp_rsp_cnt, &dst->ctp_rsp_cnt); for (i = 0; i < 5; i++) { ISP_IOXPUT_16(isp, src->ctp_reserved2[i], &dst->ctp_reserved2[i]); } ISP_IOXPUT_32(isp, src->ctp_rsp_bcnt, &dst->ctp_rsp_bcnt); ISP_IOXPUT_32(isp, src->ctp_cmd_bcnt, &dst->ctp_cmd_bcnt); for (i = 0; i < 2; i++) { ISP_IOXPUT_32(isp, src->ctp_dataseg[i].ds_base, &dst->ctp_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->ctp_dataseg[i].ds_basehi, &dst->ctp_dataseg[i].ds_basehi); ISP_IOXPUT_32(isp, src->ctp_dataseg[i].ds_count, &dst->ctp_dataseg[i].ds_count); } } void isp_put_ms(ispsoftc_t *isp, isp_ms_t *src, isp_ms_t *dst) { int i; 
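	/*
	 * A typical calling pattern for these canonicalizers (a sketch;
	 * "qe" stands for a request queue slot obtained via
	 * isp_getrqentry()): build the IOCB in a local, host-order
	 * structure, then swizzle it into shared queue memory in one pass:
	 *
	 *	isp_ms_t ms;
	 *
	 *	ISP_MEMZERO(&ms, sizeof (ms));
	 *	... fill in header, handles, counts and data segments ...
	 *	isp_put_ms(isp, &ms, (isp_ms_t *)qe);
	 */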
isp_put_hdr(isp, &src->ms_header, &dst->ms_header); ISP_IOXPUT_32(isp, src->ms_handle, &dst->ms_handle); ISP_IOXPUT_16(isp, src->ms_nphdl, &dst->ms_nphdl); ISP_IOXPUT_16(isp, src->ms_status, &dst->ms_status); ISP_IOXPUT_16(isp, src->ms_flags, &dst->ms_flags); ISP_IOXPUT_16(isp, src->ms_reserved1, &dst->ms_reserved1); ISP_IOXPUT_16(isp, src->ms_time, &dst->ms_time); ISP_IOXPUT_16(isp, src->ms_cmd_cnt, &dst->ms_cmd_cnt); ISP_IOXPUT_16(isp, src->ms_tot_cnt, &dst->ms_tot_cnt); ISP_IOXPUT_8(isp, src->ms_type, &dst->ms_type); ISP_IOXPUT_8(isp, src->ms_r_ctl, &dst->ms_r_ctl); ISP_IOXPUT_16(isp, src->ms_rxid, &dst->ms_rxid); ISP_IOXPUT_16(isp, src->ms_reserved2, &dst->ms_reserved2); ISP_IOXPUT_32(isp, src->ms_rsp_bcnt, &dst->ms_rsp_bcnt); ISP_IOXPUT_32(isp, src->ms_cmd_bcnt, &dst->ms_cmd_bcnt); for (i = 0; i < 2; i++) { ISP_IOXPUT_32(isp, src->ms_dataseg[i].ds_base, &dst->ms_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->ms_dataseg[i].ds_basehi, &dst->ms_dataseg[i].ds_basehi); ISP_IOXPUT_32(isp, src->ms_dataseg[i].ds_count, &dst->ms_dataseg[i].ds_count); } } /* * Generic SNS request - not particularly useful since the per-command data * isn't always 16 bit words. */ void isp_put_sns_request(ispsoftc_t *isp, sns_screq_t *src, sns_screq_t *dst) { int i, nw = (int) src->snscb_sblen; ISP_IOXPUT_16(isp, src->snscb_rblen, &dst->snscb_rblen); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->snscb_addr[i], &dst->snscb_addr[i]); } ISP_IOXPUT_16(isp, src->snscb_sblen, &dst->snscb_sblen); for (i = 0; i < nw; i++) { ISP_IOXPUT_16(isp, src->snscb_data[i], &dst->snscb_data[i]); } } void isp_put_gid_ft_request(ispsoftc_t *isp, sns_gid_ft_req_t *src, sns_gid_ft_req_t *dst) { ISP_IOXPUT_16(isp, src->snscb_rblen, &dst->snscb_rblen); ISP_IOXPUT_16(isp, src->snscb_reserved0, &dst->snscb_reserved0); ISP_IOXPUT_16(isp, src->snscb_addr[0], &dst->snscb_addr[0]); ISP_IOXPUT_16(isp, src->snscb_addr[1], &dst->snscb_addr[1]); ISP_IOXPUT_16(isp, src->snscb_addr[2], &dst->snscb_addr[2]); ISP_IOXPUT_16(isp, src->snscb_addr[3], &dst->snscb_addr[3]); ISP_IOXPUT_16(isp, src->snscb_sblen, &dst->snscb_sblen); ISP_IOXPUT_16(isp, src->snscb_reserved1, &dst->snscb_reserved1); ISP_IOXPUT_16(isp, src->snscb_cmd, &dst->snscb_cmd); ISP_IOXPUT_16(isp, src->snscb_mword_div_2, &dst->snscb_mword_div_2); ISP_IOXPUT_32(isp, src->snscb_reserved3, &dst->snscb_reserved3); ISP_IOXPUT_32(isp, src->snscb_fc4_type, &dst->snscb_fc4_type); } void isp_put_gxn_id_request(ispsoftc_t *isp, sns_gxn_id_req_t *src, sns_gxn_id_req_t *dst) { ISP_IOXPUT_16(isp, src->snscb_rblen, &dst->snscb_rblen); ISP_IOXPUT_16(isp, src->snscb_reserved0, &dst->snscb_reserved0); ISP_IOXPUT_16(isp, src->snscb_addr[0], &dst->snscb_addr[0]); ISP_IOXPUT_16(isp, src->snscb_addr[1], &dst->snscb_addr[1]); ISP_IOXPUT_16(isp, src->snscb_addr[2], &dst->snscb_addr[2]); ISP_IOXPUT_16(isp, src->snscb_addr[3], &dst->snscb_addr[3]); ISP_IOXPUT_16(isp, src->snscb_sblen, &dst->snscb_sblen); ISP_IOXPUT_16(isp, src->snscb_reserved1, &dst->snscb_reserved1); ISP_IOXPUT_16(isp, src->snscb_cmd, &dst->snscb_cmd); ISP_IOXPUT_16(isp, src->snscb_reserved2, &dst->snscb_reserved2); ISP_IOXPUT_32(isp, src->snscb_reserved3, &dst->snscb_reserved3); ISP_IOXPUT_32(isp, src->snscb_portid, &dst->snscb_portid); } /* * Generic SNS response - not particularly useful since the per-command data * isn't always 16 bit words. 
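 * The same caveat as on the request side applies: snscb_data[] is
 * swizzled 16 bits at a time, which is only correct for replies whose
 * payload really is an array of 16-bit words.  Replies that mix byte-wide
 * and word-wide fields get dedicated decoders below, e.g.
 * isp_get_gid_ft_response() for GID_FT and isp_get_ga_nxt_response() for
 * GA_NXT.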
*/ void isp_get_sns_response(ispsoftc_t *isp, sns_scrsp_t *src, sns_scrsp_t *dst, int nwords) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); ISP_IOXGET_8(isp, &src->snscb_port_type, dst->snscb_port_type); for (i = 0; i < 3; i++) { ISP_IOXGET_8(isp, &src->snscb_port_id[i], dst->snscb_port_id[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->snscb_portname[i], dst->snscb_portname[i]); } for (i = 0; i < nwords; i++) { ISP_IOXGET_16(isp, &src->snscb_data[i], dst->snscb_data[i]); } } void isp_get_gid_ft_response(ispsoftc_t *isp, sns_gid_ft_rsp_t *src, sns_gid_ft_rsp_t *dst, int nwords) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); for (i = 0; i < nwords; i++) { int j; ISP_IOXGET_8(isp, &src->snscb_ports[i].control, dst->snscb_ports[i].control); for (j = 0; j < 3; j++) { ISP_IOXGET_8(isp, &src->snscb_ports[i].portid[j], dst->snscb_ports[i].portid[j]); } if (dst->snscb_ports[i].control & 0x80) { break; } } } void isp_get_gxn_id_response(ispsoftc_t *isp, sns_gxn_id_rsp_t *src, sns_gxn_id_rsp_t *dst) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->snscb_wwn[i], dst->snscb_wwn[i]); } } void isp_get_gff_id_response(ispsoftc_t *isp, sns_gff_id_rsp_t *src, sns_gff_id_rsp_t *dst) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); for (i = 0; i < 32; i++) { ISP_IOXGET_32(isp, &src->snscb_fc4_features[i], dst->snscb_fc4_features[i]); } } void isp_get_ga_nxt_response(ispsoftc_t *isp, sns_ga_nxt_rsp_t *src, sns_ga_nxt_rsp_t *dst) { int i; isp_get_ct_hdr(isp, &src->snscb_cthdr, &dst->snscb_cthdr); ISP_IOXGET_8(isp, &src->snscb_port_type, dst->snscb_port_type); for (i = 0; i < 3; i++) { ISP_IOXGET_8(isp, &src->snscb_port_id[i], dst->snscb_port_id[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->snscb_portname[i], dst->snscb_portname[i]); } ISP_IOXGET_8(isp, &src->snscb_pnlen, dst->snscb_pnlen); for (i = 0; i < 255; i++) { ISP_IOXGET_8(isp, &src->snscb_pname[i], dst->snscb_pname[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->snscb_nodename[i], dst->snscb_nodename[i]); } ISP_IOXGET_8(isp, &src->snscb_nnlen, dst->snscb_nnlen); for (i = 0; i < 255; i++) { ISP_IOXGET_8(isp, &src->snscb_nname[i], dst->snscb_nname[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->snscb_ipassoc[i], dst->snscb_ipassoc[i]); } for (i = 0; i < 16; i++) { ISP_IOXGET_8(isp, &src->snscb_ipaddr[i], dst->snscb_ipaddr[i]); } for (i = 0; i < 4; i++) { ISP_IOXGET_8(isp, &src->snscb_svc_class[i], dst->snscb_svc_class[i]); } for (i = 0; i < 32; i++) { ISP_IOXGET_8(isp, &src->snscb_fc4_types[i], dst->snscb_fc4_types[i]); } for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->snscb_fpname[i], dst->snscb_fpname[i]); } ISP_IOXGET_8(isp, &src->snscb_reserved, dst->snscb_reserved); for (i = 0; i < 3; i++) { ISP_IOXGET_8(isp, &src->snscb_hardaddr[i], dst->snscb_hardaddr[i]); } } void isp_get_els(ispsoftc_t *isp, els_t *src, els_t *dst) { int i; isp_get_hdr(isp, &src->els_hdr, &dst->els_hdr); ISP_IOXGET_32(isp, &src->els_handle, dst->els_handle); ISP_IOXGET_16(isp, &src->els_status, dst->els_status); ISP_IOXGET_16(isp, &src->els_nphdl, dst->els_nphdl); ISP_IOXGET_16(isp, &src->els_xmit_dsd_count, dst->els_xmit_dsd_count); ISP_IOXGET_8(isp, &src->els_vphdl, dst->els_vphdl); ISP_IOXGET_8(isp, &src->els_sof, dst->els_sof); ISP_IOXGET_32(isp, &src->els_rxid, dst->els_rxid); ISP_IOXGET_16(isp, &src->els_recv_dsd_count, dst->els_recv_dsd_count); ISP_IOXGET_8(isp, &src->els_opcode, dst->els_opcode); 
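	/*
	 * Note: the els_reserved1 destination is filled from els_reserved2,
	 * both here and in isp_put_els() below.  Since the fields involved
	 * are reserved, nothing should depend on the distinction.
	 */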
ISP_IOXGET_8(isp, &src->els_reserved2, dst->els_reserved1); ISP_IOXGET_8(isp, &src->els_did_lo, dst->els_did_lo); ISP_IOXGET_8(isp, &src->els_did_mid, dst->els_did_mid); ISP_IOXGET_8(isp, &src->els_did_hi, dst->els_did_hi); ISP_IOXGET_8(isp, &src->els_reserved2, dst->els_reserved2); ISP_IOXGET_16(isp, &src->els_reserved3, dst->els_reserved3); ISP_IOXGET_16(isp, &src->els_ctl_flags, dst->els_ctl_flags); ISP_IOXGET_32(isp, &src->els_bytecnt, dst->els_bytecnt); ISP_IOXGET_32(isp, &src->els_subcode1, dst->els_subcode1); ISP_IOXGET_32(isp, &src->els_subcode2, dst->els_subcode2); for (i = 0; i < 20; i++) { ISP_IOXGET_8(isp, &src->els_reserved4[i], dst->els_reserved4[i]); } } void isp_put_els(ispsoftc_t *isp, els_t *src, els_t *dst) { isp_put_hdr(isp, &src->els_hdr, &dst->els_hdr); ISP_IOXPUT_32(isp, src->els_handle, &dst->els_handle); ISP_IOXPUT_16(isp, src->els_status, &dst->els_status); ISP_IOXPUT_16(isp, src->els_nphdl, &dst->els_nphdl); ISP_IOXPUT_16(isp, src->els_xmit_dsd_count, &dst->els_xmit_dsd_count); ISP_IOXPUT_8(isp, src->els_vphdl, &dst->els_vphdl); ISP_IOXPUT_8(isp, src->els_sof, &dst->els_sof); ISP_IOXPUT_32(isp, src->els_rxid, &dst->els_rxid); ISP_IOXPUT_16(isp, src->els_recv_dsd_count, &dst->els_recv_dsd_count); ISP_IOXPUT_8(isp, src->els_opcode, &dst->els_opcode); ISP_IOXPUT_8(isp, src->els_reserved2, &dst->els_reserved1); ISP_IOXPUT_8(isp, src->els_did_lo, &dst->els_did_lo); ISP_IOXPUT_8(isp, src->els_did_mid, &dst->els_did_mid); ISP_IOXPUT_8(isp, src->els_did_hi, &dst->els_did_hi); ISP_IOXPUT_8(isp, src->els_reserved2, &dst->els_reserved2); ISP_IOXPUT_16(isp, src->els_reserved3, &dst->els_reserved3); ISP_IOXPUT_16(isp, src->els_ctl_flags, &dst->els_ctl_flags); ISP_IOXPUT_32(isp, src->els_recv_bytecnt, &dst->els_recv_bytecnt); ISP_IOXPUT_32(isp, src->els_xmit_bytecnt, &dst->els_xmit_bytecnt); ISP_IOXPUT_32(isp, src->els_xmit_dsd_length, &dst->els_xmit_dsd_length); ISP_IOXPUT_16(isp, src->els_xmit_dsd_a1500, &dst->els_xmit_dsd_a1500); ISP_IOXPUT_16(isp, src->els_xmit_dsd_a3116, &dst->els_xmit_dsd_a3116); ISP_IOXPUT_16(isp, src->els_xmit_dsd_a4732, &dst->els_xmit_dsd_a4732); ISP_IOXPUT_16(isp, src->els_xmit_dsd_a6348, &dst->els_xmit_dsd_a6348); ISP_IOXPUT_32(isp, src->els_recv_dsd_length, &dst->els_recv_dsd_length); ISP_IOXPUT_16(isp, src->els_recv_dsd_a1500, &dst->els_recv_dsd_a1500); ISP_IOXPUT_16(isp, src->els_recv_dsd_a3116, &dst->els_recv_dsd_a3116); ISP_IOXPUT_16(isp, src->els_recv_dsd_a4732, &dst->els_recv_dsd_a4732); ISP_IOXPUT_16(isp, src->els_recv_dsd_a6348, &dst->els_recv_dsd_a6348); } /* * FC Structure Canonicalization */ void isp_get_fc_hdr(ispsoftc_t *isp, fc_hdr_t *src, fc_hdr_t *dst) { ISP_IOZGET_8(isp, &src->r_ctl, dst->r_ctl); ISP_IOZGET_8(isp, &src->d_id[0], dst->d_id[0]); ISP_IOZGET_8(isp, &src->d_id[1], dst->d_id[1]); ISP_IOZGET_8(isp, &src->d_id[2], dst->d_id[2]); ISP_IOZGET_8(isp, &src->cs_ctl, dst->cs_ctl); ISP_IOZGET_8(isp, &src->s_id[0], dst->s_id[0]); ISP_IOZGET_8(isp, &src->s_id[1], dst->s_id[1]); ISP_IOZGET_8(isp, &src->s_id[2], dst->s_id[2]); ISP_IOZGET_8(isp, &src->type, dst->type); ISP_IOZGET_8(isp, &src->f_ctl[0], dst->f_ctl[0]); ISP_IOZGET_8(isp, &src->f_ctl[1], dst->f_ctl[1]); ISP_IOZGET_8(isp, &src->f_ctl[2], dst->f_ctl[2]); ISP_IOZGET_8(isp, &src->seq_id, dst->seq_id); ISP_IOZGET_8(isp, &src->df_ctl, dst->df_ctl); ISP_IOZGET_16(isp, &src->seq_cnt, dst->seq_cnt); ISP_IOZGET_16(isp, &src->ox_id, dst->ox_id); ISP_IOZGET_16(isp, &src->rx_id, dst->rx_id); ISP_IOZGET_32(isp, &src->parameter, dst->parameter); } void isp_put_fc_hdr(ispsoftc_t *isp, 
fc_hdr_t *src, fc_hdr_t *dst) { ISP_IOZPUT_8(isp, src->r_ctl, &dst->r_ctl); ISP_IOZPUT_8(isp, src->d_id[0], &dst->d_id[0]); ISP_IOZPUT_8(isp, src->d_id[1], &dst->d_id[1]); ISP_IOZPUT_8(isp, src->d_id[2], &dst->d_id[2]); ISP_IOZPUT_8(isp, src->cs_ctl, &dst->cs_ctl); ISP_IOZPUT_8(isp, src->s_id[0], &dst->s_id[0]); ISP_IOZPUT_8(isp, src->s_id[1], &dst->s_id[1]); ISP_IOZPUT_8(isp, src->s_id[2], &dst->s_id[2]); ISP_IOZPUT_8(isp, src->type, &dst->type); ISP_IOZPUT_8(isp, src->f_ctl[0], &dst->f_ctl[0]); ISP_IOZPUT_8(isp, src->f_ctl[1], &dst->f_ctl[1]); ISP_IOZPUT_8(isp, src->f_ctl[2], &dst->f_ctl[2]); ISP_IOZPUT_8(isp, src->seq_id, &dst->seq_id); ISP_IOZPUT_8(isp, src->df_ctl, &dst->df_ctl); ISP_IOZPUT_16(isp, src->seq_cnt, &dst->seq_cnt); ISP_IOZPUT_16(isp, src->ox_id, &dst->ox_id); ISP_IOZPUT_16(isp, src->rx_id, &dst->rx_id); ISP_IOZPUT_32(isp, src->parameter, &dst->parameter); } void isp_get_fcp_cmnd_iu(ispsoftc_t *isp, fcp_cmnd_iu_t *src, fcp_cmnd_iu_t *dst) { int i; for (i = 0; i < 8; i++) { ISP_IOZGET_8(isp, &src->fcp_cmnd_lun[i], dst->fcp_cmnd_lun[i]); } ISP_IOZGET_8(isp, &src->fcp_cmnd_crn, dst->fcp_cmnd_crn); ISP_IOZGET_8(isp, &src->fcp_cmnd_task_attribute, dst->fcp_cmnd_task_attribute); ISP_IOZGET_8(isp, &src->fcp_cmnd_task_management, dst->fcp_cmnd_task_management); ISP_IOZGET_8(isp, &src->fcp_cmnd_alen_datadir, dst->fcp_cmnd_alen_datadir); for (i = 0; i < 16; i++) { ISP_IOZGET_8(isp, &src->cdb_dl.sf.fcp_cmnd_cdb[i], dst->cdb_dl.sf.fcp_cmnd_cdb[i]); } ISP_IOZGET_32(isp, &src->cdb_dl.sf.fcp_cmnd_dl, dst->cdb_dl.sf.fcp_cmnd_dl); } void isp_put_rft_id(ispsoftc_t *isp, rft_id_t *src, rft_id_t *dst) { int i; isp_put_ct_hdr(isp, &src->rftid_hdr, &dst->rftid_hdr); ISP_IOZPUT_8(isp, src->rftid_reserved, &dst->rftid_reserved); for (i = 0; i < 3; i++) { ISP_IOZPUT_8(isp, src->rftid_portid[i], &dst->rftid_portid[i]); } for (i = 0; i < 8; i++) { ISP_IOZPUT_32(isp, src->rftid_fc4types[i], &dst->rftid_fc4types[i]); } } void +isp_put_rff_id(ispsoftc_t *isp, rff_id_t *src, rff_id_t *dst) +{ + int i; + + isp_put_ct_hdr(isp, &src->rffid_hdr, &dst->rffid_hdr); + ISP_IOZPUT_8(isp, src->rffid_reserved, &dst->rffid_reserved); + for (i = 0; i < 3; i++) + ISP_IOZPUT_8(isp, src->rffid_portid[i], &dst->rffid_portid[i]); + ISP_IOZPUT_16(isp, src->rffid_reserved2, &dst->rffid_reserved2); + ISP_IOZPUT_8(isp, src->rffid_fc4features, &dst->rffid_fc4features); + ISP_IOZPUT_8(isp, src->rffid_fc4type, &dst->rffid_fc4type); +} + +void isp_get_ct_hdr(ispsoftc_t *isp, ct_hdr_t *src, ct_hdr_t *dst) { ISP_IOZGET_8(isp, &src->ct_revision, dst->ct_revision); ISP_IOZGET_8(isp, &src->ct_in_id[0], dst->ct_in_id[0]); ISP_IOZGET_8(isp, &src->ct_in_id[1], dst->ct_in_id[1]); ISP_IOZGET_8(isp, &src->ct_in_id[2], dst->ct_in_id[2]); ISP_IOZGET_8(isp, &src->ct_fcs_type, dst->ct_fcs_type); ISP_IOZGET_8(isp, &src->ct_fcs_subtype, dst->ct_fcs_subtype); ISP_IOZGET_8(isp, &src->ct_options, dst->ct_options); ISP_IOZGET_8(isp, &src->ct_reserved0, dst->ct_reserved0); ISP_IOZGET_16(isp, &src->ct_cmd_resp, dst->ct_cmd_resp); ISP_IOZGET_16(isp, &src->ct_bcnt_resid, dst->ct_bcnt_resid); ISP_IOZGET_8(isp, &src->ct_reserved1, dst->ct_reserved1); ISP_IOZGET_8(isp, &src->ct_reason, dst->ct_reason); ISP_IOZGET_8(isp, &src->ct_explanation, dst->ct_explanation); ISP_IOZGET_8(isp, &src->ct_vunique, dst->ct_vunique); } void isp_put_ct_hdr(ispsoftc_t *isp, ct_hdr_t *src, ct_hdr_t *dst) { ISP_IOZPUT_8(isp, src->ct_revision, &dst->ct_revision); ISP_IOZPUT_8(isp, src->ct_in_id[0], &dst->ct_in_id[0]); ISP_IOZPUT_8(isp, src->ct_in_id[1], &dst->ct_in_id[1]); 
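	/*
	 * Everything in this "FC Structure Canonicalization" group uses the
	 * ISP_IOZ* accessors rather than ISP_IOX*: these structures travel
	 * on the wire and are therefore big-endian regardless of the byte
	 * order the RISC expects for its IOCBs.  On a little-endian host a
	 * plausible definition (a sketch; the real macros are per-platform)
	 * is the mirror image of the IOX ones:
	 *
	 *	#define	ISP_IOZPUT_16(isp, s, d)	(*(d) = htobe16(s))
	 *	#define	ISP_IOZGET_16(isp, s, d)	((d) = be16toh(*(s)))
	 */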
ISP_IOZPUT_8(isp, src->ct_in_id[2], &dst->ct_in_id[2]); ISP_IOZPUT_8(isp, src->ct_fcs_type, &dst->ct_fcs_type); ISP_IOZPUT_8(isp, src->ct_fcs_subtype, &dst->ct_fcs_subtype); ISP_IOZPUT_8(isp, src->ct_options, &dst->ct_options); ISP_IOZPUT_8(isp, src->ct_reserved0, &dst->ct_reserved0); ISP_IOZPUT_16(isp, src->ct_cmd_resp, &dst->ct_cmd_resp); ISP_IOZPUT_16(isp, src->ct_bcnt_resid, &dst->ct_bcnt_resid); ISP_IOZPUT_8(isp, src->ct_reserved1, &dst->ct_reserved1); ISP_IOZPUT_8(isp, src->ct_reason, &dst->ct_reason); ISP_IOZPUT_8(isp, src->ct_explanation, &dst->ct_explanation); ISP_IOZPUT_8(isp, src->ct_vunique, &dst->ct_vunique); } void isp_put_fcp_rsp_iu(ispsoftc_t *isp, fcp_rsp_iu_t *src, fcp_rsp_iu_t *dst) { int i; for (i = 0; i < ((sizeof (src->fcp_rsp_reserved))/(sizeof (src->fcp_rsp_reserved[0]))); i++) { ISP_IOZPUT_8(isp, src->fcp_rsp_reserved[i], &dst->fcp_rsp_reserved[i]); } ISP_IOZPUT_16(isp, src->fcp_rsp_status_qualifier, &dst->fcp_rsp_status_qualifier); ISP_IOZPUT_8(isp, src->fcp_rsp_bits, &dst->fcp_rsp_bits); ISP_IOZPUT_8(isp, src->fcp_rsp_scsi_status, &dst->fcp_rsp_scsi_status); ISP_IOZPUT_32(isp, src->fcp_rsp_resid, &dst->fcp_rsp_resid); ISP_IOZPUT_32(isp, src->fcp_rsp_snslen, &dst->fcp_rsp_snslen); ISP_IOZPUT_32(isp, src->fcp_rsp_rsplen, &dst->fcp_rsp_rsplen); } #ifdef ISP_TARGET_MODE /* * Command shipping- finish off first queue entry and do dma mapping and * additional segments as needed. * * Called with the first queue entry mostly filled out. * Our job here is to finish that and add additional data * segments if needed. * * We used to do synthetic entries to split data and status * at this level, but that started getting too tricky. */ int isp_send_tgt_cmd(ispsoftc_t *isp, void *fqe, void *segp, uint32_t nsegs, uint32_t totalcnt, isp_ddir_t ddir, void *snsptr, uint32_t snslen) { uint8_t storage[QENTRY_LEN]; uint8_t type, nqe; uint32_t seg, curseg, seglim, nxt, nxtnxt; ispds_t *dsp = NULL; ispds64_t *dsp64 = NULL; void *qe0, *qe1; qe0 = isp_getrqentry(isp); if (qe0 == NULL) { return (CMD_EAGAIN); } nxt = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)); type = ((isphdr_t *)fqe)->rqs_entry_type; nqe = 1; seglim = 0; /* * If we have data to transmit, figure out how many segments can fit into the first entry. */ if (ddir != ISP_NOXFR) { /* * First, figure out how many pieces of data to transfer and what kind and how many we can put into the first queue entry. */ switch (type) { case RQSTYPE_CTIO: dsp = ((ct_entry_t *)fqe)->ct_dataseg; seglim = ISP_RQDSEG; break; case RQSTYPE_CTIO2: dsp = ((ct2_entry_t *)fqe)->rsp.m0.u.ct_dataseg; seglim = ISP_RQDSEG_T2; break; case RQSTYPE_CTIO3: dsp64 = ((ct2_entry_t *)fqe)->rsp.m0.u.ct_dataseg64; seglim = ISP_RQDSEG_T3; break; case RQSTYPE_CTIO7: dsp64 = &((ct7_entry_t *)fqe)->rsp.m0.ds; seglim = 1; break; default: return (CMD_COMPLETE); } } /* * First, fill out any of the data transfer stuff that fits * in the first queue entry. */ if (seglim > nsegs) { seglim = nsegs; } for (seg = curseg = 0; curseg < seglim; curseg++) { if (dsp64) { XS_GET_DMA64_SEG(dsp64++, segp, seg++); } else { XS_GET_DMA_SEG(dsp++, segp, seg++); } } /* * Second, start building additional continuation segments as needed. 
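 * (Sizing note, derived from the constants used here: a CTIO7 carries a
 * single inline descriptor and each 64-bit continuation entry carries
 * ISP_CDSEG64 of them, so with ISP_CDSEG64 at its usual value of 5 a
 * 12-segment transfer costs 1 + howmany(11, 5) = 4 queue entries, which
 * is the value nqe accumulates below.)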
	 */
	while (seg < nsegs) {
		nxtnxt = ISP_NXT_QENTRY(nxt, RQUEST_QUEUE_LEN(isp));
		if (nxtnxt == isp->isp_reqodx) {
			isp->isp_reqodx = ISP_READ(isp, isp->isp_rqstoutrp);
			if (nxtnxt == isp->isp_reqodx)
				return (CMD_EAGAIN);
		}
		ISP_MEMZERO(storage, QENTRY_LEN);
		qe1 = ISP_QUEUE_ENTRY(isp->isp_rquest, nxt);
		nxt = nxtnxt;
		if (dsp64) {
			ispcontreq64_t *crq = (ispcontreq64_t *) storage;
			seglim = ISP_CDSEG64;
			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
			crq->req_header.rqs_entry_count = 1;
			dsp64 = crq->req_dataseg;
		} else {
			ispcontreq_t *crq = (ispcontreq_t *) storage;
			seglim = ISP_CDSEG;
			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
			crq->req_header.rqs_entry_count = 1;
			dsp = crq->req_dataseg;
		}
		if (seg + seglim > nsegs) {
			seglim = nsegs - seg;
		}
		for (curseg = 0; curseg < seglim; curseg++) {
			if (dsp64) {
				XS_GET_DMA64_SEG(dsp64++, segp, seg++);
			} else {
				XS_GET_DMA_SEG(dsp++, segp, seg++);
			}
		}
		if (dsp64) {
			isp_put_cont64_req(isp, (ispcontreq64_t *)storage, qe1);
		} else {
			isp_put_cont_req(isp, (ispcontreq_t *)storage, qe1);
		}
		if (isp->isp_dblev & ISP_LOGTDEBUG1) {
			isp_print_bytes(isp, "additional queue entry", QENTRY_LEN, storage);
		}
		nqe++;
	}

	/*
	 * Third, now patch up the first queue entry with the number of
	 * segments we are actually going to transmit. At the same time,
	 * handle any mode 2 requests.
	 */
	((isphdr_t *)fqe)->rqs_entry_count = nqe;
	switch (type) {
	case RQSTYPE_CTIO:
		((ct_entry_t *)fqe)->ct_seg_count = nsegs;
		isp_put_ctio(isp, fqe, qe0);
		break;
	case RQSTYPE_CTIO2:
	case RQSTYPE_CTIO3:
		if (((ct2_entry_t *)fqe)->ct_flags & CT2_FLAG_MODE2) {
			((ct2_entry_t *)fqe)->ct_seg_count = 1;
		} else {
			((ct2_entry_t *)fqe)->ct_seg_count = nsegs;
		}
		if (ISP_CAP_2KLOGIN(isp)) {
			isp_put_ctio2e(isp, fqe, qe0);
		} else {
			isp_put_ctio2(isp, fqe, qe0);
		}
		break;
	case RQSTYPE_CTIO7:
		if (((ct7_entry_t *)fqe)->ct_flags & CT7_FLAG_MODE2) {
			((ct7_entry_t *)fqe)->ct_seg_count = 1;
		} else {
			((ct7_entry_t *)fqe)->ct_seg_count = nsegs;
		}
		isp_put_ctio7(isp, fqe, qe0);
		break;
	default:
		return (CMD_COMPLETE);
	}
	if (isp->isp_dblev & ISP_LOGTDEBUG1) {
		isp_print_bytes(isp, "first queue entry", QENTRY_LEN, fqe);
	}
	ISP_ADD_REQUEST(isp, nxt);
	return (CMD_QUEUED);
}

int
isp_allocate_xs_tgt(ispsoftc_t *isp, void *xs, uint32_t *handlep)
{
	isp_hdl_t *hdp;

	hdp = isp->isp_tgtfree;
	if (hdp == NULL) {
		return (-1);
	}
	isp->isp_tgtfree = hdp->cmd;
	hdp->cmd = xs;
	hdp->handle = (hdp - isp->isp_tgtlist);
	hdp->handle |= (ISP_HANDLE_TARGET << ISP_HANDLE_USAGE_SHIFT);
	/*
	 * Target handles for SCSI cards are only 16 bits, so
	 * sequence number protection will be omitted.
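	 * The handle is thus packed bits: the list index in the low bits,
	 * ISP_HANDLE_TARGET in the usage bits and, for FC cards, a rolling
	 * sequence number above ISP_HANDLE_SEQ_SHIFT.  The sequence bits
	 * make a recycled slot yield a different handle value each time,
	 * which lets stale completions be told apart from the slot's
	 * current owner.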
*/ if (IS_FC(isp)) { hdp->handle |= (isp->isp_seqno++ << ISP_HANDLE_SEQ_SHIFT); } *handlep = hdp->handle; return (0); } void * isp_find_xs_tgt(ispsoftc_t *isp, uint32_t handle) { if (!ISP_VALID_TGT_HANDLE(isp, handle)) { isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle); return (NULL); } return (isp->isp_tgtlist[(handle & ISP_HANDLE_CMD_MASK)].cmd); } uint32_t isp_find_tgt_handle(ispsoftc_t *isp, void *xs) { uint32_t i, foundhdl = ISP_HANDLE_FREE; if (xs != NULL) { for (i = 0; i < isp->isp_maxcmds; i++) { if (isp->isp_tgtlist[i].cmd != xs) { continue; } foundhdl = isp->isp_tgtlist[i].handle; break; } } return (foundhdl); } void isp_destroy_tgt_handle(ispsoftc_t *isp, uint32_t handle) { if (!ISP_VALID_TGT_HANDLE(isp, handle)) { isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle); } else { isp->isp_tgtlist[(handle & ISP_HANDLE_CMD_MASK)].handle = ISP_HANDLE_FREE; isp->isp_tgtlist[(handle & ISP_HANDLE_CMD_MASK)].cmd = isp->isp_tgtfree; isp->isp_tgtfree = &isp->isp_tgtlist[(handle & ISP_HANDLE_CMD_MASK)]; } } #endif /* * Find port database entries */ int isp_find_pdb_by_wwn(ispsoftc_t *isp, int chan, uint64_t wwn, fcportdb_t **lptr) { fcparam *fcp; int i; if (chan >= isp->isp_nchan) return (0); fcp = FCPARAM(isp, chan); for (i = 0; i < MAX_FC_TARG; i++) { fcportdb_t *lp = &fcp->portdb[i]; if (lp->state == FC_PORTDB_STATE_NIL) continue; if (lp->port_wwn == wwn) { *lptr = lp; return (1); } } return (0); } #ifdef ISP_TARGET_MODE int -isp_find_pdb_by_handle(ispsoftc_t *isp, int chan, uint32_t handle, fcportdb_t **lptr) +isp_find_pdb_by_handle(ispsoftc_t *isp, int chan, uint16_t handle, fcportdb_t **lptr) { fcparam *fcp; int i; if (chan >= isp->isp_nchan) return (0); fcp = FCPARAM(isp, chan); for (i = 0; i < MAX_FC_TARG; i++) { fcportdb_t *lp = &fcp->portdb[i]; if (lp->state == FC_PORTDB_STATE_NIL) continue; if (lp->handle == handle) { *lptr = lp; return (1); } } return (0); } int isp_find_pdb_by_sid(ispsoftc_t *isp, int chan, uint32_t sid, fcportdb_t **lptr) { fcparam *fcp; int i; if (chan >= isp->isp_nchan) return (0); fcp = FCPARAM(isp, chan); for (i = 0; i < MAX_FC_TARG; i++) { fcportdb_t *lp = &fcp->portdb[i]; if (lp->state == FC_PORTDB_STATE_NIL) continue; if (lp->portid == sid) { *lptr = lp; return (1); } } return (0); } void isp_find_chan_by_did(ispsoftc_t *isp, uint32_t did, uint16_t *cp) { uint16_t chan; *cp = ISP_NOCHAN; for (chan = 0; chan < isp->isp_nchan; chan++) { fcparam *fcp = FCPARAM(isp, chan); - if ((fcp->role & ISP_ROLE_TARGET) == 0 || fcp->isp_fwstate != FW_READY || fcp->isp_loopstate < LOOP_PDB_RCVD) { + if ((fcp->role & ISP_ROLE_TARGET) == 0 || + fcp->isp_loopstate < LOOP_LTEST_DONE) { continue; } if (fcp->isp_portid == did) { *cp = chan; break; } } } /* * Add an initiator device to the port database */ void isp_add_wwn_entry(ispsoftc_t *isp, int chan, uint64_t wwpn, uint64_t wwnn, uint16_t nphdl, uint32_t s_id, uint16_t prli_params) { char buf[64]; fcparam *fcp; fcportdb_t *lp; int i, change; fcp = FCPARAM(isp, chan); if (nphdl >= MAX_NPORT_HANDLE) { isp_prt(isp, ISP_LOGTINFO|ISP_LOGWARN, "Chan %d WWPN 0x%016llx " "PortID 0x%06x handle 0x%x -- bad handle", chan, (unsigned long long) wwpn, s_id, nphdl); return; } /* * If valid record for requested handle already exists, update it * with new parameters. Some cases of update can be suspicious, * so log them verbosely and dump the whole port database. 
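* Note that PortID and PRLI Word 3 changes are staged in new_portid and
* new_prli_word3 and committed only after the ISPASYNC_DEV_CHANGED
* callback has run, so upper layers observe both the old and new values.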
*/ if ((VALID_INI(wwpn) && isp_find_pdb_by_wwn(isp, chan, wwpn, &lp)) || (s_id != PORT_NONE && isp_find_pdb_by_sid(isp, chan, s_id, &lp))) { change = 0; lp->new_portid = lp->portid; lp->new_prli_word3 = lp->prli_word3; if (s_id != PORT_NONE && lp->portid != s_id) { if (lp->portid == PORT_NONE) { isp_prt(isp, ISP_LOGTINFO, "Chan %d WWPN 0x%016llx handle 0x%x " "gets PortID 0x%06x", chan, (unsigned long long) lp->port_wwn, nphdl, s_id); } else { isp_prt(isp, ISP_LOGTINFO|ISP_LOGWARN, "Chan %d WWPN 0x%016llx handle 0x%x " "changes PortID 0x%06x to 0x%06x", chan, (unsigned long long) lp->port_wwn, nphdl, lp->portid, s_id); if (isp->isp_dblev & (ISP_LOGTINFO|ISP_LOGWARN)) isp_dump_portdb(isp, chan); } lp->new_portid = s_id; change++; } if (VALID_INI(wwpn) && lp->port_wwn != wwpn) { if (!VALID_INI(lp->port_wwn)) { isp_prt(isp, ISP_LOGTINFO, "Chan %d PortID 0x%06x handle 0x%x " "gets WWPN 0x%016llx", chan, lp->portid, nphdl, (unsigned long long) wwpn); } else if (lp->port_wwn != wwpn) { isp_prt(isp, ISP_LOGTINFO|ISP_LOGWARN, "Chan %d PortID 0x%06x handle 0x%x " "changes WWPN 0x%016llx to 0x%016llx", chan, lp->portid, nphdl, (unsigned long long) lp->port_wwn, (unsigned long long) wwpn); if (isp->isp_dblev & (ISP_LOGTINFO|ISP_LOGWARN)) isp_dump_portdb(isp, chan); } lp->port_wwn = wwpn; change++; } if (VALID_INI(wwnn) && lp->node_wwn != wwnn) { if (!VALID_INI(lp->node_wwn)) { isp_prt(isp, ISP_LOGTINFO, "Chan %d PortID 0x%06x handle 0x%x " "gets WWNN 0x%016llx", chan, lp->portid, nphdl, (unsigned long long) wwnn); } else if (lp->node_wwn != wwnn) { isp_prt(isp, ISP_LOGTINFO, "Chan %d PortID 0x%06x handle 0x%x " "changes WWNN 0x%016llx to 0x%016llx", chan, lp->portid, nphdl, (unsigned long long) lp->node_wwn, (unsigned long long) wwnn); } lp->node_wwn = wwnn; change++; } if (prli_params != 0 && lp->prli_word3 != prli_params) { isp_gen_role_str(buf, sizeof (buf), prli_params); isp_prt(isp, ISP_LOGTINFO|ISP_LOGCONFIG, "Chan %d WWPN 0x%016llx PortID 0x%06x " "handle 0x%x changes PRLI Word 3 %s", chan, (unsigned long long) lp->port_wwn, lp->portid, lp->handle, buf); lp->new_prli_word3 = prli_params; change++; } if (lp->handle != nphdl) { isp_prt(isp, ISP_LOGTINFO|ISP_LOGCONFIG, "Chan %d WWPN 0x%016llx PortID 0x%06x " "changes handle 0x%x to 0x%x", chan, (unsigned long long) lp->port_wwn, lp->portid, lp->handle, nphdl); lp->handle = nphdl; change++; } lp->state = FC_PORTDB_STATE_VALID; if (change) { isp_async(isp, ISPASYNC_DEV_CHANGED, chan, lp); lp->portid = lp->new_portid; lp->prli_word3 = lp->new_prli_word3; lp->new_prli_word3 = 0; lp->new_portid = 0; } else { isp_prt(isp, ISP_LOGTINFO, "Chan %d WWPN 0x%016llx PortID 0x%06x " "handle 0x%x reentered", chan, (unsigned long long) lp->port_wwn, lp->portid, lp->handle); isp_async(isp, ISPASYNC_DEV_STAYED, chan, lp); } return; } /* Search for room to insert new record. */ for (i = 0; i < MAX_FC_TARG; i++) { if (fcp->portdb[i].state == FC_PORTDB_STATE_NIL) break; } if (i >= MAX_FC_TARG) { isp_prt(isp, ISP_LOGTINFO|ISP_LOGWARN, "Chan %d WWPN 0x%016llx PortID 0x%06x handle 0x%x " "-- no room in port database", chan, (unsigned long long) wwpn, s_id, nphdl); if (isp->isp_dblev & (ISP_LOGTINFO|ISP_LOGWARN)) isp_dump_portdb(isp, chan); return; } /* Insert new record and mark it valid. */ lp = &fcp->portdb[i]; ISP_MEMZERO(lp, sizeof (fcportdb_t)); lp->handle = nphdl; lp->portid = s_id; lp->port_wwn = wwpn; lp->node_wwn = wwnn; lp->prli_word3 = (prli_params != 0) ?
prli_params : PRLI_WD3_INITIATOR_FUNCTION; lp->state = FC_PORTDB_STATE_VALID; isp_gen_role_str(buf, sizeof (buf), lp->prli_word3); isp_prt(isp, ISP_LOGTINFO, "Chan %d WWPN 0x%016llx " "PortID 0x%06x handle 0x%x vtgt %d %s added", chan, (unsigned long long) wwpn, s_id, nphdl, i, buf); /* Notify above levels about new port arrival. */ isp_async(isp, ISPASYNC_DEV_ARRIVED, chan, lp); } /* * Remove a target device from the port database */ void isp_del_wwn_entry(ispsoftc_t *isp, int chan, uint64_t wwpn, uint16_t nphdl, uint32_t s_id) { fcparam *fcp; fcportdb_t *lp; if (nphdl >= MAX_NPORT_HANDLE) { isp_prt(isp, ISP_LOGWARN, "Chan %d WWPN 0x%016llx PortID 0x%06x bad handle 0x%x", chan, (unsigned long long) wwpn, s_id, nphdl); return; } fcp = FCPARAM(isp, chan); if (isp_find_pdb_by_handle(isp, chan, nphdl, &lp) == 0) { isp_prt(isp, ISP_LOGWARN, "Chan %d WWPN 0x%016llx PortID 0x%06x handle 0x%x cannot be found to be deleted", chan, (unsigned long long) wwpn, s_id, nphdl); isp_dump_portdb(isp, chan); return; } isp_prt(isp, ISP_LOGTINFO, "Chan %d WWPN 0x%016llx PortID 0x%06x handle 0x%x vtgt %d deleted", chan, (unsigned long long) lp->port_wwn, lp->portid, nphdl, FC_PORTDB_TGT(isp, chan, lp)); lp->state = FC_PORTDB_STATE_NIL; /* Notify above levels about gone port. */ isp_async(isp, ISPASYNC_DEV_GONE, chan, lp); } void isp_del_all_wwn_entries(ispsoftc_t *isp, int chan) { fcparam *fcp; int i; if (!IS_FC(isp)) { return; } /* * Handle iterations over all channels via recursion */ if (chan == ISP_NOCHAN) { for (chan = 0; chan < isp->isp_nchan; chan++) { isp_del_all_wwn_entries(isp, chan); } return; } if (chan > isp->isp_nchan) { return; } fcp = FCPARAM(isp, chan); if (fcp == NULL) { return; } for (i = 0; i < MAX_FC_TARG; i++) { fcportdb_t *lp = &fcp->portdb[i]; if (lp->state != FC_PORTDB_STATE_NIL) isp_del_wwn_entry(isp, chan, lp->port_wwn, lp->handle, lp->portid); } } void isp_del_wwn_entries(ispsoftc_t *isp, isp_notify_t *mp) { fcportdb_t *lp; /* * Handle iterations over all channels via recursion */ if (mp->nt_channel == ISP_NOCHAN) { for (mp->nt_channel = 0; mp->nt_channel < isp->isp_nchan; mp->nt_channel++) { isp_del_wwn_entries(isp, mp); } mp->nt_channel = ISP_NOCHAN; return; } /* * We have an entry which is only partially identified. * * It's only known by WWN, N-Port handle, or Port ID. * We need to find the actual entry so we can delete it.
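* The lookups below are attempted in order -- N-Port handle, then WWN,
* then Port ID -- and the first one that matches wins.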
*/ if (mp->nt_nphdl != NIL_HANDLE) { if (isp_find_pdb_by_handle(isp, mp->nt_channel, mp->nt_nphdl, &lp)) { isp_del_wwn_entry(isp, mp->nt_channel, lp->port_wwn, lp->handle, lp->portid); return; } } if (mp->nt_wwn != INI_ANY) { if (isp_find_pdb_by_wwn(isp, mp->nt_channel, mp->nt_wwn, &lp)) { isp_del_wwn_entry(isp, mp->nt_channel, lp->port_wwn, lp->handle, lp->portid); return; } } if (mp->nt_sid != PORT_ANY && mp->nt_sid != PORT_NONE) { if (isp_find_pdb_by_sid(isp, mp->nt_channel, mp->nt_sid, &lp)) { isp_del_wwn_entry(isp, mp->nt_channel, lp->port_wwn, lp->handle, lp->portid); return; } } isp_prt(isp, ISP_LOGWARN, "Chan %d unable to find entry to delete WWPN 0x%016jx PortID 0x%06x handle 0x%x", mp->nt_channel, mp->nt_wwn, mp->nt_sid, mp->nt_nphdl); } void isp_put_atio(ispsoftc_t *isp, at_entry_t *src, at_entry_t *dst) { int i; isp_put_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXPUT_16(isp, src->at_reserved, &dst->at_reserved); ISP_IOXPUT_16(isp, src->at_handle, &dst->at_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->at_lun, &dst->at_iid); ISP_IOXPUT_8(isp, src->at_iid, &dst->at_lun); ISP_IOXPUT_8(isp, src->at_cdblen, &dst->at_tgt); ISP_IOXPUT_8(isp, src->at_tgt, &dst->at_cdblen); ISP_IOXPUT_8(isp, src->at_status, &dst->at_scsi_status); ISP_IOXPUT_8(isp, src->at_scsi_status, &dst->at_status); ISP_IOXPUT_8(isp, src->at_tag_val, &dst->at_tag_type); ISP_IOXPUT_8(isp, src->at_tag_type, &dst->at_tag_val); } else { ISP_IOXPUT_8(isp, src->at_lun, &dst->at_lun); ISP_IOXPUT_8(isp, src->at_iid, &dst->at_iid); ISP_IOXPUT_8(isp, src->at_cdblen, &dst->at_cdblen); ISP_IOXPUT_8(isp, src->at_tgt, &dst->at_tgt); ISP_IOXPUT_8(isp, src->at_status, &dst->at_status); ISP_IOXPUT_8(isp, src->at_scsi_status, &dst->at_scsi_status); ISP_IOXPUT_8(isp, src->at_tag_val, &dst->at_tag_val); ISP_IOXPUT_8(isp, src->at_tag_type, &dst->at_tag_type); } ISP_IOXPUT_32(isp, src->at_flags, &dst->at_flags); for (i = 0; i < ATIO_CDBLEN; i++) { ISP_IOXPUT_8(isp, src->at_cdb[i], &dst->at_cdb[i]); } for (i = 0; i < QLTM_SENSELEN; i++) { ISP_IOXPUT_8(isp, src->at_sense[i], &dst->at_sense[i]); } } void isp_get_atio(ispsoftc_t *isp, at_entry_t *src, at_entry_t *dst) { int i; isp_get_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXGET_16(isp, &src->at_reserved, dst->at_reserved); ISP_IOXGET_16(isp, &src->at_handle, dst->at_handle); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &src->at_lun, dst->at_iid); ISP_IOXGET_8(isp, &src->at_iid, dst->at_lun); ISP_IOXGET_8(isp, &src->at_cdblen, dst->at_tgt); ISP_IOXGET_8(isp, &src->at_tgt, dst->at_cdblen); ISP_IOXGET_8(isp, &src->at_status, dst->at_scsi_status); ISP_IOXGET_8(isp, &src->at_scsi_status, dst->at_status); ISP_IOXGET_8(isp, &src->at_tag_val, dst->at_tag_type); ISP_IOXGET_8(isp, &src->at_tag_type, dst->at_tag_val); } else { ISP_IOXGET_8(isp, &src->at_lun, dst->at_lun); ISP_IOXGET_8(isp, &src->at_iid, dst->at_iid); ISP_IOXGET_8(isp, &src->at_cdblen, dst->at_cdblen); ISP_IOXGET_8(isp, &src->at_tgt, dst->at_tgt); ISP_IOXGET_8(isp, &src->at_status, dst->at_status); ISP_IOXGET_8(isp, &src->at_scsi_status, dst->at_scsi_status); ISP_IOXGET_8(isp, &src->at_tag_val, dst->at_tag_val); ISP_IOXGET_8(isp, &src->at_tag_type, dst->at_tag_type); } ISP_IOXGET_32(isp, &src->at_flags, dst->at_flags); for (i = 0; i < ATIO_CDBLEN; i++) { ISP_IOXGET_8(isp, &src->at_cdb[i], dst->at_cdb[i]); } for (i = 0; i < QLTM_SENSELEN; i++) { ISP_IOXGET_8(isp, &src->at_sense[i], dst->at_sense[i]); } } void isp_put_atio2(ispsoftc_t *isp, at2_entry_t *src, at2_entry_t *dst) { int i; isp_put_hdr(isp, 
&src->at_header, &dst->at_header); ISP_IOXPUT_32(isp, src->at_reserved, &dst->at_reserved); ISP_IOXPUT_8(isp, src->at_lun, &dst->at_lun); ISP_IOXPUT_8(isp, src->at_iid, &dst->at_iid); ISP_IOXPUT_16(isp, src->at_rxid, &dst->at_rxid); ISP_IOXPUT_16(isp, src->at_flags, &dst->at_flags); ISP_IOXPUT_16(isp, src->at_status, &dst->at_status); ISP_IOXPUT_8(isp, src->at_crn, &dst->at_crn); ISP_IOXPUT_8(isp, src->at_taskcodes, &dst->at_taskcodes); ISP_IOXPUT_8(isp, src->at_taskflags, &dst->at_taskflags); ISP_IOXPUT_8(isp, src->at_execodes, &dst->at_execodes); for (i = 0; i < ATIO2_CDBLEN; i++) { ISP_IOXPUT_8(isp, src->at_cdb[i], &dst->at_cdb[i]); } ISP_IOXPUT_32(isp, src->at_datalen, &dst->at_datalen); ISP_IOXPUT_16(isp, src->at_scclun, &dst->at_scclun); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->at_wwpn[i], &dst->at_wwpn[i]); } for (i = 0; i < 6; i++) { ISP_IOXPUT_16(isp, src->at_reserved2[i], &dst->at_reserved2[i]); } ISP_IOXPUT_16(isp, src->at_oxid, &dst->at_oxid); } void isp_put_atio2e(ispsoftc_t *isp, at2e_entry_t *src, at2e_entry_t *dst) { int i; isp_put_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXPUT_32(isp, src->at_reserved, &dst->at_reserved); ISP_IOXPUT_16(isp, src->at_iid, &dst->at_iid); ISP_IOXPUT_16(isp, src->at_rxid, &dst->at_rxid); ISP_IOXPUT_16(isp, src->at_flags, &dst->at_flags); ISP_IOXPUT_16(isp, src->at_status, &dst->at_status); ISP_IOXPUT_8(isp, src->at_crn, &dst->at_crn); ISP_IOXPUT_8(isp, src->at_taskcodes, &dst->at_taskcodes); ISP_IOXPUT_8(isp, src->at_taskflags, &dst->at_taskflags); ISP_IOXPUT_8(isp, src->at_execodes, &dst->at_execodes); for (i = 0; i < ATIO2_CDBLEN; i++) { ISP_IOXPUT_8(isp, src->at_cdb[i], &dst->at_cdb[i]); } ISP_IOXPUT_32(isp, src->at_datalen, &dst->at_datalen); ISP_IOXPUT_16(isp, src->at_scclun, &dst->at_scclun); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->at_wwpn[i], &dst->at_wwpn[i]); } for (i = 0; i < 6; i++) { ISP_IOXPUT_16(isp, src->at_reserved2[i], &dst->at_reserved2[i]); } ISP_IOXPUT_16(isp, src->at_oxid, &dst->at_oxid); } void isp_get_atio2(ispsoftc_t *isp, at2_entry_t *src, at2_entry_t *dst) { int i; isp_get_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXGET_32(isp, &src->at_reserved, dst->at_reserved); ISP_IOXGET_8(isp, &src->at_lun, dst->at_lun); ISP_IOXGET_8(isp, &src->at_iid, dst->at_iid); ISP_IOXGET_16(isp, &src->at_rxid, dst->at_rxid); ISP_IOXGET_16(isp, &src->at_flags, dst->at_flags); ISP_IOXGET_16(isp, &src->at_status, dst->at_status); ISP_IOXGET_8(isp, &src->at_crn, dst->at_crn); ISP_IOXGET_8(isp, &src->at_taskcodes, dst->at_taskcodes); ISP_IOXGET_8(isp, &src->at_taskflags, dst->at_taskflags); ISP_IOXGET_8(isp, &src->at_execodes, dst->at_execodes); for (i = 0; i < ATIO2_CDBLEN; i++) { ISP_IOXGET_8(isp, &src->at_cdb[i], dst->at_cdb[i]); } ISP_IOXGET_32(isp, &src->at_datalen, dst->at_datalen); ISP_IOXGET_16(isp, &src->at_scclun, dst->at_scclun); for (i = 0; i < 4; i++) { ISP_IOXGET_16(isp, &src->at_wwpn[i], dst->at_wwpn[i]); } for (i = 0; i < 6; i++) { ISP_IOXGET_16(isp, &src->at_reserved2[i], dst->at_reserved2[i]); } ISP_IOXGET_16(isp, &src->at_oxid, dst->at_oxid); } void isp_get_atio2e(ispsoftc_t *isp, at2e_entry_t *src, at2e_entry_t *dst) { int i; isp_get_hdr(isp, &src->at_header, &dst->at_header); ISP_IOXGET_32(isp, &src->at_reserved, dst->at_reserved); ISP_IOXGET_16(isp, &src->at_iid, dst->at_iid); ISP_IOXGET_16(isp, &src->at_rxid, dst->at_rxid); ISP_IOXGET_16(isp, &src->at_flags, dst->at_flags); ISP_IOXGET_16(isp, &src->at_status, dst->at_status); ISP_IOXGET_8(isp, &src->at_crn, dst->at_crn); ISP_IOXGET_8(isp, 
&src->at_taskcodes, dst->at_taskcodes); ISP_IOXGET_8(isp, &src->at_taskflags, dst->at_taskflags); ISP_IOXGET_8(isp, &src->at_execodes, dst->at_execodes); for (i = 0; i < ATIO2_CDBLEN; i++) { ISP_IOXGET_8(isp, &src->at_cdb[i], dst->at_cdb[i]); } ISP_IOXGET_32(isp, &src->at_datalen, dst->at_datalen); ISP_IOXGET_16(isp, &src->at_scclun, dst->at_scclun); for (i = 0; i < 4; i++) { ISP_IOXGET_16(isp, &src->at_wwpn[i], dst->at_wwpn[i]); } for (i = 0; i < 6; i++) { ISP_IOXGET_16(isp, &src->at_reserved2[i], dst->at_reserved2[i]); } ISP_IOXGET_16(isp, &src->at_oxid, dst->at_oxid); } void isp_get_atio7(ispsoftc_t *isp, at7_entry_t *src, at7_entry_t *dst) { ISP_IOXGET_8(isp, &src->at_type, dst->at_type); ISP_IOXGET_8(isp, &src->at_count, dst->at_count); ISP_IOXGET_16(isp, &src->at_ta_len, dst->at_ta_len); ISP_IOXGET_32(isp, &src->at_rxid, dst->at_rxid); isp_get_fc_hdr(isp, &src->at_hdr, &dst->at_hdr); isp_get_fcp_cmnd_iu(isp, &src->at_cmnd, &dst->at_cmnd); } void isp_put_ctio(ispsoftc_t *isp, ct_entry_t *src, ct_entry_t *dst) { int i; isp_put_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXPUT_16(isp, src->ct_syshandle, &dst->ct_syshandle); ISP_IOXPUT_16(isp, src->ct_fwhandle, &dst->ct_fwhandle); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->ct_iid, &dst->ct_lun); ISP_IOXPUT_8(isp, src->ct_lun, &dst->ct_iid); ISP_IOXPUT_8(isp, src->ct_tgt, &dst->ct_reserved2); ISP_IOXPUT_8(isp, src->ct_reserved2, &dst->ct_tgt); ISP_IOXPUT_8(isp, src->ct_status, &dst->ct_scsi_status); ISP_IOXPUT_8(isp, src->ct_scsi_status, &dst->ct_status); ISP_IOXPUT_8(isp, src->ct_tag_type, &dst->ct_tag_val); ISP_IOXPUT_8(isp, src->ct_tag_val, &dst->ct_tag_type); } else { ISP_IOXPUT_8(isp, src->ct_iid, &dst->ct_iid); ISP_IOXPUT_8(isp, src->ct_lun, &dst->ct_lun); ISP_IOXPUT_8(isp, src->ct_tgt, &dst->ct_tgt); ISP_IOXPUT_8(isp, src->ct_reserved2, &dst->ct_reserved2); ISP_IOXPUT_8(isp, src->ct_scsi_status, &dst->ct_scsi_status); ISP_IOXPUT_8(isp, src->ct_status, &dst->ct_status); ISP_IOXPUT_8(isp, src->ct_tag_type, &dst->ct_tag_type); ISP_IOXPUT_8(isp, src->ct_tag_val, &dst->ct_tag_val); } ISP_IOXPUT_32(isp, src->ct_flags, &dst->ct_flags); ISP_IOXPUT_32(isp, src->ct_xfrlen, &dst->ct_xfrlen); ISP_IOXPUT_32(isp, src->ct_resid, &dst->ct_resid); ISP_IOXPUT_16(isp, src->ct_timeout, &dst->ct_timeout); ISP_IOXPUT_16(isp, src->ct_seg_count, &dst->ct_seg_count); for (i = 0; i < ISP_RQDSEG; i++) { ISP_IOXPUT_32(isp, src->ct_dataseg[i].ds_base, &dst->ct_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->ct_dataseg[i].ds_count, &dst->ct_dataseg[i].ds_count); } } void isp_get_ctio(ispsoftc_t *isp, ct_entry_t *src, ct_entry_t *dst) { int i; isp_get_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXGET_16(isp, &src->ct_syshandle, dst->ct_syshandle); ISP_IOXGET_16(isp, &src->ct_fwhandle, dst->ct_fwhandle); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &src->ct_lun, dst->ct_iid); ISP_IOXGET_8(isp, &src->ct_iid, dst->ct_lun); ISP_IOXGET_8(isp, &src->ct_reserved2, dst->ct_tgt); ISP_IOXGET_8(isp, &src->ct_tgt, dst->ct_reserved2); ISP_IOXGET_8(isp, &src->ct_status, dst->ct_scsi_status); ISP_IOXGET_8(isp, &src->ct_scsi_status, dst->ct_status); ISP_IOXGET_8(isp, &src->ct_tag_val, dst->ct_tag_type); ISP_IOXGET_8(isp, &src->ct_tag_type, dst->ct_tag_val); } else { ISP_IOXGET_8(isp, &src->ct_lun, dst->ct_lun); ISP_IOXGET_8(isp, &src->ct_iid, dst->ct_iid); ISP_IOXGET_8(isp, &src->ct_reserved2, dst->ct_reserved2); ISP_IOXGET_8(isp, &src->ct_tgt, dst->ct_tgt); ISP_IOXGET_8(isp, &src->ct_status, dst->ct_status); ISP_IOXGET_8(isp, &src->ct_scsi_status, dst->ct_scsi_status); 
ISP_IOXGET_8(isp, &src->ct_tag_val, dst->ct_tag_val); ISP_IOXGET_8(isp, &src->ct_tag_type, dst->ct_tag_type); } ISP_IOXGET_32(isp, &src->ct_flags, dst->ct_flags); ISP_IOXGET_32(isp, &src->ct_xfrlen, dst->ct_xfrlen); ISP_IOXGET_32(isp, &src->ct_resid, dst->ct_resid); ISP_IOXGET_16(isp, &src->ct_timeout, dst->ct_timeout); ISP_IOXGET_16(isp, &src->ct_seg_count, dst->ct_seg_count); for (i = 0; i < ISP_RQDSEG; i++) { ISP_IOXGET_32(isp, &src->ct_dataseg[i].ds_base, dst->ct_dataseg[i].ds_base); ISP_IOXGET_32(isp, &src->ct_dataseg[i].ds_count, dst->ct_dataseg[i].ds_count); } } void isp_put_ctio2(ispsoftc_t *isp, ct2_entry_t *src, ct2_entry_t *dst) { int i; isp_put_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXPUT_32(isp, src->ct_syshandle, &dst->ct_syshandle); ISP_IOXPUT_8(isp, src->ct_lun, &dst->ct_lun); ISP_IOXPUT_8(isp, src->ct_iid, &dst->ct_iid); ISP_IOXPUT_16(isp, src->ct_rxid, &dst->ct_rxid); ISP_IOXPUT_16(isp, src->ct_flags, &dst->ct_flags); ISP_IOXPUT_16(isp, src->ct_timeout, &dst->ct_timeout); ISP_IOXPUT_16(isp, src->ct_seg_count, &dst->ct_seg_count); ISP_IOXPUT_32(isp, src->ct_resid, &dst->ct_resid); ISP_IOXPUT_32(isp, src->ct_reloff, &dst->ct_reloff); if ((src->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE0) { ISP_IOXPUT_32(isp, src->rsp.m0._reserved, &dst->rsp.m0._reserved); ISP_IOXPUT_16(isp, src->rsp.m0._reserved2, &dst->rsp.m0._reserved2); ISP_IOXPUT_16(isp, src->rsp.m0.ct_scsi_status, &dst->rsp.m0.ct_scsi_status); ISP_IOXPUT_32(isp, src->rsp.m0.ct_xfrlen, &dst->rsp.m0.ct_xfrlen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { for (i = 0; i < ISP_RQDSEG_T2; i++) { ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg[i].ds_base, &dst->rsp.m0.u.ct_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg[i].ds_count, &dst->rsp.m0.u.ct_dataseg[i].ds_count); } } else if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { for (i = 0; i < ISP_RQDSEG_T3; i++) { ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_base, &dst->rsp.m0.u.ct_dataseg64[i].ds_base); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_basehi, &dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_count, &dst->rsp.m0.u.ct_dataseg64[i].ds_count); } } else if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO4) { ISP_IOXPUT_16(isp, src->rsp.m0.u.ct_dslist.ds_type, &dst->rsp.m0.u.ct_dslist.ds_type); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_segment, &dst->rsp.m0.u.ct_dslist.ds_segment); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_base, &dst->rsp.m0.u.ct_dslist.ds_base); } } else if ((src->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE1) { ISP_IOXPUT_16(isp, src->rsp.m1._reserved, &dst->rsp.m1._reserved); ISP_IOXPUT_16(isp, src->rsp.m1._reserved2, &dst->rsp.m1._reserved2); ISP_IOXPUT_16(isp, src->rsp.m1.ct_senselen, &dst->rsp.m1.ct_senselen); ISP_IOXPUT_16(isp, src->rsp.m1.ct_scsi_status, &dst->rsp.m1.ct_scsi_status); ISP_IOXPUT_16(isp, src->rsp.m1.ct_resplen, &dst->rsp.m1.ct_resplen); for (i = 0; i < MAXRESPLEN; i++) { ISP_IOXPUT_8(isp, src->rsp.m1.ct_resp[i], &dst->rsp.m1.ct_resp[i]); } } else { ISP_IOXPUT_32(isp, src->rsp.m2._reserved, &dst->rsp.m2._reserved); ISP_IOXPUT_16(isp, src->rsp.m2._reserved2, &dst->rsp.m2._reserved2); ISP_IOXPUT_16(isp, src->rsp.m2._reserved3, &dst->rsp.m2._reserved3); ISP_IOXPUT_32(isp, src->rsp.m2.ct_datalen, &dst->rsp.m2.ct_datalen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { ISP_IOXPUT_32(isp, src->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base, &dst->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base); ISP_IOXPUT_32(isp, 
src->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count, &dst->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count); } else { ISP_IOXPUT_32(isp, src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base, &dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base); ISP_IOXPUT_32(isp, src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi, &dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi); ISP_IOXPUT_32(isp, src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count, &dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count); } } } void isp_put_ctio2e(ispsoftc_t *isp, ct2e_entry_t *src, ct2e_entry_t *dst) { int i; isp_put_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXPUT_32(isp, src->ct_syshandle, &dst->ct_syshandle); ISP_IOXPUT_16(isp, src->ct_iid, &dst->ct_iid); ISP_IOXPUT_16(isp, src->ct_rxid, &dst->ct_rxid); ISP_IOXPUT_16(isp, src->ct_flags, &dst->ct_flags); ISP_IOXPUT_16(isp, src->ct_timeout, &dst->ct_timeout); ISP_IOXPUT_16(isp, src->ct_seg_count, &dst->ct_seg_count); ISP_IOXPUT_32(isp, src->ct_resid, &dst->ct_resid); ISP_IOXPUT_32(isp, src->ct_reloff, &dst->ct_reloff); if ((src->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE0) { ISP_IOXPUT_32(isp, src->rsp.m0._reserved, &dst->rsp.m0._reserved); ISP_IOXPUT_16(isp, src->rsp.m0._reserved2, &dst->rsp.m0._reserved2); ISP_IOXPUT_16(isp, src->rsp.m0.ct_scsi_status, &dst->rsp.m0.ct_scsi_status); ISP_IOXPUT_32(isp, src->rsp.m0.ct_xfrlen, &dst->rsp.m0.ct_xfrlen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { for (i = 0; i < ISP_RQDSEG_T2; i++) { ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg[i].ds_base, &dst->rsp.m0.u.ct_dataseg[i].ds_base); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg[i].ds_count, &dst->rsp.m0.u.ct_dataseg[i].ds_count); } } else if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { for (i = 0; i < ISP_RQDSEG_T3; i++) { ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_base, &dst->rsp.m0.u.ct_dataseg64[i].ds_base); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_basehi, &dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dataseg64[i].ds_count, &dst->rsp.m0.u.ct_dataseg64[i].ds_count); } } else if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO4) { ISP_IOXPUT_16(isp, src->rsp.m0.u.ct_dslist.ds_type, &dst->rsp.m0.u.ct_dslist.ds_type); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_segment, &dst->rsp.m0.u.ct_dslist.ds_segment); ISP_IOXPUT_32(isp, src->rsp.m0.u.ct_dslist.ds_base, &dst->rsp.m0.u.ct_dslist.ds_base); } } else if ((src->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE1) { ISP_IOXPUT_16(isp, src->rsp.m1._reserved, &dst->rsp.m1._reserved); ISP_IOXPUT_16(isp, src->rsp.m1._reserved2, &dst->rsp.m1._reserved2); ISP_IOXPUT_16(isp, src->rsp.m1.ct_senselen, &dst->rsp.m1.ct_senselen); ISP_IOXPUT_16(isp, src->rsp.m1.ct_scsi_status, &dst->rsp.m1.ct_scsi_status); ISP_IOXPUT_16(isp, src->rsp.m1.ct_resplen, &dst->rsp.m1.ct_resplen); for (i = 0; i < MAXRESPLEN; i++) { ISP_IOXPUT_8(isp, src->rsp.m1.ct_resp[i], &dst->rsp.m1.ct_resp[i]); } } else { ISP_IOXPUT_32(isp, src->rsp.m2._reserved, &dst->rsp.m2._reserved); ISP_IOXPUT_16(isp, src->rsp.m2._reserved2, &dst->rsp.m2._reserved2); ISP_IOXPUT_16(isp, src->rsp.m2._reserved3, &dst->rsp.m2._reserved3); ISP_IOXPUT_32(isp, src->rsp.m2.ct_datalen, &dst->rsp.m2.ct_datalen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { ISP_IOXPUT_32(isp, src->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base, &dst->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base); ISP_IOXPUT_32(isp, src->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count, &dst->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count); } else { ISP_IOXPUT_32(isp, src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base, 
&dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base); ISP_IOXPUT_32(isp, src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi, &dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi); ISP_IOXPUT_32(isp, src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count, &dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count); } } } void isp_put_ctio7(ispsoftc_t *isp, ct7_entry_t *src, ct7_entry_t *dst) { int i; isp_put_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXPUT_32(isp, src->ct_syshandle, &dst->ct_syshandle); ISP_IOXPUT_16(isp, src->ct_nphdl, &dst->ct_nphdl); ISP_IOXPUT_16(isp, src->ct_timeout, &dst->ct_timeout); ISP_IOXPUT_16(isp, src->ct_seg_count, &dst->ct_seg_count); ISP_IOXPUT_8(isp, src->ct_vpidx, &dst->ct_vpidx); ISP_IOXPUT_8(isp, src->ct_xflags, &dst->ct_xflags); ISP_IOXPUT_16(isp, src->ct_iid_lo, &dst->ct_iid_lo); ISP_IOXPUT_8(isp, src->ct_iid_hi, &dst->ct_iid_hi); ISP_IOXPUT_8(isp, src->ct_reserved, &dst->ct_reserved); ISP_IOXPUT_32(isp, src->ct_rxid, &dst->ct_rxid); ISP_IOXPUT_16(isp, src->ct_senselen, &dst->ct_senselen); ISP_IOXPUT_16(isp, src->ct_flags, &dst->ct_flags); ISP_IOXPUT_32(isp, src->ct_resid, &dst->ct_resid); ISP_IOXPUT_16(isp, src->ct_oxid, &dst->ct_oxid); ISP_IOXPUT_16(isp, src->ct_scsi_status, &dst->ct_scsi_status); if ((dst->ct_flags & CT7_FLAG_MMASK) == CT7_FLAG_MODE0) { ISP_IOXPUT_32(isp, src->rsp.m0.reloff, &dst->rsp.m0.reloff); ISP_IOXPUT_32(isp, src->rsp.m0.reserved0, &dst->rsp.m0.reserved0); ISP_IOXPUT_32(isp, src->rsp.m0.ct_xfrlen, &dst->rsp.m0.ct_xfrlen); ISP_IOXPUT_32(isp, src->rsp.m0.reserved1, &dst->rsp.m0.reserved1); ISP_IOXPUT_32(isp, src->rsp.m0.ds.ds_base, &dst->rsp.m0.ds.ds_base); ISP_IOXPUT_32(isp, src->rsp.m0.ds.ds_basehi, &dst->rsp.m0.ds.ds_basehi); ISP_IOXPUT_32(isp, src->rsp.m0.ds.ds_count, &dst->rsp.m0.ds.ds_count); } else if ((dst->ct_flags & CT7_FLAG_MMASK) == CT7_FLAG_MODE1) { uint32_t *a, *b; ISP_IOXPUT_16(isp, src->rsp.m1.ct_resplen, &dst->rsp.m1.ct_resplen); ISP_IOXPUT_16(isp, src->rsp.m1.reserved, &dst->rsp.m1.reserved); a = (uint32_t *) src->rsp.m1.ct_resp; b = (uint32_t *) dst->rsp.m1.ct_resp; for (i = 0; i < (ASIZE(src->rsp.m1.ct_resp) >> 2); i++) { *b++ = ISP_SWAP32(isp, *a++); } } else { ISP_IOXPUT_32(isp, src->rsp.m2.reserved0, &dst->rsp.m2.reserved0); ISP_IOXPUT_32(isp, src->rsp.m2.reserved1, &dst->rsp.m2.reserved1); ISP_IOXPUT_32(isp, src->rsp.m2.ct_datalen, &dst->rsp.m2.ct_datalen); ISP_IOXPUT_32(isp, src->rsp.m2.reserved2, &dst->rsp.m2.reserved2); ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_base, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_basehi, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_basehi); ISP_IOXPUT_32(isp, src->rsp.m2.ct_fcp_rsp_iudata.ds_count, &dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); } } void isp_get_ctio2(ispsoftc_t *isp, ct2_entry_t *src, ct2_entry_t *dst) { int i; isp_get_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXGET_32(isp, &src->ct_syshandle, dst->ct_syshandle); ISP_IOXGET_8(isp, &src->ct_lun, dst->ct_lun); ISP_IOXGET_8(isp, &src->ct_iid, dst->ct_iid); ISP_IOXGET_16(isp, &src->ct_rxid, dst->ct_rxid); ISP_IOXGET_16(isp, &src->ct_flags, dst->ct_flags); ISP_IOXGET_16(isp, &src->ct_status, dst->ct_status); ISP_IOXGET_16(isp, &src->ct_timeout, dst->ct_timeout); ISP_IOXGET_16(isp, &src->ct_seg_count, dst->ct_seg_count); ISP_IOXGET_32(isp, &src->ct_reloff, dst->ct_reloff); ISP_IOXGET_32(isp, &src->ct_resid, dst->ct_resid); if ((dst->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE0) { ISP_IOXGET_32(isp, &src->rsp.m0._reserved, dst->rsp.m0._reserved); ISP_IOXGET_16(isp, &src->rsp.m0._reserved2, 
dst->rsp.m0._reserved2); ISP_IOXGET_16(isp, &src->rsp.m0.ct_scsi_status, dst->rsp.m0.ct_scsi_status); ISP_IOXGET_32(isp, &src->rsp.m0.ct_xfrlen, dst->rsp.m0.ct_xfrlen); if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { for (i = 0; i < ISP_RQDSEG_T2; i++) { ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg[i].ds_base, dst->rsp.m0.u.ct_dataseg[i].ds_base); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg[i].ds_count, dst->rsp.m0.u.ct_dataseg[i].ds_count); } } else if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { for (i = 0; i < ISP_RQDSEG_T3; i++) { ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_base, dst->rsp.m0.u.ct_dataseg64[i].ds_base); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_basehi, dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_count, dst->rsp.m0.u.ct_dataseg64[i].ds_count); } } else if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO4) { ISP_IOXGET_16(isp, &src->rsp.m0.u.ct_dslist.ds_type, dst->rsp.m0.u.ct_dslist.ds_type); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_segment, dst->rsp.m0.u.ct_dslist.ds_segment); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_base, dst->rsp.m0.u.ct_dslist.ds_base); } } else if ((dst->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE1) { ISP_IOXGET_16(isp, &src->rsp.m1._reserved, dst->rsp.m1._reserved); ISP_IOXGET_16(isp, &src->rsp.m1._reserved2, dst->rsp.m1._reserved2); ISP_IOXGET_16(isp, &src->rsp.m1.ct_senselen, dst->rsp.m1.ct_senselen); ISP_IOXGET_16(isp, &src->rsp.m1.ct_scsi_status, dst->rsp.m1.ct_scsi_status); ISP_IOXGET_16(isp, &src->rsp.m1.ct_resplen, dst->rsp.m1.ct_resplen); for (i = 0; i < MAXRESPLEN; i++) { ISP_IOXGET_8(isp, &src->rsp.m1.ct_resp[i], dst->rsp.m1.ct_resp[i]); } } else { ISP_IOXGET_32(isp, &src->rsp.m2._reserved, dst->rsp.m2._reserved); ISP_IOXGET_16(isp, &src->rsp.m2._reserved2, dst->rsp.m2._reserved2); ISP_IOXGET_16(isp, &src->rsp.m2._reserved3, dst->rsp.m2._reserved3); ISP_IOXGET_32(isp, &src->rsp.m2.ct_datalen, dst->rsp.m2.ct_datalen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base, dst->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base); ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count, dst->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count); } else { ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base, dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base); ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi, dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi); ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count, dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count); } } } void isp_get_ctio2e(ispsoftc_t *isp, ct2e_entry_t *src, ct2e_entry_t *dst) { int i; isp_get_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXGET_32(isp, &src->ct_syshandle, dst->ct_syshandle); ISP_IOXGET_16(isp, &src->ct_iid, dst->ct_iid); ISP_IOXGET_16(isp, &src->ct_rxid, dst->ct_rxid); ISP_IOXGET_16(isp, &src->ct_flags, dst->ct_flags); ISP_IOXGET_16(isp, &src->ct_status, dst->ct_status); ISP_IOXGET_16(isp, &src->ct_timeout, dst->ct_timeout); ISP_IOXGET_16(isp, &src->ct_seg_count, dst->ct_seg_count); ISP_IOXGET_32(isp, &src->ct_reloff, dst->ct_reloff); ISP_IOXGET_32(isp, &src->ct_resid, dst->ct_resid); if ((dst->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE0) { ISP_IOXGET_32(isp, &src->rsp.m0._reserved, dst->rsp.m0._reserved); ISP_IOXGET_16(isp, &src->rsp.m0._reserved2, dst->rsp.m0._reserved2); ISP_IOXGET_16(isp, &src->rsp.m0.ct_scsi_status, dst->rsp.m0.ct_scsi_status); ISP_IOXGET_32(isp, &src->rsp.m0.ct_xfrlen, 
dst->rsp.m0.ct_xfrlen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { for (i = 0; i < ISP_RQDSEG_T2; i++) { ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg[i].ds_base, dst->rsp.m0.u.ct_dataseg[i].ds_base); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg[i].ds_count, dst->rsp.m0.u.ct_dataseg[i].ds_count); } } else if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { for (i = 0; i < ISP_RQDSEG_T3; i++) { ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_base, dst->rsp.m0.u.ct_dataseg64[i].ds_base); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_basehi, dst->rsp.m0.u.ct_dataseg64[i].ds_basehi); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dataseg64[i].ds_count, dst->rsp.m0.u.ct_dataseg64[i].ds_count); } } else if (dst->ct_header.rqs_entry_type == RQSTYPE_CTIO4) { ISP_IOXGET_16(isp, &src->rsp.m0.u.ct_dslist.ds_type, dst->rsp.m0.u.ct_dslist.ds_type); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_segment, dst->rsp.m0.u.ct_dslist.ds_segment); ISP_IOXGET_32(isp, &src->rsp.m0.u.ct_dslist.ds_base, dst->rsp.m0.u.ct_dslist.ds_base); } } else if ((dst->ct_flags & CT2_FLAG_MMASK) == CT2_FLAG_MODE1) { ISP_IOXGET_16(isp, &src->rsp.m1._reserved, dst->rsp.m1._reserved); ISP_IOXGET_16(isp, &src->rsp.m1._reserved2, dst->rsp.m1._reserved2); ISP_IOXGET_16(isp, &src->rsp.m1.ct_senselen, dst->rsp.m1.ct_senselen); ISP_IOXGET_16(isp, &src->rsp.m1.ct_scsi_status, dst->rsp.m1.ct_scsi_status); ISP_IOXGET_16(isp, &src->rsp.m1.ct_resplen, dst->rsp.m1.ct_resplen); for (i = 0; i < MAXRESPLEN; i++) { ISP_IOXGET_8(isp, &src->rsp.m1.ct_resp[i], dst->rsp.m1.ct_resp[i]); } } else { ISP_IOXGET_32(isp, &src->rsp.m2._reserved, dst->rsp.m2._reserved); ISP_IOXGET_16(isp, &src->rsp.m2._reserved2, dst->rsp.m2._reserved2); ISP_IOXGET_16(isp, &src->rsp.m2._reserved3, dst->rsp.m2._reserved3); ISP_IOXGET_32(isp, &src->rsp.m2.ct_datalen, dst->rsp.m2.ct_datalen); if (src->ct_header.rqs_entry_type == RQSTYPE_CTIO2) { ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base, dst->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_base); ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count, dst->rsp.m2.u.ct_fcp_rsp_iudata_32.ds_count); } else { ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base, dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_base); ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi, dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_basehi); ISP_IOXGET_32(isp, &src->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count, dst->rsp.m2.u.ct_fcp_rsp_iudata_64.ds_count); } } } void isp_get_ctio7(ispsoftc_t *isp, ct7_entry_t *src, ct7_entry_t *dst) { int i; isp_get_hdr(isp, &src->ct_header, &dst->ct_header); ISP_IOXGET_32(isp, &src->ct_syshandle, dst->ct_syshandle); ISP_IOXGET_16(isp, &src->ct_nphdl, dst->ct_nphdl); ISP_IOXGET_16(isp, &src->ct_timeout, dst->ct_timeout); ISP_IOXGET_16(isp, &src->ct_seg_count, dst->ct_seg_count); ISP_IOXGET_8(isp, &src->ct_vpidx, dst->ct_vpidx); ISP_IOXGET_8(isp, &src->ct_xflags, dst->ct_xflags); ISP_IOXGET_16(isp, &src->ct_iid_lo, dst->ct_iid_lo); ISP_IOXGET_8(isp, &src->ct_iid_hi, dst->ct_iid_hi); ISP_IOXGET_8(isp, &src->ct_reserved, dst->ct_reserved); ISP_IOXGET_32(isp, &src->ct_rxid, dst->ct_rxid); ISP_IOXGET_16(isp, &src->ct_senselen, dst->ct_senselen); ISP_IOXGET_16(isp, &src->ct_flags, dst->ct_flags); ISP_IOXGET_32(isp, &src->ct_resid, dst->ct_resid); ISP_IOXGET_16(isp, &src->ct_oxid, dst->ct_oxid); ISP_IOXGET_16(isp, &src->ct_scsi_status, dst->ct_scsi_status); if ((dst->ct_flags & CT7_FLAG_MMASK) == CT7_FLAG_MODE0) { ISP_IOXGET_32(isp, &src->rsp.m0.reloff, dst->rsp.m0.reloff); ISP_IOXGET_32(isp, 
&src->rsp.m0.reserved0, dst->rsp.m0.reserved0); ISP_IOXGET_32(isp, &src->rsp.m0.ct_xfrlen, dst->rsp.m0.ct_xfrlen); ISP_IOXGET_32(isp, &src->rsp.m0.reserved1, dst->rsp.m0.reserved1); ISP_IOXGET_32(isp, &src->rsp.m0.ds.ds_base, dst->rsp.m0.ds.ds_base); ISP_IOXGET_32(isp, &src->rsp.m0.ds.ds_basehi, dst->rsp.m0.ds.ds_basehi); ISP_IOXGET_32(isp, &src->rsp.m0.ds.ds_count, dst->rsp.m0.ds.ds_count); } else if ((dst->ct_flags & CT7_FLAG_MMASK) == CT7_FLAG_MODE1) { uint32_t *a, *b; ISP_IOXGET_16(isp, &src->rsp.m1.ct_resplen, dst->rsp.m1.ct_resplen); ISP_IOXGET_16(isp, &src->rsp.m1.reserved, dst->rsp.m1.reserved); a = (uint32_t *) src->rsp.m1.ct_resp; b = (uint32_t *) dst->rsp.m1.ct_resp; for (i = 0; i < MAXRESPLEN_24XX; i++) { ISP_IOXGET_8(isp, &src->rsp.m1.ct_resp[i], dst->rsp.m1.ct_resp[i]); } for (i = 0; i < (ASIZE(src->rsp.m1.ct_resp) >> 2); i++) { *b++ = ISP_SWAP32(isp, *a++); } } else { ISP_IOXGET_32(isp, &src->rsp.m2.reserved0, dst->rsp.m2.reserved0); ISP_IOXGET_32(isp, &src->rsp.m2.ct_datalen, dst->rsp.m2.ct_datalen); ISP_IOXGET_32(isp, &src->rsp.m2.reserved1, dst->rsp.m2.reserved1); ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_base, dst->rsp.m2.ct_fcp_rsp_iudata.ds_base); ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_basehi, dst->rsp.m2.ct_fcp_rsp_iudata.ds_basehi); ISP_IOXGET_32(isp, &src->rsp.m2.ct_fcp_rsp_iudata.ds_count, dst->rsp.m2.ct_fcp_rsp_iudata.ds_count); } } void isp_put_enable_lun(ispsoftc_t *isp, lun_entry_t *lesrc, lun_entry_t *ledst) { int i; isp_put_hdr(isp, &lesrc->le_header, &ledst->le_header); ISP_IOXPUT_32(isp, lesrc->le_reserved, &ledst->le_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, lesrc->le_lun, &ledst->le_rsvd); ISP_IOXPUT_8(isp, lesrc->le_rsvd, &ledst->le_lun); ISP_IOXPUT_8(isp, lesrc->le_ops, &ledst->le_tgt); ISP_IOXPUT_8(isp, lesrc->le_tgt, &ledst->le_ops); ISP_IOXPUT_8(isp, lesrc->le_status, &ledst->le_reserved2); ISP_IOXPUT_8(isp, lesrc->le_reserved2, &ledst->le_status); ISP_IOXPUT_8(isp, lesrc->le_cmd_count, &ledst->le_in_count); ISP_IOXPUT_8(isp, lesrc->le_in_count, &ledst->le_cmd_count); ISP_IOXPUT_8(isp, lesrc->le_cdb6len, &ledst->le_cdb7len); ISP_IOXPUT_8(isp, lesrc->le_cdb7len, &ledst->le_cdb6len); } else { ISP_IOXPUT_8(isp, lesrc->le_lun, &ledst->le_lun); ISP_IOXPUT_8(isp, lesrc->le_rsvd, &ledst->le_rsvd); ISP_IOXPUT_8(isp, lesrc->le_ops, &ledst->le_ops); ISP_IOXPUT_8(isp, lesrc->le_tgt, &ledst->le_tgt); ISP_IOXPUT_8(isp, lesrc->le_status, &ledst->le_status); ISP_IOXPUT_8(isp, lesrc->le_reserved2, &ledst->le_reserved2); ISP_IOXPUT_8(isp, lesrc->le_cmd_count, &ledst->le_cmd_count); ISP_IOXPUT_8(isp, lesrc->le_in_count, &ledst->le_in_count); ISP_IOXPUT_8(isp, lesrc->le_cdb6len, &ledst->le_cdb6len); ISP_IOXPUT_8(isp, lesrc->le_cdb7len, &ledst->le_cdb7len); } ISP_IOXPUT_32(isp, lesrc->le_flags, &ledst->le_flags); ISP_IOXPUT_16(isp, lesrc->le_timeout, &ledst->le_timeout); for (i = 0; i < 20; i++) { ISP_IOXPUT_8(isp, lesrc->le_reserved3[i], &ledst->le_reserved3[i]); } } void isp_get_enable_lun(ispsoftc_t *isp, lun_entry_t *lesrc, lun_entry_t *ledst) { int i; isp_get_hdr(isp, &lesrc->le_header, &ledst->le_header); ISP_IOXGET_32(isp, &lesrc->le_reserved, ledst->le_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &lesrc->le_lun, ledst->le_rsvd); ISP_IOXGET_8(isp, &lesrc->le_rsvd, ledst->le_lun); ISP_IOXGET_8(isp, &lesrc->le_ops, ledst->le_tgt); ISP_IOXGET_8(isp, &lesrc->le_tgt, ledst->le_ops); ISP_IOXGET_8(isp, &lesrc->le_status, ledst->le_reserved2); ISP_IOXGET_8(isp, &lesrc->le_reserved2, ledst->le_status); ISP_IOXGET_8(isp, 
&lesrc->le_cmd_count, ledst->le_in_count); ISP_IOXGET_8(isp, &lesrc->le_in_count, ledst->le_cmd_count); ISP_IOXGET_8(isp, &lesrc->le_cdb6len, ledst->le_cdb7len); ISP_IOXGET_8(isp, &lesrc->le_cdb7len, ledst->le_cdb6len); } else { ISP_IOXGET_8(isp, &lesrc->le_lun, ledst->le_lun); ISP_IOXGET_8(isp, &lesrc->le_rsvd, ledst->le_rsvd); ISP_IOXGET_8(isp, &lesrc->le_ops, ledst->le_ops); ISP_IOXGET_8(isp, &lesrc->le_tgt, ledst->le_tgt); ISP_IOXGET_8(isp, &lesrc->le_status, ledst->le_status); ISP_IOXGET_8(isp, &lesrc->le_reserved2, ledst->le_reserved2); ISP_IOXGET_8(isp, &lesrc->le_cmd_count, ledst->le_cmd_count); ISP_IOXGET_8(isp, &lesrc->le_in_count, ledst->le_in_count); ISP_IOXGET_8(isp, &lesrc->le_cdb6len, ledst->le_cdb6len); ISP_IOXGET_8(isp, &lesrc->le_cdb7len, ledst->le_cdb7len); } ISP_IOXGET_32(isp, &lesrc->le_flags, ledst->le_flags); ISP_IOXGET_16(isp, &lesrc->le_timeout, ledst->le_timeout); for (i = 0; i < 20; i++) { ISP_IOXGET_8(isp, &lesrc->le_reserved3[i], ledst->le_reserved3[i]); } } void isp_put_notify(ispsoftc_t *isp, in_entry_t *src, in_entry_t *dst) { int i; isp_put_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXPUT_32(isp, src->in_reserved, &dst->in_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->in_lun, &dst->in_iid); ISP_IOXPUT_8(isp, src->in_iid, &dst->in_lun); ISP_IOXPUT_8(isp, src->in_reserved2, &dst->in_tgt); ISP_IOXPUT_8(isp, src->in_tgt, &dst->in_reserved2); ISP_IOXPUT_8(isp, src->in_status, &dst->in_rsvd2); ISP_IOXPUT_8(isp, src->in_rsvd2, &dst->in_status); ISP_IOXPUT_8(isp, src->in_tag_val, &dst->in_tag_type); ISP_IOXPUT_8(isp, src->in_tag_type, &dst->in_tag_val); } else { ISP_IOXPUT_8(isp, src->in_lun, &dst->in_lun); ISP_IOXPUT_8(isp, src->in_iid, &dst->in_iid); ISP_IOXPUT_8(isp, src->in_reserved2, &dst->in_reserved2); ISP_IOXPUT_8(isp, src->in_tgt, &dst->in_tgt); ISP_IOXPUT_8(isp, src->in_status, &dst->in_status); ISP_IOXPUT_8(isp, src->in_rsvd2, &dst->in_rsvd2); ISP_IOXPUT_8(isp, src->in_tag_val, &dst->in_tag_val); ISP_IOXPUT_8(isp, src->in_tag_type, &dst->in_tag_type); } ISP_IOXPUT_32(isp, src->in_flags, &dst->in_flags); ISP_IOXPUT_16(isp, src->in_seqid, &dst->in_seqid); for (i = 0; i < IN_MSGLEN; i++) { ISP_IOXPUT_8(isp, src->in_msg[i], &dst->in_msg[i]); } for (i = 0; i < IN_RSVDLEN; i++) { ISP_IOXPUT_8(isp, src->in_reserved3[i], &dst->in_reserved3[i]); } for (i = 0; i < QLTM_SENSELEN; i++) { ISP_IOXPUT_8(isp, src->in_sense[i], &dst->in_sense[i]); } } void isp_get_notify(ispsoftc_t *isp, in_entry_t *src, in_entry_t *dst) { int i; isp_get_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXGET_32(isp, &src->in_reserved, dst->in_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &src->in_lun, dst->in_iid); ISP_IOXGET_8(isp, &src->in_iid, dst->in_lun); ISP_IOXGET_8(isp, &src->in_reserved2, dst->in_tgt); ISP_IOXGET_8(isp, &src->in_tgt, dst->in_reserved2); ISP_IOXGET_8(isp, &src->in_status, dst->in_rsvd2); ISP_IOXGET_8(isp, &src->in_rsvd2, dst->in_status); ISP_IOXGET_8(isp, &src->in_tag_val, dst->in_tag_type); ISP_IOXGET_8(isp, &src->in_tag_type, dst->in_tag_val); } else { ISP_IOXGET_8(isp, &src->in_lun, dst->in_lun); ISP_IOXGET_8(isp, &src->in_iid, dst->in_iid); ISP_IOXGET_8(isp, &src->in_reserved2, dst->in_reserved2); ISP_IOXGET_8(isp, &src->in_tgt, dst->in_tgt); ISP_IOXGET_8(isp, &src->in_status, dst->in_status); ISP_IOXGET_8(isp, &src->in_rsvd2, dst->in_rsvd2); ISP_IOXGET_8(isp, &src->in_tag_val, dst->in_tag_val); ISP_IOXGET_8(isp, &src->in_tag_type, dst->in_tag_type); } ISP_IOXGET_32(isp, &src->in_flags, dst->in_flags); ISP_IOXGET_16(isp, 
&src->in_seqid, dst->in_seqid); for (i = 0; i < IN_MSGLEN; i++) { ISP_IOXGET_8(isp, &src->in_msg[i], dst->in_msg[i]); } for (i = 0; i < IN_RSVDLEN; i++) { ISP_IOXGET_8(isp, &src->in_reserved3[i], dst->in_reserved3[i]); } for (i = 0; i < QLTM_SENSELEN; i++) { ISP_IOXGET_8(isp, &src->in_sense[i], dst->in_sense[i]); } } void isp_put_notify_fc(ispsoftc_t *isp, in_fcentry_t *src, in_fcentry_t *dst) { isp_put_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXPUT_32(isp, src->in_reserved, &dst->in_reserved); ISP_IOXPUT_8(isp, src->in_lun, &dst->in_lun); ISP_IOXPUT_8(isp, src->in_iid, &dst->in_iid); ISP_IOXPUT_16(isp, src->in_scclun, &dst->in_scclun); ISP_IOXPUT_32(isp, src->in_reserved2, &dst->in_reserved2); ISP_IOXPUT_16(isp, src->in_status, &dst->in_status); ISP_IOXPUT_16(isp, src->in_task_flags, &dst->in_task_flags); ISP_IOXPUT_16(isp, src->in_seqid, &dst->in_seqid); } void isp_put_notify_fc_e(ispsoftc_t *isp, in_fcentry_e_t *src, in_fcentry_e_t *dst) { isp_put_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXPUT_32(isp, src->in_reserved, &dst->in_reserved); ISP_IOXPUT_16(isp, src->in_iid, &dst->in_iid); ISP_IOXPUT_16(isp, src->in_scclun, &dst->in_scclun); ISP_IOXPUT_32(isp, src->in_reserved2, &dst->in_reserved2); ISP_IOXPUT_16(isp, src->in_status, &dst->in_status); ISP_IOXPUT_16(isp, src->in_task_flags, &dst->in_task_flags); ISP_IOXPUT_16(isp, src->in_seqid, &dst->in_seqid); } void isp_put_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *src, in_fcentry_24xx_t *dst) { int i; isp_put_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXPUT_32(isp, src->in_reserved, &dst->in_reserved); ISP_IOXPUT_16(isp, src->in_nphdl, &dst->in_nphdl); ISP_IOXPUT_16(isp, src->in_reserved1, &dst->in_reserved1); ISP_IOXPUT_16(isp, src->in_flags, &dst->in_flags); ISP_IOXPUT_16(isp, src->in_srr_rxid, &dst->in_srr_rxid); ISP_IOXPUT_16(isp, src->in_status, &dst->in_status); ISP_IOXPUT_8(isp, src->in_status_subcode, &dst->in_status_subcode); ISP_IOXPUT_8(isp, src->in_fwhandle, &dst->in_fwhandle); ISP_IOXPUT_32(isp, src->in_rxid, &dst->in_rxid); ISP_IOXPUT_16(isp, src->in_srr_reloff_hi, &dst->in_srr_reloff_hi); ISP_IOXPUT_16(isp, src->in_srr_reloff_lo, &dst->in_srr_reloff_lo); ISP_IOXPUT_16(isp, src->in_srr_iu, &dst->in_srr_iu); ISP_IOXPUT_16(isp, src->in_srr_oxid, &dst->in_srr_oxid); ISP_IOXPUT_16(isp, src->in_nport_id_hi, &dst->in_nport_id_hi); ISP_IOXPUT_8(isp, src->in_nport_id_lo, &dst->in_nport_id_lo); ISP_IOXPUT_8(isp, src->in_reserved3, &dst->in_reserved3); ISP_IOXPUT_16(isp, src->in_np_handle, &dst->in_np_handle); for (i = 0; i < ASIZE(src->in_reserved4); i++) { ISP_IOXPUT_8(isp, src->in_reserved4[i], &dst->in_reserved4[i]); } ISP_IOXPUT_8(isp, src->in_reserved5, &dst->in_reserved5); ISP_IOXPUT_8(isp, src->in_vpidx, &dst->in_vpidx); ISP_IOXPUT_32(isp, src->in_reserved6, &dst->in_reserved6); ISP_IOXPUT_16(isp, src->in_portid_lo, &dst->in_portid_lo); ISP_IOXPUT_8(isp, src->in_portid_hi, &dst->in_portid_hi); ISP_IOXPUT_8(isp, src->in_reserved7, &dst->in_reserved7); ISP_IOXPUT_16(isp, src->in_reserved8, &dst->in_reserved8); ISP_IOXPUT_16(isp, src->in_oxid, &dst->in_oxid); } void isp_get_notify_fc(ispsoftc_t *isp, in_fcentry_t *src, in_fcentry_t *dst) { isp_get_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXGET_32(isp, &src->in_reserved, dst->in_reserved); ISP_IOXGET_8(isp, &src->in_lun, dst->in_lun); ISP_IOXGET_8(isp, &src->in_iid, dst->in_iid); ISP_IOXGET_16(isp, &src->in_scclun, dst->in_scclun); ISP_IOXGET_32(isp, &src->in_reserved2, dst->in_reserved2); ISP_IOXGET_16(isp, &src->in_status, dst->in_status); 
ISP_IOXGET_16(isp, &src->in_task_flags, dst->in_task_flags); ISP_IOXGET_16(isp, &src->in_seqid, dst->in_seqid); } void isp_get_notify_fc_e(ispsoftc_t *isp, in_fcentry_e_t *src, in_fcentry_e_t *dst) { isp_get_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXGET_32(isp, &src->in_reserved, dst->in_reserved); ISP_IOXGET_16(isp, &src->in_iid, dst->in_iid); ISP_IOXGET_16(isp, &src->in_scclun, dst->in_scclun); ISP_IOXGET_32(isp, &src->in_reserved2, dst->in_reserved2); ISP_IOXGET_16(isp, &src->in_status, dst->in_status); ISP_IOXGET_16(isp, &src->in_task_flags, dst->in_task_flags); ISP_IOXGET_16(isp, &src->in_seqid, dst->in_seqid); } void isp_get_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *src, in_fcentry_24xx_t *dst) { int i; isp_get_hdr(isp, &src->in_header, &dst->in_header); ISP_IOXGET_32(isp, &src->in_reserved, dst->in_reserved); ISP_IOXGET_16(isp, &src->in_nphdl, dst->in_nphdl); ISP_IOXGET_16(isp, &src->in_reserved1, dst->in_reserved1); ISP_IOXGET_16(isp, &src->in_flags, dst->in_flags); ISP_IOXGET_16(isp, &src->in_srr_rxid, dst->in_srr_rxid); ISP_IOXGET_16(isp, &src->in_status, dst->in_status); ISP_IOXGET_8(isp, &src->in_status_subcode, dst->in_status_subcode); ISP_IOXGET_8(isp, &src->in_fwhandle, dst->in_fwhandle); ISP_IOXGET_32(isp, &src->in_rxid, dst->in_rxid); ISP_IOXGET_16(isp, &src->in_srr_reloff_hi, dst->in_srr_reloff_hi); ISP_IOXGET_16(isp, &src->in_srr_reloff_lo, dst->in_srr_reloff_lo); ISP_IOXGET_16(isp, &src->in_srr_iu, dst->in_srr_iu); ISP_IOXGET_16(isp, &src->in_srr_oxid, dst->in_srr_oxid); ISP_IOXGET_16(isp, &src->in_nport_id_hi, dst->in_nport_id_hi); ISP_IOXGET_8(isp, &src->in_nport_id_lo, dst->in_nport_id_lo); ISP_IOXGET_8(isp, &src->in_reserved3, dst->in_reserved3); ISP_IOXGET_16(isp, &src->in_np_handle, dst->in_np_handle); for (i = 0; i < ASIZE(src->in_reserved4); i++) { ISP_IOXGET_8(isp, &src->in_reserved4[i], dst->in_reserved4[i]); } ISP_IOXGET_8(isp, &src->in_reserved5, dst->in_reserved5); ISP_IOXGET_8(isp, &src->in_vpidx, dst->in_vpidx); ISP_IOXGET_32(isp, &src->in_reserved6, dst->in_reserved6); ISP_IOXGET_16(isp, &src->in_portid_lo, dst->in_portid_lo); ISP_IOXGET_8(isp, &src->in_portid_hi, dst->in_portid_hi); ISP_IOXGET_8(isp, &src->in_reserved7, dst->in_reserved7); ISP_IOXGET_16(isp, &src->in_reserved8, dst->in_reserved8); ISP_IOXGET_16(isp, &src->in_oxid, dst->in_oxid); } void isp_put_notify_ack(ispsoftc_t *isp, na_entry_t *src, na_entry_t *dst) { int i; isp_put_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXPUT_32(isp, src->na_reserved, &dst->na_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXPUT_8(isp, src->na_lun, &dst->na_iid); ISP_IOXPUT_8(isp, src->na_iid, &dst->na_lun); ISP_IOXPUT_8(isp, src->na_status, &dst->na_event); ISP_IOXPUT_8(isp, src->na_event, &dst->na_status); } else { ISP_IOXPUT_8(isp, src->na_lun, &dst->na_lun); ISP_IOXPUT_8(isp, src->na_iid, &dst->na_iid); ISP_IOXPUT_8(isp, src->na_status, &dst->na_status); ISP_IOXPUT_8(isp, src->na_event, &dst->na_event); } ISP_IOXPUT_32(isp, src->na_flags, &dst->na_flags); for (i = 0; i < NA_RSVDLEN; i++) { ISP_IOXPUT_16(isp, src->na_reserved3[i], &dst->na_reserved3[i]); } } void isp_get_notify_ack(ispsoftc_t *isp, na_entry_t *src, na_entry_t *dst) { int i; isp_get_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXGET_32(isp, &src->na_reserved, dst->na_reserved); if (ISP_IS_SBUS(isp)) { ISP_IOXGET_8(isp, &src->na_lun, dst->na_iid); ISP_IOXGET_8(isp, &src->na_iid, dst->na_lun); ISP_IOXGET_8(isp, &src->na_status, dst->na_event); ISP_IOXGET_8(isp, &src->na_event, dst->na_status); } else { ISP_IOXGET_8(isp, 
&src->na_lun, dst->na_lun); ISP_IOXGET_8(isp, &src->na_iid, dst->na_iid); ISP_IOXGET_8(isp, &src->na_status, dst->na_status); ISP_IOXGET_8(isp, &src->na_event, dst->na_event); } ISP_IOXGET_32(isp, &src->na_flags, dst->na_flags); for (i = 0; i < NA_RSVDLEN; i++) { ISP_IOXGET_16(isp, &src->na_reserved3[i], dst->na_reserved3[i]); } } void isp_put_notify_ack_fc(ispsoftc_t *isp, na_fcentry_t *src, na_fcentry_t *dst) { int i; isp_put_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXPUT_32(isp, src->na_reserved, &dst->na_reserved); ISP_IOXPUT_8(isp, src->na_reserved1, &dst->na_reserved1); ISP_IOXPUT_8(isp, src->na_iid, &dst->na_iid); ISP_IOXPUT_16(isp, src->na_response, &dst->na_response); ISP_IOXPUT_16(isp, src->na_flags, &dst->na_flags); ISP_IOXPUT_16(isp, src->na_reserved2, &dst->na_reserved2); ISP_IOXPUT_16(isp, src->na_status, &dst->na_status); ISP_IOXPUT_16(isp, src->na_task_flags, &dst->na_task_flags); ISP_IOXPUT_16(isp, src->na_seqid, &dst->na_seqid); for (i = 0; i < NA2_RSVDLEN; i++) { ISP_IOXPUT_16(isp, src->na_reserved3[i], &dst->na_reserved3[i]); } } void isp_put_notify_ack_fc_e(ispsoftc_t *isp, na_fcentry_e_t *src, na_fcentry_e_t *dst) { int i; isp_put_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXPUT_32(isp, src->na_reserved, &dst->na_reserved); ISP_IOXPUT_16(isp, src->na_iid, &dst->na_iid); ISP_IOXPUT_16(isp, src->na_response, &dst->na_response); ISP_IOXPUT_16(isp, src->na_flags, &dst->na_flags); ISP_IOXPUT_16(isp, src->na_reserved2, &dst->na_reserved2); ISP_IOXPUT_16(isp, src->na_status, &dst->na_status); ISP_IOXPUT_16(isp, src->na_task_flags, &dst->na_task_flags); ISP_IOXPUT_16(isp, src->na_seqid, &dst->na_seqid); for (i = 0; i < NA2_RSVDLEN; i++) { ISP_IOXPUT_16(isp, src->na_reserved3[i], &dst->na_reserved3[i]); } } void isp_put_notify_24xx_ack(ispsoftc_t *isp, na_fcentry_24xx_t *src, na_fcentry_24xx_t *dst) { int i; isp_put_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXPUT_32(isp, src->na_handle, &dst->na_handle); ISP_IOXPUT_16(isp, src->na_nphdl, &dst->na_nphdl); ISP_IOXPUT_16(isp, src->na_reserved1, &dst->na_reserved1); ISP_IOXPUT_16(isp, src->na_flags, &dst->na_flags); ISP_IOXPUT_16(isp, src->na_srr_rxid, &dst->na_srr_rxid); ISP_IOXPUT_16(isp, src->na_status, &dst->na_status); ISP_IOXPUT_8(isp, src->na_status_subcode, &dst->na_status_subcode); ISP_IOXPUT_8(isp, src->na_fwhandle, &dst->na_fwhandle); ISP_IOXPUT_32(isp, src->na_rxid, &dst->na_rxid); ISP_IOXPUT_16(isp, src->na_srr_reloff_hi, &dst->na_srr_reloff_hi); ISP_IOXPUT_16(isp, src->na_srr_reloff_lo, &dst->na_srr_reloff_lo); ISP_IOXPUT_16(isp, src->na_srr_iu, &dst->na_srr_iu); ISP_IOXPUT_16(isp, src->na_srr_flags, &dst->na_srr_flags); for (i = 0; i < 18; i++) { ISP_IOXPUT_8(isp, src->na_reserved3[i], &dst->na_reserved3[i]); } ISP_IOXPUT_8(isp, src->na_reserved4, &dst->na_reserved4); ISP_IOXPUT_8(isp, src->na_vpidx, &dst->na_vpidx); ISP_IOXPUT_8(isp, src->na_srr_reject_vunique, &dst->na_srr_reject_vunique); ISP_IOXPUT_8(isp, src->na_srr_reject_explanation, &dst->na_srr_reject_explanation); ISP_IOXPUT_8(isp, src->na_srr_reject_code, &dst->na_srr_reject_code); ISP_IOXPUT_8(isp, src->na_reserved5, &dst->na_reserved5); for (i = 0; i < 6; i++) { ISP_IOXPUT_8(isp, src->na_reserved6[i], &dst->na_reserved6[i]); } ISP_IOXPUT_16(isp, src->na_oxid, &dst->na_oxid); } void isp_get_notify_ack_fc(ispsoftc_t *isp, na_fcentry_t *src, na_fcentry_t *dst) { int i; isp_get_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXGET_32(isp, &src->na_reserved, dst->na_reserved); ISP_IOXGET_8(isp, &src->na_reserved1, dst->na_reserved1); 
ISP_IOXGET_8(isp, &src->na_iid, dst->na_iid); ISP_IOXGET_16(isp, &src->na_response, dst->na_response); ISP_IOXGET_16(isp, &src->na_flags, dst->na_flags); ISP_IOXGET_16(isp, &src->na_reserved2, dst->na_reserved2); ISP_IOXGET_16(isp, &src->na_status, dst->na_status); ISP_IOXGET_16(isp, &src->na_task_flags, dst->na_task_flags); ISP_IOXGET_16(isp, &src->na_seqid, dst->na_seqid); for (i = 0; i < NA2_RSVDLEN; i++) { ISP_IOXGET_16(isp, &src->na_reserved3[i], dst->na_reserved3[i]); } } void isp_get_notify_ack_fc_e(ispsoftc_t *isp, na_fcentry_e_t *src, na_fcentry_e_t *dst) { int i; isp_get_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXGET_32(isp, &src->na_reserved, dst->na_reserved); ISP_IOXGET_16(isp, &src->na_iid, dst->na_iid); ISP_IOXGET_16(isp, &src->na_response, dst->na_response); ISP_IOXGET_16(isp, &src->na_flags, dst->na_flags); ISP_IOXGET_16(isp, &src->na_reserved2, dst->na_reserved2); ISP_IOXGET_16(isp, &src->na_status, dst->na_status); ISP_IOXGET_16(isp, &src->na_task_flags, dst->na_task_flags); ISP_IOXGET_16(isp, &src->na_seqid, dst->na_seqid); for (i = 0; i < NA2_RSVDLEN; i++) { ISP_IOXGET_16(isp, &src->na_reserved3[i], dst->na_reserved3[i]); } } void isp_get_notify_ack_24xx(ispsoftc_t *isp, na_fcentry_24xx_t *src, na_fcentry_24xx_t *dst) { int i; isp_get_hdr(isp, &src->na_header, &dst->na_header); ISP_IOXGET_32(isp, &src->na_handle, dst->na_handle); ISP_IOXGET_16(isp, &src->na_nphdl, dst->na_nphdl); ISP_IOXGET_16(isp, &src->na_reserved1, dst->na_reserved1); ISP_IOXGET_16(isp, &src->na_flags, dst->na_flags); ISP_IOXGET_16(isp, &src->na_srr_rxid, dst->na_srr_rxid); ISP_IOXGET_16(isp, &src->na_status, dst->na_status); ISP_IOXGET_8(isp, &src->na_status_subcode, dst->na_status_subcode); ISP_IOXGET_8(isp, &src->na_fwhandle, dst->na_fwhandle); ISP_IOXGET_32(isp, &src->na_rxid, dst->na_rxid); ISP_IOXGET_16(isp, &src->na_srr_reloff_hi, dst->na_srr_reloff_hi); ISP_IOXGET_16(isp, &src->na_srr_reloff_lo, dst->na_srr_reloff_lo); ISP_IOXGET_16(isp, &src->na_srr_iu, dst->na_srr_iu); ISP_IOXGET_16(isp, &src->na_srr_flags, dst->na_srr_flags); for (i = 0; i < 18; i++) { ISP_IOXGET_8(isp, &src->na_reserved3[i], dst->na_reserved3[i]); } ISP_IOXGET_8(isp, &src->na_reserved4, dst->na_reserved4); ISP_IOXGET_8(isp, &src->na_vpidx, dst->na_vpidx); ISP_IOXGET_8(isp, &src->na_srr_reject_vunique, dst->na_srr_reject_vunique); ISP_IOXGET_8(isp, &src->na_srr_reject_explanation, dst->na_srr_reject_explanation); ISP_IOXGET_8(isp, &src->na_srr_reject_code, dst->na_srr_reject_code); ISP_IOXGET_8(isp, &src->na_reserved5, dst->na_reserved5); for (i = 0; i < 6; i++) { ISP_IOXGET_8(isp, &src->na_reserved6[i], dst->na_reserved6[i]); } ISP_IOXGET_16(isp, &src->na_oxid, dst->na_oxid); } void isp_get_abts(ispsoftc_t *isp, abts_t *src, abts_t *dst) { int i; isp_get_hdr(isp, &src->abts_header, &dst->abts_header); for (i = 0; i < 6; i++) { ISP_IOXGET_8(isp, &src->abts_reserved0[i], dst->abts_reserved0[i]); } ISP_IOXGET_16(isp, &src->abts_nphdl, dst->abts_nphdl); ISP_IOXGET_16(isp, &src->abts_reserved1, dst->abts_reserved1); ISP_IOXGET_16(isp, &src->abts_sof, dst->abts_sof); ISP_IOXGET_32(isp, &src->abts_rxid_abts, dst->abts_rxid_abts); ISP_IOXGET_16(isp, &src->abts_did_lo, dst->abts_did_lo); ISP_IOXGET_8(isp, &src->abts_did_hi, dst->abts_did_hi); ISP_IOXGET_8(isp, &src->abts_r_ctl, dst->abts_r_ctl); ISP_IOXGET_16(isp, &src->abts_sid_lo, dst->abts_sid_lo); ISP_IOXGET_8(isp, &src->abts_sid_hi, dst->abts_sid_hi); ISP_IOXGET_8(isp, &src->abts_cs_ctl, dst->abts_cs_ctl); ISP_IOXGET_16(isp, &src->abts_fs_ctl, dst->abts_fs_ctl); 
ISP_IOXGET_8(isp, &src->abts_f_ctl, dst->abts_f_ctl); ISP_IOXGET_8(isp, &src->abts_type, dst->abts_type); ISP_IOXGET_16(isp, &src->abts_seq_cnt, dst->abts_seq_cnt); ISP_IOXGET_8(isp, &src->abts_df_ctl, dst->abts_df_ctl); ISP_IOXGET_8(isp, &src->abts_seq_id, dst->abts_seq_id); ISP_IOXGET_16(isp, &src->abts_rx_id, dst->abts_rx_id); ISP_IOXGET_16(isp, &src->abts_ox_id, dst->abts_ox_id); ISP_IOXGET_32(isp, &src->abts_param, dst->abts_param); for (i = 0; i < 16; i++) { ISP_IOXGET_8(isp, &src->abts_reserved2[i], dst->abts_reserved2[i]); } ISP_IOXGET_32(isp, &src->abts_rxid_task, dst->abts_rxid_task); } void isp_put_abts_rsp(ispsoftc_t *isp, abts_rsp_t *src, abts_rsp_t *dst) { int i; isp_put_hdr(isp, &src->abts_rsp_header, &dst->abts_rsp_header); ISP_IOXPUT_32(isp, src->abts_rsp_handle, &dst->abts_rsp_handle); ISP_IOXPUT_16(isp, src->abts_rsp_status, &dst->abts_rsp_status); ISP_IOXPUT_16(isp, src->abts_rsp_nphdl, &dst->abts_rsp_nphdl); ISP_IOXPUT_16(isp, src->abts_rsp_ctl_flags, &dst->abts_rsp_ctl_flags); ISP_IOXPUT_16(isp, src->abts_rsp_sof, &dst->abts_rsp_sof); ISP_IOXPUT_32(isp, src->abts_rsp_rxid_abts, &dst->abts_rsp_rxid_abts); ISP_IOXPUT_16(isp, src->abts_rsp_did_lo, &dst->abts_rsp_did_lo); ISP_IOXPUT_8(isp, src->abts_rsp_did_hi, &dst->abts_rsp_did_hi); ISP_IOXPUT_8(isp, src->abts_rsp_r_ctl, &dst->abts_rsp_r_ctl); ISP_IOXPUT_16(isp, src->abts_rsp_sid_lo, &dst->abts_rsp_sid_lo); ISP_IOXPUT_8(isp, src->abts_rsp_sid_hi, &dst->abts_rsp_sid_hi); ISP_IOXPUT_8(isp, src->abts_rsp_cs_ctl, &dst->abts_rsp_cs_ctl); ISP_IOXPUT_16(isp, src->abts_rsp_f_ctl_lo, &dst->abts_rsp_f_ctl_lo); ISP_IOXPUT_8(isp, src->abts_rsp_f_ctl_hi, &dst->abts_rsp_f_ctl_hi); ISP_IOXPUT_8(isp, src->abts_rsp_type, &dst->abts_rsp_type); ISP_IOXPUT_16(isp, src->abts_rsp_seq_cnt, &dst->abts_rsp_seq_cnt); ISP_IOXPUT_8(isp, src->abts_rsp_df_ctl, &dst->abts_rsp_df_ctl); ISP_IOXPUT_8(isp, src->abts_rsp_seq_id, &dst->abts_rsp_seq_id); ISP_IOXPUT_16(isp, src->abts_rsp_rx_id, &dst->abts_rsp_rx_id); ISP_IOXPUT_16(isp, src->abts_rsp_ox_id, &dst->abts_rsp_ox_id); ISP_IOXPUT_32(isp, src->abts_rsp_param, &dst->abts_rsp_param); if (src->abts_rsp_r_ctl == BA_ACC) { ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.reserved, &dst->abts_rsp_payload.ba_acc.reserved); ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_acc.last_seq_id, &dst->abts_rsp_payload.ba_acc.last_seq_id); ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_acc.seq_id_valid, &dst->abts_rsp_payload.ba_acc.seq_id_valid); ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.aborted_rx_id, &dst->abts_rsp_payload.ba_acc.aborted_rx_id); ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.aborted_ox_id, &dst->abts_rsp_payload.ba_acc.aborted_ox_id); ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.high_seq_cnt, &dst->abts_rsp_payload.ba_acc.high_seq_cnt); ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.low_seq_cnt, &dst->abts_rsp_payload.ba_acc.low_seq_cnt); for (i = 0; i < 4; i++) { ISP_IOXPUT_16(isp, src->abts_rsp_payload.ba_acc.reserved2[i], &dst->abts_rsp_payload.ba_acc.reserved2[i]); } } else if (src->abts_rsp_r_ctl == BA_RJT) { ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.vendor_unique, &dst->abts_rsp_payload.ba_rjt.vendor_unique); ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.explanation, &dst->abts_rsp_payload.ba_rjt.explanation); ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.reason, &dst->abts_rsp_payload.ba_rjt.reason); ISP_IOXPUT_8(isp, src->abts_rsp_payload.ba_rjt.reserved, &dst->abts_rsp_payload.ba_rjt.reserved); for (i = 0; i < 12; i++) { ISP_IOXPUT_16(isp, 
src->abts_rsp_payload.ba_rjt.reserved2[i], &dst->abts_rsp_payload.ba_rjt.reserved2[i]); } } else { for (i = 0; i < 16; i++) { ISP_IOXPUT_8(isp, src->abts_rsp_payload.reserved[i], &dst->abts_rsp_payload.reserved[i]); } } ISP_IOXPUT_32(isp, src->abts_rsp_rxid_task, &dst->abts_rsp_rxid_task); } void isp_get_abts_rsp(ispsoftc_t *isp, abts_rsp_t *src, abts_rsp_t *dst) { int i; isp_get_hdr(isp, &src->abts_rsp_header, &dst->abts_rsp_header); ISP_IOXGET_32(isp, &src->abts_rsp_handle, dst->abts_rsp_handle); ISP_IOXGET_16(isp, &src->abts_rsp_status, dst->abts_rsp_status); ISP_IOXGET_16(isp, &src->abts_rsp_nphdl, dst->abts_rsp_nphdl); ISP_IOXGET_16(isp, &src->abts_rsp_ctl_flags, dst->abts_rsp_ctl_flags); ISP_IOXGET_16(isp, &src->abts_rsp_sof, dst->abts_rsp_sof); ISP_IOXGET_32(isp, &src->abts_rsp_rxid_abts, dst->abts_rsp_rxid_abts); ISP_IOXGET_16(isp, &src->abts_rsp_did_lo, dst->abts_rsp_did_lo); ISP_IOXGET_8(isp, &src->abts_rsp_did_hi, dst->abts_rsp_did_hi); ISP_IOXGET_8(isp, &src->abts_rsp_r_ctl, dst->abts_rsp_r_ctl); ISP_IOXGET_16(isp, &src->abts_rsp_sid_lo, dst->abts_rsp_sid_lo); ISP_IOXGET_8(isp, &src->abts_rsp_sid_hi, dst->abts_rsp_sid_hi); ISP_IOXGET_8(isp, &src->abts_rsp_cs_ctl, dst->abts_rsp_cs_ctl); ISP_IOXGET_16(isp, &src->abts_rsp_f_ctl_lo, dst->abts_rsp_f_ctl_lo); ISP_IOXGET_8(isp, &src->abts_rsp_f_ctl_hi, dst->abts_rsp_f_ctl_hi); ISP_IOXGET_8(isp, &src->abts_rsp_type, dst->abts_rsp_type); ISP_IOXGET_16(isp, &src->abts_rsp_seq_cnt, dst->abts_rsp_seq_cnt); ISP_IOXGET_8(isp, &src->abts_rsp_df_ctl, dst->abts_rsp_df_ctl); ISP_IOXGET_8(isp, &src->abts_rsp_seq_id, dst->abts_rsp_seq_id); ISP_IOXGET_16(isp, &src->abts_rsp_rx_id, dst->abts_rsp_rx_id); ISP_IOXGET_16(isp, &src->abts_rsp_ox_id, dst->abts_rsp_ox_id); ISP_IOXGET_32(isp, &src->abts_rsp_param, dst->abts_rsp_param); for (i = 0; i < 8; i++) { ISP_IOXGET_8(isp, &src->abts_rsp_payload.rsp.reserved[i], dst->abts_rsp_payload.rsp.reserved[i]); } ISP_IOXGET_32(isp, &src->abts_rsp_payload.rsp.subcode1, dst->abts_rsp_payload.rsp.subcode1); ISP_IOXGET_32(isp, &src->abts_rsp_payload.rsp.subcode2, dst->abts_rsp_payload.rsp.subcode2); ISP_IOXGET_32(isp, &src->abts_rsp_rxid_task, dst->abts_rsp_rxid_task); } #endif /* ISP_TARGET_MODE */ /* * vim:ts=8:sw=8 */ Index: projects/powernv/dev/isp/isp_library.h =================================================================== --- projects/powernv/dev/isp/isp_library.h (revision 291050) +++ projects/powernv/dev/isp/isp_library.h (revision 291051) @@ -1,221 +1,222 @@ /* $FreeBSD$ */ /*- * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #ifndef _ISP_LIBRARY_H #define _ISP_LIBRARY_H /* * Common command shipping routine. * * This used to be platform specific, but basically once you get the segment * stuff figured out, you can make all the code in one spot. */ typedef enum { ISP_TO_DEVICE, ISP_FROM_DEVICE, ISP_NOXFR} isp_ddir_t; int isp_send_cmd(ispsoftc_t *, void *, void *, uint32_t, uint32_t, isp_ddir_t, ispds64_t *); /* * Handle management functions. * * These handles are associated with a command. */ int isp_allocate_xs(ispsoftc_t *, XS_T *, uint32_t *); XS_T * isp_find_xs(ispsoftc_t *, uint32_t); uint32_t isp_find_handle(ispsoftc_t *, XS_T *); uint32_t isp_handle_index(ispsoftc_t *, uint32_t); void isp_destroy_handle(ispsoftc_t *, uint32_t); /* * Request Queue allocation */ void *isp_getrqentry(ispsoftc_t *); /* * Queue Entry debug functions */ void isp_print_qentry(ispsoftc_t *, const char *, int, void *); void isp_print_bytes(ispsoftc_t *, const char *, int, void *); /* * Fibre Channel specific routines and data. */ extern const char *isp_class3_roles[4]; int isp_fc_runstate(ispsoftc_t *, int, int); void isp_dump_portdb(ispsoftc_t *, int); void isp_gen_role_str(char *, size_t, uint16_t); const char *isp_fc_fw_statename(int); const char *isp_fc_loop_statename(int); const char *isp_fc_toponame(fcparam *); int isp_fc_change_role(ispsoftc_t *, int, int); /* * Cleanup */ void isp_clear_commands(ispsoftc_t *); /* * Common chip shutdown function */ void isp_shutdown(ispsoftc_t *); /* * Put/Get routines to push from CPU view to device view * or to pull from device view to CPU view for various * data structures (IOCB) */ void isp_put_hdr(ispsoftc_t *, isphdr_t *, isphdr_t *); void isp_get_hdr(ispsoftc_t *, isphdr_t *, isphdr_t *); int isp_get_response_type(ispsoftc_t *, isphdr_t *); void isp_put_request(ispsoftc_t *, ispreq_t *, ispreq_t *); void isp_put_marker(ispsoftc_t *, isp_marker_t *, isp_marker_t *); void isp_put_marker_24xx(ispsoftc_t *, isp_marker_24xx_t *, isp_marker_24xx_t *); void isp_put_request_t2(ispsoftc_t *, ispreqt2_t *, ispreqt2_t *); void isp_put_request_t2e(ispsoftc_t *, ispreqt2e_t *, ispreqt2e_t *); void isp_put_request_t3(ispsoftc_t *, ispreqt3_t *, ispreqt3_t *); void isp_put_request_t3e(ispsoftc_t *, ispreqt3e_t *, ispreqt3e_t *); void isp_put_extended_request(ispsoftc_t *, ispextreq_t *, ispextreq_t *); void isp_put_request_t7(ispsoftc_t *, ispreqt7_t *, ispreqt7_t *); void isp_put_24xx_tmf(ispsoftc_t *, isp24xx_tmf_t *, isp24xx_tmf_t *); void isp_put_24xx_abrt(ispsoftc_t *, isp24xx_abrt_t *, isp24xx_abrt_t *); void isp_put_cont_req(ispsoftc_t *, ispcontreq_t *, ispcontreq_t *); void isp_put_cont64_req(ispsoftc_t *, ispcontreq64_t *, ispcontreq64_t *); void isp_get_response(ispsoftc_t *, ispstatusreq_t *, ispstatusreq_t *); void isp_get_cont_response(ispsoftc_t *, ispstatus_cont_t *, ispstatus_cont_t *); void isp_get_24xx_response(ispsoftc_t *, isp24xx_statusreq_t *, isp24xx_statusreq_t *); void isp_get_24xx_abrt(ispsoftc_t *, isp24xx_abrt_t *, isp24xx_abrt_t *); void
isp_get_rio1(ispsoftc_t *, isp_rio1_t *, isp_rio1_t *); void isp_get_rio2(ispsoftc_t *, isp_rio2_t *, isp_rio2_t *); void isp_put_icb(ispsoftc_t *, isp_icb_t *, isp_icb_t *); void isp_put_icb_2400(ispsoftc_t *, isp_icb_2400_t *, isp_icb_2400_t *); void isp_put_icb_2400_vpinfo(ispsoftc_t *, isp_icb_2400_vpinfo_t *, isp_icb_2400_vpinfo_t *); void isp_put_vp_port_info(ispsoftc_t *, vp_port_info_t *, vp_port_info_t *); void isp_get_vp_port_info(ispsoftc_t *, vp_port_info_t *, vp_port_info_t *); void isp_put_vp_ctrl_info(ispsoftc_t *, vp_ctrl_info_t *, vp_ctrl_info_t *); void isp_get_vp_ctrl_info(ispsoftc_t *, vp_ctrl_info_t *, vp_ctrl_info_t *); void isp_put_vp_modify(ispsoftc_t *, vp_modify_t *, vp_modify_t *); void isp_get_vp_modify(ispsoftc_t *, vp_modify_t *, vp_modify_t *); void isp_get_pdb_21xx(ispsoftc_t *, isp_pdb_21xx_t *, isp_pdb_21xx_t *); void isp_get_pdb_24xx(ispsoftc_t *, isp_pdb_24xx_t *, isp_pdb_24xx_t *); void isp_get_pnhle_21xx(ispsoftc_t *, isp_pnhle_21xx_t *, isp_pnhle_21xx_t *); void isp_get_pnhle_23xx(ispsoftc_t *, isp_pnhle_23xx_t *, isp_pnhle_23xx_t *); void isp_get_pnhle_24xx(ispsoftc_t *, isp_pnhle_24xx_t *, isp_pnhle_24xx_t *); void isp_get_pnnle(ispsoftc_t *, isp_pnnle_t *, isp_pnnle_t *); void isp_get_ridacq(ispsoftc_t *, isp_ridacq_t *, isp_ridacq_t *); void isp_get_plogx(ispsoftc_t *, isp_plogx_t *, isp_plogx_t *); void isp_put_plogx(ispsoftc_t *, isp_plogx_t *, isp_plogx_t *); void isp_get_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *, isp_ct_pt_t *); void isp_get_ms(ispsoftc_t *isp, isp_ms_t *, isp_ms_t *); void isp_put_ct_pt(ispsoftc_t *isp, isp_ct_pt_t *, isp_ct_pt_t *); void isp_put_ms(ispsoftc_t *isp, isp_ms_t *, isp_ms_t *); void isp_put_sns_request(ispsoftc_t *, sns_screq_t *, sns_screq_t *); void isp_put_gid_ft_request(ispsoftc_t *, sns_gid_ft_req_t *, sns_gid_ft_req_t *); void isp_put_gxn_id_request(ispsoftc_t *, sns_gxn_id_req_t *, sns_gxn_id_req_t *); void isp_get_sns_response(ispsoftc_t *, sns_scrsp_t *, sns_scrsp_t *, int); void isp_get_gid_ft_response(ispsoftc_t *, sns_gid_ft_rsp_t *, sns_gid_ft_rsp_t *, int); void isp_get_gxn_id_response(ispsoftc_t *, sns_gxn_id_rsp_t *, sns_gxn_id_rsp_t *); void isp_get_gff_id_response(ispsoftc_t *, sns_gff_id_rsp_t *, sns_gff_id_rsp_t *); void isp_get_ga_nxt_response(ispsoftc_t *, sns_ga_nxt_rsp_t *, sns_ga_nxt_rsp_t *); void isp_get_els(ispsoftc_t *, els_t *, els_t *); void isp_put_els(ispsoftc_t *, els_t *, els_t *); void isp_get_fc_hdr(ispsoftc_t *, fc_hdr_t *, fc_hdr_t *); void isp_put_fc_hdr(ispsoftc_t *, fc_hdr_t *, fc_hdr_t *); void isp_get_fcp_cmnd_iu(ispsoftc_t *, fcp_cmnd_iu_t *, fcp_cmnd_iu_t *); void isp_put_rft_id(ispsoftc_t *, rft_id_t *, rft_id_t *); +void isp_put_rff_id(ispsoftc_t *, rff_id_t *, rff_id_t *); void isp_get_ct_hdr(ispsoftc_t *isp, ct_hdr_t *, ct_hdr_t *); void isp_put_ct_hdr(ispsoftc_t *isp, ct_hdr_t *, ct_hdr_t *); void isp_put_fcp_rsp_iu(ispsoftc_t *isp, fcp_rsp_iu_t *, fcp_rsp_iu_t *); #define ISP_HANDLE_MASK 0x7fff #ifdef ISP_TARGET_MODE #if defined(__NetBSD__) || defined(__OpenBSD__) #include <dev/ic/isp_target.h> #elif defined(__FreeBSD__) #include <dev/isp/isp_target.h> #else #include "isp_target.h" #endif int isp_send_tgt_cmd(ispsoftc_t *, void *, void *, uint32_t, uint32_t, isp_ddir_t, void *, uint32_t); int isp_allocate_xs_tgt(ispsoftc_t *, void *, uint32_t *); void *isp_find_xs_tgt(ispsoftc_t *, uint32_t); uint32_t isp_find_tgt_handle(ispsoftc_t *, void *); void isp_destroy_tgt_handle(ispsoftc_t *, uint32_t); #endif int isp_find_pdb_by_wwn(ispsoftc_t *, int, uint64_t, fcportdb_t **); #ifdef ISP_TARGET_MODE -int
isp_find_pdb_by_handle(ispsoftc_t *, int, uint32_t, fcportdb_t **); +int isp_find_pdb_by_handle(ispsoftc_t *, int, uint16_t, fcportdb_t **); int isp_find_pdb_by_sid(ispsoftc_t *, int, uint32_t, fcportdb_t **); void isp_find_chan_by_did(ispsoftc_t *, uint32_t, uint16_t *); void isp_add_wwn_entry(ispsoftc_t *, int, uint64_t, uint64_t, uint16_t, uint32_t, uint16_t); void isp_del_wwn_entry(ispsoftc_t *, int, uint64_t, uint16_t, uint32_t); void isp_del_all_wwn_entries(ispsoftc_t *, int); void isp_del_wwn_entries(ispsoftc_t *, isp_notify_t *); void isp_put_atio(ispsoftc_t *, at_entry_t *, at_entry_t *); void isp_get_atio(ispsoftc_t *, at_entry_t *, at_entry_t *); void isp_put_atio2(ispsoftc_t *, at2_entry_t *, at2_entry_t *); void isp_put_atio2e(ispsoftc_t *, at2e_entry_t *, at2e_entry_t *); void isp_get_atio2(ispsoftc_t *, at2_entry_t *, at2_entry_t *); void isp_get_atio2e(ispsoftc_t *, at2e_entry_t *, at2e_entry_t *); void isp_get_atio7(ispsoftc_t *isp, at7_entry_t *, at7_entry_t *); void isp_put_ctio(ispsoftc_t *, ct_entry_t *, ct_entry_t *); void isp_get_ctio(ispsoftc_t *, ct_entry_t *, ct_entry_t *); void isp_put_ctio2(ispsoftc_t *, ct2_entry_t *, ct2_entry_t *); void isp_put_ctio2e(ispsoftc_t *, ct2e_entry_t *, ct2e_entry_t *); void isp_put_ctio7(ispsoftc_t *, ct7_entry_t *, ct7_entry_t *); void isp_get_ctio2(ispsoftc_t *, ct2_entry_t *, ct2_entry_t *); void isp_get_ctio2e(ispsoftc_t *, ct2e_entry_t *, ct2e_entry_t *); void isp_get_ctio7(ispsoftc_t *, ct7_entry_t *, ct7_entry_t *); void isp_put_enable_lun(ispsoftc_t *, lun_entry_t *, lun_entry_t *); void isp_get_enable_lun(ispsoftc_t *, lun_entry_t *, lun_entry_t *); void isp_put_notify(ispsoftc_t *, in_entry_t *, in_entry_t *); void isp_get_notify(ispsoftc_t *, in_entry_t *, in_entry_t *); void isp_put_notify_fc(ispsoftc_t *, in_fcentry_t *, in_fcentry_t *); void isp_put_notify_fc_e(ispsoftc_t *, in_fcentry_e_t *, in_fcentry_e_t *); void isp_put_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *, in_fcentry_24xx_t *); void isp_get_notify_fc(ispsoftc_t *, in_fcentry_t *, in_fcentry_t *); void isp_get_notify_fc_e(ispsoftc_t *, in_fcentry_e_t *, in_fcentry_e_t *); void isp_get_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *, in_fcentry_24xx_t *); void isp_put_notify_ack(ispsoftc_t *, na_entry_t *, na_entry_t *); void isp_get_notify_ack(ispsoftc_t *, na_entry_t *, na_entry_t *); void isp_put_notify_24xx_ack(ispsoftc_t *, na_fcentry_24xx_t *, na_fcentry_24xx_t *); void isp_put_notify_ack_fc(ispsoftc_t *, na_fcentry_t *, na_fcentry_t *); void isp_put_notify_ack_fc_e(ispsoftc_t *, na_fcentry_e_t *, na_fcentry_e_t *); void isp_put_notify_ack_24xx(ispsoftc_t *, na_fcentry_24xx_t *, na_fcentry_24xx_t *); void isp_get_notify_ack_fc(ispsoftc_t *, na_fcentry_t *, na_fcentry_t *); void isp_get_notify_ack_fc_e(ispsoftc_t *, na_fcentry_e_t *, na_fcentry_e_t *); void isp_get_notify_ack_24xx(ispsoftc_t *, na_fcentry_24xx_t *, na_fcentry_24xx_t *); void isp_get_abts(ispsoftc_t *, abts_t *, abts_t *); void isp_put_abts_rsp(ispsoftc_t *, abts_rsp_t *, abts_rsp_t *); void isp_get_abts_rsp(ispsoftc_t *, abts_rsp_t *, abts_rsp_t *); #endif /* ISP_TARGET_MODE */ #endif /* _ISP_LIBRARY_H */ Index: projects/powernv/dev/isp/isp_stds.h =================================================================== --- projects/powernv/dev/isp/isp_stds.h (revision 291050) +++ projects/powernv/dev/isp/isp_stds.h (revision 291051) @@ -1,291 +1,305 @@ /* $FreeBSD$ */ /*- * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Structures that derive directly from public standards. */ #ifndef _ISP_STDS_H #define _ISP_STDS_H /* * FC Frame Header * * Source: dpANS-X3.xxx-199x, section 18 (AKA FC-PH-2) * */ typedef struct { uint8_t r_ctl; uint8_t d_id[3]; uint8_t cs_ctl; uint8_t s_id[3]; uint8_t type; uint8_t f_ctl[3]; uint8_t seq_id; uint8_t df_ctl; uint16_t seq_cnt; uint16_t ox_id; uint16_t rx_id; uint32_t parameter; } fc_hdr_t; /* * FCP_CMND_IU Payload * * Source: NCITS T10, Project 1144D, Revision 07a, Section 9 (AKA fcp2-r07a) * * Notes: * When additional cdb length is defined in fcp_cmnd_alen_datadir, * bits 2..7, the actual cdb length is 16 + ((fcp_cmnd_alen_datadir>>2)*4), * with the datalength following in MSB format just after.
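 *
 * A minimal illustrative sketch of that recovery (an editorial
 * assumption, not text from the standard), using only the structure
 * and the FCP_CMND_ADDTL_CDBLEN_SHIFT constant defined below:
 *
 *	int addl = (iu->fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT) * 4;
 *	int cdblen = 16 + addl;
 *	uint8_t *dlp = &iu->cdb_dl.lf.fcp_cmnd_cdb[cdblen];
 *	uint32_t fcp_dl = (dlp[0] << 24) | (dlp[1] << 16) | (dlp[2] << 8) | dlp[3];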
*/ typedef struct { uint8_t fcp_cmnd_lun[8]; uint8_t fcp_cmnd_crn; uint8_t fcp_cmnd_task_attribute; uint8_t fcp_cmnd_task_management; uint8_t fcp_cmnd_alen_datadir; union { struct { uint8_t fcp_cmnd_cdb[16]; uint32_t fcp_cmnd_dl; } sf; struct { uint8_t fcp_cmnd_cdb[1]; } lf; } cdb_dl; } fcp_cmnd_iu_t; #define FCP_CMND_TASK_ATTR_SIMPLE 0x00 #define FCP_CMND_TASK_ATTR_HEAD 0x01 #define FCP_CMND_TASK_ATTR_ORDERED 0x02 #define FCP_CMND_TASK_ATTR_ACA 0x04 #define FCP_CMND_TASK_ATTR_UNTAGGED 0x05 #define FCP_CMND_TASK_ATTR_MASK 0x07 #define FCP_CMND_ADDTL_CDBLEN_SHIFT 2 #define FCP_CMND_DATA_WRITE 0x01 #define FCP_CMND_DATA_READ 0x02 #define FCP_CMND_DATA_DIR_MASK 0x03 #define FCP_CMND_TMF_CLEAR_ACA 0x40 #define FCP_CMND_TMF_TGT_RESET 0x20 #define FCP_CMND_TMF_LUN_RESET 0x10 #define FCP_CMND_TMF_QUERY_ASYNC_EVENT 0x08 #define FCP_CMND_TMF_CLEAR_TASK_SET 0x04 #define FCP_CMND_TMF_ABORT_TASK_SET 0x02 #define FCP_CMND_TMF_QUERY_TASK_SET 0x01 /* * Basic CT IU Header * * Source: X3.288-199x Generic Services 2 Rev 5.3 (FC-GS-2) Section 4.3.1 */ typedef struct { uint8_t ct_revision; uint8_t ct_in_id[3]; uint8_t ct_fcs_type; uint8_t ct_fcs_subtype; uint8_t ct_options; uint8_t ct_reserved0; uint16_t ct_cmd_resp; uint16_t ct_bcnt_resid; uint8_t ct_reserved1; uint8_t ct_reason; uint8_t ct_explanation; uint8_t ct_vunique; } ct_hdr_t; #define CT_REVISION 1 #define CT_FC_TYPE_FC 0xFC #define CT_FC_SUBTYPE_NS 0x02 /* * RFT_ID Request CT_IU * * Source: NCITS xxx-200x Generic Services- 5 Rev 8.5 Section 5.2.5.30 */ typedef struct { ct_hdr_t rftid_hdr; uint8_t rftid_reserved; uint8_t rftid_portid[3]; uint32_t rftid_fc4types[8]; } rft_id_t; /* + * RFF_ID Request CT_IU + * + * Source: INCITS 463-2010 Generic Services 6 Section 5.2.5.34 + */ +typedef struct { + ct_hdr_t rffid_hdr; + uint8_t rffid_reserved; + uint8_t rffid_portid[3]; + uint16_t rffid_reserved2; + uint8_t rffid_fc4features; + uint8_t rffid_fc4type; +} rff_id_t; + +/* * FCP Response IU and bits of interest * Source: NCITS T10, Project 1828D, Revision 02b (aka FCP4r02b) */ typedef struct { uint8_t fcp_rsp_reserved[8]; uint16_t fcp_rsp_status_qualifier; /* SAM-5 Status Qualifier */ uint8_t fcp_rsp_bits; uint8_t fcp_rsp_scsi_status; /* SAM-5 SCSI Status Byte */ uint32_t fcp_rsp_resid; uint32_t fcp_rsp_snslen; uint32_t fcp_rsp_rsplen; /* * In the bytes that follow, it's going to be * FCP RESPONSE INFO (max 8 bytes, possibly 0) * FCP SENSE INFO (if any) * FCP BIDIRECTIONAL READ RESID (if any) */ uint8_t fcp_rsp_extra[0]; } fcp_rsp_iu_t; #define MIN_FCP_RESPONSE_SIZE 24 #define FCP_BIDIR_RSP 0x80 /* Bi-Directional response */ #define FCP_BIDIR_RESID_UNDERFLOW 0x40 #define FCP_BIDIR_RESID_OVERFLOW 0x20 #define FCP_CONF_REQ 0x10 #define FCP_RESID_UNDERFLOW 0x08 #define FCP_RESID_OVERFLOW 0x04 #define FCP_SNSLEN_VALID 0x02 #define FCP_RSPLEN_VALID 0x01 #define FCP_MAX_RSPLEN 0x08 /* * FCP Response Code Definitions * Source: NCITS T10, Project 1144D, Revision 08 (aka FCP2r08) */ #define FCP_RSPNS_CODE_OFFSET 3 #define FCP_RSPNS_TMF_DONE 0 #define FCP_RSPNS_DLBRSTX 1 #define FCP_RSPNS_BADCMND 2 #define FCP_RSPNS_EROFS 3 #define FCP_RSPNS_TMF_REJECT 4 #define FCP_RSPNS_TMF_FAILED 5 #define FCP_RSPNS_TMF_SUCCEEDED 8 #define FCP_RSPNS_TMF_INCORRECT_LUN 9 /* * R_CTL field definitions * * Bits 31-28 are ROUTING * Bits 27-24 are INFORMATION * * These are nibble values, not bits */ #define R_CTL_ROUTE_DATA 0x00 #define R_CTL_ROUTE_ELS 0x02 #define R_CTL_ROUTE_FC4_LINK 0x03 #define R_CTL_ROUTE_VDATA 0x04 #define R_CTL_ROUTE_EXENDED 0x05 #define R_CTL_ROUTE_BASIC 0x08
#define R_CTL_ROUTE_LINK 0x0c #define R_CTL_ROUTE_EXT_ROUTING 0x0f #define R_CTL_INFO_UNCATEGORIZED 0x00 #define R_CTL_INFO_SOLICITED_DATA 0x01 #define R_CTL_INFO_UNSOLICITED_CONTROL 0x02 #define R_CTL_INFO_SOLICITED_CONTROL 0x03 #define R_CTL_INFO_UNSOLICITED_DATA 0x04 #define R_CTL_INFO_DATA_DESCRIPTOR 0x05 #define R_CTL_INFO_UNSOLICITED_COMMAND 0x06 #define R_CTL_INFO_COMMAND_STATUS 0x07 #define MAKE_RCTL(a, b) (((a) << 4) | (b)) /* unconverted miscellany */ /* * Basic FC Link Service defines */ /* #define ABTS MAKE_RCTL(R_CTL_ROUTE_BASIC, R_CTL_INFO_SOLICITED_DATA) */ #define BA_ACC MAKE_RCTL(R_CTL_ROUTE_BASIC, R_CTL_INFO_UNSOLICITED_DATA) /* of ABORT */ #define BA_RJT MAKE_RCTL(R_CTL_ROUTE_BASIC, R_CTL_INFO_DATA_DESCRIPTOR) /* of ABORT */ /* * Link Service Accept/Reject */ #define LS_ACC 0x8002 #define LS_RJT 0x8001 /* * FC ELS Codes- bits 31-24 of the first payload word of an ELS frame. */ #define PLOGI 0x03 #define FLOGI 0x04 #define LOGO 0x05 #define ABTX 0x06 #define PRLI 0x20 #define PRLO 0x21 #define SCN 0x22 #define TPRLO 0x24 #define PDISC 0x50 #define ADISC 0x52 #define RNC 0x53 /* * PRLI Word 3 definitions * FPC4-r02b January, 2011 */ #define PRLI_WD3_ENHANCED_DISCOVERY (1 << 11) #define PRLI_WD3_REC_SUPPORT (1 << 10) #define PRLI_WD3_TASK_RETRY_IDENTIFICATION_REQUESTED (1 << 9) #define PRLI_WD3_RETRY (1 << 8) #define PRLI_WD3_CONFIRMED_COMPLETION_ALLOWED (1 << 7) #define PRLI_WD3_DATA_OVERLAY_ALLOWED (1 << 6) #define PRLI_WD3_INITIATOR_FUNCTION (1 << 5) #define PRLI_WD3_TARGET_FUNCTION (1 << 4) #define PRLI_WD3_READ_FCP_XFER_RDY_DISABLED (1 << 1) /* definitely supposed to be set */ #define PRLI_WD3_WRITE_FCP_XFER_RDY_DISABLED (1 << 0) /* * FC4 defines */ #define FC4_IP 5 /* ISO/EEC 8802-2 LLC/SNAP */ #define FC4_SCSI 8 /* SCSI-3 via Fibre Channel Protocol (FCP) */ #define FC4_FC_SVC 0x20 /* Fibre Channel Services */ #ifndef MSG_ABORT #define MSG_ABORT 0x06 #endif #ifndef MSG_BUS_DEV_RESET #define MSG_BUS_DEV_RESET 0x0c #endif #ifndef MSG_ABORT_TAG #define MSG_ABORT_TAG 0x0d #endif #ifndef MSG_CLEAR_QUEUE #define MSG_CLEAR_QUEUE 0x0e #endif #ifndef MSG_REL_RECOVERY #define MSG_REL_RECOVERY 0x10 #endif #ifndef MSG_TERM_IO_PROC #define MSG_TERM_IO_PROC 0x11 #endif #ifndef MSG_LUN_RESET #define MSG_LUN_RESET 0x17 #endif #endif /* _ISP_STDS_H */ Index: projects/powernv/dev/isp/isp_target.c =================================================================== --- projects/powernv/dev/isp/isp_target.c (revision 291050) +++ projects/powernv/dev/isp/isp_target.c (revision 291051) @@ -1,1914 +1,1914 @@ /*- * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Machine and OS Independent Target Mode Code for the Qlogic SCSI/FC adapters. */ /* * Bug fixes gratefully acknowledged from: * Oded Kedem */ /* * Include header file appropriate for platform we're building on. */ #ifdef __NetBSD__ #include <dev/ic/isp_netbsd.h> #endif #ifdef __FreeBSD__ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <dev/isp/isp_freebsd.h> #endif #ifdef __OpenBSD__ #include <dev/ic/isp_openbsd.h> #endif #ifdef __linux__ #include "isp_linux.h" #endif #ifdef ISP_TARGET_MODE -static const char atiocope[] = "ATIO returned for lun %d because it was in the middle of Bus Device Reset on bus %d"; -static const char atior[] = "ATIO returned on for lun %d on from loopid %d because a Bus Reset occurred on bus %d"; +static const char atiocope[] = "ATIO returned for LUN %x because it was in the middle of Bus Device Reset on bus %d"; +static const char atior[] = "ATIO returned for LUN %x from handle 0x%x because a Bus Reset occurred on bus %d"; static const char rqo[] = "%s: Request Queue Overflow"; static void isp_got_msg(ispsoftc_t *, in_entry_t *); static void isp_got_msg_fc(ispsoftc_t *, in_fcentry_t *); static void isp_got_tmf_24xx(ispsoftc_t *, at7_entry_t *); static void isp_handle_atio(ispsoftc_t *, at_entry_t *); static void isp_handle_atio2(ispsoftc_t *, at2_entry_t *); static void isp_handle_ctio(ispsoftc_t *, ct_entry_t *); static void isp_handle_ctio2(ispsoftc_t *, ct2_entry_t *); static void isp_handle_ctio7(ispsoftc_t *, ct7_entry_t *); static void isp_handle_24xx_inotify(ispsoftc_t *, in_fcentry_24xx_t *); /* * The Qlogic driver gets an interrupt to look at response queue entries. * Some of these are status completions for initiator mode commands, but * if target mode is enabled, we get a whole wad of response queue entries * to be handled here. * * Basically these split into 3 main groups: Lun Enable/Modification responses, * SCSI Command processing, and Immediate Notification events. * * You start by writing a request queue entry to enable target mode (and * establish some resource limitations which you can modify later). * The f/w responds with a LUN ENABLE or LUN MODIFY response with * the status of this action. If the enable was successful, you can expect... * * Response queue entries with SCSI commands encapsulated show up in an ATIO * (Accept Target IO) type- sometimes with enough info to stop the command at * this level. Ultimately the driver has to feed back to the f/w's request * queue a sequence of CTIOs (continue target I/O) that describe data to * be moved and/or status to be sent, and finally finishing with sending * to the f/w's response queue an ATIO which then completes the handshake * with the f/w for that command. There's a lot of variations on this theme, * including flags you can set in the CTIO for the Qlogic 2X00 fibre channel * cards that 'auto-replenish' the f/w's ATIO count, but this is the basic * gist of it. * * The third group that can show up in the response queue are Immediate * Notification events.
These include things like notifications of SCSI bus * resets, or Bus Device Reset messages or other messages received. This * is a classic oddbins area. It can get a little weird because you then turn * around and acknowledge the Immediate Notify by writing an entry onto the * request queue and then the f/w turns around and gives you an acknowledgement * to *your* acknowledgement on the response queue (the idea being to let * the f/w tell you when the event is *really* over I guess). * */ /* * A new response queue entry has arrived. The interrupt service code * has already swizzled it into the platform dependent form from canonical form. * * Because of the way this driver is designed, unfortunately most of the * actual synchronization work has to be done in the platform specific * code- we have no synchronization primitives in the common code. */ int isp_target_notify(ispsoftc_t *isp, void *vptr, uint32_t *optrp) { uint16_t status; uint32_t seqid; union { at_entry_t *atiop; at2_entry_t *at2iop; at2e_entry_t *at2eiop; at7_entry_t *at7iop; ct_entry_t *ctiop; ct2_entry_t *ct2iop; ct2e_entry_t *ct2eiop; ct7_entry_t *ct7iop; lun_entry_t *lunenp; in_entry_t *inotp; in_fcentry_t *inot_fcp; in_fcentry_e_t *inote_fcp; in_fcentry_24xx_t *inot_24xx; na_entry_t *nackp; na_fcentry_t *nack_fcp; na_fcentry_e_t *nacke_fcp; na_fcentry_24xx_t *nack_24xx; isphdr_t *hp; abts_t *abts; abts_rsp_t *abts_rsp; els_t *els; void * *vp; #define atiop unp.atiop #define at2iop unp.at2iop #define at2eiop unp.at2eiop #define at7iop unp.at7iop #define ctiop unp.ctiop #define ct2iop unp.ct2iop #define ct2eiop unp.ct2eiop #define ct7iop unp.ct7iop #define lunenp unp.lunenp #define inotp unp.inotp #define inot_fcp unp.inot_fcp #define inote_fcp unp.inote_fcp #define inot_24xx unp.inot_24xx #define nackp unp.nackp #define nack_fcp unp.nack_fcp #define nacke_fcp unp.nacke_fcp #define nack_24xx unp.nack_24xx #define abts unp.abts #define abts_rsp unp.abts_rsp #define els unp.els #define hdrp unp.hp } unp; uint8_t local[QENTRY_LEN]; uint16_t iid; int bus, type, level, rval = 1; isp_notify_t notify; type = isp_get_response_type(isp, (isphdr_t *)vptr); unp.vp = vptr; ISP_TDQE(isp, "isp_target_notify", (int) *optrp, vptr); switch (type) { case RQSTYPE_ATIO: if (IS_24XX(isp)) { int len; isp_get_atio7(isp, at7iop, (at7_entry_t *) local); at7iop = (at7_entry_t *) local; /* * Check for and do something with commands whose * IULEN extends past a single queue entry. */ len = at7iop->at_ta_len & 0xfffff; if (len > (QENTRY_LEN - 8)) { len -= (QENTRY_LEN - 8); isp_prt(isp, ISP_LOGINFO, "long IU length (%d) ignored", len); while (len > 0) { *optrp = ISP_NXT_QENTRY(*optrp, RESULT_QUEUE_LEN(isp)); len -= QENTRY_LEN; } } /* * Check for a task management function */ if (at7iop->at_cmnd.fcp_cmnd_task_management) { isp_got_tmf_24xx(isp, at7iop); break; } /* * Just go straight to outer layer for this one.
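			 * The ATIO7 already carries the complete FC header
			 * and FCP_CMND IU (at_hdr/at_cmnd), so there is
			 * nothing to synthesize before handing it up.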
*/ isp_async(isp, ISPASYNC_TARGET_ACTION, local); } else { isp_get_atio(isp, atiop, (at_entry_t *) local); isp_handle_atio(isp, (at_entry_t *) local); } break; case RQSTYPE_CTIO: isp_get_ctio(isp, ctiop, (ct_entry_t *) local); isp_handle_ctio(isp, (ct_entry_t *) local); break; case RQSTYPE_ATIO2: if (ISP_CAP_2KLOGIN(isp)) { isp_get_atio2e(isp, at2eiop, (at2e_entry_t *) local); } else { isp_get_atio2(isp, at2iop, (at2_entry_t *) local); } isp_handle_atio2(isp, (at2_entry_t *) local); break; case RQSTYPE_CTIO3: case RQSTYPE_CTIO2: if (ISP_CAP_2KLOGIN(isp)) { isp_get_ctio2e(isp, ct2eiop, (ct2e_entry_t *) local); } else { isp_get_ctio2(isp, ct2iop, (ct2_entry_t *) local); } isp_handle_ctio2(isp, (ct2_entry_t *) local); break; case RQSTYPE_CTIO7: isp_get_ctio7(isp, ct7iop, (ct7_entry_t *) local); isp_handle_ctio7(isp, (ct7_entry_t *) local); break; case RQSTYPE_ENABLE_LUN: case RQSTYPE_MODIFY_LUN: isp_get_enable_lun(isp, lunenp, (lun_entry_t *) local); isp_async(isp, ISPASYNC_TARGET_ACTION, local); break; case RQSTYPE_NOTIFY: bus = 0; if (IS_24XX(isp)) { isp_get_notify_24xx(isp, inot_24xx, (in_fcentry_24xx_t *)local); inot_24xx = (in_fcentry_24xx_t *) local; isp_handle_24xx_inotify(isp, inot_24xx); break; } if (IS_FC(isp)) { if (ISP_CAP_2KLOGIN(isp)) { in_fcentry_e_t *ecp = (in_fcentry_e_t *)local; isp_get_notify_fc_e(isp, inote_fcp, ecp); iid = ecp->in_iid; status = ecp->in_status; seqid = ecp->in_seqid; } else { in_fcentry_t *fcp = (in_fcentry_t *)local; isp_get_notify_fc(isp, inot_fcp, fcp); iid = fcp->in_iid; status = fcp->in_status; seqid = fcp->in_seqid; } } else { in_entry_t *inp = (in_entry_t *)local; isp_get_notify(isp, inotp, inp); status = inp->in_status & 0xff; seqid = inp->in_seqid; iid = inp->in_iid; if (IS_DUALBUS(isp)) { bus = GET_BUS_VAL(inp->in_iid); SET_BUS_VAL(inp->in_iid, 0); } } isp_prt(isp, ISP_LOGTDEBUG0, "Immediate Notify On Bus %d, status=0x%x seqid=0x%x", bus, status, seqid); switch (status) { case IN_MSG_RECEIVED: case IN_IDE_RECEIVED: if (IS_FC(isp)) { isp_got_msg_fc(isp, (in_fcentry_t *)local); } else { isp_got_msg(isp, (in_entry_t *)local); } break; case IN_RSRC_UNAVAIL: isp_prt(isp, ISP_LOGINFO, "Firmware out of ATIOs"); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, local); break; case IN_RESET: ISP_MEMZERO(&notify, sizeof (isp_notify_t)); notify.nt_hba = isp; notify.nt_wwn = INI_ANY; notify.nt_tgt = TGT_ANY; notify.nt_nphdl = iid; notify.nt_sid = PORT_ANY; notify.nt_did = PORT_ANY; notify.nt_lun = LUN_ANY; notify.nt_tagval = TAG_ANY; notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); notify.nt_ncode = NT_BUS_RESET; notify.nt_need_ack = 1; notify.nt_lreserved = local; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; case IN_PORT_LOGOUT: ISP_MEMZERO(&notify, sizeof (isp_notify_t)); notify.nt_hba = isp; notify.nt_wwn = INI_ANY; notify.nt_nphdl = iid; notify.nt_sid = PORT_ANY; notify.nt_did = PORT_ANY; notify.nt_ncode = NT_LOGOUT; notify.nt_need_ack = 1; notify.nt_lreserved = local; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; case IN_ABORT_TASK: ISP_MEMZERO(&notify, sizeof (isp_notify_t)); notify.nt_hba = isp; notify.nt_wwn = INI_ANY; notify.nt_nphdl = iid; notify.nt_sid = PORT_ANY; notify.nt_did = PORT_ANY; notify.nt_ncode = NT_ABORT_TASK; notify.nt_need_ack = 1; notify.nt_lreserved = local; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; case IN_GLOBAL_LOGO: isp_prt(isp, ISP_LOGTINFO, "%s: all ports logged out", __func__); ISP_MEMZERO(&notify, sizeof (isp_notify_t)); notify.nt_hba = isp; notify.nt_wwn = INI_ANY; notify.nt_nphdl = NIL_HANDLE;
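		/* A global logout names no single initiator, so no specific N-port handle applies. */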
notify.nt_sid = PORT_ANY; notify.nt_did = PORT_ANY; notify.nt_ncode = NT_GLOBAL_LOGOUT; notify.nt_need_ack = 1; notify.nt_lreserved = local; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; case IN_PORT_CHANGED: isp_prt(isp, ISP_LOGTINFO, "%s: port changed", __func__); ISP_MEMZERO(&notify, sizeof (isp_notify_t)); notify.nt_hba = isp; notify.nt_wwn = INI_ANY; notify.nt_nphdl = NIL_HANDLE; notify.nt_sid = PORT_ANY; notify.nt_did = PORT_ANY; notify.nt_ncode = NT_CHANGED; notify.nt_need_ack = 1; notify.nt_lreserved = local; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; default: ISP_SNPRINTF(local, sizeof local, "%s: unknown status to RQSTYPE_NOTIFY (0x%x)", __func__, status); isp_print_bytes(isp, local, QENTRY_LEN, vptr); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, local); break; } break; case RQSTYPE_NOTIFY_ACK: /* * The ISP is acknowledging our acknowledgement of an * Immediate Notify entry for some asynchronous event. */ if (IS_24XX(isp)) { isp_get_notify_ack_24xx(isp, nack_24xx, (na_fcentry_24xx_t *) local); nack_24xx = (na_fcentry_24xx_t *) local; if (nack_24xx->na_status != NA_OK) { level = ISP_LOGINFO; } else { level = ISP_LOGTDEBUG1; } isp_prt(isp, level, "Notify Ack Status=0x%x; Subcode 0x%x seqid=0x%x", nack_24xx->na_status, nack_24xx->na_status_subcode, nack_24xx->na_rxid); } else if (IS_FC(isp)) { if (ISP_CAP_2KLOGIN(isp)) { isp_get_notify_ack_fc_e(isp, nacke_fcp, (na_fcentry_e_t *)local); } else { isp_get_notify_ack_fc(isp, nack_fcp, (na_fcentry_t *)local); } nack_fcp = (na_fcentry_t *)local; if (nack_fcp->na_status != NA_OK) { level = ISP_LOGINFO; } else { level = ISP_LOGTDEBUG1; } isp_prt(isp, level, "Notify Ack Status=0x%x seqid 0x%x", nack_fcp->na_status, nack_fcp->na_seqid); } else { isp_get_notify_ack(isp, nackp, (na_entry_t *)local); nackp = (na_entry_t *)local; if (nackp->na_status != NA_OK) { level = ISP_LOGINFO; } else { level = ISP_LOGTDEBUG1; } isp_prt(isp, level, "Notify Ack event 0x%x status=0x%x seqid 0x%x", nackp->na_event, nackp->na_status, nackp->na_seqid); } break; case RQSTYPE_ABTS_RCVD: isp_get_abts(isp, abts, (abts_t *)local); isp_async(isp, ISPASYNC_TARGET_ACTION, &local); break; case RQSTYPE_ABTS_RSP: isp_get_abts_rsp(isp, abts_rsp, (abts_rsp_t *)local); abts_rsp = (abts_rsp_t *) local; if (abts_rsp->abts_rsp_status) { level = ISP_LOGINFO; } else { level = ISP_LOGTDEBUG0; } isp_prt(isp, level, "ABTS RSP response[0x%x]: status=0x%x sub=(0x%x 0x%x)", abts_rsp->abts_rsp_rxid_task, abts_rsp->abts_rsp_status, abts_rsp->abts_rsp_payload.rsp.subcode1, abts_rsp->abts_rsp_payload.rsp.subcode2); break; default: isp_prt(isp, ISP_LOGERR, "%s: unknown entry type 0x%x", __func__, type); rval = 0; break; } #undef atiop #undef at2iop #undef at2eiop #undef at7iop #undef ctiop #undef ct2iop #undef ct2eiop #undef ct7iop #undef lunenp #undef inotp #undef inot_fcp #undef inote_fcp #undef inot_24xx #undef nackp #undef nack_fcp #undef nacke_fcp #undef nack_24xx #undef abts #undef abts_rsp #undef els #undef hdrp return (rval); } /* * Toggle (on/off) target mode for bus/target/lun. * * The caller has checked for overlap and legality. * * Note that not all of bus, target or lun can be paid attention to. * Note also that this action will not be complete until the f/w writes * a response entry. The caller is responsible for synchronizing with this.
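 *
 * A minimal usage sketch (hypothetical counts, not from this source):
 *
 *	if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, lun, 32, 32) != 0)
 *		isp_prt(isp, ISP_LOGERR, "lun enable failed");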
*/ int isp_lun_cmd(ispsoftc_t *isp, int cmd, int bus, int lun, int cmd_cnt, int inot_cnt) { lun_entry_t el; void *outp; ISP_MEMZERO(&el, sizeof (el)); if (IS_DUALBUS(isp)) { el.le_rsvd = (bus & 0x1) << 7; } el.le_cmd_count = (cmd_cnt < 0)? -cmd_cnt : cmd_cnt; el.le_in_count = (inot_cnt < 0)? -inot_cnt : inot_cnt; if (cmd == RQSTYPE_ENABLE_LUN) { if (IS_SCSI(isp)) { el.le_flags = LUN_TQAE|LUN_DISAD; el.le_cdb6len = 12; el.le_cdb7len = 12; } } else if (cmd == RQSTYPE_MODIFY_LUN) { if (cmd_cnt == 0 && inot_cnt == 0) { isp_prt(isp, ISP_LOGWARN, "makes no sense to modify a lun with both command and immediate notify counts as zero"); return (0); } if (cmd_cnt < 0) el.le_ops |= LUN_CCDECR; else el.le_ops |= LUN_CCINCR; if (inot_cnt < 0) el.le_ops |= LUN_INDECR; else el.le_ops |= LUN_ININCR; } else { isp_prt(isp, ISP_LOGWARN, "unknown cmd (0x%x) in %s", cmd, __func__); return (-1); } el.le_header.rqs_entry_type = cmd; el.le_header.rqs_entry_count = 1; if (IS_SCSI(isp)) { el.le_tgt = SDPARAM(isp, bus)->isp_initiator_id; el.le_lun = lun; } else if (ISP_CAP_SCCFW(isp) == 0) { el.le_lun = lun; } el.le_timeout = 30; outp = isp_getrqentry(isp); if (outp == NULL) { isp_prt(isp, ISP_LOGERR, rqo, __func__); return (-1); } isp_put_enable_lun(isp, &el, outp); ISP_TDQE(isp, "isp_lun_cmd", isp->isp_reqidx, &el); ISP_SYNC_REQUEST(isp); return (0); } int isp_target_put_entry(ispsoftc_t *isp, void *ap) { void *outp; uint8_t etype = ((isphdr_t *) ap)->rqs_entry_type; outp = isp_getrqentry(isp); if (outp == NULL) { isp_prt(isp, ISP_LOGWARN, rqo, __func__); return (-1); } switch (etype) { case RQSTYPE_ATIO: isp_put_atio(isp, (at_entry_t *) ap, (at_entry_t *) outp); break; case RQSTYPE_ATIO2: if (ISP_CAP_2KLOGIN(isp)) { isp_put_atio2e(isp, (at2e_entry_t *) ap, (at2e_entry_t *) outp); } else { isp_put_atio2(isp, (at2_entry_t *) ap, (at2_entry_t *) outp); } break; case RQSTYPE_CTIO: isp_put_ctio(isp, (ct_entry_t *) ap, (ct_entry_t *) outp); break; case RQSTYPE_CTIO2: if (ISP_CAP_2KLOGIN(isp)) { isp_put_ctio2e(isp, (ct2e_entry_t *) ap, (ct2e_entry_t *) outp); } else { isp_put_ctio2(isp, (ct2_entry_t *) ap, (ct2_entry_t *) outp); } break; case RQSTYPE_CTIO7: isp_put_ctio7(isp, (ct7_entry_t *) ap, (ct7_entry_t *) outp); break; default: isp_prt(isp, ISP_LOGERR, "%s: Unknown type 0x%x", __func__, etype); return (-1); } ISP_TDQE(isp, __func__, isp->isp_reqidx, ap); ISP_SYNC_REQUEST(isp); return (0); } int isp_target_put_atio(ispsoftc_t *isp, void *arg) { union { at_entry_t _atio; at2_entry_t _atio2; at2e_entry_t _atio2e; } atun; ISP_MEMZERO(&atun, sizeof atun); if (IS_FC(isp)) { at2_entry_t *aep = arg; atun._atio2.at_header.rqs_entry_type = RQSTYPE_ATIO2; atun._atio2.at_header.rqs_entry_count = 1; if (ISP_CAP_SCCFW(isp)) { atun._atio2.at_scclun = aep->at_scclun; } else { atun._atio2.at_lun = (uint8_t) aep->at_lun; } if (ISP_CAP_2KLOGIN(isp)) { atun._atio2e.at_iid = ((at2e_entry_t *)aep)->at_iid; } else { atun._atio2.at_iid = aep->at_iid; } atun._atio2.at_rxid = aep->at_rxid; atun._atio2.at_status = CT_OK; } else { at_entry_t *aep = arg; atun._atio.at_header.rqs_entry_type = RQSTYPE_ATIO; atun._atio.at_header.rqs_entry_count = 1; atun._atio.at_handle = aep->at_handle; atun._atio.at_iid = aep->at_iid; atun._atio.at_tgt = aep->at_tgt; atun._atio.at_lun = aep->at_lun; atun._atio.at_tag_type = aep->at_tag_type; atun._atio.at_tag_val = aep->at_tag_val; atun._atio.at_status = (aep->at_flags & AT_TQAE); atun._atio.at_status |= CT_OK; } return (isp_target_put_entry(isp, &atun)); } /* * Command completion- both for handling cases of 
no resources or * no blackhole driver, or other cases where we have to, inline, * finish the command sanely, or for normal command completion. * * The 'completion' code value has the scsi status byte in the low 8 bits. * If status is a CHECK CONDITION and bit 8 is nonzero, then bits 12..15 have * the sense key and bits 16..23 have the ASCQ and bits 24..31 have the ASC * values. * * NB: the key, asc, ascq, cannot be used for parallel SCSI as it doesn't * NB: inline SCSI sense reporting. As such, we lose this information. XXX. * * For both parallel && fibre channel, we use the feature that does * an automatic resource autoreplenish so we don't have then later do * put of an atio to replenish the f/w's resource count. */ int isp_endcmd(ispsoftc_t *isp, ...) { uint32_t code, hdl; uint8_t sts; union { ct_entry_t _ctio; ct2_entry_t _ctio2; ct2e_entry_t _ctio2e; ct7_entry_t _ctio7; } un; va_list ap; ISP_MEMZERO(&un, sizeof un); if (IS_24XX(isp)) { int vpidx, nphdl; at7_entry_t *aep; ct7_entry_t *cto = &un._ctio7; va_start(ap, isp); aep = va_arg(ap, at7_entry_t *); nphdl = va_arg(ap, int); /* * Note that vpidx may equal 0xff (unknown) here */ vpidx = va_arg(ap, int); code = va_arg(ap, uint32_t); hdl = va_arg(ap, uint32_t); va_end(ap); isp_prt(isp, ISP_LOGTDEBUG0, "%s: [RX_ID 0x%x] chan %d code %x", __func__, aep->at_rxid, vpidx, code); sts = code & 0xff; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7; cto->ct_header.rqs_entry_count = 1; cto->ct_nphdl = nphdl; cto->ct_rxid = aep->at_rxid; cto->ct_iid_lo = (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2]; cto->ct_iid_hi = aep->at_hdr.s_id[0]; cto->ct_oxid = aep->at_hdr.ox_id; cto->ct_scsi_status = sts; cto->ct_vpidx = vpidx; cto->ct_flags = CT7_NOACK; if (code & ECMD_TERMINATE) { cto->ct_flags |= CT7_TERMINATE; } else if (code & ECMD_SVALID) { cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS; cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8); cto->rsp.m1.ct_resplen = cto->ct_senselen = min(16, MAXRESPLEN_24XX); ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp)); cto->rsp.m1.ct_resp[0] = 0xf0; cto->rsp.m1.ct_resp[2] = (code >> 12) & 0xf; cto->rsp.m1.ct_resp[7] = 8; cto->rsp.m1.ct_resp[12] = (code >> 16) & 0xff; cto->rsp.m1.ct_resp[13] = (code >> 24) & 0xff; } else { cto->ct_flags |= CT7_FLAG_MODE1 | CT7_SENDSTATUS; } if (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl) { cto->ct_resid = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl; if (cto->ct_resid < 0) { cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8); } else if (cto->ct_resid > 0) { cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8); } } cto->ct_syshandle = hdl; } else if (IS_FC(isp)) { at2_entry_t *aep; ct2_entry_t *cto = &un._ctio2; va_start(ap, isp); aep = va_arg(ap, at2_entry_t *); code = va_arg(ap, uint32_t); hdl = va_arg(ap, uint32_t); va_end(ap); isp_prt(isp, ISP_LOGTDEBUG0, "%s: [RX_ID 0x%x] code %x", __func__, aep->at_rxid, code); sts = code & 0xff; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; cto->ct_header.rqs_entry_count = 1; if (ISP_CAP_SCCFW(isp) == 0) { cto->ct_lun = aep->at_lun; } if (ISP_CAP_2KLOGIN(isp)) { un._ctio2e.ct_iid = ((at2e_entry_t *)aep)->at_iid; } else { cto->ct_iid = aep->at_iid; } cto->ct_rxid = aep->at_rxid; cto->rsp.m1.ct_scsi_status = sts; cto->ct_flags = CT2_SENDSTATUS | CT2_NO_DATA | CT2_FLAG_MODE1; if (hdl == 0) { cto->ct_flags |= CT2_CCINCR; } if (aep->at_datalen) { cto->ct_resid = aep->at_datalen; cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; } if (sts == SCSI_CHECK && (code & ECMD_SVALID)) { cto->rsp.m1.ct_resp[0] = 0xf0; cto->rsp.m1.ct_resp[2] = (code >> 12) & 0xf; 
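			/*
			 * This builds fixed-format SCSI sense data per the
			 * encoding described in the comment above isp_endcmd:
			 * byte 0 = 0xf0 (current error, valid bit), byte 2
			 * low nibble = sense key (code bits 12..15), byte 7 =
			 * 8 (additional length), byte 12 = ASC (bits 24..31)
			 * and byte 13 = ASCQ (bits 16..23), filled in next.
			 */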
cto->rsp.m1.ct_resp[7] = 8; cto->rsp.m1.ct_resp[12] = (code >> 24) & 0xff; cto->rsp.m1.ct_resp[13] = (code >> 16) & 0xff; cto->rsp.m1.ct_senselen = 16; cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID; } cto->ct_syshandle = hdl; } else { at_entry_t *aep; ct_entry_t *cto = &un._ctio; va_start(ap, isp); aep = va_arg(ap, at_entry_t *); code = va_arg(ap, uint32_t); hdl = va_arg(ap, uint32_t); va_end(ap); isp_prt(isp, ISP_LOGTDEBUG0, "%s: [IID %d] code %x", __func__, aep->at_iid, code); sts = code; cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; cto->ct_header.rqs_entry_count = 1; cto->ct_fwhandle = aep->at_handle; cto->ct_iid = aep->at_iid; cto->ct_tgt = aep->at_tgt; cto->ct_lun = aep->at_lun; cto->ct_tag_type = aep->at_tag_type; cto->ct_tag_val = aep->at_tag_val; if (aep->at_flags & AT_TQAE) { cto->ct_flags |= CT_TQAE; } cto->ct_flags = CT_SENDSTATUS | CT_NO_DATA; if (hdl == 0) { cto->ct_flags |= CT_CCINCR; } cto->ct_scsi_status = sts; cto->ct_syshandle = hdl; } return (isp_target_put_entry(isp, &un)); } /* * These are either broadcast events or specifically CTIO fast completion */ int isp_target_async(ispsoftc_t *isp, int bus, int event) { isp_notify_t notify; ISP_MEMZERO(&notify, sizeof (isp_notify_t)); notify.nt_hba = isp; notify.nt_wwn = INI_ANY; notify.nt_nphdl = NIL_HANDLE; notify.nt_sid = PORT_ANY; notify.nt_did = PORT_ANY; notify.nt_tgt = TGT_ANY; notify.nt_channel = bus; notify.nt_lun = LUN_ANY; notify.nt_tagval = TAG_ANY; notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); switch (event) { case ASYNC_LOOP_UP: case ASYNC_PTPMODE: isp_prt(isp, ISP_LOGTDEBUG0, "%s: LOOP UP", __func__); notify.nt_ncode = NT_LINK_UP; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; case ASYNC_LOOP_DOWN: isp_prt(isp, ISP_LOGTDEBUG0, "%s: LOOP DOWN", __func__); notify.nt_ncode = NT_LINK_DOWN; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; case ASYNC_LIP_ERROR: case ASYNC_LIP_F8: case ASYNC_LIP_OCCURRED: case ASYNC_LOOP_RESET: isp_prt(isp, ISP_LOGTDEBUG0, "%s: LIP RESET", __func__); notify.nt_ncode = NT_LIP_RESET; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; case ASYNC_BUS_RESET: case ASYNC_TIMEOUT_RESET: /* XXX: where does this come from ?
*/ isp_prt(isp, ISP_LOGTDEBUG0, "%s: BUS RESET", __func__); notify.nt_ncode = NT_BUS_RESET; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; case ASYNC_DEVICE_RESET: isp_prt(isp, ISP_LOGTDEBUG0, "%s: DEVICE RESET", __func__); notify.nt_ncode = NT_TARGET_RESET; isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); break; case ASYNC_CTIO_DONE: { uint8_t storage[QENTRY_LEN]; isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO DONE", __func__); memset(storage, 0, QENTRY_LEN); if (IS_24XX(isp)) { ct7_entry_t *ct = (ct7_entry_t *) storage; ct->ct_header.rqs_entry_type = RQSTYPE_CTIO7; ct->ct_nphdl = CT7_OK; ct->ct_syshandle = bus; ct->ct_flags = CT7_SENDSTATUS; } else if (IS_FC(isp)) { /* This should also suffice for 2K login code */ ct2_entry_t *ct = (ct2_entry_t *) storage; ct->ct_header.rqs_entry_type = RQSTYPE_CTIO2; ct->ct_status = CT_OK; ct->ct_syshandle = bus; ct->ct_flags = CT2_SENDSTATUS|CT2_FASTPOST; } else { ct_entry_t *ct = (ct_entry_t *) storage; ct->ct_header.rqs_entry_type = RQSTYPE_CTIO; ct->ct_status = CT_OK; ct->ct_syshandle = bus; /* we skip fwhandle here */ ct->ct_fwhandle = 0; ct->ct_flags = CT_SENDSTATUS; } isp_async(isp, ISPASYNC_TARGET_ACTION, storage); break; } default: isp_prt(isp, ISP_LOGERR, "%s: unknown event 0x%x", __func__, event); if (isp->isp_state == ISP_RUNSTATE) { isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, NULL); } break; } return (0); } /* * Process a received message. * The ISP firmware can handle most messages; there are only * a few that we need to deal with: * - abort: clean up the current command * - abort tag and clear queue */ static void isp_got_msg(ispsoftc_t *isp, in_entry_t *inp) { isp_notify_t notify; uint8_t status = inp->in_status & ~QLTM_SVALID; ISP_MEMZERO(&notify, sizeof (notify)); notify.nt_hba = isp; notify.nt_wwn = INI_ANY; notify.nt_nphdl = GET_IID_VAL(inp->in_iid); notify.nt_sid = PORT_ANY; notify.nt_did = PORT_ANY; notify.nt_channel = GET_BUS_VAL(inp->in_iid); notify.nt_tgt = inp->in_tgt; notify.nt_lun = inp->in_lun; IN_MAKE_TAGID(notify.nt_tagval, inp); notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); notify.nt_lreserved = inp; if (status == IN_IDE_RECEIVED || status == IN_MSG_RECEIVED) { switch (inp->in_msg[0]) { case MSG_ABORT: notify.nt_ncode = NT_ABORT_TASK_SET; break; case MSG_BUS_DEV_RESET: notify.nt_ncode = NT_TARGET_RESET; break; case MSG_ABORT_TAG: notify.nt_ncode = NT_ABORT_TASK; break; case MSG_CLEAR_QUEUE: notify.nt_ncode = NT_CLEAR_TASK_SET; break; case MSG_REL_RECOVERY: notify.nt_ncode = NT_CLEAR_ACA; break; case MSG_TERM_IO_PROC: notify.nt_ncode = NT_ABORT_TASK; break; case MSG_LUN_RESET: notify.nt_ncode = NT_LUN_RESET; break; default: isp_prt(isp, ISP_LOGERR, "%s: unhandled message 0x%x", __func__, inp->in_msg[0]); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inp); return; } isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); } else { isp_prt(isp, ISP_LOGERR, "%s: unknown immediate notify status 0x%x", __func__, inp->in_status); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inp); } } /* * Synthesize a message from the task management flags in a FCP_CMND_IU.
*/ static void isp_got_msg_fc(ispsoftc_t *isp, in_fcentry_t *inp) { isp_notify_t notify; - static const char f1[] = "%s from N-port handle 0x%x lun %d seq 0x%x"; - static const char f2[] = "unknown %s 0x%x lun %d N-Port handle 0x%x task flags 0x%x seq 0x%x\n"; - uint16_t seqid, loopid; + static const char f1[] = "%s from N-port handle 0x%x lun %x seq 0x%x"; + static const char f2[] = "unknown %s 0x%x lun %x N-Port handle 0x%x task flags 0x%x seq 0x%x\n"; + uint16_t seqid, nphdl; ISP_MEMZERO(&notify, sizeof (isp_notify_t)); notify.nt_hba = isp; notify.nt_wwn = INI_ANY; if (ISP_CAP_2KLOGIN(isp)) { notify.nt_nphdl = ((in_fcentry_e_t *)inp)->in_iid; - loopid = ((in_fcentry_e_t *)inp)->in_iid; + nphdl = ((in_fcentry_e_t *)inp)->in_iid; seqid = ((in_fcentry_e_t *)inp)->in_seqid; } else { notify.nt_nphdl = inp->in_iid; - loopid = inp->in_iid; + nphdl = inp->in_iid; seqid = inp->in_seqid; } notify.nt_sid = PORT_ANY; notify.nt_did = PORT_ANY; /* nt_tgt set in outer layers */ if (ISP_CAP_SCCFW(isp)) { notify.nt_lun = inp->in_scclun; #if __FreeBSD_version < 1000700 notify.nt_lun &= 0x3fff; #endif } else { notify.nt_lun = inp->in_lun; } notify.nt_tagval = seqid; notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); notify.nt_need_ack = 1; notify.nt_lreserved = inp; if (inp->in_status != IN_MSG_RECEIVED) { - isp_prt(isp, ISP_LOGINFO, f2, "immediate notify status", inp->in_status, notify.nt_lun, loopid, inp->in_task_flags, inp->in_seqid); + isp_prt(isp, ISP_LOGINFO, f2, "immediate notify status", inp->in_status, notify.nt_lun, nphdl, inp->in_task_flags, inp->in_seqid); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inp); return; } if (inp->in_task_flags & TASK_FLAGS_ABORT_TASK_SET) { - isp_prt(isp, ISP_LOGINFO, f1, "ABORT TASK SET", loopid, notify.nt_lun, inp->in_seqid); + isp_prt(isp, ISP_LOGINFO, f1, "ABORT TASK SET", nphdl, notify.nt_lun, inp->in_seqid); notify.nt_ncode = NT_ABORT_TASK_SET; } else if (inp->in_task_flags & TASK_FLAGS_CLEAR_TASK_SET) { - isp_prt(isp, ISP_LOGINFO, f1, "CLEAR TASK SET", loopid, notify.nt_lun, inp->in_seqid); + isp_prt(isp, ISP_LOGINFO, f1, "CLEAR TASK SET", nphdl, notify.nt_lun, inp->in_seqid); notify.nt_ncode = NT_CLEAR_TASK_SET; } else if (inp->in_task_flags & TASK_FLAGS_LUN_RESET) { - isp_prt(isp, ISP_LOGINFO, f1, "LUN RESET", loopid, notify.nt_lun, inp->in_seqid); + isp_prt(isp, ISP_LOGINFO, f1, "LUN RESET", nphdl, notify.nt_lun, inp->in_seqid); notify.nt_ncode = NT_LUN_RESET; } else if (inp->in_task_flags & TASK_FLAGS_TARGET_RESET) { - isp_prt(isp, ISP_LOGINFO, f1, "TARGET RESET", loopid, notify.nt_lun, inp->in_seqid); + isp_prt(isp, ISP_LOGINFO, f1, "TARGET RESET", nphdl, notify.nt_lun, inp->in_seqid); notify.nt_ncode = NT_TARGET_RESET; } else if (inp->in_task_flags & TASK_FLAGS_CLEAR_ACA) { - isp_prt(isp, ISP_LOGINFO, f1, "CLEAR ACA", loopid, notify.nt_lun, inp->in_seqid); + isp_prt(isp, ISP_LOGINFO, f1, "CLEAR ACA", nphdl, notify.nt_lun, inp->in_seqid); notify.nt_ncode = NT_CLEAR_ACA; } else { - isp_prt(isp, ISP_LOGWARN, f2, "task flag", inp->in_status, notify.nt_lun, loopid, inp->in_task_flags, inp->in_seqid); + isp_prt(isp, ISP_LOGWARN, f2, "task flag", inp->in_status, notify.nt_lun, nphdl, inp->in_task_flags, inp->in_seqid); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inp); return; } isp_async(isp, ISPASYNC_TARGET_NOTIFY, &notify); } static void isp_got_tmf_24xx(ispsoftc_t *isp, at7_entry_t *aep) { isp_notify_t notify; - static const char f1[] = "%s from PortID 0x%06x lun %d seq 0x%08x"; - static const char f2[] = "unknown Task Flag 0x%x lun %d PortID 0x%x tag
0x%08x"; + static const char f1[] = "%s from PortID 0x%06x lun %x seq 0x%08x"; + static const char f2[] = "unknown Task Flag 0x%x lun %x PortID 0x%x tag 0x%08x"; uint16_t chan; uint32_t sid, did; ISP_MEMZERO(¬ify, sizeof (isp_notify_t)); notify.nt_hba = isp; notify.nt_wwn = INI_ANY; notify.nt_lun = (aep->at_cmnd.fcp_cmnd_lun[0] << 8) | (aep->at_cmnd.fcp_cmnd_lun[1]); notify.nt_tagval = aep->at_rxid; notify.nt_tagval |= (((uint64_t)(isp->isp_serno++)) << 32); notify.nt_lreserved = aep; sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | (aep->at_hdr.s_id[2]); /* Channel has to derived from D_ID */ did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2]; for (chan = 0; chan < isp->isp_nchan; chan++) { if (FCPARAM(isp, chan)->isp_portid == did) { break; } } if (chan == isp->isp_nchan) { isp_prt(isp, ISP_LOGWARN, "%s: D_ID 0x%x not found on any channel", __func__, did); /* just drop on the floor */ return; } notify.nt_nphdl = NIL_HANDLE; /* unknown here */ notify.nt_sid = sid; notify.nt_did = did; notify.nt_channel = chan; if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_QUERY_TASK_SET) { isp_prt(isp, ISP_LOGINFO, f1, "QUERY TASK SET", sid, notify.nt_lun, aep->at_rxid); notify.nt_ncode = NT_QUERY_TASK_SET; } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_ABORT_TASK_SET) { isp_prt(isp, ISP_LOGINFO, f1, "ABORT TASK SET", sid, notify.nt_lun, aep->at_rxid); notify.nt_ncode = NT_ABORT_TASK_SET; } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_CLEAR_TASK_SET) { isp_prt(isp, ISP_LOGINFO, f1, "CLEAR TASK SET", sid, notify.nt_lun, aep->at_rxid); notify.nt_ncode = NT_CLEAR_TASK_SET; } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_QUERY_ASYNC_EVENT) { isp_prt(isp, ISP_LOGINFO, f1, "QUERY ASYNC EVENT", sid, notify.nt_lun, aep->at_rxid); notify.nt_ncode = NT_QUERY_ASYNC_EVENT; } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_LUN_RESET) { isp_prt(isp, ISP_LOGINFO, f1, "LUN RESET", sid, notify.nt_lun, aep->at_rxid); notify.nt_ncode = NT_LUN_RESET; } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_TGT_RESET) { isp_prt(isp, ISP_LOGINFO, f1, "TARGET RESET", sid, notify.nt_lun, aep->at_rxid); notify.nt_ncode = NT_TARGET_RESET; } else if (aep->at_cmnd.fcp_cmnd_task_management & FCP_CMND_TMF_CLEAR_ACA) { isp_prt(isp, ISP_LOGINFO, f1, "CLEAR ACA", sid, notify.nt_lun, aep->at_rxid); notify.nt_ncode = NT_CLEAR_ACA; } else { isp_prt(isp, ISP_LOGWARN, f2, aep->at_cmnd.fcp_cmnd_task_management, notify.nt_lun, sid, aep->at_rxid); notify.nt_ncode = NT_UNKNOWN; return; } isp_async(isp, ISPASYNC_TARGET_NOTIFY, ¬ify); } int isp_notify_ack(ispsoftc_t *isp, void *arg) { char storage[QENTRY_LEN]; void *outp; /* * This is in case a Task Management Function ends up here. 
*/ if (IS_24XX(isp) && arg != NULL && (((isphdr_t *)arg)->rqs_entry_type == RQSTYPE_ATIO)) { at7_entry_t *aep = arg; return (isp_endcmd(isp, aep, NIL_HANDLE, 0, 0, 0)); } outp = isp_getrqentry(isp); if (outp == NULL) { isp_prt(isp, ISP_LOGWARN, rqo, __func__); return (1); } ISP_MEMZERO(storage, QENTRY_LEN); if (IS_24XX(isp)) { na_fcentry_24xx_t *na = (na_fcentry_24xx_t *) storage; na->na_header.rqs_entry_type = RQSTYPE_NOTIFY_ACK; na->na_header.rqs_entry_count = 1; if (arg) { in_fcentry_24xx_t *in = arg; na->na_nphdl = in->in_nphdl; na->na_flags = in->in_flags; na->na_status = in->in_status; na->na_status_subcode = in->in_status_subcode; na->na_fwhandle = in->in_fwhandle; na->na_rxid = in->in_rxid; na->na_oxid = in->in_oxid; na->na_vpidx = in->in_vpidx; if (in->in_status == IN24XX_SRR_RCVD) { na->na_srr_rxid = in->in_srr_rxid; na->na_srr_reloff_hi = in->in_srr_reloff_hi; na->na_srr_reloff_lo = in->in_srr_reloff_lo; na->na_srr_iu = in->in_srr_iu; /* * Whether we're accepting the SRR or rejecting * it is determined by looking at the in_reserved * field in the original notify structure. */ if (in->in_reserved) { na->na_srr_flags = 1; na->na_srr_reject_vunique = 0; na->na_srr_reject_code = 9; /* unable to perform this command at this time */ na->na_srr_reject_explanation = 0x2a; /* unable to supply the requested data */ } } } isp_put_notify_24xx_ack(isp, na, (na_fcentry_24xx_t *)outp); } else if (IS_FC(isp)) { na_fcentry_t *na = (na_fcentry_t *) storage; int iid = 0; if (arg) { in_fcentry_t *inp = arg; ISP_MEMCPY(storage, arg, sizeof (isphdr_t)); if (ISP_CAP_2KLOGIN(isp)) { ((na_fcentry_e_t *)na)->na_iid = ((in_fcentry_e_t *)inp)->in_iid; iid = ((na_fcentry_e_t *)na)->na_iid; } else { na->na_iid = inp->in_iid; iid = na->na_iid; } na->na_task_flags = inp->in_task_flags & TASK_FLAGS_RESERVED_MASK; na->na_seqid = inp->in_seqid; na->na_status = inp->in_status; na->na_flags = NAFC_RCOUNT; if (inp->in_status == IN_RESET) { na->na_flags = NAFC_RST_CLRD; /* We do not modify resource counts for LIP resets */ } if (inp->in_status == IN_MSG_RECEIVED) { na->na_flags |= NAFC_TVALID; na->na_response = 0; /* XXX SUCCEEDED XXX */ } } else { na->na_flags = NAFC_RST_CLRD; } na->na_header.rqs_entry_type = RQSTYPE_NOTIFY_ACK; na->na_header.rqs_entry_count = 1; if (ISP_CAP_2KLOGIN(isp)) { isp_put_notify_ack_fc_e(isp, (na_fcentry_e_t *) na, (na_fcentry_e_t *)outp); } else { isp_put_notify_ack_fc(isp, na, (na_fcentry_t *)outp); } - isp_prt(isp, ISP_LOGTDEBUG0, "notify ack loopid %u seqid %x flags %x tflags %x response %x", iid, na->na_seqid, + isp_prt(isp, ISP_LOGTDEBUG0, "notify ack handle %x seqid %x flags %x tflags %x response %x", iid, na->na_seqid, na->na_flags, na->na_task_flags, na->na_response); } else { na_entry_t *na = (na_entry_t *) storage; if (arg) { in_entry_t *inp = arg; ISP_MEMCPY(storage, arg, sizeof (isphdr_t)); na->na_iid = inp->in_iid; na->na_lun = inp->in_lun; na->na_tgt = inp->in_tgt; na->na_seqid = inp->in_seqid; if (inp->in_status == IN_RESET) { na->na_event = NA_RST_CLRD; } } else { na->na_event = NA_RST_CLRD; } na->na_header.rqs_entry_type = RQSTYPE_NOTIFY_ACK; na->na_header.rqs_entry_count = 1; isp_put_notify_ack(isp, na, (na_entry_t *)outp); - isp_prt(isp, ISP_LOGTDEBUG0, "notify ack loopid %u lun %u tgt %u seqid %x event %x", na->na_iid, na->na_lun, na->na_tgt, na->na_seqid, na->na_event); + isp_prt(isp, ISP_LOGTDEBUG0, "notify ack handle %x lun %x tgt %u seqid %x event %x", na->na_iid, na->na_lun, na->na_tgt, na->na_seqid, na->na_event); } ISP_TDQE(isp, "isp_notify_ack", 
isp->isp_reqidx, storage); ISP_SYNC_REQUEST(isp); return (0); } int isp_acknak_abts(ispsoftc_t *isp, void *arg, int errno) { char storage[QENTRY_LEN]; uint16_t tmpw; uint8_t tmpb; abts_t *abts = arg; abts_rsp_t *rsp = (abts_rsp_t *) storage; void *outp; if (!IS_24XX(isp)) { isp_prt(isp, ISP_LOGERR, "%s: called for non-24XX card", __func__); return (0); } if (abts->abts_header.rqs_entry_type != RQSTYPE_ABTS_RCVD) { isp_prt(isp, ISP_LOGERR, "%s: called for non-ABTS entry (0x%x)", __func__, abts->abts_header.rqs_entry_type); return (0); } outp = isp_getrqentry(isp); if (outp == NULL) { isp_prt(isp, ISP_LOGWARN, rqo, __func__); return (1); } ISP_MEMCPY(rsp, abts, QENTRY_LEN); rsp->abts_rsp_header.rqs_entry_type = RQSTYPE_ABTS_RSP; /* * Swap destination and source for response. */ rsp->abts_rsp_r_ctl = BA_ACC; tmpw = rsp->abts_rsp_did_lo; tmpb = rsp->abts_rsp_did_hi; rsp->abts_rsp_did_lo = rsp->abts_rsp_sid_lo; rsp->abts_rsp_did_hi = rsp->abts_rsp_sid_hi; rsp->abts_rsp_sid_lo = tmpw; rsp->abts_rsp_sid_hi = tmpb; rsp->abts_rsp_f_ctl_hi ^= 0x80; /* invert Exchange Context */ rsp->abts_rsp_f_ctl_hi &= ~0x7f; /* clear Sequence Initiator and other bits */ rsp->abts_rsp_f_ctl_hi |= 0x10; /* abort the whole exchange */ rsp->abts_rsp_f_ctl_hi |= 0x8; /* last data frame of sequence */ rsp->abts_rsp_f_ctl_hi |= 0x1; /* transfer Sequence Initiative */ rsp->abts_rsp_f_ctl_lo = 0; if (errno == 0) { uint16_t rx_id, ox_id; rx_id = rsp->abts_rsp_rx_id; ox_id = rsp->abts_rsp_ox_id; ISP_MEMZERO(&rsp->abts_rsp_payload.ba_acc, sizeof (rsp->abts_rsp_payload.ba_acc)); isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS of 0x%x being BA_ACC'd", rsp->abts_rsp_rxid_abts, rsp->abts_rsp_rxid_task); rsp->abts_rsp_payload.ba_acc.aborted_rx_id = rx_id; rsp->abts_rsp_payload.ba_acc.aborted_ox_id = ox_id; rsp->abts_rsp_payload.ba_acc.high_seq_cnt = 0xffff; } else { ISP_MEMZERO(&rsp->abts_rsp_payload.ba_rjt, sizeof (rsp->abts_rsp_payload.ba_acc)); switch (errno) { case ENOMEM: rsp->abts_rsp_payload.ba_rjt.reason = 5; /* Logical Unit Busy */ break; default: rsp->abts_rsp_payload.ba_rjt.reason = 9; /* Unable to perform command request */ break; } } /* * The caller will have set response values as appropriate * in the ABTS structure just before calling us. */ isp_put_abts_rsp(isp, rsp, (abts_rsp_t *)outp); ISP_TDQE(isp, "isp_acknak_abts", isp->isp_reqidx, storage); ISP_SYNC_REQUEST(isp); return (0); } static void isp_handle_atio(ispsoftc_t *isp, at_entry_t *aep) { int lun; lun = aep->at_lun; /* * The firmware status (except for the QLTM_SVALID bit) indicates * why this ATIO was sent to us. * * If QLTM_SVALID is set, the firmware has recommended Sense Data. * * If the DISCONNECTS DISABLED bit is set in the flags field, * we're still connected on the SCSI bus - i.e. the initiator * did not set DiscPriv in the identify message. We don't care * about this so it's ignored. */ switch (aep->at_status & ~QLTM_SVALID) { case AT_PATH_INVALID: /* * ATIO rejected by the firmware due to disabled lun. */ - isp_prt(isp, ISP_LOGERR, "rejected ATIO for disabled lun %d", lun); + isp_prt(isp, ISP_LOGERR, "rejected ATIO for disabled lun %x", lun); break; case AT_NOCAP: /* * Requested Capability not available * We sent an ATIO that overflowed the firmware's * command resource count. 
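 *
 * (This handler only logs the condition. A hypothetical recovery,
 * sketched here and not part of this change, would be to hand the
 * entry back to the firmware once resources free up, raising its
 * resource count the same way the default case below does:
 *
 *	(void) isp_target_put_atio(isp, aep);
 *
 * )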
*/ - isp_prt(isp, ISP_LOGERR, "rejected ATIO for lun %d because of command count overflow", lun); + isp_prt(isp, ISP_LOGERR, "rejected ATIO for lun %x because of command count overflow", lun); break; case AT_BDR_MSG: /* * If we send an ATIO to the firmware to increment * its command resource count, and the firmware is * recovering from a Bus Device Reset, it returns * the ATIO with this status. We set the command * resource count in the Enable Lun entry and do * not increment it. Therefore we should never get * this status here. */ isp_prt(isp, ISP_LOGERR, atiocope, lun, GET_BUS_VAL(aep->at_iid)); break; case AT_CDB: /* Got a CDB */ case AT_PHASE_ERROR: /* Bus Phase Sequence Error */ /* * Punt to platform specific layer. */ isp_async(isp, ISPASYNC_TARGET_ACTION, aep); break; case AT_RESET: /* * A bus reset came along and blew away this command. Why * they do this in addition the async event code stuff, * I dunno. * * Ignore it because the async event will clear things * up for us. */ isp_prt(isp, ISP_LOGWARN, atior, lun, GET_IID_VAL(aep->at_iid), GET_BUS_VAL(aep->at_iid)); break; default: - isp_prt(isp, ISP_LOGERR, "Unknown ATIO status 0x%x from loopid %d for lun %d", aep->at_status, aep->at_iid, lun); + isp_prt(isp, ISP_LOGERR, "Unknown ATIO status 0x%x from handle %x for lun %x", aep->at_status, aep->at_iid, lun); (void) isp_target_put_atio(isp, aep); break; } } static void isp_handle_atio2(ispsoftc_t *isp, at2_entry_t *aep) { int lun, iid; if (ISP_CAP_SCCFW(isp)) { lun = aep->at_scclun; #if __FreeBSD_version < 1000700 lun &= 0x3fff; #endif } else { lun = aep->at_lun; } if (ISP_CAP_2KLOGIN(isp)) { iid = ((at2e_entry_t *)aep)->at_iid; } else { iid = aep->at_iid; } /* * The firmware status (except for the QLTM_SVALID bit) indicates * why this ATIO was sent to us. * * If QLTM_SVALID is set, the firware has recommended Sense Data. * * If the DISCONNECTS DISABLED bit is set in the flags field, * we're still connected on the SCSI bus - i.e. the initiator * did not set DiscPriv in the identify message. We don't care * about this so it's ignored. */ switch (aep->at_status & ~QLTM_SVALID) { case AT_PATH_INVALID: /* * ATIO rejected by the firmware due to disabled lun. */ isp_prt(isp, ISP_LOGERR, "rejected ATIO2 for disabled lun %x", lun); break; case AT_NOCAP: /* * Requested Capability not available * We sent an ATIO that overflowed the firmware's * command resource count. */ isp_prt(isp, ISP_LOGERR, "rejected ATIO2 for lun %x- command count overflow", lun); break; case AT_BDR_MSG: /* * If we send an ATIO to the firmware to increment * its command resource count, and the firmware is * recovering from a Bus Device Reset, it returns * the ATIO with this status. We set the command * resource count in the Enable Lun entry and no * not increment it. Therefore we should never get * this status here. */ isp_prt(isp, ISP_LOGERR, atiocope, lun, 0); break; case AT_CDB: /* Got a CDB */ /* * Punt to platform specific layer. */ isp_async(isp, ISPASYNC_TARGET_ACTION, aep); break; case AT_RESET: /* * A bus reset came along an blew away this command. Why * they do this in addition the async event code stuff, * I dunno. * * Ignore it because the async event will clear things * up for us. 
*/ isp_prt(isp, ISP_LOGERR, atior, lun, iid, 0); break; default: - isp_prt(isp, ISP_LOGERR, "Unknown ATIO2 status 0x%x from loopid %d for lun %x", aep->at_status, iid, lun); + isp_prt(isp, ISP_LOGERR, "Unknown ATIO2 status 0x%x from handle %d for lun %x", aep->at_status, iid, lun); (void) isp_target_put_atio(isp, aep); break; } } static void isp_handle_ctio(ispsoftc_t *isp, ct_entry_t *ct) { void *xs; int pl = ISP_LOGTDEBUG2; char *fmsg = NULL; if (ct->ct_syshandle) { xs = isp_find_xs_tgt(isp, ct->ct_syshandle); if (xs == NULL) { pl = ISP_LOGALL; } } else { xs = NULL; } switch (ct->ct_status & ~QLTM_SVALID) { case CT_OK: /* * There are generally 3 possibilities as to why we'd get * this condition: * We disconnected after receiving a CDB. * We sent or received data. * We sent status & command complete. */ if (ct->ct_flags & CT_SENDSTATUS) { break; } else if ((ct->ct_flags & CT_DATAMASK) == CT_NO_DATA) { /* * Nothing to do in this case. */ isp_prt(isp, pl, "CTIO- iid %d disconnected OK", ct->ct_iid); return; } break; case CT_BDR_MSG: /* * Bus Device Reset message received or the SCSI Bus has * been Reset; the firmware has gone to Bus Free. * * The firmware generates an async mailbox interrupt to * notify us of this and returns outstanding CTIOs with this * status. These CTIOs are handled in that same way as * CT_ABORTED ones, so just fall through here. */ fmsg = "Bus Device Reset"; /*FALLTHROUGH*/ case CT_RESET: if (fmsg == NULL) fmsg = "Bus Reset"; /*FALLTHROUGH*/ case CT_ABORTED: /* * When an Abort message is received the firmware goes to * Bus Free and returns all outstanding CTIOs with the status * set, then sends us an Immediate Notify entry. */ if (fmsg == NULL) fmsg = "ABORT TAG message sent by Initiator"; isp_prt(isp, ISP_LOGTDEBUG0, "CTIO destroyed by %s", fmsg); break; case CT_INVAL: /* * CTIO rejected by the firmware due to disabled lun. * "Cannot Happen". */ - isp_prt(isp, ISP_LOGERR, "Firmware rejected CTIO for disabled lun %d", ct->ct_lun); + isp_prt(isp, ISP_LOGERR, "Firmware rejected CTIO for disabled lun %x", ct->ct_lun); break; case CT_NOPATH: /* * CTIO rejected by the firmware due "no path for the * nondisconnecting nexus specified". This means that * we tried to access the bus while a non-disconnecting * command is in process. */ - isp_prt(isp, ISP_LOGERR, "Firmware rejected CTIO for bad nexus %d/%d/%d", ct->ct_iid, ct->ct_tgt, ct->ct_lun); + isp_prt(isp, ISP_LOGERR, "Firmware rejected CTIO for bad nexus %d/%d/%x", ct->ct_iid, ct->ct_tgt, ct->ct_lun); break; case CT_RSELTMO: fmsg = "Reselection"; /*FALLTHROUGH*/ case CT_TIMEOUT: if (fmsg == NULL) fmsg = "Command"; isp_prt(isp, ISP_LOGWARN, "Firmware timed out on %s", fmsg); break; case CT_PANIC: if (fmsg == NULL) fmsg = "Unrecoverable Error"; /*FALLTHROUGH*/ case CT_ERR: if (fmsg == NULL) fmsg = "Completed with Error"; /*FALLTHROUGH*/ case CT_PHASE_ERROR: if (fmsg == NULL) fmsg = "Phase Sequence Error"; /*FALLTHROUGH*/ case CT_TERMINATED: if (fmsg == NULL) fmsg = "terminated by TERMINATE TRANSFER"; /*FALLTHROUGH*/ case CT_NOACK: if (fmsg == NULL) fmsg = "unacknowledged Immediate Notify pending"; isp_prt(isp, ISP_LOGERR, "CTIO returned by f/w- %s", fmsg); break; default: isp_prt(isp, ISP_LOGERR, "Unknown CTIO status 0x%x", ct->ct_status & ~QLTM_SVALID); break; } if (xs == NULL) { /* * There may be more than one CTIO for a data transfer, * or this may be a status CTIO we're not monitoring. * * The assumption is that they'll all be returned in the * order we got them. 
*/ if (ct->ct_syshandle == 0) { if ((ct->ct_flags & CT_SENDSTATUS) == 0) { isp_prt(isp, pl, "intermediate CTIO completed ok"); } else { isp_prt(isp, pl, "unmonitored CTIO completed ok"); } } else { isp_prt(isp, pl, "NO xs for CTIO (handle 0x%x) status 0x%x", ct->ct_syshandle, ct->ct_status & ~QLTM_SVALID); } } else { /* * Final CTIO completed. Release DMA resources and * notify platform dependent layers. */ if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) { ISP_DMAFREE(isp, xs, ct->ct_syshandle); } isp_prt(isp, pl, "final CTIO complete"); /* * The platform layer will destroy the handle if appropriate. */ isp_async(isp, ISPASYNC_TARGET_ACTION, ct); } } static void isp_handle_ctio2(ispsoftc_t *isp, ct2_entry_t *ct) { void *xs; int pl = ISP_LOGTDEBUG2; char *fmsg = NULL; if (ct->ct_syshandle) { xs = isp_find_xs_tgt(isp, ct->ct_syshandle); if (xs == NULL) { pl = ISP_LOGALL; } } else { xs = NULL; } switch (ct->ct_status & ~QLTM_SVALID) { case CT_BUS_ERROR: isp_prt(isp, ISP_LOGERR, "PCI DMA Bus Error"); /* FALL Through */ case CT_DATA_OVER: case CT_DATA_UNDER: case CT_OK: /* * There are generally 2 possibilities as to why we'd get * this condition: * We sent or received data. * We sent status & command complete. */ break; case CT_BDR_MSG: /* * Target Reset function received. * * The firmware generates an async mailbox interrupt to * notify us of this and returns outstanding CTIOs with this * status. These CTIOs are handled in that same way as * CT_ABORTED ones, so just fall through here. */ fmsg = "TARGET RESET"; /*FALLTHROUGH*/ case CT_RESET: if (fmsg == NULL) fmsg = "LIP Reset"; /*FALLTHROUGH*/ case CT_ABORTED: /* * When an Abort message is received the firmware goes to * Bus Free and returns all outstanding CTIOs with the status * set, then sends us an Immediate Notify entry. */ if (fmsg == NULL) { fmsg = "ABORT"; } isp_prt(isp, ISP_LOGTDEBUG0, "CTIO2 destroyed by %s: RX_ID=0x%x", fmsg, ct->ct_rxid); break; case CT_INVAL: /* * CTIO rejected by the firmware - invalid data direction. */ isp_prt(isp, ISP_LOGERR, "CTIO2 had wrong data direction"); break; case CT_RSELTMO: fmsg = "failure to reconnect to initiator"; /*FALLTHROUGH*/ case CT_TIMEOUT: if (fmsg == NULL) fmsg = "command"; isp_prt(isp, ISP_LOGWARN, "Firmware timed out on %s", fmsg); break; case CT_ERR: fmsg = "Completed with Error"; /*FALLTHROUGH*/ case CT_LOGOUT: if (fmsg == NULL) fmsg = "Port Logout"; /*FALLTHROUGH*/ case CT_PORTUNAVAIL: if (fmsg == NULL) fmsg = "Port not available"; /*FALLTHROUGH*/ case CT_PORTCHANGED: if (fmsg == NULL) fmsg = "Port Changed"; /*FALLTHROUGH*/ case CT_NOACK: if (fmsg == NULL) fmsg = "unacknowledged Immediate Notify pending"; isp_prt(isp, ISP_LOGWARN, "CTIO returned by f/w- %s", fmsg); break; case CT_INVRXID: /* * CTIO rejected by the firmware because an invalid RX_ID. * Just print a message. */ isp_prt(isp, ISP_LOGWARN, "CTIO2 completed with Invalid RX_ID 0x%x", ct->ct_rxid); break; default: isp_prt(isp, ISP_LOGERR, "Unknown CTIO2 status 0x%x", ct->ct_status & ~QLTM_SVALID); break; } if (xs == NULL) { /* * There may be more than one CTIO for a data transfer, * or this may be a status CTIO we're not monitoring. * * The assumption is that they'll all be returned in the * order we got them. 
*/ if (ct->ct_syshandle == 0) { if ((ct->ct_flags & CT2_SENDSTATUS) == 0) { isp_prt(isp, pl, "intermediate CTIO completed ok"); } else { isp_prt(isp, pl, "unmonitored CTIO completed ok"); } } else { isp_prt(isp, pl, "NO xs for CTIO (handle 0x%x) status 0x%x", ct->ct_syshandle, ct->ct_status & ~QLTM_SVALID); } } else { if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) { ISP_DMAFREE(isp, xs, ct->ct_syshandle); } if (ct->ct_flags & CT2_SENDSTATUS) { /* * Sent status and command complete. * * We're now really done with this command, so we * punt to the platform dependent layers because * only there can we do the appropriate command * complete thread synchronization. */ isp_prt(isp, pl, "status CTIO complete"); } else { /* * Final CTIO completed. Release DMA resources and * notify platform dependent layers. */ isp_prt(isp, pl, "data CTIO complete"); } isp_async(isp, ISPASYNC_TARGET_ACTION, ct); /* * The platform layer will destroy the handle if appropriate. */ } } static void isp_handle_ctio7(ispsoftc_t *isp, ct7_entry_t *ct) { void *xs; int pl = ISP_LOGTDEBUG2; char *fmsg = NULL; if (ct->ct_syshandle) { xs = isp_find_xs_tgt(isp, ct->ct_syshandle); if (xs == NULL) { pl = ISP_LOGALL; } } else { xs = NULL; } switch (ct->ct_nphdl) { case CT7_BUS_ERROR: isp_prt(isp, ISP_LOGERR, "PCI DMA Bus Error"); /* FALL Through */ case CT7_DATA_OVER: case CT7_DATA_UNDER: case CT7_OK: /* * There are generally 2 possibilities as to why we'd get * this condition: * We sent or received data. * We sent status & command complete. */ break; case CT7_RESET: if (fmsg == NULL) { fmsg = "LIP Reset"; } /*FALLTHROUGH*/ case CT7_ABORTED: /* * When an Abort message is received the firmware goes to * Bus Free and returns all outstanding CTIOs with the status * set, then sends us an Immediate Notify entry. */ if (fmsg == NULL) { fmsg = "ABORT"; } isp_prt(isp, ISP_LOGTDEBUG0, "CTIO7 destroyed by %s: RX_ID=0x%x", fmsg, ct->ct_rxid); break; case CT7_TIMEOUT: if (fmsg == NULL) { fmsg = "command"; } isp_prt(isp, ISP_LOGWARN, "Firmware timed out on %s", fmsg); break; case CT7_ERR: fmsg = "Completed with Error"; /*FALLTHROUGH*/ case CT7_LOGOUT: if (fmsg == NULL) { fmsg = "Port Logout"; } /*FALLTHROUGH*/ case CT7_PORTUNAVAIL: if (fmsg == NULL) { fmsg = "Port not available"; } /*FALLTHROUGH*/ case CT7_PORTCHANGED: if (fmsg == NULL) { fmsg = "Port Changed"; } isp_prt(isp, ISP_LOGWARN, "CTIO returned by f/w- %s", fmsg); break; case CT7_INVRXID: /* * CTIO rejected by the firmware because an invalid RX_ID. * Just print a message. */ isp_prt(isp, ISP_LOGWARN, "CTIO7 completed with Invalid RX_ID 0x%x", ct->ct_rxid); break; case CT7_REASSY_ERR: isp_prt(isp, ISP_LOGWARN, "reassembly error"); break; case CT7_SRR: isp_prt(isp, ISP_LOGTDEBUG0, "SRR received"); break; default: isp_prt(isp, ISP_LOGERR, "Unknown CTIO7 status 0x%x", ct->ct_nphdl); break; } if (xs == NULL) { /* * There may be more than one CTIO for a data transfer, * or this may be a status CTIO we're not monitoring. * * The assumption is that they'll all be returned in the * order we got them. 
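 *
 * A small classifier sketch (hypothetical helper, mirroring the
 * checks just below) of how a returned CTIO7 is categorized:
 *
 *	static const char *
 *	ctio7_kind(const ct7_entry_t *ct)
 *	{
 *		if (ct->ct_syshandle == 0) {
 *			if (ct->ct_flags & CT7_TERMINATE)
 *				return ("termination complete");
 *			return ((ct->ct_flags & CT7_SENDSTATUS) ?
 *			    "unmonitored" : "intermediate data");
 *		}
 *		return ((ct->ct_flags & CT7_SENDSTATUS) ?
 *		    "final status" : "final data");
 *	}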
*/ if (ct->ct_syshandle == 0) { if (ct->ct_flags & CT7_TERMINATE) { isp_prt(isp, ISP_LOGINFO, "termination of [RX_ID 0x%x] complete", ct->ct_rxid); } else if ((ct->ct_flags & CT7_SENDSTATUS) == 0) { isp_prt(isp, pl, "intermediate CTIO completed ok"); } else { isp_prt(isp, pl, "unmonitored CTIO completed ok"); } } else { isp_prt(isp, pl, "NO xs for CTIO (handle 0x%x) status 0x%x", ct->ct_syshandle, ct->ct_nphdl); } } else { if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) { ISP_DMAFREE(isp, xs, ct->ct_syshandle); } if (ct->ct_flags & CT7_SENDSTATUS) { /* * Sent status and command complete. * * We're now really done with this command, so we * punt to the platform dependent layers because * only there can we do the appropriate command * complete thread synchronization. */ isp_prt(isp, pl, "status CTIO complete"); } else { /* * Final CTIO completed. Release DMA resources and * notify platform dependent layers. */ isp_prt(isp, pl, "data CTIO complete"); } isp_async(isp, ISPASYNC_TARGET_ACTION, ct); /* * The platform layer will destroy the handle if appropriate. */ } } static void isp_handle_24xx_inotify(ispsoftc_t *isp, in_fcentry_24xx_t *inot_24xx) { uint8_t ochan, chan, lochan, hichan; /* * Check to see whether we got a wildcard channel. * If so, we have to iterate over all channels. */ ochan = chan = ISP_GET_VPIDX(isp, inot_24xx->in_vpidx); if (chan == 0xff) { lochan = 0; hichan = isp->isp_nchan; } else { if (chan >= isp->isp_nchan) { char buf[64]; ISP_SNPRINTF(buf, sizeof buf, "%s: bad channel %d for status 0x%x", __func__, chan, inot_24xx->in_status); isp_print_bytes(isp, buf, QENTRY_LEN, inot_24xx); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot_24xx); return; } lochan = chan; hichan = chan + 1; } isp_prt(isp, ISP_LOGTDEBUG1, "%s: Immediate Notify Channels %d..%d status=0x%x seqid=0x%x", __func__, lochan, hichan-1, inot_24xx->in_status, inot_24xx->in_rxid); for (chan = lochan; chan < hichan; chan++) { if (FCPARAM(isp, chan)->role == ISP_ROLE_NONE) continue; switch (inot_24xx->in_status) { case IN24XX_LIP_RESET: case IN24XX_LINK_RESET: case IN24XX_PORT_LOGOUT: case IN24XX_PORT_CHANGED: case IN24XX_LINK_FAILED: case IN24XX_SRR_RCVD: case IN24XX_ELS_RCVD: inot_24xx->in_reserved = 0; /* clear this for later usage */ inot_24xx->in_vpidx = chan; isp_async(isp, ISPASYNC_TARGET_ACTION, inot_24xx); break; default: isp_prt(isp, ISP_LOGINFO, "%s: unhandled status (0x%x) for chan %d", __func__, inot_24xx->in_status, chan); isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot_24xx); break; } } inot_24xx->in_vpidx = ochan; } #endif Index: projects/powernv/dev/isp/ispmbox.h =================================================================== --- projects/powernv/dev/isp/ispmbox.h (revision 291050) +++ projects/powernv/dev/isp/ispmbox.h (revision 291051) @@ -1,2643 +1,2644 @@ /* $FreeBSD$ */ /*- * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Mailbox and Queue Entry Definitions for for Qlogic ISP SCSI adapters. */ #ifndef _ISPMBOX_H #define _ISPMBOX_H /* * Mailbox Command Opcodes */ #define MBOX_NO_OP 0x0000 #define MBOX_LOAD_RAM 0x0001 #define MBOX_EXEC_FIRMWARE 0x0002 #define MBOX_DUMP_RAM 0x0003 #define MBOX_WRITE_RAM_WORD 0x0004 #define MBOX_READ_RAM_WORD 0x0005 #define MBOX_MAILBOX_REG_TEST 0x0006 #define MBOX_VERIFY_CHECKSUM 0x0007 #define MBOX_ABOUT_FIRMWARE 0x0008 #define MBOX_LOAD_RISC_RAM_2100 0x0009 /* a */ #define MBOX_LOAD_RISC_RAM 0x000b /* c */ #define MBOX_WRITE_RAM_WORD_EXTENDED 0x000d #define MBOX_CHECK_FIRMWARE 0x000e #define MBOX_READ_RAM_WORD_EXTENDED 0x000f #define MBOX_INIT_REQ_QUEUE 0x0010 #define MBOX_INIT_RES_QUEUE 0x0011 #define MBOX_EXECUTE_IOCB 0x0012 #define MBOX_WAKE_UP 0x0013 #define MBOX_STOP_FIRMWARE 0x0014 #define MBOX_ABORT 0x0015 #define MBOX_ABORT_DEVICE 0x0016 #define MBOX_ABORT_TARGET 0x0017 #define MBOX_BUS_RESET 0x0018 #define MBOX_STOP_QUEUE 0x0019 #define MBOX_START_QUEUE 0x001a #define MBOX_SINGLE_STEP_QUEUE 0x001b #define MBOX_ABORT_QUEUE 0x001c #define MBOX_GET_DEV_QUEUE_STATUS 0x001d /* 1e */ #define MBOX_GET_FIRMWARE_STATUS 0x001f #define MBOX_GET_INIT_SCSI_ID 0x0020 #define MBOX_GET_SELECT_TIMEOUT 0x0021 #define MBOX_GET_RETRY_COUNT 0x0022 #define MBOX_GET_TAG_AGE_LIMIT 0x0023 #define MBOX_GET_CLOCK_RATE 0x0024 #define MBOX_GET_ACT_NEG_STATE 0x0025 #define MBOX_GET_ASYNC_DATA_SETUP_TIME 0x0026 #define MBOX_GET_SBUS_PARAMS 0x0027 #define MBOX_GET_PCI_PARAMS MBOX_GET_SBUS_PARAMS #define MBOX_GET_TARGET_PARAMS 0x0028 #define MBOX_GET_DEV_QUEUE_PARAMS 0x0029 #define MBOX_GET_RESET_DELAY_PARAMS 0x002a /* 2b */ /* 2c */ /* 2d */ /* 2e */ /* 2f */ #define MBOX_SET_INIT_SCSI_ID 0x0030 #define MBOX_SET_SELECT_TIMEOUT 0x0031 #define MBOX_SET_RETRY_COUNT 0x0032 #define MBOX_SET_TAG_AGE_LIMIT 0x0033 #define MBOX_SET_CLOCK_RATE 0x0034 #define MBOX_SET_ACT_NEG_STATE 0x0035 #define MBOX_SET_ASYNC_DATA_SETUP_TIME 0x0036 #define MBOX_SET_SBUS_CONTROL_PARAMS 0x0037 #define MBOX_SET_PCI_PARAMETERS 0x0037 #define MBOX_SET_TARGET_PARAMS 0x0038 #define MBOX_SET_DEV_QUEUE_PARAMS 0x0039 #define MBOX_SET_RESET_DELAY_PARAMS 0x003a /* 3b */ /* 3c */ /* 3d */ /* 3e */ /* 3f */ #define MBOX_RETURN_BIOS_BLOCK_ADDR 0x0040 #define MBOX_WRITE_FOUR_RAM_WORDS 0x0041 #define MBOX_EXEC_BIOS_IOCB 0x0042 #define MBOX_SET_FW_FEATURES 0x004a #define MBOX_GET_FW_FEATURES 0x004b #define FW_FEATURE_FAST_POST 0x1 #define FW_FEATURE_LVD_NOTIFY 0x2 #define FW_FEATURE_RIO_32BIT 0x4 #define FW_FEATURE_RIO_16BIT 0x8 #define MBOX_INIT_REQ_QUEUE_A64 0x0052 #define MBOX_INIT_RES_QUEUE_A64 0x0053 #define MBOX_ENABLE_TARGET_MODE 0x0055 #define ENABLE_TARGET_FLAG 0x8000 #define ENABLE_TQING_FLAG 0x0004 #define ENABLE_MANDATORY_DISC 0x0002 #define MBOX_GET_TARGET_STATUS 0x0056 /* These 
are for the ISP2X00 FC cards */ #define MBOX_GET_LOOP_ID 0x0020 /* for 24XX cards, outgoing mailbox 7 has these values for F or FL topologies */ #define ISP24XX_INORDER 0x0100 #define ISP24XX_NPIV_SAN 0x0400 #define ISP24XX_VSAN_SAN 0x1000 #define ISP24XX_FC_SP_SAN 0x2000 #define MBOX_GET_FIRMWARE_OPTIONS 0x0028 #define MBOX_SET_FIRMWARE_OPTIONS 0x0038 #define MBOX_GET_RESOURCE_COUNT 0x0042 #define MBOX_REQUEST_OFFLINE_MODE 0x0043 #define MBOX_ENHANCED_GET_PDB 0x0047 #define MBOX_INIT_FIRMWARE_MULTI_ID 0x0048 /* 2400 only */ #define MBOX_GET_VP_DATABASE 0x0049 /* 2400 only */ #define MBOX_GET_VP_DATABASE_ENTRY 0x004a /* 2400 only */ #define MBOX_EXEC_COMMAND_IOCB_A64 0x0054 #define MBOX_INIT_FIRMWARE 0x0060 #define MBOX_GET_INIT_CONTROL_BLOCK 0x0061 #define MBOX_INIT_LIP 0x0062 #define MBOX_GET_FC_AL_POSITION_MAP 0x0063 #define MBOX_GET_PORT_DB 0x0064 #define MBOX_CLEAR_ACA 0x0065 #define MBOX_TARGET_RESET 0x0066 #define MBOX_CLEAR_TASK_SET 0x0067 #define MBOX_ABORT_TASK_SET 0x0068 #define MBOX_GET_FW_STATE 0x0069 #define MBOX_GET_PORT_NAME 0x006A #define MBOX_GET_LINK_STATUS 0x006B #define MBOX_INIT_LIP_RESET 0x006C #define MBOX_SEND_SNS 0x006E #define MBOX_FABRIC_LOGIN 0x006F #define MBOX_SEND_CHANGE_REQUEST 0x0070 #define MBOX_FABRIC_LOGOUT 0x0071 #define MBOX_INIT_LIP_LOGIN 0x0072 #define MBOX_GET_PORT_NODE_NAME_LIST 0x0075 #define MBOX_GET_ID_LIST 0x007C #define MBOX_LUN_RESET 0x007E #define MBOX_DRIVER_HEARTBEAT 0x005B #define MBOX_FW_HEARTBEAT 0x005C #define MBOX_GET_SET_DATA_RATE 0x005D /* 24XX/23XX only */ #define MBGSD_GET_RATE 0 #define MBGSD_SET_RATE 1 #define MBGSD_SET_RATE_NOW 2 /* 24XX only */ #define MBGSD_ONEGB 0 #define MBGSD_TWOGB 1 #define MBGSD_AUTO 2 #define MBGSD_FOURGB 3 /* 24XX only */ #define MBGSD_EIGHTGB 4 /* 25XX only */ #define ISP2100_SET_PCI_PARAM 0x00ff #define MBOX_BUSY 0x04 /* * Mailbox Command Complete Status Codes */ #define MBOX_COMMAND_COMPLETE 0x4000 #define MBOX_INVALID_COMMAND 0x4001 #define MBOX_HOST_INTERFACE_ERROR 0x4002 #define MBOX_TEST_FAILED 0x4003 #define MBOX_COMMAND_ERROR 0x4005 #define MBOX_COMMAND_PARAM_ERROR 0x4006 #define MBOX_PORT_ID_USED 0x4007 #define MBOX_LOOP_ID_USED 0x4008 #define MBOX_ALL_IDS_USED 0x4009 #define MBOX_NOT_LOGGED_IN 0x400A #define MBOX_LINK_DOWN_ERROR 0x400B #define MBOX_LOOPBACK_ERROR 0x400C #define MBOX_CHECKSUM_ERROR 0x4010 #define MBOX_INVALID_PRODUCT_KEY 0x4020 /* pseudo mailbox completion codes */ #define MBOX_REGS_BUSY 0x6000 /* registers in use */ #define MBOX_TIMEOUT 0x6001 /* command timed out */ #define MBLOGALL 0xffffffff #define MBLOGNONE 0x00000000 #define MBLOGMASK(x) (1 << (((x) - 1) & 0x1f)) /* * Asynchronous event status codes */ #define ASYNC_BUS_RESET 0x8001 #define ASYNC_SYSTEM_ERROR 0x8002 #define ASYNC_RQS_XFER_ERR 0x8003 #define ASYNC_RSP_XFER_ERR 0x8004 #define ASYNC_QWAKEUP 0x8005 #define ASYNC_TIMEOUT_RESET 0x8006 #define ASYNC_DEVICE_RESET 0x8007 #define ASYNC_EXTMSG_UNDERRUN 0x800A #define ASYNC_SCAM_INT 0x800B #define ASYNC_HUNG_SCSI 0x800C #define ASYNC_KILLED_BUS 0x800D #define ASYNC_BUS_TRANSIT 0x800E /* LVD -> HVD, eg. 
*/ #define ASYNC_LIP_OCCURRED 0x8010 #define ASYNC_LOOP_UP 0x8011 #define ASYNC_LOOP_DOWN 0x8012 #define ASYNC_LOOP_RESET 0x8013 #define ASYNC_PDB_CHANGED 0x8014 #define ASYNC_CHANGE_NOTIFY 0x8015 #define ASYNC_LIP_F8 0x8016 #define ASYNC_LIP_ERROR 0x8017 #define ASYNC_SECURITY_UPDATE 0x801B #define ASYNC_CMD_CMPLT 0x8020 #define ASYNC_CTIO_DONE 0x8021 #define ASYNC_RIO32_1 0x8021 #define ASYNC_RIO32_2 0x8022 #define ASYNC_IP_XMIT_DONE 0x8022 #define ASYNC_IP_RECV_DONE 0x8023 #define ASYNC_IP_BROADCAST 0x8024 #define ASYNC_IP_RCVQ_LOW 0x8025 #define ASYNC_IP_RCVQ_EMPTY 0x8026 #define ASYNC_IP_RECV_DONE_ALIGNED 0x8027 #define ASYNC_PTPMODE 0x8030 #define ASYNC_RIO16_1 0x8031 #define ASYNC_RIO16_2 0x8032 #define ASYNC_RIO16_3 0x8033 #define ASYNC_RIO16_4 0x8034 #define ASYNC_RIO16_5 0x8035 #define ASYNC_CONNMODE 0x8036 #define ISP_CONN_LOOP 1 #define ISP_CONN_PTP 2 #define ISP_CONN_BADLIP 3 #define ISP_CONN_FATAL 4 #define ISP_CONN_LOOPBACK 5 #define ASYNC_RIOZIO_STALL 0x8040 /* there's a RIO/ZIO entry that hasn't been serviced */ #define ASYNC_RIO32_2_2200 0x8042 /* same as ASYNC_RIO32_2, but for 2100/2200 */ #define ASYNC_RCV_ERR 0x8048 /* * Firmware Options. There are a lot of them. * * IFCOPTN - ISP Fibre Channel Option Word N */ #define IFCOPT1_EQFQASYNC (1 << 13) /* enable QFULL notification */ #define IFCOPT1_EAABSRCVD (1 << 12) #define IFCOPT1_RJTASYNC (1 << 11) /* enable 8018 notification */ #define IFCOPT1_ENAPURE (1 << 10) #define IFCOPT1_ENA8017 (1 << 7) #define IFCOPT1_DISGPIO67 (1 << 6) #define IFCOPT1_LIPLOSSIMM (1 << 5) #define IFCOPT1_DISF7SWTCH (1 << 4) #define IFCOPT1_CTIO_RETRY (1 << 3) #define IFCOPT1_LIPASYNC (1 << 1) #define IFCOPT1_LIPF8 (1 << 0) #define IFCOPT2_LOOPBACK (1 << 1) #define IFCOPT2_ATIO3_ONLY (1 << 0) #define IFCOPT3_NOPRLI (1 << 4) /* disable automatic sending of PRLI on local loops */ #define IFCOPT3_RNDASYNC (1 << 1) /* * 2.01.31 2200 Only. Need Bit 13 in Mailbox 1 for Set Firmware Options * mailbox command to enable this. 
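 *
 * A sketch of what that enablement might look like (assuming the
 * driver's usual mbreg_t/MBSINIT/isp_mboxcmd helpers; hypothetical,
 * not part of this change):
 *
 *	mbreg_t mbs;
 *
 *	MBSINIT(&mbs, MBOX_SET_FIRMWARE_OPTIONS, MBLOGALL, 0);
 *	mbs.param[1] = FCPARAM(isp, 0)->isp_fwoptions | (1 << 13);
 *	isp_mboxcmd(isp, &mbs);
 *	if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
 *		// option not accepted by this firmware
 *	}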
*/ #define ASYNC_QFULL_SENT 0x8049 /* * Needs to be enabled */ #define ASYNC_AUTO_PLOGI_RJT 0x8018 /* * 24XX only */ #define ASYNC_RJT_SENT 0x8049 /* * All IOCB Queue entries are this size */ #define QENTRY_LEN 64 /* * Command Structure Definitions */ typedef struct { uint32_t ds_base; uint32_t ds_count; } ispds_t; typedef struct { uint32_t ds_base; uint32_t ds_basehi; uint32_t ds_count; } ispds64_t; #define DSTYPE_32BIT 0 #define DSTYPE_64BIT 1 typedef struct { uint16_t ds_type; /* 0-> ispds_t, 1-> ispds64_t */ uint32_t ds_segment; /* unused */ uint32_t ds_base; /* 32 bit address of DSD list */ } ispdslist_t; typedef struct { uint8_t rqs_entry_type; uint8_t rqs_entry_count; uint8_t rqs_seqno; uint8_t rqs_flags; } isphdr_t; /* RQS Flag definitions */ #define RQSFLAG_CONTINUATION 0x01 #define RQSFLAG_FULL 0x02 #define RQSFLAG_BADHEADER 0x04 #define RQSFLAG_BADPACKET 0x08 #define RQSFLAG_BADCOUNT 0x10 #define RQSFLAG_BADORDER 0x20 #define RQSFLAG_MASK 0x3f /* RQS entry_type definitions */ #define RQSTYPE_REQUEST 0x01 #define RQSTYPE_DATASEG 0x02 #define RQSTYPE_RESPONSE 0x03 #define RQSTYPE_MARKER 0x04 #define RQSTYPE_CMDONLY 0x05 #define RQSTYPE_ATIO 0x06 /* Target Mode */ #define RQSTYPE_CTIO 0x07 /* Target Mode */ #define RQSTYPE_SCAM 0x08 #define RQSTYPE_A64 0x09 #define RQSTYPE_A64_CONT 0x0a #define RQSTYPE_ENABLE_LUN 0x0b /* Target Mode */ #define RQSTYPE_MODIFY_LUN 0x0c /* Target Mode */ #define RQSTYPE_NOTIFY 0x0d /* Target Mode */ #define RQSTYPE_NOTIFY_ACK 0x0e /* Target Mode */ #define RQSTYPE_CTIO1 0x0f /* Target Mode */ #define RQSTYPE_STATUS_CONT 0x10 #define RQSTYPE_T2RQS 0x11 #define RQSTYPE_CTIO7 0x12 #define RQSTYPE_IP_XMIT 0x13 #define RQSTYPE_TSK_MGMT 0x14 #define RQSTYPE_T4RQS 0x15 #define RQSTYPE_ATIO2 0x16 /* Target Mode */ #define RQSTYPE_CTIO2 0x17 /* Target Mode */ #define RQSTYPE_T7RQS 0x18 #define RQSTYPE_T3RQS 0x19 #define RQSTYPE_IP_XMIT_64 0x1b #define RQSTYPE_CTIO4 0x1e /* Target Mode */ #define RQSTYPE_CTIO3 0x1f /* Target Mode */ #define RQSTYPE_RIO1 0x21 #define RQSTYPE_RIO2 0x22 #define RQSTYPE_IP_RECV 0x23 #define RQSTYPE_IP_RECV_CONT 0x24 #define RQSTYPE_CT_PASSTHRU 0x29 #define RQSTYPE_MS_PASSTHRU 0x29 #define RQSTYPE_VP_CTRL 0x30 /* 24XX only */ #define RQSTYPE_VP_MODIFY 0x31 /* 24XX only */ #define RQSTYPE_RPT_ID_ACQ 0x32 /* 24XX only */ #define RQSTYPE_ABORT_IO 0x33 #define RQSTYPE_T6RQS 0x48 #define RQSTYPE_LOGIN 0x52 #define RQSTYPE_ABTS_RCVD 0x54 /* 24XX only */ #define RQSTYPE_ABTS_RSP 0x55 /* 24XX only */ #define ISP_RQDSEG 4 typedef struct { isphdr_t req_header; uint32_t req_handle; uint8_t req_lun_trn; uint8_t req_target; uint16_t req_cdblen; uint16_t req_flags; uint16_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[12]; ispds_t req_dataseg[ISP_RQDSEG]; } ispreq_t; #define ISP_RQDSEG_A64 2 typedef struct { isphdr_t mrk_header; uint32_t mrk_handle; uint8_t mrk_reserved0; uint8_t mrk_target; uint16_t mrk_modifier; uint16_t mrk_flags; uint16_t mrk_lun; uint8_t mrk_reserved1[48]; } isp_marker_t; typedef struct { isphdr_t mrk_header; uint32_t mrk_handle; uint16_t mrk_nphdl; uint8_t mrk_modifier; uint8_t mrk_reserved0; uint8_t mrk_reserved1; uint8_t mrk_vphdl; uint16_t mrk_reserved2; uint8_t mrk_lun[8]; uint8_t mrk_reserved3[40]; } isp_marker_24xx_t; #define SYNC_DEVICE 0 #define SYNC_TARGET 1 #define SYNC_ALL 2 #define SYNC_LIP 3 #define ISP_RQDSEG_T2 3 typedef struct { isphdr_t req_header; uint32_t req_handle; uint8_t req_lun_trn; uint8_t req_target; uint16_t req_scclun; uint16_t req_flags; uint8_t req_crn; uint8_t 
req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[16]; uint32_t req_totalcnt; ispds_t req_dataseg[ISP_RQDSEG_T2]; } ispreqt2_t; typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_target; uint16_t req_scclun; uint16_t req_flags; uint16_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[16]; uint32_t req_totalcnt; ispds_t req_dataseg[ISP_RQDSEG_T2]; } ispreqt2e_t; #define ISP_RQDSEG_T3 2 typedef struct { isphdr_t req_header; uint32_t req_handle; uint8_t req_lun_trn; uint8_t req_target; uint16_t req_scclun; uint16_t req_flags; uint8_t req_crn; uint8_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[16]; uint32_t req_totalcnt; ispds64_t req_dataseg[ISP_RQDSEG_T3]; } ispreqt3_t; #define ispreq64_t ispreqt3_t /* same as.... */ typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_target; uint16_t req_scclun; uint16_t req_flags; uint8_t req_crn; uint8_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[16]; uint32_t req_totalcnt; ispds64_t req_dataseg[ISP_RQDSEG_T3]; } ispreqt3e_t; /* req_flag values */ #define REQFLAG_NODISCON 0x0001 #define REQFLAG_HTAG 0x0002 #define REQFLAG_OTAG 0x0004 #define REQFLAG_STAG 0x0008 #define REQFLAG_TARGET_RTN 0x0010 #define REQFLAG_NODATA 0x0000 #define REQFLAG_DATA_IN 0x0020 #define REQFLAG_DATA_OUT 0x0040 #define REQFLAG_DATA_UNKNOWN 0x0060 #define REQFLAG_DISARQ 0x0100 #define REQFLAG_FRC_ASYNC 0x0200 #define REQFLAG_FRC_SYNC 0x0400 #define REQFLAG_FRC_WIDE 0x0800 #define REQFLAG_NOPARITY 0x1000 #define REQFLAG_STOPQ 0x2000 #define REQFLAG_XTRASNS 0x4000 #define REQFLAG_PRIORITY 0x8000 typedef struct { isphdr_t req_header; uint32_t req_handle; uint8_t req_lun_trn; uint8_t req_target; uint16_t req_cdblen; uint16_t req_flags; uint16_t req_reserved; uint16_t req_time; uint16_t req_seg_count; uint8_t req_cdb[44]; } ispextreq_t; /* * ISP24XX structures */ typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_nphdl; uint16_t req_time; uint16_t req_seg_count; uint16_t req_reserved; uint8_t req_lun[8]; uint8_t req_alen_datadir; uint8_t req_task_management; uint8_t req_task_attribute; uint8_t req_crn; uint8_t req_cdb[16]; uint32_t req_dl; uint16_t req_tidlo; uint8_t req_tidhi; uint8_t req_vpidx; ispds64_t req_dataseg; } ispreqt7_t; /* Task Management Request Function */ typedef struct { isphdr_t tmf_header; uint32_t tmf_handle; uint16_t tmf_nphdl; uint8_t tmf_reserved0[2]; uint16_t tmf_delay; uint16_t tmf_timeout; uint8_t tmf_lun[8]; uint32_t tmf_flags; uint8_t tmf_reserved1[20]; uint16_t tmf_tidlo; uint8_t tmf_tidhi; uint8_t tmf_vpidx; uint8_t tmf_reserved2[12]; } isp24xx_tmf_t; #define ISP24XX_TMF_NOSEND 0x80000000 #define ISP24XX_TMF_LUN_RESET 0x00000010 #define ISP24XX_TMF_ABORT_TASK_SET 0x00000008 #define ISP24XX_TMF_CLEAR_TASK_SET 0x00000004 #define ISP24XX_TMF_TARGET_RESET 0x00000002 #define ISP24XX_TMF_CLEAR_ACA 0x00000001 /* I/O Abort Structure */ typedef struct { isphdr_t abrt_header; uint32_t abrt_handle; uint16_t abrt_nphdl; uint16_t abrt_options; uint32_t abrt_cmd_handle; uint16_t abrt_queue_number; uint8_t abrt_reserved[30]; uint16_t abrt_tidlo; uint8_t abrt_tidhi; uint8_t abrt_vpidx; uint8_t abrt_reserved1[12]; } isp24xx_abrt_t; #define ISP24XX_ABRT_NOSEND 0x01 /* don't actually send ABTS */ #define ISP24XX_ABRT_OKAY 0x00 /* in nphdl on return */ #define ISP24XX_ABRT_ENXIO 0x31 /* in nphdl on return */ #define ISP_CDSEG 7 typedef struct { isphdr_t req_header; uint32_t req_reserved; ispds_t 
req_dataseg[ISP_CDSEG]; } ispcontreq_t; #define ISP_CDSEG64 5 typedef struct { isphdr_t req_header; ispds64_t req_dataseg[ISP_CDSEG64]; } ispcontreq64_t; typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_scsi_status; uint16_t req_completion_status; uint16_t req_state_flags; uint16_t req_status_flags; uint16_t req_time; #define req_response_len req_time /* FC only */ uint16_t req_sense_len; uint32_t req_resid; uint8_t req_response[8]; /* FC only */ uint8_t req_sense_data[32]; } ispstatusreq_t; /* * Status Continuation */ typedef struct { isphdr_t req_header; uint8_t req_sense_data[60]; } ispstatus_cont_t; /* * 24XX Type 0 status */ typedef struct { isphdr_t req_header; uint32_t req_handle; uint16_t req_completion_status; uint16_t req_oxid; uint32_t req_resid; uint16_t req_reserved0; uint16_t req_state_flags; uint16_t req_retry_delay; /* aka Status Qualifier */ uint16_t req_scsi_status; uint32_t req_fcp_residual; uint32_t req_sense_len; uint32_t req_response_len; uint8_t req_rsp_sense[28]; } isp24xx_statusreq_t; /* * For Qlogic 2X00, the high order byte of SCSI status has * additional meaning. */ #define RQCS_CR 0x1000 /* Confirmation Request */ #define RQCS_RU 0x0800 /* Residual Under */ #define RQCS_RO 0x0400 /* Residual Over */ #define RQCS_RESID (RQCS_RU|RQCS_RO) #define RQCS_SV 0x0200 /* Sense Length Valid */ #define RQCS_RV 0x0100 /* FCP Response Length Valid */ /* * CT Passthru IOCB */ typedef struct { isphdr_t ctp_header; uint32_t ctp_handle; uint16_t ctp_status; uint16_t ctp_nphdl; /* n-port handle */ uint16_t ctp_cmd_cnt; /* Command DSD count */ uint8_t ctp_vpidx; uint8_t ctp_reserved0; uint16_t ctp_time; uint16_t ctp_reserved1; uint16_t ctp_rsp_cnt; /* Response DSD count */ uint16_t ctp_reserved2[5]; uint32_t ctp_rsp_bcnt; /* Response byte count */ uint32_t ctp_cmd_bcnt; /* Command byte count */ ispds64_t ctp_dataseg[2]; } isp_ct_pt_t; /* * MS Passthru IOCB */ typedef struct { isphdr_t ms_header; uint32_t ms_handle; uint16_t ms_nphdl; /* handle in high byte for !2k f/w */ uint16_t ms_status; uint16_t ms_flags; uint16_t ms_reserved1; /* low 8 bits */ uint16_t ms_time; uint16_t ms_cmd_cnt; /* Command DSD count */ uint16_t ms_tot_cnt; /* Total DSD Count */ uint8_t ms_type; /* MS type */ uint8_t ms_r_ctl; /* R_CTL */ uint16_t ms_rxid; /* RX_ID */ uint16_t ms_reserved2; uint32_t ms_handle2; uint32_t ms_rsp_bcnt; /* Response byte count */ uint32_t ms_cmd_bcnt; /* Command byte count */ ispds64_t ms_dataseg[2]; } isp_ms_t; /* * Completion Status Codes. 
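 *
 * A decoding sketch (hypothetical helper, assuming the layout of
 * isp24xx_statusreq_t above): when both RQCS_RV and RQCS_SV are
 * set in req_scsi_status, the FCP response data comes first in
 * req_rsp_sense and the sense data follows it.
 *
 *	static const uint8_t *
 *	isp24xx_sense_ptr(const isp24xx_statusreq_t *sp)
 *	{
 *		const uint8_t *ptr = sp->req_rsp_sense;
 *
 *		if (sp->req_scsi_status & RQCS_RV)
 *			ptr += sp->req_response_len;
 *		return ((sp->req_scsi_status & RQCS_SV) ? ptr : NULL);
 *	}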
*/ #define RQCS_COMPLETE 0x0000 #define RQCS_DMA_ERROR 0x0002 #define RQCS_RESET_OCCURRED 0x0004 #define RQCS_ABORTED 0x0005 #define RQCS_TIMEOUT 0x0006 #define RQCS_DATA_OVERRUN 0x0007 #define RQCS_DATA_UNDERRUN 0x0015 #define RQCS_QUEUE_FULL 0x001C /* 1X00 Only Completion Codes */ #define RQCS_INCOMPLETE 0x0001 #define RQCS_TRANSPORT_ERROR 0x0003 #define RQCS_COMMAND_OVERRUN 0x0008 #define RQCS_STATUS_OVERRUN 0x0009 #define RQCS_BAD_MESSAGE 0x000a #define RQCS_NO_MESSAGE_OUT 0x000b #define RQCS_EXT_ID_FAILED 0x000c #define RQCS_IDE_MSG_FAILED 0x000d #define RQCS_ABORT_MSG_FAILED 0x000e #define RQCS_REJECT_MSG_FAILED 0x000f #define RQCS_NOP_MSG_FAILED 0x0010 #define RQCS_PARITY_ERROR_MSG_FAILED 0x0011 #define RQCS_DEVICE_RESET_MSG_FAILED 0x0012 #define RQCS_ID_MSG_FAILED 0x0013 #define RQCS_UNEXP_BUS_FREE 0x0014 #define RQCS_XACT_ERR1 0x0018 #define RQCS_XACT_ERR2 0x0019 #define RQCS_XACT_ERR3 0x001A #define RQCS_BAD_ENTRY 0x001B #define RQCS_PHASE_SKIPPED 0x001D #define RQCS_ARQS_FAILED 0x001E #define RQCS_WIDE_FAILED 0x001F #define RQCS_SYNCXFER_FAILED 0x0020 #define RQCS_LVD_BUSERR 0x0021 /* 2X00 Only Completion Codes */ #define RQCS_PORT_UNAVAILABLE 0x0028 #define RQCS_PORT_LOGGED_OUT 0x0029 #define RQCS_PORT_CHANGED 0x002A #define RQCS_PORT_BUSY 0x002B /* 24XX Only Completion Codes */ #define RQCS_24XX_DRE 0x0011 /* data reassembly error */ #define RQCS_24XX_TABORT 0x0013 /* aborted by target */ #define RQCS_24XX_ENOMEM 0x002C /* f/w resource unavailable */ #define RQCS_24XX_TMO 0x0030 /* task management overrun */ /* * 1X00 specific State Flags */ #define RQSF_GOT_BUS 0x0100 #define RQSF_GOT_TARGET 0x0200 #define RQSF_SENT_CDB 0x0400 #define RQSF_XFRD_DATA 0x0800 #define RQSF_GOT_STATUS 0x1000 #define RQSF_GOT_SENSE 0x2000 #define RQSF_XFER_COMPLETE 0x4000 /* * 2X00 specific State Flags * (same as 1X00 except RQSF_GOT_BUS/RQSF_GOT_TARGET are not available) */ #define RQSF_DATA_IN 0x0020 #define RQSF_DATA_OUT 0x0040 #define RQSF_STAG 0x0008 #define RQSF_OTAG 0x0004 #define RQSF_HTAG 0x0002 /* * 1X00 Status Flags */ #define RQSTF_DISCONNECT 0x0001 #define RQSTF_SYNCHRONOUS 0x0002 #define RQSTF_PARITY_ERROR 0x0004 #define RQSTF_BUS_RESET 0x0008 #define RQSTF_DEVICE_RESET 0x0010 #define RQSTF_ABORTED 0x0020 #define RQSTF_TIMEOUT 0x0040 #define RQSTF_NEGOTIATION 0x0080 /* * 2X00 specific state flags */ /* RQSF_SENT_CDB */ /* RQSF_XFRD_DATA */ /* RQSF_GOT_STATUS */ /* RQSF_XFER_COMPLETE */ /* * 2X00 specific status flags */ /* RQSTF_ABORTED */ /* RQSTF_TIMEOUT */ #define RQSTF_DMA_ERROR 0x0080 #define RQSTF_LOGOUT 0x2000 /* * Miscellaneous */ #ifndef ISP_EXEC_THROTTLE #define ISP_EXEC_THROTTLE 16 #endif /* * About Firmware returns an 'attribute' word in mailbox 6. * These attributes are for 2200 and 2300. */ #define ISP_FW_ATTR_TMODE 0x0001 #define ISP_FW_ATTR_SCCLUN 0x0002 #define ISP_FW_ATTR_FABRIC 0x0004 #define ISP_FW_ATTR_CLASS2 0x0008 #define ISP_FW_ATTR_FCTAPE 0x0010 #define ISP_FW_ATTR_IP 0x0020 #define ISP_FW_ATTR_VI 0x0040 #define ISP_FW_ATTR_VI_SOLARIS 0x0080 #define ISP_FW_ATTR_2KLOGINS 0x0100 /* just a guess... 
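2K-login firmware widens N-port handles to 16 bits, which is why
the target-mode code earlier in this diff picks the extended entry
layouts when the capability is present. A sketch of the existing
gate (see ISP_CAP_2KLOGIN below):

	uint16_t nphdl;

	if (ISP_CAP_2KLOGIN(isp))
		nphdl = ((in_fcentry_e_t *)inp)->in_iid;
	else
		nphdl = inp->in_iid;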
*/ /* and these are for the 2400 */ #define ISP2400_FW_ATTR_CLASS2 0x0001 #define ISP2400_FW_ATTR_IP 0x0002 #define ISP2400_FW_ATTR_MULTIID 0x0004 #define ISP2400_FW_ATTR_SB2 0x0008 #define ISP2400_FW_ATTR_T10CRC 0x0010 #define ISP2400_FW_ATTR_VI 0x0020 #define ISP2400_FW_ATTR_MQ 0x0040 #define ISP2400_FW_ATTR_MSIX 0x0080 #define ISP2400_FW_ATTR_FCOE 0x0800 #define ISP2400_FW_ATTR_VP0 0x1000 #define ISP2400_FW_ATTR_EXPFW 0x2000 #define ISP2400_FW_ATTR_HOTFW 0x4000 #define ISP2400_FW_ATTR_EXTNDED 0x8000 #define ISP2400_FW_ATTR_EXTVP 0x00010000 #define ISP2400_FW_ATTR_VN2VN 0x00040000 #define ISP2400_FW_ATTR_EXMOFF 0x00080000 #define ISP2400_FW_ATTR_NPMOFF 0x00100000 #define ISP2400_FW_ATTR_DIFCHOP 0x00400000 #define ISP2400_FW_ATTR_SRIOV 0x02000000 #define ISP2400_FW_ATTR_ASICTMP 0x0200000000 #define ISP2400_FW_ATTR_ATIOMQ 0x0400000000 /* * These are either manifestly true or are dependent on f/w attributes */ #define ISP_CAP_TMODE(isp) \ (IS_24XX(isp)? 1 : (isp->isp_fwattr & ISP_FW_ATTR_TMODE)) #define ISP_CAP_SCCFW(isp) \ (IS_24XX(isp)? 1 : (isp->isp_fwattr & ISP_FW_ATTR_SCCLUN)) #define ISP_CAP_2KLOGIN(isp) \ (IS_24XX(isp)? 1 : (isp->isp_fwattr & ISP_FW_ATTR_2KLOGINS)) /* * This is only true for 24XX cards with this f/w attribute */ #define ISP_CAP_MULTI_ID(isp) \ (IS_24XX(isp)? (isp->isp_fwattr & ISP2400_FW_ATTR_MULTIID) : 0) #define ISP_GET_VPIDX(isp, tag) \ (ISP_CAP_MULTI_ID(isp) ? tag : 0) #define ISP_CAP_VP0(isp) \ (IS_24XX(isp)? (isp->isp_fwattr & ISP2400_FW_ATTR_VP0) : 0) /* * This is true manifestly or is dependent on a f/w attribute * but may or may not actually be *enabled*. In any case, it * is enabled on a per-channel basis. */ #define ISP_CAP_FCTAPE(isp) \ (IS_24XX(isp)? 1 : (isp->isp_fwattr & ISP_FW_ATTR_FCTAPE)) #define ISP_FCTAPE_ENABLED(isp, chan) \ (IS_24XX(isp)? (FCPARAM(isp, chan)->isp_xfwoptions & ICB2400_OPT2_FCTAPE) != 0 : (FCPARAM(isp, chan)->isp_xfwoptions & ICBXOPT_FCTAPE) != 0) /* * Reduced Interrupt Operation Response Queue Entries */ typedef struct { isphdr_t req_header; uint32_t req_handles[15]; } isp_rio1_t; typedef struct { isphdr_t req_header; uint16_t req_handles[30]; } isp_rio2_t; /* * FC (ISP2100/ISP2200/ISP2300/ISP2400) specific data structures */ /* * Initialization Control Block * * Version One (prime) format. 
*/ typedef struct { uint8_t icb_version; uint8_t icb_reserved0; uint16_t icb_fwoptions; uint16_t icb_maxfrmlen; uint16_t icb_maxalloc; uint16_t icb_execthrottle; uint8_t icb_retry_count; uint8_t icb_retry_delay; uint8_t icb_portname[8]; uint16_t icb_hardaddr; uint8_t icb_iqdevtype; uint8_t icb_logintime; uint8_t icb_nodename[8]; uint16_t icb_rqstout; uint16_t icb_rspnsin; uint16_t icb_rqstqlen; uint16_t icb_rsltqlen; uint16_t icb_rqstaddr[4]; uint16_t icb_respaddr[4]; uint16_t icb_lunenables; uint8_t icb_ccnt; uint8_t icb_icnt; uint16_t icb_lunetimeout; uint16_t icb_reserved1; uint16_t icb_xfwoptions; uint8_t icb_racctimer; uint8_t icb_idelaytimer; uint16_t icb_zfwoptions; uint16_t icb_reserved2[13]; } isp_icb_t; #define ICB_VERSION1 1 #define ICBOPT_EXTENDED 0x8000 #define ICBOPT_BOTH_WWNS 0x4000 #define ICBOPT_FULL_LOGIN 0x2000 #define ICBOPT_STOP_ON_QFULL 0x1000 /* 2200/2100 only */ #define ICBOPT_PREV_ADDRESS 0x0800 #define ICBOPT_SRCHDOWN 0x0400 #define ICBOPT_NOLIP 0x0200 #define ICBOPT_PDBCHANGE_AE 0x0100 #define ICBOPT_TGT_TYPE 0x0080 #define ICBOPT_INI_ADISC 0x0040 #define ICBOPT_INI_DISABLE 0x0020 #define ICBOPT_TGT_ENABLE 0x0010 #define ICBOPT_FAST_POST 0x0008 #define ICBOPT_FULL_DUPLEX 0x0004 #define ICBOPT_FAIRNESS 0x0002 #define ICBOPT_HARD_ADDRESS 0x0001 #define ICBXOPT_NO_LOGOUT 0x8000 /* no logout on link failure */ #define ICBXOPT_FCTAPE_CCQ 0x4000 /* FC-Tape Command Queueing */ #define ICBXOPT_FCTAPE_CONFIRM 0x2000 #define ICBXOPT_FCTAPE 0x1000 #define ICBXOPT_CLASS2_ACK0 0x0200 #define ICBXOPT_CLASS2 0x0100 #define ICBXOPT_NO_PLAY 0x0080 /* don't play if can't get hard addr */ #define ICBXOPT_TOPO_MASK 0x0070 #define ICBXOPT_LOOP_ONLY 0x0000 #define ICBXOPT_PTP_ONLY 0x0010 #define ICBXOPT_LOOP_2_PTP 0x0020 #define ICBXOPT_PTP_2_LOOP 0x0030 /* * The lower 4 bits of the xfwoptions field are the OPERATION MODE bits. 
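 * For example, a sketch of selecting ZIO operation while leaving
 * the remaining option bits alone (hypothetical, with icbp assumed
 * to point at an isp_icb_t and using the masks defined just below):
 *
 *	icbp->icb_xfwoptions &= ~ICBXOPT_TIMER_MASK;
 *	icbp->icb_xfwoptions |= ICBXOPT_ZIO;
 *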
* RIO is not defined for the 23XX cards (just 2200) */ #define ICBXOPT_RIO_OFF 0 #define ICBXOPT_RIO_16BIT 1 #define ICBXOPT_RIO_32BIT 2 #define ICBXOPT_RIO_16BIT_IOCB 3 #define ICBXOPT_RIO_32BIT_IOCB 4 #define ICBXOPT_ZIO 5 #define ICBXOPT_TIMER_MASK 0x7 #define ICBZOPT_RATE_MASK 0xC000 #define ICBZOPT_RATE_ONEGB 0x0000 #define ICBZOPT_RATE_AUTO 0x8000 #define ICBZOPT_RATE_TWOGB 0x4000 #define ICBZOPT_50_OHM 0x2000 #define ICBZOPT_ENA_OOF 0x0040 /* out of order frame handling */ #define ICBZOPT_RSPSZ_MASK 0x0030 #define ICBZOPT_RSPSZ_24 0x0000 #define ICBZOPT_RSPSZ_12 0x0010 #define ICBZOPT_RSPSZ_24A 0x0020 #define ICBZOPT_RSPSZ_32 0x0030 #define ICBZOPT_SOFTID 0x0002 #define ICBZOPT_ENA_RDXFR_RDY 0x0001 /* 2400 F/W options */ #define ICB2400_OPT1_BOTH_WWNS 0x00004000 #define ICB2400_OPT1_FULL_LOGIN 0x00002000 #define ICB2400_OPT1_PREV_ADDRESS 0x00000800 #define ICB2400_OPT1_SRCHDOWN 0x00000400 #define ICB2400_OPT1_NOLIP 0x00000200 #define ICB2400_OPT1_INI_DISABLE 0x00000020 #define ICB2400_OPT1_TGT_ENABLE 0x00000010 #define ICB2400_OPT1_FULL_DUPLEX 0x00000004 #define ICB2400_OPT1_FAIRNESS 0x00000002 #define ICB2400_OPT1_HARD_ADDRESS 0x00000001 #define ICB2400_OPT2_ENA_ATIOMQ 0x08000000 #define ICB2400_OPT2_ENA_IHA 0x04000000 #define ICB2400_OPT2_QOS 0x02000000 #define ICB2400_OPT2_IOCBS 0x01000000 #define ICB2400_OPT2_ENA_IHR 0x00400000 #define ICB2400_OPT2_ENA_VMS 0x00200000 #define ICB2400_OPT2_ENA_TA 0x00100000 #define ICB2400_OPT2_TPRLIC 0x00004000 #define ICB2400_OPT2_FCTAPE 0x00001000 #define ICB2400_OPT2_FCSP 0x00000800 #define ICB2400_OPT2_CLASS2_ACK0 0x00000200 #define ICB2400_OPT2_CLASS2 0x00000100 #define ICB2400_OPT2_NO_PLAY 0x00000080 #define ICB2400_OPT2_TOPO_MASK 0x00000070 #define ICB2400_OPT2_LOOP_ONLY 0x00000000 #define ICB2400_OPT2_PTP_ONLY 0x00000010 #define ICB2400_OPT2_LOOP_2_PTP 0x00000020 #define ICB2400_OPT2_TIMER_MASK 0x0000000f #define ICB2400_OPT2_ZIO 0x00000005 #define ICB2400_OPT2_ZIO1 0x00000006 #define ICB2400_OPT3_NO_CTXDIS 0x40000000 #define ICB2400_OPT3_ENA_ETH_RESP 0x08000000 #define ICB2400_OPT3_ENA_ETH_ATIO 0x04000000 #define ICB2400_OPT3_ENA_MFCF 0x00020000 #define ICB2400_OPT3_SKIP_FOURGB 0x00010000 #define ICB2400_OPT3_RATE_MASK 0x0000E000 #define ICB2400_OPT3_RATE_ONEGB 0x00000000 #define ICB2400_OPT3_RATE_TWOGB 0x00002000 #define ICB2400_OPT3_RATE_AUTO 0x00004000 #define ICB2400_OPT3_RATE_FOURGB 0x00006000 #define ICB2400_OPT3_RATE_EIGHTGB 0x00008000 #define ICB2400_OPT3_RATE_SIXTEENGB 0x0000A000 #define ICB2400_OPT3_ENA_OOF_XFRDY 0x00000200 #define ICB2400_OPT3_NO_N2N_LOGI 0x00000100 #define ICB2400_OPT3_NO_LOCAL_PLOGI 0x00000080 #define ICB2400_OPT3_ENA_OOF 0x00000040 /* note that a response size flag of zero is reserved! */ #define ICB2400_OPT3_RSPSZ_MASK 0x00000030 #define ICB2400_OPT3_RSPSZ_12 0x00000010 #define ICB2400_OPT3_RSPSZ_24 0x00000020 #define ICB2400_OPT3_RSPSZ_32 0x00000030 #define ICB2400_OPT3_SOFTID 0x00000002 #define ICB_MIN_FRMLEN 256 #define ICB_MAX_FRMLEN 2112 #define ICB_DFLT_FRMLEN 1024 #define ICB_DFLT_ALLOC 256 #define ICB_DFLT_THROTTLE 16 #define ICB_DFLT_RDELAY 5 #define ICB_DFLT_RCOUNT 3 #define ICB_LOGIN_TOV 30 #define ICB_LUN_ENABLE_TOV 15 /* * And somebody at QLogic had a great idea that you could just change * the structure *and* keep the version number the same as the other cards. 
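 *
 * The practical consequence (a sketch of the assumption this
 * implies, not anything this change adds): since ICB_VERSION1 is
 * reused, consumers cannot key the layout off icb_version and
 * must branch on the chip type instead, e.g.
 *
 *	if (IS_24XX(isp)) {
 *		// fill an isp_icb_2400_t
 *	} else {
 *		// fill an isp_icb_t
 *	}
 */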
*/ typedef struct { uint16_t icb_version; uint16_t icb_reserved0; uint16_t icb_maxfrmlen; uint16_t icb_execthrottle; uint16_t icb_xchgcnt; uint16_t icb_hardaddr; uint8_t icb_portname[8]; uint8_t icb_nodename[8]; uint16_t icb_rspnsin; uint16_t icb_rqstout; uint16_t icb_retry_count; uint16_t icb_priout; uint16_t icb_rsltqlen; uint16_t icb_rqstqlen; uint16_t icb_ldn_nols; uint16_t icb_prqstqlen; uint16_t icb_rqstaddr[4]; uint16_t icb_respaddr[4]; uint16_t icb_priaddr[4]; uint16_t icb_msixresp; uint16_t icb_msixatio; uint16_t icb_reserved1[2]; uint16_t icb_atio_in; uint16_t icb_atioqlen; uint16_t icb_atioqaddr[4]; uint16_t icb_idelaytimer; uint16_t icb_logintime; uint32_t icb_fwoptions1; uint32_t icb_fwoptions2; uint32_t icb_fwoptions3; uint16_t icb_qos; uint16_t icb_reserved2[3]; uint16_t icb_enodemac[3]; uint16_t icb_disctime; uint16_t icb_reserved3[4]; } isp_icb_2400_t; #define RQRSP_ADDR0015 0 #define RQRSP_ADDR1631 1 #define RQRSP_ADDR3247 2 #define RQRSP_ADDR4863 3 #define ICB_NNM0 7 #define ICB_NNM1 6 #define ICB_NNM2 5 #define ICB_NNM3 4 #define ICB_NNM4 3 #define ICB_NNM5 2 #define ICB_NNM6 1 #define ICB_NNM7 0 #define MAKE_NODE_NAME_FROM_WWN(array, wwn) \ array[ICB_NNM0] = (uint8_t) ((wwn >> 0) & 0xff), \ array[ICB_NNM1] = (uint8_t) ((wwn >> 8) & 0xff), \ array[ICB_NNM2] = (uint8_t) ((wwn >> 16) & 0xff), \ array[ICB_NNM3] = (uint8_t) ((wwn >> 24) & 0xff), \ array[ICB_NNM4] = (uint8_t) ((wwn >> 32) & 0xff), \ array[ICB_NNM5] = (uint8_t) ((wwn >> 40) & 0xff), \ array[ICB_NNM6] = (uint8_t) ((wwn >> 48) & 0xff), \ array[ICB_NNM7] = (uint8_t) ((wwn >> 56) & 0xff) #define MAKE_WWN_FROM_NODE_NAME(wwn, array) \ wwn = ((uint64_t) array[ICB_NNM0]) | \ ((uint64_t) array[ICB_NNM1] << 8) | \ ((uint64_t) array[ICB_NNM2] << 16) | \ ((uint64_t) array[ICB_NNM3] << 24) | \ ((uint64_t) array[ICB_NNM4] << 32) | \ ((uint64_t) array[ICB_NNM5] << 40) | \ ((uint64_t) array[ICB_NNM6] << 48) | \ ((uint64_t) array[ICB_NNM7] << 56) /* * For MULTI_ID firmware, this describes a * virtual port entity for getting status. */ typedef struct { uint16_t vp_port_status; uint8_t vp_port_options; uint8_t vp_port_loopid; uint8_t vp_port_portname[8]; uint8_t vp_port_nodename[8]; uint16_t vp_port_portid_lo; /* not present when trailing icb */ uint16_t vp_port_portid_hi; /* not present when trailing icb */ } vp_port_info_t; #define ICB2400_VPOPT_ENA_SNSLOGIN 0x00000040 /* Enable SNS Login and SCR for Virtual Ports */ #define ICB2400_VPOPT_TGT_DISABLE 0x00000020 /* Target Mode Disabled */ #define ICB2400_VPOPT_INI_ENABLE 0x00000010 /* Initiator Mode Enabled */ #define ICB2400_VPOPT_ENABLED 0x00000008 /* VP Enabled */ #define ICB2400_VPOPT_NOPLAY 0x00000004 /* ID Not Acquired */ #define ICB2400_VPOPT_PREV_ADDRESS 0x00000002 /* Previously Assigned ID */ #define ICB2400_VPOPT_HARD_ADDRESS 0x00000001 /* Hard Assigned ID */ #define ICB2400_VPOPT_WRITE_SIZE 20 /* * For MULTI_ID firmware, we append this structure * to the isp_icb_2400_t above, followed by a list * structures that are *most* of the vp_port_info_t. 
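 *
 * Worked example of the layout arithmetic using the constants
 * below: with ICB2400_VPINFO_OFF at 0x80 and the 4-byte
 * isp_icb_2400_vpinfo_t header, ICB2400_VPINFO_PORT_OFF(chan)
 * is 0x80 + 4 + chan * 20, so channel 0's entry starts at
 * offset 0x84 and channel 1's at 0x98.
 */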
*/ typedef struct { uint16_t vp_count; uint16_t vp_global_options; } isp_icb_2400_vpinfo_t; #define ICB2400_VPINFO_OFF 0x80 /* offset from start of ICB */ #define ICB2400_VPINFO_PORT_OFF(chan) \ (ICB2400_VPINFO_OFF + \ sizeof (isp_icb_2400_vpinfo_t) + (chan * ICB2400_VPOPT_WRITE_SIZE)) #define ICB2400_VPGOPT_FCA 0x01 /* Assume Clean Address bit in FLOGI ACC set (works only in static configurations) */ #define ICB2400_VPGOPT_MID_DISABLE 0x02 /* when set, connection mode2 will work with NPIV-capable switched */ #define ICB2400_VPGOPT_VP0_DECOUPLE 0x04 /* Allow VP0 decoupling if firmware supports it */ #define ICB2400_VPGOPT_SUSP_FDISK 0x10 /* Suspend FDISC for Enabled VPs */ #define ICB2400_VPGOPT_GEN_RIDA 0x20 /* Generate RIDA if FLOGI Fails */ typedef struct { isphdr_t vp_ctrl_hdr; uint32_t vp_ctrl_handle; uint16_t vp_ctrl_index_fail; uint16_t vp_ctrl_status; uint16_t vp_ctrl_command; uint16_t vp_ctrl_vp_count; uint16_t vp_ctrl_idmap[16]; uint16_t vp_ctrl_reserved[7]; uint16_t vp_ctrl_fcf_index; } vp_ctrl_info_t; #define VP_CTRL_CMD_ENABLE_VP 0x00 #define VP_CTRL_CMD_DISABLE_VP 0x08 #define VP_CTRL_CMD_DISABLE_VP_REINIT_LINK 0x09 #define VP_CTRL_CMD_DISABLE_VP_LOGO 0x0A #define VP_CTRL_CMD_DISABLE_VP_LOGO_ALL 0x0B /* * We can use this structure for modifying either one or two VP ports after initialization */ typedef struct { isphdr_t vp_mod_hdr; uint32_t vp_mod_hdl; uint16_t vp_mod_reserved0; uint16_t vp_mod_status; uint8_t vp_mod_cmd; uint8_t vp_mod_cnt; uint8_t vp_mod_idx0; uint8_t vp_mod_idx1; struct { uint8_t options; uint8_t loopid; uint16_t reserved1; uint8_t wwpn[8]; uint8_t wwnn[8]; } vp_mod_ports[2]; uint8_t vp_mod_reserved2[8]; } vp_modify_t; #define VP_STS_OK 0x00 #define VP_STS_ERR 0x01 #define VP_CNT_ERR 0x02 #define VP_GEN_ERR 0x03 #define VP_IDX_ERR 0x04 #define VP_STS_BSY 0x05 #define VP_MODIFY 0x00 #define VP_MODIFY_ENA 0x01 #define VP_MODIFY_OPT 0x02 #define VP_RESUME 0x03 /* * Port Data Base Element */ typedef struct { uint16_t pdb_options; uint8_t pdb_mstate; uint8_t pdb_sstate; uint8_t pdb_hardaddr_bits[4]; uint8_t pdb_portid_bits[4]; uint8_t pdb_nodename[8]; uint8_t pdb_portname[8]; uint16_t pdb_execthrottle; uint16_t pdb_exec_count; uint8_t pdb_retry_count; uint8_t pdb_retry_delay; uint16_t pdb_resalloc; uint16_t pdb_curalloc; uint16_t pdb_qhead; uint16_t pdb_qtail; uint16_t pdb_tl_next; uint16_t pdb_tl_last; uint16_t pdb_features; /* PLOGI, Common Service */ uint16_t pdb_pconcurrnt; /* PLOGI, Common Service */ uint16_t pdb_roi; /* PLOGI, Common Service */ uint8_t pdb_target; uint8_t pdb_initiator; /* PLOGI, Class 3 Control Flags */ uint16_t pdb_rdsiz; /* PLOGI, Class 3 */ uint16_t pdb_ncseq; /* PLOGI, Class 3 */ uint16_t pdb_noseq; /* PLOGI, Class 3 */ uint16_t pdb_labrtflg; uint16_t pdb_lstopflg; uint16_t pdb_sqhead; uint16_t pdb_sqtail; uint16_t pdb_ptimer; uint16_t pdb_nxt_seqid; uint16_t pdb_fcount; uint16_t pdb_prli_len; uint16_t pdb_prli_svc0; uint16_t pdb_prli_svc3; uint16_t pdb_loopid; uint16_t pdb_il_ptr; uint16_t pdb_sl_ptr; } isp_pdb_21xx_t; #define PDB_OPTIONS_XMITTING (1<<11) #define PDB_OPTIONS_LNKXMIT (1<<10) #define PDB_OPTIONS_ABORTED (1<<9) #define PDB_OPTIONS_ADISC (1<<1) #define PDB_STATE_DISCOVERY 0 #define PDB_STATE_WDISC_ACK 1 #define PDB_STATE_PLOGI 2 #define PDB_STATE_PLOGI_ACK 3 #define PDB_STATE_PRLI 4 #define PDB_STATE_PRLI_ACK 5 #define PDB_STATE_LOGGED_IN 6 #define PDB_STATE_PORT_UNAVAIL 7 #define PDB_STATE_PRLO 8 #define PDB_STATE_PRLO_ACK 9 #define PDB_STATE_PLOGO 10 #define PDB_STATE_PLOG_ACK 11 #define SVC3_ROLE_MASK 0x30 #define 
SVC3_ROLE_SHIFT 4 #define BITS2WORD(x) ((x)[0] << 16 | (x)[3] << 8 | (x)[2]) #define BITS2WORD_24XX(x) ((x)[0] << 16 | (x)[1] << 8 | (x)[2]) /* * Port Data Base Element- 24XX cards */ typedef struct { uint16_t pdb_flags; uint8_t pdb_curstate; uint8_t pdb_laststate; uint8_t pdb_hardaddr_bits[4]; uint8_t pdb_portid_bits[4]; #define pdb_nxt_seqid_2400 pdb_portid_bits[3] uint16_t pdb_retry_timer; uint16_t pdb_handle; uint16_t pdb_rcv_dsize; uint16_t pdb_reserved0; uint16_t pdb_prli_svc0; uint16_t pdb_prli_svc3; uint8_t pdb_portname[8]; uint8_t pdb_nodename[8]; uint8_t pdb_reserved1[24]; } isp_pdb_24xx_t; #define PDB2400_TID_SUPPORTED 0x4000 #define PDB2400_FC_TAPE 0x0080 #define PDB2400_CLASS2_ACK0 0x0040 #define PDB2400_FCP_CONF 0x0020 #define PDB2400_CLASS2 0x0010 #define PDB2400_ADDR_VALID 0x0002 #define PDB2400_STATE_PLOGI_PEND 0x03 #define PDB2400_STATE_PLOGI_DONE 0x04 #define PDB2400_STATE_PRLI_PEND 0x05 #define PDB2400_STATE_LOGGED_IN 0x06 #define PDB2400_STATE_PORT_UNAVAIL 0x07 #define PDB2400_STATE_PRLO_PEND 0x09 #define PDB2400_STATE_LOGO_PEND 0x0B /* * Common elements from the above two structures that are actually useful to us. */ typedef struct { uint16_t handle; uint16_t prli_word3; uint32_t : 8, portid : 24; uint8_t portname[8]; uint8_t nodename[8]; } isp_pdb_t; /* * Port/Node Name List Element */ typedef struct { uint8_t pnnle_name[8]; uint16_t pnnle_handle; uint16_t pnnle_reserved; } isp_pnnle_t; #define PNNL_OPTIONS_NODE_NAMES (1<<0) #define PNNL_OPTIONS_PORT_DATA (1<<2) #define PNNL_OPTIONS_INITIATORS (1<<3) /* * Port and N-Port Handle List Element */ typedef struct { uint16_t pnhle_port_id_lo; uint16_t pnhle_port_id_hi_handle; } isp_pnhle_21xx_t; typedef struct { uint16_t pnhle_port_id_lo; uint16_t pnhle_port_id_hi; uint16_t pnhle_handle; } isp_pnhle_23xx_t; typedef struct { uint16_t pnhle_port_id_lo; uint16_t pnhle_port_id_hi; uint16_t pnhle_handle; uint16_t pnhle_reserved; } isp_pnhle_24xx_t; /* * Port Database Changed Async Event information for 24XX cards */ #define PDB24XX_AE_OK 0x00 #define PDB24XX_AE_IMPL_LOGO_1 0x01 #define PDB24XX_AE_IMPL_LOGO_2 0x02 #define PDB24XX_AE_IMPL_LOGO_3 0x03 #define PDB24XX_AE_PLOGI_RCVD 0x04 #define PDB24XX_AE_PLOGI_RJT 0x05 #define PDB24XX_AE_PRLI_RCVD 0x06 #define PDB24XX_AE_PRLI_RJT 0x07 #define PDB24XX_AE_TPRLO 0x08 #define PDB24XX_AE_TPRLO_RJT 0x09 #define PDB24XX_AE_PRLO_RCVD 0x0a #define PDB24XX_AE_LOGO_RCVD 0x0b #define PDB24XX_AE_TOPO_CHG 0x0c #define PDB24XX_AE_NPORT_CHG 0x0d #define PDB24XX_AE_FLOGI_RJT 0x0e #define PDB24XX_AE_BAD_FANN 0x0f #define PDB24XX_AE_FLOGI_TIMO 0x10 #define PDB24XX_AE_ABX_LOGO 0x11 #define PDB24XX_AE_PLOGI_DONE 0x12 #define PDB24XX_AE_PRLI_DONJE 0x13 #define PDB24XX_AE_OPN_1 0x14 #define PDB24XX_AE_OPN_2 0x15 #define PDB24XX_AE_TXERR 0x16 #define PDB24XX_AE_FORCED_LOGO 0x17 #define PDB24XX_AE_DISC_TIMO 0x18 /* * Genericized Port Login/Logout software structure */ typedef struct { uint16_t handle; uint16_t channel; uint32_t flags : 8, portid : 24; } isp_plcmd_t; /* the flags to use are those for PLOGX_FLG_* below */ /* * ISP24XX- Login/Logout Port IOCB */ typedef struct { isphdr_t plogx_header; uint32_t plogx_handle; uint16_t plogx_status; uint16_t plogx_nphdl; uint16_t plogx_flags; uint16_t plogx_vphdl; /* low 8 bits */ uint16_t plogx_portlo; /* low 16 bits */ uint16_t plogx_rspsz_porthi; struct { uint16_t lo16; uint16_t hi16; } plogx_ioparm[11]; } isp_plogx_t; #define PLOGX_STATUS_OK 0x00 #define PLOGX_STATUS_UNAVAIL 0x28 #define PLOGX_STATUS_LOGOUT 0x29 #define PLOGX_STATUS_IOCBERR 0x31 #define 
PLOGX_IOCBERR_NOLINK 0x01 #define PLOGX_IOCBERR_NOIOCB 0x02 #define PLOGX_IOCBERR_NOXGHG 0x03 #define PLOGX_IOCBERR_FAILED 0x04 /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_NOFABRIC 0x05 #define PLOGX_IOCBERR_NOTREADY 0x07 #define PLOGX_IOCBERR_NOLOGIN 0x09 /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_NOPCB 0x0a #define PLOGX_IOCBERR_REJECT 0x18 /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_EINVAL 0x19 /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_PORTUSED 0x1a /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_HNDLUSED 0x1b /* further info in IOPARM 1 */ #define PLOGX_IOCBERR_NOHANDLE 0x1c #define PLOGX_IOCBERR_NOFLOGI 0x1f /* further info in IOPARM 1 */ #define PLOGX_FLG_CMD_MASK 0xf #define PLOGX_FLG_CMD_PLOGI 0 #define PLOGX_FLG_CMD_PRLI 1 #define PLOGX_FLG_CMD_PDISC 2 #define PLOGX_FLG_CMD_LOGO 8 #define PLOGX_FLG_CMD_PRLO 9 #define PLOGX_FLG_CMD_TPRLO 10 #define PLOGX_FLG_COND_PLOGI 0x10 /* if with PLOGI */ #define PLOGX_FLG_IMPLICIT 0x10 /* if with LOGO, PRLO, TPRLO */ #define PLOGX_FLG_SKIP_PRLI 0x20 /* if with PLOGI */ #define PLOGX_FLG_IMPLICIT_LOGO_ALL 0x20 /* if with LOGO */ #define PLOGX_FLG_EXPLICIT_LOGO 0x40 /* if with LOGO */ #define PLOGX_FLG_COMMON_FEATURES 0x80 /* if with PLOGI */ #define PLOGX_FLG_FREE_NPHDL 0x80 /* if with LOGO */ #define PLOGX_FLG_CLASS2 0x100 /* if with PLOGI */ #define PLOGX_FLG_FCP2_OVERRIDE 0x200 /* if with PRLO, PRLI */ /* * Report ID Acquisition (24XX multi-id firmware) */ typedef struct { isphdr_t ridacq_hdr; uint32_t ridacq_handle; uint8_t ridacq_vp_acquired; uint8_t ridacq_vp_setup; uint8_t ridacq_vp_index; uint8_t ridacq_vp_status; uint16_t ridacq_vp_port_lo; uint8_t ridacq_vp_port_hi; uint8_t ridacq_format; /* 0 or 1 */ uint16_t ridacq_map[8]; uint8_t ridacq_reserved1[32]; } isp_ridacq_t; #define RIDACQ_STS_COMPLETE 0 #define RIDACQ_STS_UNACQUIRED 1 #define RIDACQ_STS_CHANGED 2 #define RIDACQ_STS_SNS_TIMEOUT 3 #define RIDACQ_STS_SNS_REJECTED 4 #define RIDACQ_STS_SCR_TIMEOUT 5 #define RIDACQ_STS_SCR_REJECTED 6 /* * Simple Name Server Data Structures */ #define SNS_GA_NXT 0x100 #define SNS_GPN_ID 0x112 #define SNS_GNN_ID 0x113 #define SNS_GFF_ID 0x11F #define SNS_GID_FT 0x171 #define SNS_RFT_ID 0x217 +#define SNS_RFF_ID 0x21F typedef struct { uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_data[]; /* variable data */ } sns_screq_t; /* Subcommand Request Structure */ typedef struct { uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_cmd; uint16_t snscb_reserved2; uint32_t snscb_reserved3; uint32_t snscb_port; } sns_ga_nxt_req_t; #define SNS_GA_NXT_REQ_SIZE (sizeof (sns_ga_nxt_req_t)) typedef struct { uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_cmd; uint16_t snscb_reserved2; uint32_t snscb_reserved3; uint32_t snscb_portid; } sns_gxn_id_req_t; #define SNS_GXN_ID_REQ_SIZE (sizeof (sns_gxn_id_req_t)) typedef struct { uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response
buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_cmd; uint16_t snscb_mword_div_2; uint32_t snscb_reserved3; uint32_t snscb_fc4_type; } sns_gid_ft_req_t; #define SNS_GID_FT_REQ_SIZE (sizeof (sns_gid_ft_req_t)) typedef struct { uint16_t snscb_rblen; /* response buffer length (words) */ uint16_t snscb_reserved0; uint16_t snscb_addr[4]; /* response buffer address */ uint16_t snscb_sblen; /* subcommand buffer length (words) */ uint16_t snscb_reserved1; uint16_t snscb_cmd; uint16_t snscb_reserved2; uint32_t snscb_reserved3; uint32_t snscb_port; uint32_t snscb_fc4_types[8]; } sns_rft_id_req_t; #define SNS_RFT_ID_REQ_SIZE (sizeof (sns_rft_id_req_t)) typedef struct { ct_hdr_t snscb_cthdr; uint8_t snscb_port_type; uint8_t snscb_port_id[3]; uint8_t snscb_portname[8]; uint16_t snscb_data[]; /* variable data */ } sns_scrsp_t; /* Subcommand Response Structure */ typedef struct { ct_hdr_t snscb_cthdr; uint8_t snscb_port_type; uint8_t snscb_port_id[3]; uint8_t snscb_portname[8]; uint8_t snscb_pnlen; /* symbolic port name length */ uint8_t snscb_pname[255]; /* symbolic port name */ uint8_t snscb_nodename[8]; uint8_t snscb_nnlen; /* symbolic node name length */ uint8_t snscb_nname[255]; /* symbolic node name */ uint8_t snscb_ipassoc[8]; uint8_t snscb_ipaddr[16]; uint8_t snscb_svc_class[4]; uint8_t snscb_fc4_types[32]; uint8_t snscb_fpname[8]; uint8_t snscb_reserved; uint8_t snscb_hardaddr[3]; } sns_ga_nxt_rsp_t; /* Subcommand Response Structure */ #define SNS_GA_NXT_RESP_SIZE (sizeof (sns_ga_nxt_rsp_t)) typedef struct { ct_hdr_t snscb_cthdr; uint8_t snscb_wwn[8]; } sns_gxn_id_rsp_t; #define SNS_GXN_ID_RESP_SIZE (sizeof (sns_gxn_id_rsp_t)) typedef struct { ct_hdr_t snscb_cthdr; uint32_t snscb_fc4_features[32]; } sns_gff_id_rsp_t; #define SNS_GFF_ID_RESP_SIZE (sizeof (sns_gff_id_rsp_t)) typedef struct { ct_hdr_t snscb_cthdr; struct { uint8_t control; uint8_t portid[3]; } snscb_ports[1]; } sns_gid_ft_rsp_t; #define SNS_GID_FT_RESP_SIZE(x) ((sizeof (sns_gid_ft_rsp_t)) + ((x - 1) << 2)) #define SNS_RFT_ID_RESP_SIZE (sizeof (ct_hdr_t)) /* * Other Misc Structures */ /* ELS Pass Through */ typedef struct { isphdr_t els_hdr; uint32_t els_handle; uint16_t els_status; uint16_t els_nphdl; uint16_t els_xmit_dsd_count; /* outgoing only */ uint8_t els_vphdl; uint8_t els_sof; uint32_t els_rxid; uint16_t els_recv_dsd_count; /* outgoing only */ uint8_t els_opcode; uint8_t els_reserved1; uint8_t els_did_lo; uint8_t els_did_mid; uint8_t els_did_hi; uint8_t els_reserved2; uint16_t els_reserved3; uint16_t els_ctl_flags; union { struct { uint32_t _els_bytecnt; uint32_t _els_subcode1; uint32_t _els_subcode2; uint8_t _els_reserved4[20]; } in; struct { uint32_t _els_recv_bytecnt; uint32_t _els_xmit_bytecnt; uint32_t _els_xmit_dsd_length; uint16_t _els_xmit_dsd_a1500; uint16_t _els_xmit_dsd_a3116; uint16_t _els_xmit_dsd_a4732; uint16_t _els_xmit_dsd_a6348; uint32_t _els_recv_dsd_length; uint16_t _els_recv_dsd_a1500; uint16_t _els_recv_dsd_a3116; uint16_t _els_recv_dsd_a4732; uint16_t _els_recv_dsd_a6348; } out; } inout; #define els_bytecnt inout.in._els_bytecnt #define els_subcode1 inout.in._els_subcode1 #define els_subcode2 inout.in._els_subcode2 #define els_reserved4 inout.in._els_reserved4 #define els_recv_bytecnt inout.out._els_recv_bytecnt #define els_xmit_bytecnt inout.out._els_xmit_bytecnt #define els_xmit_dsd_length inout.out._els_xmit_dsd_length #define els_xmit_dsd_a1500 inout.out._els_xmit_dsd_a1500 #define els_xmit_dsd_a3116 
inout.out._els_xmit_dsd_a3116 #define els_xmit_dsd_a4732 inout.out._els_xmit_dsd_a4732 #define els_xmit_dsd_a6348 inout.out._els_xmit_dsd_a6348 #define els_recv_dsd_length inout.out._els_recv_dsd_length #define els_recv_dsd_a1500 inout.out._els_recv_dsd_a1500 #define els_recv_dsd_a3116 inout.out._els_recv_dsd_a3116 #define els_recv_dsd_a4732 inout.out._els_recv_dsd_a4732 #define els_recv_dsd_a6348 inout.out._els_recv_dsd_a6348 } els_t; /* * A handy package structure for running FC-SCSI commands internally */ typedef struct { uint16_t handle; uint16_t lun; uint32_t channel : 8, portid : 24; uint32_t timeout; union { struct { uint32_t data_length; uint32_t no_wait : 1, do_read : 1; uint8_t cdb[16]; void *data_ptr; } beg; struct { uint32_t data_residual; uint8_t status; uint8_t pad; uint16_t sense_length; uint8_t sense_data[32]; } end; } fcd; } isp_xcmd_t; /* * Target Mode related definitions */ #define QLTM_SENSELEN 18 /* non-FC cards only */ #define QLTM_SVALID 0x80 /* * Structure for Enable Lun and Modify Lun queue entries */ typedef struct { isphdr_t le_header; uint32_t le_reserved; uint8_t le_lun; uint8_t le_rsvd; uint8_t le_ops; /* Modify LUN only */ uint8_t le_tgt; /* Not for FC */ uint32_t le_flags; /* Not for FC */ uint8_t le_status; uint8_t le_reserved2; uint8_t le_cmd_count; uint8_t le_in_count; uint8_t le_cdb6len; /* Not for FC */ uint8_t le_cdb7len; /* Not for FC */ uint16_t le_timeout; uint16_t le_reserved3[20]; } lun_entry_t; /* * le_flags values */ #define LUN_TQAE 0x00000002 /* bit1 Tagged Queue Action Enable */ #define LUN_DSSM 0x01000000 /* bit24 Disable Sending SDP Message */ #define LUN_DISAD 0x02000000 /* bit25 Disable autodisconnect */ #define LUN_DM 0x40000000 /* bit30 Disconnects Mandatory */ /* * le_ops values */ #define LUN_CCINCR 0x01 /* increment command count */ #define LUN_CCDECR 0x02 /* decrement command count */ #define LUN_ININCR 0x40 /* increment immed. notify count */ #define LUN_INDECR 0x80 /* decrement immed. 
notify count */ /* * le_status values */ #define LUN_OK 0x01 /* we be rockin' */ #define LUN_ERR 0x04 /* request completed with error */ #define LUN_INVAL 0x06 /* invalid request */ #define LUN_NOCAP 0x16 /* can't provide requested capability */ #define LUN_ENABLED 0x3E /* LUN already enabled */ /* * Immediate Notify Entry structure */ #define IN_MSGLEN 8 /* 8 bytes */ #define IN_RSVDLEN 8 /* 8 words */ typedef struct { isphdr_t in_header; uint32_t in_reserved; uint8_t in_lun; /* lun */ uint8_t in_iid; /* initiator */ uint8_t in_reserved2; uint8_t in_tgt; /* target */ uint32_t in_flags; uint8_t in_status; uint8_t in_rsvd2; uint8_t in_tag_val; /* tag value */ uint8_t in_tag_type; /* tag type */ uint16_t in_seqid; /* sequence id */ uint8_t in_msg[IN_MSGLEN]; /* SCSI message bytes */ uint16_t in_reserved3[IN_RSVDLEN]; uint8_t in_sense[QLTM_SENSELEN];/* suggested sense data */ } in_entry_t; typedef struct { isphdr_t in_header; uint32_t in_reserved; uint8_t in_lun; /* lun */ uint8_t in_iid; /* initiator */ uint16_t in_scclun; uint32_t in_reserved2; uint16_t in_status; uint16_t in_task_flags; uint16_t in_seqid; /* sequence id */ } in_fcentry_t; typedef struct { isphdr_t in_header; uint32_t in_reserved; uint16_t in_iid; /* initiator */ uint16_t in_scclun; uint32_t in_reserved2; uint16_t in_status; uint16_t in_task_flags; uint16_t in_seqid; /* sequence id */ } in_fcentry_e_t; /* * Values for the in_status field */ #define IN_REJECT 0x0D /* Message Reject message received */ #define IN_RESET 0x0E /* Bus Reset occurred */ #define IN_NO_RCAP 0x16 /* requested capability not available */ #define IN_IDE_RECEIVED 0x33 /* Initiator Detected Error msg received */ #define IN_RSRC_UNAVAIL 0x34 /* resource unavailable */ #define IN_MSG_RECEIVED 0x36 /* SCSI message received */ #define IN_ABORT_TASK 0x20 /* task named in RX_ID is being aborted (FC) */ #define IN_PORT_LOGOUT 0x29 /* port has logged out (FC) */ #define IN_PORT_CHANGED 0x2A /* port changed */ #define IN_GLOBAL_LOGO 0x2E /* all ports logged out */ #define IN_NO_NEXUS 0x3B /* Nexus not established */ #define IN_SRR_RCVD 0x45 /* SRR received */ /* * Values for the in_task_flags field- should only get one at a time! */ #define TASK_FLAGS_RESERVED_MASK (0xe700) #define TASK_FLAGS_CLEAR_ACA (1<<14) #define TASK_FLAGS_TARGET_RESET (1<<13) #define TASK_FLAGS_LUN_RESET (1<<12) #define TASK_FLAGS_CLEAR_TASK_SET (1<<10) #define TASK_FLAGS_ABORT_TASK_SET (1<<9) /* * ISP24XX Immediate Notify */ typedef struct { isphdr_t in_header; uint32_t in_reserved; uint16_t in_nphdl; uint16_t in_reserved1; uint16_t in_flags; uint16_t in_srr_rxid; uint16_t in_status; uint8_t in_status_subcode; uint8_t in_fwhandle; uint32_t in_rxid; uint16_t in_srr_reloff_lo; uint16_t in_srr_reloff_hi; uint16_t in_srr_iu; uint16_t in_srr_oxid; /* * If bit 2 is set in in_flags, the N-Port and * handle tags are valid. If the received ELS is * a LOGO, then these tags contain the N Port ID * from the LOGO payload. If the received ELS * request is TPRLO, these tags contain the * Third Party Originator N Port ID. 
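 *
 * A minimal consumer sketch (locals are illustrative; it uses the
 * fields and the IN24XX_FLAG_NPHDL_VALID flag defined just below):
 *
 *	if (inot->in_flags & IN24XX_FLAG_NPHDL_VALID) {
 *		uint16_t nphdl = inot->in_np_handle;
 *		uint32_t portid = (inot->in_nport_id_hi << 8) |
 *		    inot->in_nport_id_lo;	(the 24 bit N Port ID)
 *	}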
*/ uint16_t in_nport_id_hi; #define in_prli_options in_nport_id_hi uint8_t in_nport_id_lo; uint8_t in_reserved3; uint16_t in_np_handle; uint8_t in_reserved4[12]; uint8_t in_reserved5; uint8_t in_vpidx; uint32_t in_reserved6; uint16_t in_portid_lo; uint8_t in_portid_hi; uint8_t in_reserved7; uint16_t in_reserved8; uint16_t in_oxid; } in_fcentry_24xx_t; #define IN24XX_FLAG_PUREX_IOCB 0x1 #define IN24XX_FLAG_GLOBAL_LOGOUT 0x2 #define IN24XX_FLAG_NPHDL_VALID 0x4 #define IN24XX_FLAG_N2N_PRLI 0x8 #define IN24XX_FLAG_PN_NN_VALID 0x10 #define IN24XX_LIP_RESET 0x0E #define IN24XX_LINK_RESET 0x0F #define IN24XX_PORT_LOGOUT 0x29 #define IN24XX_PORT_CHANGED 0x2A #define IN24XX_LINK_FAILED 0x2E #define IN24XX_SRR_RCVD 0x45 #define IN24XX_ELS_RCVD 0x46 /* * login-affecting ELS received- check * subcode for specific opcode */ /* * For f/w > 4.0.25, these offsets in the Immediate Notify contain * the WWNN/WWPN if the ELS is PLOGI, PDISC or ADISC. The WWN is in * Big Endian format. */ #define IN24XX_PRLI_WWNN_OFF 0x18 #define IN24XX_PRLI_WWPN_OFF 0x28 #define IN24XX_PLOGI_WWNN_OFF 0x20 #define IN24XX_PLOGI_WWPN_OFF 0x28 /* * For f/w > 4.0.25, this offset in the Immediate Notify contains * the WWPN if the ELS is LOGO. The WWN is in Big Endian format. */ #define IN24XX_LOGO_WWPN_OFF 0x28 /* * Immediate Notify Status Subcodes for IN24XX_PORT_LOGOUT */ #define IN24XX_PORT_LOGOUT_PDISC_TMO 0x00 #define IN24XX_PORT_LOGOUT_UXPR_DISC 0x01 #define IN24XX_PORT_LOGOUT_OWN_OPN 0x02 #define IN24XX_PORT_LOGOUT_OWN_OPN_SFT 0x03 #define IN24XX_PORT_LOGOUT_ABTS_TMO 0x04 #define IN24XX_PORT_LOGOUT_DISC_RJT 0x05 #define IN24XX_PORT_LOGOUT_LOGIN_NEEDED 0x06 #define IN24XX_PORT_LOGOUT_BAD_DISC 0x07 #define IN24XX_PORT_LOGOUT_LOST_ALPA 0x08 #define IN24XX_PORT_LOGOUT_XMIT_FAILURE 0x09 /* * Immediate Notify Status Subcodes for IN24XX_PORT_CHANGED */ #define IN24XX_PORT_CHANGED_BADFAN 0x00 #define IN24XX_PORT_CHANGED_TOPO_CHANGE 0x01 #define IN24XX_PORT_CHANGED_FLOGI_ACC 0x02 #define IN24XX_PORT_CHANGED_FLOGI_RJT 0x03 #define IN24XX_PORT_CHANGED_TIMEOUT 0x04 #define IN24XX_PORT_CHANGED_PORT_CHANGE 0x05 /* * Notify Acknowledge Entry structure */ #define NA_RSVDLEN 22 typedef struct { isphdr_t na_header; uint32_t na_reserved; uint8_t na_lun; /* lun */ uint8_t na_iid; /* initiator */ uint8_t na_reserved2; uint8_t na_tgt; /* target */ uint32_t na_flags; uint8_t na_status; uint8_t na_event; uint16_t na_seqid; /* sequence id */ uint16_t na_reserved3[NA_RSVDLEN]; } na_entry_t; /* * Value for the na_event field */ #define NA_RST_CLRD 0x80 /* Clear an async event notification */ #define NA_OK 0x01 /* Notify Acknowledge Succeeded */ #define NA_INVALID 0x06 /* Invalid Notify Acknowledge */ #define NA2_RSVDLEN 21 typedef struct { isphdr_t na_header; uint32_t na_reserved; uint8_t na_reserved1; uint8_t na_iid; /* initiator loop id */ uint16_t na_response; uint16_t na_flags; uint16_t na_reserved2; uint16_t na_status; uint16_t na_task_flags; uint16_t na_seqid; /* sequence id */ uint16_t na_reserved3[NA2_RSVDLEN]; } na_fcentry_t; typedef struct { isphdr_t na_header; uint32_t na_reserved; uint16_t na_iid; /* initiator loop id */ uint16_t na_response; /* response code */ uint16_t na_flags; uint16_t na_reserved2; uint16_t na_status; uint16_t na_task_flags; uint16_t na_seqid; /* sequence id */ uint16_t na_reserved3[NA2_RSVDLEN]; } na_fcentry_e_t; #define NAFC_RCOUNT 0x80 /* increment resource count */ #define NAFC_RST_CLRD 0x20 /* Clear LIP Reset */ #define NAFC_TVALID 0x10 /* task management response code is valid */ /* * ISP24XX Notify Acknowledge */
typedef struct { isphdr_t na_header; uint32_t na_handle; uint16_t na_nphdl; uint16_t na_reserved1; uint16_t na_flags; uint16_t na_srr_rxid; uint16_t na_status; uint8_t na_status_subcode; uint8_t na_fwhandle; uint32_t na_rxid; uint16_t na_srr_reloff_lo; uint16_t na_srr_reloff_hi; uint16_t na_srr_iu; uint16_t na_srr_flags; uint8_t na_reserved3[18]; uint8_t na_reserved4; uint8_t na_vpidx; uint8_t na_srr_reject_vunique; uint8_t na_srr_reject_explanation; uint8_t na_srr_reject_code; uint8_t na_reserved5; uint8_t na_reserved6[6]; uint16_t na_oxid; } na_fcentry_24xx_t; /* * Accept Target I/O Entry structure */ #define ATIO_CDBLEN 26 typedef struct { isphdr_t at_header; uint16_t at_reserved; uint16_t at_handle; uint8_t at_lun; /* lun */ uint8_t at_iid; /* initiator */ uint8_t at_cdblen; /* cdb length */ uint8_t at_tgt; /* target */ uint32_t at_flags; uint8_t at_status; /* firmware status */ uint8_t at_scsi_status; /* scsi status */ uint8_t at_tag_val; /* tag value */ uint8_t at_tag_type; /* tag type */ uint8_t at_cdb[ATIO_CDBLEN]; /* received CDB */ uint8_t at_sense[QLTM_SENSELEN];/* suggested sense data */ } at_entry_t; /* * at_flags values */ #define AT_NODISC 0x00008000 /* disconnect disabled */ #define AT_TQAE 0x00000002 /* Tagged Queue Action enabled */ /* * at_status values */ #define AT_PATH_INVALID 0x07 /* ATIO sent to firmware for disabled lun */ #define AT_RESET 0x0E /* SCSI Bus Reset Occurred */ #define AT_PHASE_ERROR 0x14 /* Bus phase sequence error */ #define AT_NOCAP 0x16 /* Requested capability not available */ #define AT_BDR_MSG 0x17 /* Bus Device Reset msg received */ #define AT_CDB 0x3D /* CDB received */ /* * Macros to create, fetch, and test concatenated handle and tag values * (SPI only) */ #define AT_MAKE_TAGID(tid, aep) \ tid = aep->at_handle; \ if (aep->at_flags & AT_TQAE) { \ tid |= (aep->at_tag_val << 16); \ tid |= (1 << 24); \ } #define CT_MAKE_TAGID(tid, ct) \ tid = ct->ct_fwhandle; \ if (ct->ct_flags & CT_TQAE) { \ tid |= (ct->ct_tag_val << 16); \ tid |= (1 << 24); \ } #define AT_HAS_TAG(val) ((val) & (1 << 24)) #define AT_GET_TAG(val) (((val) >> 16) & 0xff) #define AT_GET_HANDLE(val) ((val) & 0xffff) #define IN_MAKE_TAGID(tid, inp) \ tid = inp->in_seqid; \ tid |= (inp->in_tag_val << 16); \ tid |= (1 << 24) /* * Accept Target I/O Entry structure, Type 2 */ #define ATIO2_CDBLEN 16 typedef struct { isphdr_t at_header; uint32_t at_reserved; uint8_t at_lun; /* lun or reserved */ uint8_t at_iid; /* initiator */ uint16_t at_rxid; /* response ID */ uint16_t at_flags; uint16_t at_status; /* firmware status */ uint8_t at_crn; /* command reference number */ uint8_t at_taskcodes; uint8_t at_taskflags; uint8_t at_execodes; uint8_t at_cdb[ATIO2_CDBLEN]; /* received CDB */ uint32_t at_datalen; /* allocated data len */ uint16_t at_scclun; /* SCC Lun or reserved */ uint16_t at_wwpn[4]; /* WWPN of initiator */ uint16_t at_reserved2[6]; uint16_t at_oxid; } at2_entry_t; typedef struct { isphdr_t at_header; uint32_t at_reserved; uint16_t at_iid; /* initiator */ uint16_t at_rxid; /* response ID */ uint16_t at_flags; uint16_t at_status; /* firmware status */ uint8_t at_crn; /* command reference number */ uint8_t at_taskcodes; uint8_t at_taskflags; uint8_t at_execodes; uint8_t at_cdb[ATIO2_CDBLEN]; /* received CDB */ uint32_t at_datalen; /* allocated data len */ uint16_t at_scclun; /* SCC Lun or reserved */ uint16_t at_wwpn[4]; /* WWPN of initiator */ uint16_t at_reserved2[6]; uint16_t at_oxid; } at2e_entry_t; #define ATIO2_WWPN_OFFSET 0x2A #define ATIO2_OXID_OFFSET 0x3E #define
ATIO2_TC_ATTR_MASK 0x7 #define ATIO2_TC_ATTR_SIMPLEQ 0 #define ATIO2_TC_ATTR_HEADOFQ 1 #define ATIO2_TC_ATTR_ORDERED 2 #define ATIO2_TC_ATTR_ACAQ 4 #define ATIO2_TC_ATTR_UNTAGGED 5 #define ATIO2_EX_WRITE 0x1 #define ATIO2_EX_READ 0x2 /* * Macros to create, fetch, and test concatenated handle and tag values */ #define AT2_MAKE_TAGID(tid, bus, inst, aep) \ tid = aep->at_rxid; \ tid |= (((uint64_t)inst) << 32); \ tid |= (((uint64_t)bus) << 48) #define CT2_MAKE_TAGID(tid, bus, inst, ct) \ tid = ct->ct_rxid; \ tid |= (((uint64_t)inst) << 32); \ tid |= (((uint64_t)(bus & 0xff)) << 48) #define AT2_HAS_TAG(val) 1 #define AT2_GET_TAG(val) ((val) & 0xffffffff) #define AT2_GET_INST(val) (((val) >> 32) & 0xffff) #define AT2_GET_HANDLE AT2_GET_TAG #define AT2_GET_BUS(val) (((val) >> 48) & 0xff) #define FC_HAS_TAG AT2_HAS_TAG #define FC_GET_TAG AT2_GET_TAG #define FC_GET_INST AT2_GET_INST #define FC_GET_HANDLE AT2_GET_HANDLE #define IN_FC_MAKE_TAGID(tid, bus, inst, seqid) \ tid = seqid; \ tid |= (((uint64_t)inst) << 32); \ tid |= (((uint64_t)(bus & 0xff)) << 48) #define FC_TAG_INSERT_INST(tid, inst) \ tid &= ~0x0000ffff00000000ull; \ tid |= (((uint64_t)inst) << 32) /* * 24XX ATIO Definition * * This is *quite* different from other entry types. * First of all, it has its own queue it comes in on. * * Secondly, it doesn't have a normal header. * * Thirdly, it's just a passthru of the FCP CMND IU * which is recorded in big endian mode. */ typedef struct { uint8_t at_type; uint8_t at_count; /* * Task attribute in high four bits, * the rest is the FCP CMND IU Length. * NB: the command can extend past the * length for a single queue entry. */ uint16_t at_ta_len; uint32_t at_rxid; fc_hdr_t at_hdr; fcp_cmnd_iu_t at_cmnd; } at7_entry_t; #define AT7_NORESRC_RXID 0xffffffff /* * Continue Target I/O Entry structure * Request from driver. The response from the * ISP firmware is the same except that the last 18 * bytes are overwritten by suggested sense data if * the 'autosense valid' bit is set in the status byte. */ typedef struct { isphdr_t ct_header; uint16_t ct_syshandle; uint16_t ct_fwhandle; /* required by f/w */ uint8_t ct_lun; /* lun */ uint8_t ct_iid; /* initiator id */ uint8_t ct_reserved2; uint8_t ct_tgt; /* our target id */ uint32_t ct_flags; uint8_t ct_status; /* isp status */ uint8_t ct_scsi_status; /* scsi status */ uint8_t ct_tag_val; /* tag value */ uint8_t ct_tag_type; /* tag type */ uint32_t ct_xfrlen; /* transfer length */ uint32_t ct_resid; /* residual length */ uint16_t ct_timeout; uint16_t ct_seg_count; ispds_t ct_dataseg[ISP_RQDSEG]; } ct_entry_t; /* * For some of the dual port SCSI adapters, port (bus #) is reported * in the MSbit of ct_iid. Bit fields are a bit too awkward here. * * Note that this does not apply to FC adapters at all which can and * do report IIDs between 0x81 && 0xfe (or 0x7ff) which represent devices * that have logged in across a SCSI fabric.
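 *
 * For example, with the macros below a ct_iid of 0x85 decodes as
 * GET_BUS_VAL(0x85) == 1 and GET_IID_VAL(0x85) == 5, i.e. initiator
 * id 5 on the second bus.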
*/ #define GET_IID_VAL(x) (x & 0x3f) #define GET_BUS_VAL(x) ((x >> 7) & 0x1) #define SET_IID_VAL(y, x) y = ((y & ~0x3f) | (x & 0x3f)) #define SET_BUS_VAL(y, x) y = ((y & 0x3f) | ((x & 0x1) << 7)) /* * ct_flags values */ #define CT_TQAE 0x00000002 /* bit 1, Tagged Queue Action enable */ #define CT_DATA_IN 0x00000040 /* bits 6&7, Data direction - *to* initiator */ #define CT_DATA_OUT 0x00000080 /* bits 6&7, Data direction - *from* initiator */ #define CT_NO_DATA 0x000000C0 /* bits 6&7, Data direction */ #define CT_CCINCR 0x00000100 /* bit 8, autoincrement atio count */ #define CT_DATAMASK 0x000000C0 /* bits 6&7, Data direction */ #define CT_INISYNCWIDE 0x00004000 /* bit 14, Do Sync/Wide Negotiation */ #define CT_NODISC 0x00008000 /* bit 15, Disconnects disabled */ #define CT_DSDP 0x01000000 /* bit 24, Disable Save Data Pointers */ #define CT_SENDRDP 0x04000000 /* bit 26, Send Restore Pointers msg */ #define CT_SENDSTATUS 0x80000000 /* bit 31, Send SCSI status byte */ /* * ct_status values * - set by the firmware when it returns the CTIO */ #define CT_OK 0x01 /* completed without error */ #define CT_ABORTED 0x02 /* aborted by host */ #define CT_ERR 0x04 /* see sense data for error */ #define CT_INVAL 0x06 /* request for disabled lun */ #define CT_NOPATH 0x07 /* invalid ITL nexus */ #define CT_INVRXID 0x08 /* (FC only) Invalid RX_ID */ #define CT_DATA_OVER 0x09 /* (FC only) Data Overrun */ #define CT_RSELTMO 0x0A /* reselection timeout after 2 tries */ #define CT_TIMEOUT 0x0B /* timed out */ #define CT_RESET 0x0E /* SCSI Bus Reset occurred */ #define CT_PARITY 0x0F /* Uncorrectable Parity Error */ #define CT_BUS_ERROR 0x10 /* (FC Only) DMA PCI Error */ #define CT_PANIC 0x13 /* Unrecoverable Error */ #define CT_PHASE_ERROR 0x14 /* Bus phase sequence error */ #define CT_DATA_UNDER 0x15 /* (FC only) Data Underrun */ #define CT_BDR_MSG 0x17 /* Bus Device Reset msg received */ #define CT_TERMINATED 0x19 /* due to Terminate Transfer mbox cmd */ #define CT_PORTUNAVAIL 0x28 /* port not available */ #define CT_LOGOUT 0x29 /* port logout */ #define CT_PORTCHANGED 0x2A /* port changed */ #define CT_IDE 0x33 /* Initiator Detected Error */ #define CT_NOACK 0x35 /* Outstanding Immed. Notify. entry */ #define CT_SRR 0x45 /* SRR Received */ #define CT_LUN_RESET 0x48 /* Lun Reset Received */ #define CT_HBA_RESET 0xffff /* pseudo error - command destroyed by HBA reset*/ /* * When the firmware returns a CTIO entry, it may overwrite the last * part of the structure with sense data. This starts at offset 0x2E * into the entry, which is in the middle of ct_dataseg[1]. Rather * than define a new struct for this, I'm just using the sense data * offset. */ #define CTIO_SENSE_OFFSET 0x2E /* * Entry length in u_longs. All entries are the same size so * any one will do as the numerator. */ #define UINT32_ENTRY_SIZE (sizeof(at_entry_t)/sizeof(uint32_t)) /* * QLA2100 CTIO (type 2) entry */ #define MAXRESPLEN 26 typedef struct { isphdr_t ct_header; uint32_t ct_syshandle; uint8_t ct_lun; /* lun */ uint8_t ct_iid; /* initiator id */ uint16_t ct_rxid; /* response ID */ uint16_t ct_flags; uint16_t ct_status; /* isp status */ uint16_t ct_timeout; uint16_t ct_seg_count; uint32_t ct_reloff; /* relative offset */ uint32_t ct_resid; /* residual length */ union { /* * The three different modes that the target driver * can set the CTIO{2,3,4} up as. * * The first is for sending FCP_DATA_IUs as well as * (optionally) sending a terminal SCSI status FCP_RSP_IU. * * The second is for sending SCSI sense data in an FCP_RSP_IU. 
* Note that no FCP_DATA_IUs will be sent. * * The third is for sending FCP_RSP_IUs as built specifically * in system memory as located by the isp_dataseg. */ struct { uint32_t _reserved; uint16_t _reserved2; uint16_t ct_scsi_status; uint32_t ct_xfrlen; union { ispds_t ct_dataseg[ISP_RQDSEG_T2]; ispds64_t ct_dataseg64[ISP_RQDSEG_T3]; ispdslist_t ct_dslist; } u; } m0; struct { uint16_t _reserved; uint16_t _reserved2; uint16_t ct_senselen; uint16_t ct_scsi_status; uint16_t ct_resplen; uint8_t ct_resp[MAXRESPLEN]; } m1; struct { uint32_t _reserved; uint16_t _reserved2; uint16_t _reserved3; uint32_t ct_datalen; union { ispds_t ct_fcp_rsp_iudata_32; ispds64_t ct_fcp_rsp_iudata_64; } u; } m2; } rsp; } ct2_entry_t; typedef struct { isphdr_t ct_header; uint32_t ct_syshandle; uint16_t ct_iid; /* initiator id */ uint16_t ct_rxid; /* response ID */ uint16_t ct_flags; uint16_t ct_status; /* isp status */ uint16_t ct_timeout; uint16_t ct_seg_count; uint32_t ct_reloff; /* relative offset */ uint32_t ct_resid; /* residual length */ union { struct { uint32_t _reserved; uint16_t _reserved2; uint16_t ct_scsi_status; uint32_t ct_xfrlen; union { ispds_t ct_dataseg[ISP_RQDSEG_T2]; ispds64_t ct_dataseg64[ISP_RQDSEG_T3]; ispdslist_t ct_dslist; } u; } m0; struct { uint16_t _reserved; uint16_t _reserved2; uint16_t ct_senselen; uint16_t ct_scsi_status; uint16_t ct_resplen; uint8_t ct_resp[MAXRESPLEN]; } m1; struct { uint32_t _reserved; uint16_t _reserved2; uint16_t _reserved3; uint32_t ct_datalen; union { ispds_t ct_fcp_rsp_iudata_32; ispds64_t ct_fcp_rsp_iudata_64; } u; } m2; } rsp; } ct2e_entry_t; /* * ct_flags values for CTIO2 */ #define CT2_FLAG_MODE0 0x0000 #define CT2_FLAG_MODE1 0x0001 #define CT2_FLAG_MODE2 0x0002 #define CT2_FLAG_MMASK 0x0003 #define CT2_DATA_IN 0x0040 /* *to* initiator */ #define CT2_DATA_OUT 0x0080 /* *from* initiator */ #define CT2_NO_DATA 0x00C0 #define CT2_DATAMASK 0x00C0 #define CT2_CCINCR 0x0100 #define CT2_FASTPOST 0x0200 #define CT2_CONFIRM 0x2000 #define CT2_TERMINATE 0x4000 #define CT2_SENDSTATUS 0x8000 /* * ct_status values are (mostly) the same as that for ct_entry. */ /* * ct_scsi_status values- the low 8 bits are the normal SCSI status * we know and love. The upper 8 bits are validity markers for FCP_RSP_IU * fields. 
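 *
 * For example (a sketch; 0x02 is the standard SCSI CHECK CONDITION
 * status), a CTIO2 returning autosense would carry
 * ct_scsi_status == (CT2_SNSLEN_VALID | 0x02), with the mode 1
 * ct_senselen field giving the number of valid sense bytes.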
*/ #define CT2_RSPLEN_VALID 0x0100 #define CT2_SNSLEN_VALID 0x0200 #define CT2_DATA_OVER 0x0400 #define CT2_DATA_UNDER 0x0800 /* * ISP24XX CTIO */ #define MAXRESPLEN_24XX 24 typedef struct { isphdr_t ct_header; uint32_t ct_syshandle; uint16_t ct_nphdl; /* status on returned CTIOs */ uint16_t ct_timeout; uint16_t ct_seg_count; uint8_t ct_vpidx; uint8_t ct_xflags; uint16_t ct_iid_lo; /* low 16 bits of portid */ uint8_t ct_iid_hi; /* hi 8 bits of portid */ uint8_t ct_reserved; uint32_t ct_rxid; uint16_t ct_senselen; /* mode 1 only */ uint16_t ct_flags; uint32_t ct_resid; /* residual length */ uint16_t ct_oxid; uint16_t ct_scsi_status; /* modes 0 && 1 only */ union { struct { uint32_t reloff; uint32_t reserved0; uint32_t ct_xfrlen; uint32_t reserved1; ispds64_t ds; } m0; struct { uint16_t ct_resplen; uint16_t reserved; uint8_t ct_resp[MAXRESPLEN_24XX]; } m1; struct { uint32_t reserved0; uint32_t reserved1; uint32_t ct_datalen; uint32_t reserved2; ispds64_t ct_fcp_rsp_iudata; } m2; } rsp; } ct7_entry_t; /* * ct_flags values for CTIO7 */ #define CT7_NO_DATA 0x0000 #define CT7_DATA_OUT 0x0001 /* *from* initiator */ #define CT7_DATA_IN 0x0002 /* *to* initiator */ #define CT7_DATAMASK 0x3 #define CT7_DSD_ENABLE 0x0004 #define CT7_CONF_STSFD 0x0010 #define CT7_EXPLCT_CONF 0x0020 #define CT7_FLAG_MODE0 0x0000 #define CT7_FLAG_MODE1 0x0040 #define CT7_FLAG_MODE2 0x0080 #define CT7_FLAG_MMASK 0x00C0 #define CT7_NOACK 0x0100 #define CT7_TASK_ATTR_SHIFT 9 #define CT7_CONFIRM 0x2000 #define CT7_TERMINATE 0x4000 #define CT7_SENDSTATUS 0x8000 /* * Type 7 CTIO status codes */ #define CT7_OK 0x01 /* completed without error */ #define CT7_ABORTED 0x02 /* aborted by host */ #define CT7_ERR 0x04 /* see sense data for error */ #define CT7_INVAL 0x06 /* request for disabled lun */ #define CT7_INVRXID 0x08 /* Invalid RX_ID */ #define CT7_DATA_OVER 0x09 /* Data Overrun */ #define CT7_TIMEOUT 0x0B /* timed out */ #define CT7_RESET 0x0E /* LIP Reset Received */ #define CT7_BUS_ERROR 0x10 /* DMA PCI Error */ #define CT7_REASSY_ERR 0x11 /* DMA reassembly error */ #define CT7_DATA_UNDER 0x15 /* Data Underrun */ #define CT7_PORTUNAVAIL 0x28 /* port not available */ #define CT7_LOGOUT 0x29 /* port logout */ #define CT7_PORTCHANGED 0x2A /* port changed */ #define CT7_SRR 0x45 /* SRR Received */ /* * Other 24XX related target IOCBs */ /* * ABTS Received */ typedef struct { isphdr_t abts_header; uint8_t abts_reserved0[6]; uint16_t abts_nphdl; uint16_t abts_reserved1; uint16_t abts_sof; uint32_t abts_rxid_abts; uint16_t abts_did_lo; uint8_t abts_did_hi; uint8_t abts_r_ctl; uint16_t abts_sid_lo; uint8_t abts_sid_hi; uint8_t abts_cs_ctl; uint16_t abts_fs_ctl; uint8_t abts_f_ctl; uint8_t abts_type; uint16_t abts_seq_cnt; uint8_t abts_df_ctl; uint8_t abts_seq_id; uint16_t abts_rx_id; uint16_t abts_ox_id; uint32_t abts_param; uint8_t abts_reserved2[16]; uint32_t abts_rxid_task; } abts_t; typedef struct { isphdr_t abts_rsp_header; uint32_t abts_rsp_handle; uint16_t abts_rsp_status; uint16_t abts_rsp_nphdl; uint16_t abts_rsp_ctl_flags; uint16_t abts_rsp_sof; uint32_t abts_rsp_rxid_abts; uint16_t abts_rsp_did_lo; uint8_t abts_rsp_did_hi; uint8_t abts_rsp_r_ctl; uint16_t abts_rsp_sid_lo; uint8_t abts_rsp_sid_hi; uint8_t abts_rsp_cs_ctl; uint16_t abts_rsp_f_ctl_lo; uint8_t abts_rsp_f_ctl_hi; uint8_t abts_rsp_type; uint16_t abts_rsp_seq_cnt; uint8_t abts_rsp_df_ctl; uint8_t abts_rsp_seq_id; uint16_t abts_rsp_rx_id; uint16_t abts_rsp_ox_id; uint32_t abts_rsp_param; union { struct { uint16_t reserved; uint8_t last_seq_id; uint8_t
seq_id_valid; uint16_t aborted_rx_id; uint16_t aborted_ox_id; uint16_t high_seq_cnt; uint16_t low_seq_cnt; uint8_t reserved2[4]; } ba_acc; struct { uint8_t vendor_unique; uint8_t explanation; uint8_t reason; uint8_t reserved; uint8_t reserved2[12]; } ba_rjt; struct { uint8_t reserved[8]; uint32_t subcode1; uint32_t subcode2; } rsp; uint8_t reserved[16]; } abts_rsp_payload; uint32_t abts_rsp_rxid_task; } abts_rsp_t; /* terminate this ABTS exchange */ #define ISP24XX_ABTS_RSP_TERMINATE 0x01 #define ISP24XX_ABTS_RSP_COMPLETE 0x00 #define ISP24XX_ABTS_RSP_RESET 0x04 #define ISP24XX_ABTS_RSP_ABORTED 0x05 #define ISP24XX_ABTS_RSP_TIMEOUT 0x06 #define ISP24XX_ABTS_RSP_INVXID 0x08 #define ISP24XX_ABTS_RSP_LOGOUT 0x29 #define ISP24XX_ABTS_RSP_SUBCODE 0x31 #define ISP24XX_NO_TASK 0xffffffff /* * Miscellaneous * * These are the limits of the number of dma segments we * can deal with based not on the size of the segment counter * (which is 16 bits), but on the size of the number of * queue entries field (which is 8 bits). We assume no * segments in the first queue entry, so we can either * have 7 dma segments per continuation entry or 5 * (for 64 bit dma). Multiplying out by 254 gives the * limits below. */ #define ISP_NSEG_MAX 1778 #define ISP_NSEG64_MAX 1270 #endif /* _ISPMBOX_H */ Index: projects/powernv/dev/isp/ispvar.h =================================================================== --- projects/powernv/dev/isp/ispvar.h (revision 291050) +++ projects/powernv/dev/isp/ispvar.h (revision 291051) @@ -1,1166 +1,1165 @@ /* $FreeBSD$ */ /*- * Copyright (c) 1997-2009 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Soft Definitions for Qlogic ISP SCSI adapters. */ #ifndef _ISPVAR_H #define _ISPVAR_H #if defined(__NetBSD__) || defined(__OpenBSD__) #include #include #endif #ifdef __FreeBSD__ #include #include #endif #ifdef __linux__ #include "isp_stds.h" #include "ispmbox.h" #endif #ifdef __svr4__ #include "isp_stds.h" #include "ispmbox.h" #endif #define ISP_CORE_VERSION_MAJOR 7 #define ISP_CORE_VERSION_MINOR 0 /* * Vector for bus specific code to provide specific services.
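 *
 * Each platform supplies one of these; a hedged sketch of a static
 * instance (the handler names here are illustrative, not defined in
 * this file):
 *
 *	static struct ispmdvec mdvec = {
 *		.dv_rd_isr = isp_pci_rd_isr,
 *		.dv_rd_reg = isp_pci_rd_reg,
 *		.dv_wr_reg = isp_pci_wr_reg,
 *		.dv_mbxdma = isp_pci_mbxdma,
 *		.dv_dmaset = isp_pci_dmasetup,
 *		.dv_dmaclr = isp_pci_dmateardown,
 *	};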
*/ typedef struct ispsoftc ispsoftc_t; struct ispmdvec { int (*dv_rd_isr) (ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *); uint32_t (*dv_rd_reg) (ispsoftc_t *, int); void (*dv_wr_reg) (ispsoftc_t *, int, uint32_t); int (*dv_mbxdma) (ispsoftc_t *); int (*dv_dmaset) (ispsoftc_t *, XS_T *, void *); void (*dv_dmaclr) (ispsoftc_t *, XS_T *, uint32_t); void (*dv_reset0) (ispsoftc_t *); void (*dv_reset1) (ispsoftc_t *); void (*dv_dregs) (ispsoftc_t *, const char *); const void * dv_ispfw; /* ptr to f/w */ uint16_t dv_conf1; uint16_t dv_clock; /* clock frequency */ }; /* * Overall parameters */ #define MAX_TARGETS 16 #ifndef MAX_FC_TARG #define MAX_FC_TARG 256 #endif #define ISP_MAX_TARGETS(isp) (IS_FC(isp)? MAX_FC_TARG : MAX_TARGETS) #define ISP_MAX_LUNS(isp) (isp)->isp_maxluns /* * Macros to access ISP registers through bus specific layers- * mostly wrappers to vector through the mdvec structure. */ #define ISP_READ_ISR(isp, isrp, semap, info) \ (*(isp)->isp_mdvec->dv_rd_isr)(isp, isrp, semap, info) #define ISP_READ(isp, reg) \ (*(isp)->isp_mdvec->dv_rd_reg)((isp), (reg)) #define ISP_WRITE(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), (val)) #define ISP_MBOXDMASETUP(isp) \ (*(isp)->isp_mdvec->dv_mbxdma)((isp)) #define ISP_DMASETUP(isp, xs, req) \ (*(isp)->isp_mdvec->dv_dmaset)((isp), (xs), (req)) #define ISP_DMAFREE(isp, xs, hndl) \ if ((isp)->isp_mdvec->dv_dmaclr) \ (*(isp)->isp_mdvec->dv_dmaclr)((isp), (xs), (hndl)) #define ISP_RESET0(isp) \ if ((isp)->isp_mdvec->dv_reset0) (*(isp)->isp_mdvec->dv_reset0)((isp)) #define ISP_RESET1(isp) \ if ((isp)->isp_mdvec->dv_reset1) (*(isp)->isp_mdvec->dv_reset1)((isp)) #define ISP_DUMPREGS(isp, m) \ if ((isp)->isp_mdvec->dv_dregs) (*(isp)->isp_mdvec->dv_dregs)((isp),(m)) #define ISP_SETBITS(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) | (val)) #define ISP_CLRBITS(isp, reg, val) \ (*(isp)->isp_mdvec->dv_wr_reg)((isp), (reg), ISP_READ((isp), (reg)) & ~(val)) /* * The MEMORYBARRIER macro is defined per platform (to provide synchronization * on Request and Response Queues, Scratch DMA areas, and Registers) * * Defined Memory Barrier Synchronization Types */ #define SYNC_REQUEST 0 /* request queue synchronization */ #define SYNC_RESULT 1 /* result queue synchronization */ #define SYNC_SFORDEV 2 /* scratch, sync for ISP */ #define SYNC_SFORCPU 3 /* scratch, sync for CPU */ #define SYNC_REG 4 /* for registers */ #define SYNC_ATIOQ 5 /* atio result queue (24xx) */ /* * Request/Response Queue defines and macros. * The maximum is defined per platform (and can be based on board type). */ /* This is the size of a queue entry (request and response) */ #define QENTRY_LEN 64 /* Both request and result queue length must be a power of two */ #define RQUEST_QUEUE_LEN(x) MAXISPREQUEST(x) #ifdef ISP_TARGET_MODE #define RESULT_QUEUE_LEN(x) MAXISPREQUEST(x) #else #define RESULT_QUEUE_LEN(x) \ (((MAXISPREQUEST(x) >> 2) < 64)? 64 : MAXISPREQUEST(x) >> 2) #endif #define ISP_QUEUE_ENTRY(q, idx) (((uint8_t *)q) + ((idx) * QENTRY_LEN)) #define ISP_QUEUE_SIZE(n) ((n) * QENTRY_LEN) #define ISP_NXT_QENTRY(idx, qlen) (((idx) + 1) & ((qlen)-1)) #define ISP_QFREE(in, out, qlen) \ ((in == out)? (qlen - 1) : ((in > out)? 
\ ((qlen - 1) - (in - out)) : (out - in - 1))) #define ISP_QAVAIL(isp) \ ISP_QFREE(isp->isp_reqidx, isp->isp_reqodx, RQUEST_QUEUE_LEN(isp)) #define ISP_ADD_REQUEST(isp, nxti) \ MEMORYBARRIER(isp, SYNC_REQUEST, isp->isp_reqidx, QENTRY_LEN, -1); \ ISP_WRITE(isp, isp->isp_rqstinrp, nxti); \ isp->isp_reqidx = nxti #define ISP_SYNC_REQUEST(isp) \ MEMORYBARRIER(isp, SYNC_REQUEST, isp->isp_reqidx, QENTRY_LEN, -1); \ isp->isp_reqidx = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp)); \ ISP_WRITE(isp, isp->isp_rqstinrp, isp->isp_reqidx) /* * SCSI Specific Host Adapter Parameters- per bus, per target */ typedef struct { uint32_t : 8, update : 1, sendmarker : 1, role : 2, isp_req_ack_active_neg : 1, isp_data_line_active_neg: 1, isp_cmd_dma_burst_enable: 1, isp_data_dma_burst_enabl: 1, isp_fifo_threshold : 3, isp_ptisp : 1, isp_ultramode : 1, isp_diffmode : 1, isp_lvdmode : 1, isp_fast_mttr : 1, /* fast sram */ isp_initiator_id : 4, isp_async_data_setup : 4; uint16_t isp_selection_timeout; uint16_t isp_max_queue_depth; uint8_t isp_tag_aging; uint8_t isp_bus_reset_delay; uint8_t isp_retry_count; uint8_t isp_retry_delay; struct { uint32_t exc_throttle : 8, : 1, dev_enable : 1, /* ignored */ dev_update : 1, dev_refresh : 1, actv_offset : 4, goal_offset : 4, nvrm_offset : 4; uint8_t actv_period; /* current sync period */ uint8_t goal_period; /* goal sync period */ uint8_t nvrm_period; /* nvram sync period */ uint16_t actv_flags; /* current device flags */ uint16_t goal_flags; /* goal device flags */ uint16_t nvrm_flags; /* nvram device flags */ } isp_devparam[MAX_TARGETS]; } sdparam; /* * Device Flags */ #define DPARM_DISC 0x8000 #define DPARM_PARITY 0x4000 #define DPARM_WIDE 0x2000 #define DPARM_SYNC 0x1000 #define DPARM_TQING 0x0800 #define DPARM_ARQ 0x0400 #define DPARM_QFRZ 0x0200 #define DPARM_RENEG 0x0100 #define DPARM_NARROW 0x0080 #define DPARM_ASYNC 0x0040 #define DPARM_PPR 0x0020 #define DPARM_DEFAULT (0xFF00 & ~DPARM_QFRZ) #define DPARM_SAFE_DFLT (DPARM_DEFAULT & ~(DPARM_WIDE|DPARM_SYNC|DPARM_TQING)) /* technically, not really correct, as they need to be rated based upon clock */ #define ISP_80M_SYNCPARMS 0x0c09 #define ISP_40M_SYNCPARMS 0x0c0a #define ISP_20M_SYNCPARMS 0x0c0c #define ISP_20M_SYNCPARMS_1040 0x080c #define ISP_10M_SYNCPARMS 0x0c19 #define ISP_08M_SYNCPARMS 0x0c25 #define ISP_05M_SYNCPARMS 0x0c32 #define ISP_04M_SYNCPARMS 0x0c41 /* * Fibre Channel Specifics */ /* These are for non-2K Login Firmware cards */ #define FL_ID 0x7e /* FL_Port Special ID */ #define SNS_ID 0x80 /* SNS Server Special ID */ #define NPH_MAX 0xfe /* These are for 2K Login Firmware cards */ #define NPH_RESERVED 0x7F0 /* begin of reserved N-port handles */ #define NPH_MGT_ID 0x7FA /* Management Server Special ID */ #define NPH_SNS_ID 0x7FC /* SNS Server Special ID */ #define NPH_FABRIC_CTLR 0x7FD /* Fabric Controller (0xFFFFFD) */ #define NPH_FL_ID 0x7FE /* F Port Special ID (0xFFFFFE) */ #define NPH_IP_BCST 0x7FF /* IP Broadcast Special ID (0xFFFFFF) */ #define NPH_MAX_2K 0x800 /* * "Unassigned" handle to be used internally */ #define NIL_HANDLE 0xffff /* * Limit for devices on an arbitrated loop. 
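 * (Of the 127 valid FC-AL arbitrated loop physical addresses, one is
 * reserved for the FL_Port, leaving 126 for NL_Port devices.)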
*/ #define LOCAL_LOOP_LIM 126 /* * Limit for (2K login) N-port handle amounts */ #define MAX_NPORT_HANDLE 2048 /* * Special Constants */ #define INI_NONE ((uint64_t) 0) #define ISP_NOCHAN 0xff /* * Special Port IDs */ #define MANAGEMENT_PORT_ID 0xFFFFFA #define SNS_PORT_ID 0xFFFFFC #define FABRIC_PORT_ID 0xFFFFFE #define PORT_ANY 0xFFFFFF #define PORT_NONE 0 #define DOMAIN_CONTROLLER_BASE 0xFFFC00 #define DOMAIN_CONTROLLER_END 0xFFFCFF /* * Command Handles * * Most QLogic initiator or target commands have 32 bit handles associated with * them. We want to have a quick way to index back and forth between a local SCSI * command context and what the firmware is passing back to us. We also * want to avoid working on stale information. This structure handles both * at the expense of some local memory. * * The handle is architected thusly: * * 0 means "free handle" * bits 0..12 index commands * bits 13..15 bits index usage * bits 16..31 contain a rolling sequence * * */ typedef struct { void * cmd; /* associated command context */ uint32_t handle; /* handle associated with this command */ } isp_hdl_t; #define ISP_HANDLE_FREE 0x00000000 #define ISP_HANDLE_CMD_MASK 0x00001fff #define ISP_HANDLE_USAGE_MASK 0x0000e000 #define ISP_HANDLE_USAGE_SHIFT 13 #define ISP_H2HT(hdl) ((hdl & ISP_HANDLE_USAGE_MASK) >> ISP_HANDLE_USAGE_SHIFT) # define ISP_HANDLE_NONE 0 # define ISP_HANDLE_INITIATOR 1 # define ISP_HANDLE_TARGET 2 #define ISP_HANDLE_SEQ_MASK 0xffff0000 #define ISP_HANDLE_SEQ_SHIFT 16 #define ISP_H2SEQ(hdl) ((hdl & ISP_HANDLE_SEQ_MASK) >> ISP_HANDLE_SEQ_SHIFT) #define ISP_VALID_INI_HANDLE(c, hdl) \ (ISP_H2HT(hdl) == ISP_HANDLE_INITIATOR && (hdl & ISP_HANDLE_CMD_MASK) < (c)->isp_maxcmds && \ ISP_H2SEQ(hdl) == ISP_H2SEQ((c)->isp_xflist[hdl & ISP_HANDLE_CMD_MASK].handle)) #ifdef ISP_TARGET_MODE #define ISP_VALID_TGT_HANDLE(c, hdl) \ (ISP_H2HT(hdl) == ISP_HANDLE_TARGET && (hdl & ISP_HANDLE_CMD_MASK) < (c)->isp_maxcmds && \ ISP_H2SEQ(hdl) == ISP_H2SEQ((c)->isp_tgtlist[hdl & ISP_HANDLE_CMD_MASK].handle)) #define ISP_VALID_HANDLE(c, hdl) \ (ISP_VALID_INI_HANDLE((c), hdl) || ISP_VALID_TGT_HANDLE((c), hdl)) #else #define ISP_VALID_HANDLE ISP_VALID_INI_HANDLE #endif #define ISP_BAD_HANDLE_INDEX 0xffffffff /* * FC Port Database entry. * * It has a handle that the f/w uses to address commands to a device. * This handle's value may be assigned by the firmware (e.g., for local loop * devices) or by the driver (e.g., for fabric devices). * * It has a state. If the state is VALID, that means that we've logged into * the device. * * Local loop devices the firmware automatically performs PLOGI on for us * (which is why that handle is imposed upon us). Fabric devices we assign * a handle to and perform the PLOGI on. * * When a PORT DATABASE CHANGED asynchronous event occurs, we mark all VALID * entries as PROBATIONAL. This allows us, if policy says to, just keep track * of devices whose handles change but are otherwise the same device (and * thus keep 'target' constant). * * In any case, we search all possible local loop handles. For each one that * has a port database entity returned, we search for any PROBATIONAL entry * that matches it and update as appropriate. Otherwise, as a new entry, we * find room for it in the Port Database. We *try* and use the handle as the * index to put it into the Database, but that's just an optimization. We mark * the entry VALID and make sure that the target index is updated and correct.
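 *
 * As a rough sketch of the typical lifecycle (not an exhaustive state
 * machine; see the FC_PORTDB_STATE_* values below):
 *
 *	NIL -> NEW -> PENDING_VALID -> VALID	(newly found device)
 *	VALID -> PROBATIONAL -> VALID		(device survived a rescan)
 *	VALID -> PROBATIONAL -> DEAD or ZOMBIE	(device departed)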
* * When we get done searching the local loop, we then search similarly for * a list of devices we've gotten from the fabric name controller (if we're * on a fabric). VALID marking is also done similarly. * * When all of this is done, we can march through the database and clean up * any entry that is still PROBATIONAL (these represent devices which have * departed). Then we're done and can resume normal operations. * * Negative invariants that we try and test for are: * * + There can never be two non-NIL entries with the same { Port, Node } WWN * duples. * * + There can never be two non-NIL entries with the same handle. */ typedef struct { /* * This is the handle that the firmware needs in order for us to * send commands to the device. For pre-24XX cards, this would be * the 'loopid'. */ uint16_t handle; /* * A device is 'autologin' if the firmware automatically logs into * it (re-logins as needed). Basically, local private loop devices. * * PRLI word 3 parameters contains role as well as other things. * * The state is the current state of this entry. * * The is_target is the current state of target on this port. * * The is_initiator is the current state of initiator on this port. * * Portid is obvious, as are node && port WWNs. The new_role and * new_portid is for when we are pending a change. */ uint16_t prli_word3; /* PRLI parameters */ uint16_t new_prli_word3; /* Incoming new PRLI parameters */ uint16_t : 12, autologin : 1, /* F/W does PLOGI/PLOGO */ state : 3; uint32_t : 6, is_target : 1, is_initiator : 1, portid : 24; uint32_t : 8, new_portid : 24; uint64_t node_wwn; uint64_t port_wwn; uint32_t gone_timer; } fcportdb_t; #define FC_PORTDB_STATE_NIL 0 #define FC_PORTDB_STATE_PROBATIONAL 1 #define FC_PORTDB_STATE_DEAD 2 #define FC_PORTDB_STATE_CHANGED 3 #define FC_PORTDB_STATE_NEW 4 #define FC_PORTDB_STATE_PENDING_VALID 5 #define FC_PORTDB_STATE_ZOMBIE 6 #define FC_PORTDB_STATE_VALID 7 #define FC_PORTDB_TGT(isp, bus, lp) (int)(lp - FCPARAM(isp, bus)->portdb) /* * FC card specific information * * This structure is replicated across multiple channels for multi-id * capable chipsets, with some entities different on a per-channel basis. */ typedef struct { int isp_gbspeed; /* Connection speed */ int isp_linkstate; /* Link state */ int isp_fwstate; /* ISP F/W state */ int isp_loopstate; /* Loop State */ int isp_topo; /* Connection Type */ uint32_t : 3, fctape_enabled : 1, sendmarker : 1, loop_seen_once : 1, role : 2, isp_portid : 24; /* S_ID */ uint16_t isp_fwoptions; uint16_t isp_xfwoptions; uint16_t isp_zfwoptions; uint16_t isp_loopid; /* hard loop id */ uint16_t isp_sns_hdl; /* N-port handle for SNS */ uint16_t isp_lasthdl; /* only valid for channel 0 */ uint16_t isp_maxalloc; uint16_t isp_fabric_params; uint8_t isp_retry_delay; uint8_t isp_retry_count; /* * Current active WWNN/WWPN */ uint64_t isp_wwnn; uint64_t isp_wwpn; /* * NVRAM WWNN/WWPN */ uint64_t isp_wwnn_nvram; uint64_t isp_wwpn_nvram; /* * Our Port Data Base */ fcportdb_t portdb[MAX_FC_TARG]; /* * Scratch DMA mapped in area to fetch Port Database stuff, etc.
*/ void * isp_scratch; XS_DMA_ADDR_T isp_scdma; } fcparam; #define FW_CONFIG_WAIT 0 -#define FW_WAIT_AL_PA 1 +#define FW_WAIT_LINK 1 #define FW_WAIT_LOGIN 2 #define FW_READY 3 #define FW_LOSS_OF_SYNC 4 #define FW_ERROR 5 #define FW_REINIT 6 #define FW_NON_PART 7 #define LOOP_NIL 0 -#define LOOP_LIP_RCVD 1 -#define LOOP_PDB_RCVD 2 +#define LOOP_TESTING_LINK 1 +#define LOOP_LTEST_DONE 2 #define LOOP_SCANNING_LOOP 3 #define LOOP_LSCAN_DONE 4 #define LOOP_SCANNING_FABRIC 5 #define LOOP_FSCAN_DONE 6 #define LOOP_SYNCING_PDB 7 #define LOOP_READY 8 #define TOPO_NL_PORT 0 #define TOPO_FL_PORT 1 #define TOPO_N_PORT 2 #define TOPO_F_PORT 3 #define TOPO_PTP_STUB 4 /* * Soft Structure per host adapter */ struct ispsoftc { /* * Platform (OS) specific data */ struct isposinfo isp_osinfo; /* * Pointer to bus specific functions and data */ struct ispmdvec * isp_mdvec; /* * (Mostly) nonvolatile state. Board specific parameters * may contain some volatile state (e.g., current loop state). */ void * isp_param; /* type specific */ uint64_t isp_fwattr; /* firmware attributes */ uint16_t isp_fwrev[3]; /* Loaded F/W revision */ uint16_t isp_maxcmds; /* max possible I/O cmds */ uint8_t isp_type; /* HBA Chip Type */ uint8_t isp_revision; /* HBA Chip H/W Revision */ uint16_t isp_nchan; /* number of channels */ uint32_t isp_maxluns; /* maximum luns supported */ uint32_t isp_clock : 8, /* input clock */ : 4, isp_port : 1, /* 23XX/24XX only */ isp_open : 1, /* opened (ioctl) */ isp_bustype : 1, /* SBus or PCI */ isp_loaded_fw : 1, /* loaded firmware */ isp_dblev : 16; /* debug log mask */ uint32_t isp_confopts; /* config options */ uint32_t isp_rqstinrp; /* register for REQINP */ uint32_t isp_rqstoutrp; /* register for REQOUTP */ uint32_t isp_respinrp; /* register for RESINP */ uint32_t isp_respoutrp; /* register for RESOUTP */ /* * Instrumentation */ uint64_t isp_intcnt; /* total int count */ uint64_t isp_intbogus; /* spurious int count */ uint64_t isp_intmboxc; /* mbox completions */ uint64_t isp_intoasync; /* other async */ uint64_t isp_rsltccmplt; /* CMDs on result q */ uint64_t isp_fphccmplt; /* CMDs via fastpost */ uint16_t isp_rscchiwater; uint16_t isp_fpcchiwater; NANOTIME_T isp_init_time; /* time we were last initialized */ /* * Volatile state */ volatile uint32_t : 8, : 2, isp_dead : 1, : 1, isp_mboxbsy : 1, /* mailbox command active */ isp_state : 3, isp_nactive : 16; /* how many commands active */ volatile mbreg_t isp_curmbx; /* currently active mailbox command */ volatile uint32_t isp_reqodx; /* index of last ISP pickup */ volatile uint32_t isp_reqidx; /* index of next request */ volatile uint32_t isp_residx; /* index of last ISP write */ volatile uint32_t isp_resodx; /* index of next result */ volatile uint32_t isp_atioodx; /* index of next ATIO */ volatile uint32_t isp_obits; /* mailbox command output */ volatile uint32_t isp_serno; /* rolling serial number */ volatile uint16_t isp_mboxtmp[MAX_MAILBOX]; volatile uint16_t isp_lastmbxcmd; /* last mbox command sent */ volatile uint16_t isp_mbxwrk0; volatile uint16_t isp_mbxwrk1; volatile uint16_t isp_mbxwrk2; volatile uint16_t isp_mbxwrk8; volatile uint16_t isp_seqno; /* running sequence number */ void * isp_mbxworkp; /* * Active commands are stored here, indexed by handle functions. */ isp_hdl_t *isp_xflist; isp_hdl_t *isp_xffree; #ifdef ISP_TARGET_MODE /* * Active target commands are stored here, indexed by handle functions. */ isp_hdl_t *isp_tgtlist; isp_hdl_t *isp_tgtfree; #endif /* * request/result queue pointers and DMA handles for them.
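 *
 * A hedged sketch of claiming the next request-queue slot with the
 * queue macros defined earlier:
 *
 *	void *qe = ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
 *	uint32_t nxti = ISP_NXT_QENTRY(isp->isp_reqidx,
 *	    RQUEST_QUEUE_LEN(isp));
 *	(build the IOCB in qe, then ISP_ADD_REQUEST(isp, nxti))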
*/ void * isp_rquest; void * isp_result; XS_DMA_ADDR_T isp_rquest_dma; XS_DMA_ADDR_T isp_result_dma; #ifdef ISP_TARGET_MODE /* for 24XX only */ void * isp_atioq; XS_DMA_ADDR_T isp_atioq_dma; #endif }; #define SDPARAM(isp, chan) (&((sdparam *)(isp)->isp_param)[(chan)]) #define FCPARAM(isp, chan) (&((fcparam *)(isp)->isp_param)[(chan)]) #define ISP_SET_SENDMARKER(isp, chan, val) \ if (IS_FC(isp)) { \ FCPARAM(isp, chan)->sendmarker = val; \ } else { \ SDPARAM(isp, chan)->sendmarker = val; \ } #define ISP_TST_SENDMARKER(isp, chan) \ (IS_FC(isp)? \ FCPARAM(isp, chan)->sendmarker != 0 : \ SDPARAM(isp, chan)->sendmarker != 0) /* * ISP Driver Run States */ #define ISP_NILSTATE 0 #define ISP_CRASHED 1 #define ISP_RESETSTATE 2 #define ISP_INITSTATE 3 #define ISP_RUNSTATE 4 /* * ISP Runtime Configuration Options */ #define ISP_CFG_FULL_DUPLEX 0x01 /* Full Duplex (Fibre Channel only) */ #define ISP_CFG_PORT_PREF 0x0c /* Mask for Port Prefs (all FC except 2100) */ #define ISP_CFG_LPORT 0x00 /* prefer {N/F}L-Port connection */ #define ISP_CFG_NPORT 0x04 /* prefer {N/F}-Port connection */ #define ISP_CFG_NPORT_ONLY 0x08 /* insist on {N/F}-Port connection */ #define ISP_CFG_LPORT_ONLY 0x0c /* insist on {N/F}L-Port connection */ #define ISP_CFG_ONEGB 0x10 /* force 1GB connection (23XX only) */ #define ISP_CFG_TWOGB 0x20 /* force 2GB connection (23XX only) */ #define ISP_CFG_NORELOAD 0x80 /* don't download f/w */ #define ISP_CFG_NONVRAM 0x40 /* ignore NVRAM */ #define ISP_CFG_NOFCTAPE 0x100 /* disable FC-Tape */ #define ISP_CFG_FCTAPE 0x200 /* enable FC-Tape */ #define ISP_CFG_OWNFSZ 0x400 /* override NVRAM frame size */ #define ISP_CFG_OWNLOOPID 0x800 /* override NVRAM loopid */ #define ISP_CFG_OWNEXCTHROTTLE 0x1000 /* override NVRAM execution throttle */ #define ISP_CFG_FOURGB 0x2000 /* force 4GB connection (24XX only) */ #define ISP_CFG_EIGHTGB 0x4000 /* force 8GB connection (25XX only) */ #define ISP_CFG_SIXTEENGB 0x8000 /* force 16GB connection (82XX only) */ /* * For each channel, the outer layers should know what role that channel * will take: ISP_ROLE_NONE, ISP_ROLE_INITIATOR, ISP_ROLE_TARGET, * ISP_ROLE_BOTH. * * If you set ISP_ROLE_NONE, the cards will be reset, new firmware loaded, * NVRAM read, and defaults set, but any further initialization (e.g. * INITIALIZE CONTROL BLOCK commands for 2X00 cards) won't be done. * * If INITIATOR MODE isn't set, attempts to run commands will be stopped * at isp_start and completed with the equivalent of SELECTION TIMEOUT. * * If TARGET MODE is set, it doesn't mean that the rest of target mode support * needs to be enabled, or will even work. What happens with the 2X00 cards * here is that if you have enabled it with TARGET MODE as part of the ICB * options, but you haven't given the f/w any ram resources for ATIOs or * Immediate Notifies, the f/w just handles what it can and you never see * anything. Basically, it sends a single byte of data (the first byte, * which you can set as part of the INITIALIZE CONTROL BLOCK command) for * INQUIRY, and sends back QUEUE FULL status for any other command. * */ #define ISP_ROLE_NONE 0x0 #define ISP_ROLE_TARGET 0x1 #define ISP_ROLE_INITIATOR 0x2 #define ISP_ROLE_BOTH (ISP_ROLE_TARGET|ISP_ROLE_INITIATOR) #define ISP_ROLE_EITHER ISP_ROLE_BOTH #ifndef ISP_DEFAULT_ROLES /* * Counterintuitively, we prefer to default to role 'none' * if we enable target mode support. This gives us the * maximum flexibility as to which port will do what.
*/ #ifdef ISP_TARGET_MODE #define ISP_DEFAULT_ROLES ISP_ROLE_NONE #else #define ISP_DEFAULT_ROLES ISP_ROLE_INITIATOR #endif #endif /* * Firmware related defines */ #define ISP_CODE_ORG 0x1000 /* default f/w code start */ #define ISP_CODE_ORG_2300 0x0800 /* ..except for 2300s */ #define ISP_CODE_ORG_2400 0x100000 /* ..and 2400s */ #define ISP_FW_REV(maj, min, mic) ((maj << 24) | (min << 16) | mic) #define ISP_FW_MAJOR(code) ((code >> 24) & 0xff) #define ISP_FW_MINOR(code) ((code >> 16) & 0xff) #define ISP_FW_MICRO(code) ((code >> 8) & 0xff) #define ISP_FW_REVX(xp) ((xp[0]<<24) | (xp[1] << 16) | xp[2]) #define ISP_FW_MAJORX(xp) (xp[0]) #define ISP_FW_MINORX(xp) (xp[1]) #define ISP_FW_MICROX(xp) (xp[2]) #define ISP_FW_NEWER_THAN(i, major, minor, micro) \ (ISP_FW_REVX((i)->isp_fwrev) > ISP_FW_REV(major, minor, micro)) #define ISP_FW_OLDER_THAN(i, major, minor, micro) \ (ISP_FW_REVX((i)->isp_fwrev) < ISP_FW_REV(major, minor, micro)) /* * Bus (implementation) types */ #define ISP_BT_PCI 0 /* PCI Implementations */ #define ISP_BT_SBUS 1 /* SBus Implementations */ /* * If we have not otherwise defined SBus support away make sure * it is defined here such that the code is included as default */ #ifndef ISP_SBUS_SUPPORTED #define ISP_SBUS_SUPPORTED 1 #endif /* * Chip Types */ #define ISP_HA_SCSI 0xf #define ISP_HA_SCSI_UNKNOWN 0x1 #define ISP_HA_SCSI_1020 0x2 #define ISP_HA_SCSI_1020A 0x3 #define ISP_HA_SCSI_1040 0x4 #define ISP_HA_SCSI_1040A 0x5 #define ISP_HA_SCSI_1040B 0x6 #define ISP_HA_SCSI_1040C 0x7 #define ISP_HA_SCSI_1240 0x8 #define ISP_HA_SCSI_1080 0x9 #define ISP_HA_SCSI_1280 0xa #define ISP_HA_SCSI_10160 0xb #define ISP_HA_SCSI_12160 0xc #define ISP_HA_FC 0xf0 #define ISP_HA_FC_2100 0x10 #define ISP_HA_FC_2200 0x20 #define ISP_HA_FC_2300 0x30 #define ISP_HA_FC_2312 0x40 #define ISP_HA_FC_2322 0x50 #define ISP_HA_FC_2400 0x60 #define ISP_HA_FC_2500 0x70 #define IS_SCSI(isp) (isp->isp_type & ISP_HA_SCSI) #define IS_1020(isp) (isp->isp_type < ISP_HA_SCSI_1240) #define IS_1240(isp) (isp->isp_type == ISP_HA_SCSI_1240) #define IS_1080(isp) (isp->isp_type == ISP_HA_SCSI_1080) #define IS_1280(isp) (isp->isp_type == ISP_HA_SCSI_1280) #define IS_10160(isp) (isp->isp_type == ISP_HA_SCSI_10160) #define IS_12160(isp) (isp->isp_type == ISP_HA_SCSI_12160) #define IS_12X0(isp) (IS_1240(isp) || IS_1280(isp)) #define IS_1X160(isp) (IS_10160(isp) || IS_12160(isp)) #define IS_DUALBUS(isp) (IS_12X0(isp) || IS_12160(isp)) #define IS_ULTRA2(isp) (IS_1080(isp) || IS_1280(isp) || IS_1X160(isp)) #define IS_ULTRA3(isp) (IS_1X160(isp)) #define IS_FC(isp) ((isp)->isp_type & ISP_HA_FC) #define IS_2100(isp) ((isp)->isp_type == ISP_HA_FC_2100) #define IS_2200(isp) ((isp)->isp_type == ISP_HA_FC_2200) #define IS_23XX(isp) ((isp)->isp_type >= ISP_HA_FC_2300 && \ (isp)->isp_type < ISP_HA_FC_2400) #define IS_2300(isp) ((isp)->isp_type == ISP_HA_FC_2300) #define IS_2312(isp) ((isp)->isp_type == ISP_HA_FC_2312) #define IS_2322(isp) ((isp)->isp_type == ISP_HA_FC_2322) #define IS_24XX(isp) ((isp)->isp_type >= ISP_HA_FC_2400) #define IS_25XX(isp) ((isp)->isp_type >= ISP_HA_FC_2500) /* * DMA related macros */ #define DMA_WD3(x) (((uint16_t)(((uint64_t)x) >> 48)) & 0xffff) #define DMA_WD2(x) (((uint16_t)(((uint64_t)x) >> 32)) & 0xffff) #define DMA_WD1(x) ((uint16_t)((x) >> 16) & 0xffff) #define DMA_WD0(x) ((uint16_t)((x) & 0xffff)) #define DMA_LO32(x) ((uint32_t) (x)) #define DMA_HI32(x) ((uint32_t)(((uint64_t)x) >> 32)) /* * Core System Function Prototypes */ /* * Reset Hardware. Totally. 
Assumes that you'll follow this with a call to isp_init. */ void isp_reset(ispsoftc_t *, int); /* * Initialize Hardware to known state */ void isp_init(ispsoftc_t *); /* * Reset the ISP and call completion for any orphaned commands. */ int isp_reinit(ispsoftc_t *, int); /* * Internal Interrupt Service Routine * * The outer layers do the spade work to get the appropriate status register, * semaphore register and first mailbox register (if appropriate). This also * means that most spurious/bogus interrupts not for us can be filtered first. */ void isp_intr(ispsoftc_t *, uint16_t, uint16_t, uint16_t); /* * Command Entry Point- Platform Dependent layers call into this */ int isp_start(XS_T *); /* these values are what isp_start returns */ #define CMD_COMPLETE 101 /* command completed */ #define CMD_EAGAIN 102 /* busy- maybe retry later */ #define CMD_QUEUED 103 /* command has been queued for execution */ #define CMD_RQLATER 104 /* requeue this command later */ /* * Command Completion Point- Core layers call out from this with completed cmds */ void isp_done(XS_T *); /* * Platform Dependent to External to Internal Control Function * * Assumes locks are held on entry. You should note that with many of * these commands locks may be released while this function is called. * * ... ISPCTL_RESET_BUS, int channel); * Reset BUS on this channel * ... ISPCTL_RESET_DEV, int channel, int target); * Reset Device on this channel at this target. * ... ISPCTL_ABORT_CMD, XS_T *xs); * Abort active transaction described by xs. * ... ISPCTL_UPDATE_PARAMS); * Update any operating parameters (speed, etc.) * ... ISPCTL_FCLINK_TEST, int channel); * Test FC link status on this channel - * ... ISPCTL_SCAN_FABRIC, int channel); - * Scan fabric on this channel * ... ISPCTL_SCAN_LOOP, int channel); * Scan local loop on this channel + * ... ISPCTL_SCAN_FABRIC, int channel); + * Scan fabric on this channel * ... ISPCTL_PDB_SYNC, int channel); * Synchronize port database on this channel * ... ISPCTL_SEND_LIP, int channel); * Send a LIP on this channel * ... ISPCTL_GET_NAMES, int channel, int np, uint64_t *wwnn, uint64_t *wwpn) * Get a WWNN/WWPN for this N-port handle on this channel * ... ISPCTL_RUN_MBOXCMD, mbreg_t *mbp) * Run this mailbox command * ... ISPCTL_GET_PDB, int channel, int nphandle, isp_pdb_t *pdb) * Get PDB on this channel for this N-port handle * ... ISPCTL_PLOGX, isp_plcmd_t *) * Perform a port login/logout * ... ISPCTL_CHANGE_ROLE, int channel, int role); * Change role of specified channel * * ISPCTL_PDB_SYNC is somewhat misnamed. It actually is the final step, in - * order, of ISPCTL_FCLINK_TEST, ISPCTL_SCAN_FABRIC, and ISPCTL_SCAN_LOOP. + * order, of ISPCTL_FCLINK_TEST, ISPCTL_SCAN_LOOP, and ISPCTL_SCAN_FABRIC. * The main purpose of ISPCTL_PDB_SYNC is to complete management of logging * and logging out of fabric devices (if one is on a fabric) and then marking * the 'loop state' as being ready to now be used for sending commands to - * devices. Originally fabric name server and local loop scanning were - * part of this function. It's now been separated to allow for finer control. + * devices.
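Because isp_control() is variadic, each ispctl_t code implies its own trailing argument list, per the comment above. A hypothetical call-site sketch (example_resets is illustrative only and assumes this header's declarations are in scope):

/* Illustrative only; argument lists follow the comment above. */
static void
example_resets(ispsoftc_t *isp)
{
	(void) isp_control(isp, ISPCTL_RESET_BUS, 0);	/* reset bus, channel 0 */
	(void) isp_control(isp, ISPCTL_RESET_DEV, 0, 3);	/* reset target 3, channel 0 */

	/*
	 * Link test, loop scan, fabric scan, then PDB sync, in the
	 * order the comment above prescribes.
	 */
	(void) isp_control(isp, ISPCTL_FCLINK_TEST, 0);
	(void) isp_control(isp, ISPCTL_SCAN_LOOP, 0);
	(void) isp_control(isp, ISPCTL_SCAN_FABRIC, 0);
	(void) isp_control(isp, ISPCTL_PDB_SYNC, 0);
}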
*/ typedef enum { ISPCTL_RESET_BUS, ISPCTL_RESET_DEV, ISPCTL_ABORT_CMD, ISPCTL_UPDATE_PARAMS, ISPCTL_FCLINK_TEST, ISPCTL_SCAN_FABRIC, ISPCTL_SCAN_LOOP, ISPCTL_PDB_SYNC, ISPCTL_SEND_LIP, ISPCTL_GET_NAMES, ISPCTL_RUN_MBOXCMD, ISPCTL_GET_PDB, ISPCTL_PLOGX, ISPCTL_CHANGE_ROLE } ispctl_t; int isp_control(ispsoftc_t *, ispctl_t, ...); /* * Platform Dependent to Internal to External Control Function */ typedef enum { ISPASYNC_NEW_TGT_PARAMS, /* SPI New Target Parameters */ ISPASYNC_BUS_RESET, /* All Bus Was Reset */ ISPASYNC_LOOP_DOWN, /* FC Loop Down */ ISPASYNC_LOOP_UP, /* FC Loop Up */ ISPASYNC_LIP, /* FC LIP Received */ ISPASYNC_LOOP_RESET, /* FC Loop Reset Received */ ISPASYNC_CHANGE_NOTIFY, /* FC Change Notification */ ISPASYNC_DEV_ARRIVED, /* FC Device Arrived */ ISPASYNC_DEV_CHANGED, /* FC Device Changed */ ISPASYNC_DEV_STAYED, /* FC Device Stayed */ ISPASYNC_DEV_GONE, /* FC Device Departure */ ISPASYNC_TARGET_NOTIFY, /* All target async notification */ ISPASYNC_TARGET_NOTIFY_ACK, /* All target notify ack required */ ISPASYNC_TARGET_ACTION, /* All target action requested */ ISPASYNC_FW_CRASH, /* All Firmware has crashed */ ISPASYNC_FW_RESTARTED /* All Firmware has been restarted */ } ispasync_t; void isp_async(ispsoftc_t *, ispasync_t, ...); #define ISPASYNC_CHANGE_PDB 0 #define ISPASYNC_CHANGE_SNS 1 #define ISPASYNC_CHANGE_OTHER 2 /* * Platform Independent Error Printout */ void isp_prt_endcmd(ispsoftc_t *, XS_T *); /* * Platform Dependent Error and Debug Printout * * Two required functions for each platform must be provided: * * void isp_prt(ispsoftc_t *, int level, const char *, ...) * void isp_xs_prt(ispsoftc_t *, XS_T *, int level, const char *, ...) * * but due to compiler differences on different platforms this won't be * formally defined here. Instead, they go in each platform definition file. */ #define ISP_LOGALL 0x0 /* log always */ #define ISP_LOGCONFIG 0x1 /* log configuration messages */ #define ISP_LOGINFO 0x2 /* log informational messages */ #define ISP_LOGWARN 0x4 /* log warning messages */ #define ISP_LOGERR 0x8 /* log error messages */ #define ISP_LOGDEBUG0 0x10 /* log simple debug messages */ #define ISP_LOGDEBUG1 0x20 /* log intermediate debug messages */ #define ISP_LOGDEBUG2 0x40 /* log most debug messages */ #define ISP_LOGDEBUG3 0x80 /* log high frequency debug messages */ #define ISP_LOG_SANCFG 0x100 /* log SAN configuration */ #define ISP_LOG_CWARN 0x200 /* log SCSI command "warnings" (e.g., check conditions) */ #define ISP_LOG_WARN1 0x400 /* log WARNs we might be interested in at some time */ #define ISP_LOGTINFO 0x1000 /* log informational messages (target mode) */ #define ISP_LOGTDEBUG0 0x2000 /* log simple debug messages (target mode) */ #define ISP_LOGTDEBUG1 0x4000 /* log intermediate debug messages (target) */ #define ISP_LOGTDEBUG2 0x8000 /* log all debug messages (target) */ /* * Each Platform provides its own isposinfo substructure of the ispsoftc * defined above. * * Each platform must also provide the following macros/defines: * * * ISP_FC_SCRLEN FC scratch area DMA length * * ISP_MEMZERO(dst, src) platform zeroing function * ISP_MEMCPY(dst, src, count) platform copying function * ISP_SNPRINTF(buf, bufsize, fmt, ...) snprintf * ISP_DELAY(usecs) microsecond spindelay function * ISP_SLEEP(isp, usecs) microsecond sleep function * * ISP_INLINE ___inline or not- depending on how * good your debugger is * ISP_MIN shorthand for ((a) < (b))? (a) : (b) * * NANOTIME_T nanosecond time type * * GET_NANOTIME(NANOTIME_T *) get current nanotime.
* * GET_NANOSEC(NANOTIME_T *) get uint64_t from NANOTIME_T * * NANOTIME_SUB(NANOTIME_T *, NANOTIME_T *) * subtract two NANOTIME_T values * * MAXISPREQUEST(ispsoftc_t *) maximum request queue size * for this particular board type * * MEMORYBARRIER(ispsoftc_t *, barrier_type, offset, size, chan) * * Function/Macro that provides memory synchronization on * various objects so that the ISP's and the system's view * of the same object is consistent. * * MBOX_ACQUIRE(ispsoftc_t *) acquire lock on mailbox regs * MBOX_WAIT_COMPLETE(ispsoftc_t *, mbreg_t *) wait for cmd to be done * MBOX_NOTIFY_COMPLETE(ispsoftc_t *) notification of mbox cmd done * MBOX_RELEASE(ispsoftc_t *) release lock on mailbox regs * * FC_SCRATCH_ACQUIRE(ispsoftc_t *, chan) acquire lock on FC scratch area * return -1 if you cannot * FC_SCRATCH_RELEASE(ispsoftc_t *, chan) release lock on FC scratch area * * FCP_NEXT_CRN(ispsoftc_t *, XS_T *, rslt, channel, target, lun) generate the next command reference number. XS_T * may be null. * * SCSI_GOOD SCSI 'Good' Status * SCSI_CHECK SCSI 'Check Condition' Status * SCSI_BUSY SCSI 'Busy' Status * SCSI_QFULL SCSI 'Queue Full' Status * * XS_T Platform SCSI transaction type (i.e., command for HBA) * XS_DMA_ADDR_T Platform PCI DMA Address Type * XS_GET_DMA_SEG(..) Get 32 bit dma segment list value * XS_GET_DMA64_SEG(..) Get 64 bit dma segment list value * XS_ISP(xs) gets an instance out of an XS_T * XS_CHANNEL(xs) gets the channel (bus # for DUALBUS cards) "" * XS_TGT(xs) gets the target "" * XS_LUN(xs) gets the lun "" * XS_CDBP(xs) gets a pointer to the scsi CDB "" * XS_CDBLEN(xs) gets the CDB's length "" * XS_XFRLEN(xs) gets the associated data transfer length "" * XS_TIME(xs) gets the time (in milliseconds) for this command * XS_GET_RESID(xs) gets the current residual count * XS_SET_RESID(xs, resid) sets the current residual count * XS_STSP(xs) gets a pointer to the SCSI status byte "" * XS_SNSP(xs) gets a pointer to the associated sense data * XS_TOT_SNSLEN(xs) gets the total length of sense data storage * XS_CUR_SNSLEN(xs) gets the currently used length of sense data storage * XS_SNSKEY(xs) dereferences XS_SNSP to get the current stored Sense Key * XS_SNSASC(xs) dereferences XS_SNSP to get the current stored Additional Sense Code * XS_SNSASCQ(xs) dereferences XS_SNSP to get the current stored Additional Sense Code Qualifier * XS_TAG_P(xs) predicate of whether this command should be tagged * XS_TAG_TYPE(xs) which type of tag to use * XS_SETERR(xs) set error state * * HBA_NOERROR command has no errors * HBA_BOTCH hba botched something * HBA_CMDTIMEOUT command timed out * HBA_SELTIMEOUT selection timed out (also port logouts for FC) * HBA_TGTBSY target returned a BUSY status * HBA_BUSRESET bus reset destroyed command * HBA_ABORTED command was aborted (by request) * HBA_DATAOVR a data overrun was detected * HBA_ARQFAIL Automatic Request Sense failed * * XS_ERR(xs) return current error state * XS_NOERR(xs) there is no error currently set * XS_INITERR(xs) initialize error state * * XS_SAVE_SENSE(xs, sp, total_len, this_len) save sense data (total and current amount) * * XS_APPEND_SENSE(xs, sp, len) append more sense data * * XS_SENSE_VALID(xs) indicates whether sense is valid * * DEFAULT_FRAMESIZE(ispsoftc_t *) Default Frame Size * DEFAULT_EXEC_THROTTLE(ispsoftc_t *) Default Execution Throttle * * GET_DEFAULT_ROLE(ispsoftc_t *, int) Get Default Role for a channel * SET_DEFAULT_ROLE(ispsoftc_t *, int, int) Set Default Role for a channel * DEFAULT_IID(ispsoftc_t *, int) Default SCSI initiator ID
* DEFAULT_LOOPID(ispsoftc_t *, int) Default FC Loop ID * * These establish reasonable defaults for each platform. * These must be available independent of card NVRAM and are * to be used should NVRAM not be readable. * * DEFAULT_NODEWWN(ispsoftc_t *, chan) Default FC Node WWN to use * DEFAULT_PORTWWN(ispsoftc_t *, chan) Default FC Port WWN to use * * These defines are hooks to allow the setting of node and * port WWNs when NVRAM cannot be read or is to be overridden. * * ACTIVE_NODEWWN(ispsoftc_t *, chan) FC Node WWN to use * ACTIVE_PORTWWN(ispsoftc_t *, chan) FC Port WWN to use * * After NVRAM is read, these will be invoked to get the * node and port WWNs that will actually be used for this * channel. * * * ISP_IOXPUT_8(ispsoftc_t *, uint8_t srcval, uint8_t *dstptr) * ISP_IOXPUT_16(ispsoftc_t *, uint16_t srcval, uint16_t *dstptr) * ISP_IOXPUT_32(ispsoftc_t *, uint32_t srcval, uint32_t *dstptr) * * ISP_IOXGET_8(ispsoftc_t *, uint8_t *srcptr, uint8_t dstrval) * ISP_IOXGET_16(ispsoftc_t *, uint16_t *srcptr, uint16_t dstrval) * ISP_IOXGET_32(ispsoftc_t *, uint32_t *srcptr, uint32_t dstrval) * * ISP_SWIZZLE_NVRAM_WORD(ispsoftc_t *, uint16_t *) * ISP_SWIZZLE_NVRAM_LONG(ispsoftc_t *, uint32_t *) * ISP_SWAP16(ispsoftc_t *, uint16_t srcval) * ISP_SWAP32(ispsoftc_t *, uint32_t srcval) */ #ifdef ISP_TARGET_MODE /* * The functions below are for the publicly available * target mode functions that are internal to the Qlogic driver. */ /* * This function handles a new response queue entry appropriate for target mode. */ int isp_target_notify(ispsoftc_t *, void *, uint32_t *); /* * This function externalizes the ability to acknowledge an Immediate Notify request. */ int isp_notify_ack(ispsoftc_t *, void *); /* * This function externalizes acknowledging (success/fail) an ABTS frame */ int isp_acknak_abts(ispsoftc_t *, void *, int); /* * Enable/Disable/Modify a logical unit. * (softc, cmd, bus, tgt, lun, cmd_cnt, inotify_cnt) */ #define DFLT_CMND_CNT 0xff /* unmonitored */ #define DFLT_INOT_CNT 0xff /* unmonitored */ int isp_lun_cmd(ispsoftc_t *, int, int, int, int, int); /* * General request queue 'put' routine for target mode entries. */ int isp_target_put_entry(ispsoftc_t *isp, void *); /* * General routine to put back an ATIO entry- * used for replenishing f/w resource counts. * The argument is a pointer to a source ATIO * or ATIO2. */ int isp_target_put_atio(ispsoftc_t *, void *); /* * General routine to send a final CTIO for a command- used mostly for * local responses. */ int isp_endcmd(ispsoftc_t *, ...); #define ECMD_SVALID 0x100 #define ECMD_TERMINATE 0x200 /* * Handle an asynchronous event * * Return nonzero if the interrupt that generated this event has been dismissed. */ int isp_target_async(ispsoftc_t *, int, int); #endif #endif /* _ISPVAR_H */ Index: projects/powernv/dev/ntb/if_ntb/if_ntb.c =================================================================== --- projects/powernv/dev/ntb/if_ntb/if_ntb.c (revision 291050) +++ projects/powernv/dev/ntb/if_ntb/if_ntb.c (revision 291051) @@ -1,1656 +1,1675 @@ /*- * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../ntb_hw/ntb_hw.h" /* * The Non-Transparent Bridge (NTB) is a device on some Intel processors that * allows you to connect two systems using a PCI-e link. * * This module contains a protocol for sending and receiving messages, and * exposes that protocol through a simulated ethernet device called ntb. * * NOTE: Much of the code in this module is shared with Linux. Any patches may * be picked up and redistributed in Linux with a dual GPL/BSD license. */ #define QP_SETSIZE 64 BITSET_DEFINE(_qpset, QP_SETSIZE); #define test_bit(pos, addr) BIT_ISSET(QP_SETSIZE, (pos), (addr)) #define set_bit(pos, addr) BIT_SET(QP_SETSIZE, (pos), (addr)) #define clear_bit(pos, addr) BIT_CLR(QP_SETSIZE, (pos), (addr)) #define ffs_bit(addr) BIT_FFS(QP_SETSIZE, (addr)) #define KTR_NTB KTR_SPARE3 #define NTB_TRANSPORT_VERSION 4 #define NTB_RX_MAX_PKTS 64 #define NTB_RXQ_SIZE 300 enum ntb_link_event { NTB_LINK_DOWN = 0, NTB_LINK_UP, }; static SYSCTL_NODE(_hw, OID_AUTO, if_ntb, CTLFLAG_RW, 0, "if_ntb"); static unsigned g_if_ntb_debug_level; SYSCTL_UINT(_hw_if_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_if_ntb_debug_level, 0, "if_ntb log level -- higher is more verbose"); #define ntb_printf(lvl, ...) do { \ if ((lvl) <= g_if_ntb_debug_level) { \ if_printf(nt->ifp, __VA_ARGS__); \ } \ } while (0) static unsigned transport_mtu = 0x10000 + ETHER_HDR_LEN + ETHER_CRC_LEN; static uint64_t max_mw_size; SYSCTL_UQUAD(_hw_if_ntb, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0, "If enabled (non-zero), limit the size of large memory windows. " "Both sides of the NTB MUST set the same value here."); static unsigned max_num_clients; SYSCTL_UINT(_hw_if_ntb, OID_AUTO, max_num_clients, CTLFLAG_RDTUN, &max_num_clients, 0, "Maximum number of NTB transport clients. 
" "0 (default) - use all available NTB memory windows; " "positive integer N - Limit to N memory windows."); STAILQ_HEAD(ntb_queue_list, ntb_queue_entry); -typedef unsigned ntb_q_idx_t; +typedef uint32_t ntb_q_idx_t; struct ntb_queue_entry { /* ntb_queue list reference */ STAILQ_ENTRY(ntb_queue_entry) entry; /* info on data to be transferred */ void *cb_data; void *buf; - unsigned len; - unsigned flags; + uint32_t len; + uint32_t flags; struct ntb_transport_qp *qp; struct ntb_payload_header *x_hdr; ntb_q_idx_t index; }; struct ntb_rx_info { ntb_q_idx_t entry; }; struct ntb_transport_qp { struct ntb_transport_ctx *transport; struct ntb_softc *ntb; void *cb_data; bool client_ready; volatile bool link_is_up; uint8_t qp_num; /* Only 64 QPs are allowed. 0-63 */ struct ntb_rx_info *rx_info; struct ntb_rx_info *remote_rx_info; void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); struct ntb_queue_list tx_free_q; struct mtx ntb_tx_free_q_lock; caddr_t tx_mw; bus_addr_t tx_mw_phys; ntb_q_idx_t tx_index; ntb_q_idx_t tx_max_entry; uint64_t tx_max_frame; void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); struct ntb_queue_list rx_post_q; struct ntb_queue_list rx_pend_q; - struct ntb_queue_list rx_free_q; /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ struct mtx ntb_rx_q_lock; struct task rx_completion_task; struct task rxc_db_work; caddr_t rx_buff; ntb_q_idx_t rx_index; ntb_q_idx_t rx_max_entry; uint64_t rx_max_frame; void (*event_handler)(void *data, enum ntb_link_event status); struct callout link_work; struct callout queue_full; struct callout rx_full; uint64_t last_rx_no_buf; /* Stats */ uint64_t rx_bytes; uint64_t rx_pkts; uint64_t rx_ring_empty; uint64_t rx_err_no_buf; uint64_t rx_err_oflow; uint64_t rx_err_ver; uint64_t tx_bytes; uint64_t tx_pkts; uint64_t tx_ring_full; uint64_t tx_err_no_buf; }; struct ntb_queue_handlers { void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); void (*event_handler)(void *data, enum ntb_link_event status); }; struct ntb_transport_mw { vm_paddr_t phys_addr; size_t phys_size; size_t xlat_align; size_t xlat_align_size; + bus_addr_t addr_limit; /* Tx buff is off vbase / phys_addr */ caddr_t vbase; size_t xlat_size; size_t buff_size; /* Rx buff is off virt_addr / dma_addr */ caddr_t virt_addr; bus_addr_t dma_addr; }; struct ntb_transport_ctx { struct ntb_softc *ntb; struct ifnet *ifp; struct ntb_transport_mw mw_vec[NTB_MAX_NUM_MW]; struct ntb_transport_qp *qp_vec; struct _qpset qp_bitmap; struct _qpset qp_bitmap_free; unsigned mw_count; unsigned qp_count; volatile bool link_is_up; struct callout link_work; struct task link_cleanup; uint64_t bufsize; u_char eaddr[ETHER_ADDR_LEN]; struct mtx tx_lock; struct mtx rx_lock; /* The hardcoded single queuepair in ntb_setup_interface() */ struct ntb_transport_qp *qp; }; static struct ntb_transport_ctx net_softc; enum { IF_NTB_DESC_DONE_FLAG = 1 << 0, IF_NTB_LINK_DOWN_FLAG = 1 << 1, }; struct ntb_payload_header { - uint64_t ver; - uint64_t len; - uint64_t flags; + ntb_q_idx_t ver; + uint32_t len; + uint32_t flags; }; enum { /* * The order of this enum is part of the if_ntb remote protocol. Do * not reorder without bumping protocol version (and it's probably best * to keep the protocol in lock-step with the Linux NTB driver. 
*/ IF_NTB_VERSION = 0, IF_NTB_QP_LINKS, IF_NTB_NUM_QPS, IF_NTB_NUM_MWS, /* * N.B.: transport_link_work assumes MW1 enums = MW0 + 2. */ IF_NTB_MW0_SZ_HIGH, IF_NTB_MW0_SZ_LOW, IF_NTB_MW1_SZ_HIGH, IF_NTB_MW1_SZ_LOW, IF_NTB_MAX_SPAD, }; #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) #define NTB_QP_DEF_NUM_ENTRIES 100 #define NTB_LINK_DOWN_TIMEOUT 10 static int ntb_handle_module_events(struct module *m, int what, void *arg); static int ntb_setup_interface(void); static int ntb_teardown_interface(void); static void ntb_net_init(void *arg); static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void ntb_start(struct ifnet *ifp); static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); static void ntb_net_event_handler(void *data, enum ntb_link_event status); static int ntb_transport_probe(struct ntb_softc *ntb); static void ntb_transport_free(struct ntb_transport_ctx *); static void ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num); static void ntb_transport_free_queue(struct ntb_transport_qp *qp); static struct ntb_transport_qp *ntb_transport_create_queue(void *data, struct ntb_softc *pdev, const struct ntb_queue_handlers *handlers); static void ntb_transport_link_up(struct ntb_transport_qp *qp); static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len); static int ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry); static void ntb_memcpy_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset); static void ntb_qp_full(void *arg); static void ntb_transport_rxc_db(void *arg, int pending); static int ntb_process_rxc(struct ntb_transport_qp *qp); static void ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset); static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data); static void ntb_complete_rxc(void *arg, int pending); static void ntb_transport_doorbell_callback(void *data, uint32_t vector); static void ntb_transport_event_callback(void *data); static void ntb_transport_link_work(void *arg); static int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size); static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw); static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num); static void ntb_qp_link_work(void *arg); static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt); static void ntb_transport_link_cleanup_work(void *, int); static void ntb_qp_link_down(struct ntb_transport_qp *qp); static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp); static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp); static void ntb_transport_link_down(struct ntb_transport_qp *qp); static void ntb_send_link_down(struct ntb_transport_qp *qp); static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, struct ntb_queue_list *list); static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list); static struct ntb_queue_entry *ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from, struct ntb_queue_list *to); static void create_random_local_eui48(u_char *eaddr); static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); static const struct ntb_ctx_ops ntb_transport_ops = { .link_event = ntb_transport_event_callback, .db_event = ntb_transport_doorbell_callback, }; 
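Both ntb_memcpy_tx() and ntb_process_rxc() below locate a struct ntb_payload_header in the last bytes of each fixed-size ring slot. A minimal standalone sketch of that frame-layout arithmetic; toy_payload_header and the helper names are stand-ins, not the driver's API:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct toy_payload_header { uint32_t ver, len, flags; };

/* Each ring slot is frame_size bytes: payload at the front, header in
 * the last sizeof(header) bytes, which is where the receiver polls for
 * the DESC_DONE flag. */
static char *
slot_base(char *ring, size_t frame_size, uint32_t index)
{
	return (ring + frame_size * index);
}

static struct toy_payload_header *
slot_header(char *ring, size_t frame_size, uint32_t index)
{
	return ((struct toy_payload_header *)(slot_base(ring, frame_size,
	    index) + frame_size - sizeof(struct toy_payload_header)));
}

int
main(void)
{
	static char ring[4096];
	struct toy_payload_header *hdr = slot_header(ring, 1024, 2);

	hdr->flags = 1;	/* stands in for IF_NTB_DESC_DONE_FLAG */
	printf("slot 2 header at offset %zu\n", (size_t)((char *)hdr - ring));
	return (0);
}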
MALLOC_DEFINE(M_NTB_IF, "if_ntb", "ntb network driver"); +static inline void +iowrite32(uint32_t val, void *addr) +{ + + bus_space_write_4(X86_BUS_SPACE_MEM, 0/* HACK */, (uintptr_t)addr, + val); +} + /* Module setup and teardown */ static int ntb_handle_module_events(struct module *m, int what, void *arg) { int err = 0; switch (what) { case MOD_LOAD: err = ntb_setup_interface(); break; case MOD_UNLOAD: err = ntb_teardown_interface(); break; default: err = EOPNOTSUPP; break; } return (err); } static moduledata_t if_ntb_mod = { "if_ntb", ntb_handle_module_events, NULL }; DECLARE_MODULE(if_ntb, if_ntb_mod, SI_SUB_KLD, SI_ORDER_ANY); MODULE_DEPEND(if_ntb, ntb_hw, 1, 1, 1); static int ntb_setup_interface(void) { struct ifnet *ifp; struct ntb_queue_handlers handlers = { ntb_net_rx_handler, ntb_net_tx_handler, ntb_net_event_handler }; int rc; net_softc.ntb = devclass_get_softc(devclass_find("ntb_hw"), 0); if (net_softc.ntb == NULL) { printf("ntb: Cannot find devclass\n"); return (ENXIO); } ifp = net_softc.ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { ntb_transport_free(&net_softc); printf("ntb: Cannot allocate ifnet structure\n"); return (ENOMEM); } if_initname(ifp, "ntb", 0); rc = ntb_transport_probe(net_softc.ntb); if (rc != 0) { printf("ntb: Cannot init transport: %d\n", rc); if_free(net_softc.ifp); return (rc); } net_softc.qp = ntb_transport_create_queue(ifp, net_softc.ntb, &handlers); ifp->if_init = ntb_net_init; ifp->if_softc = &net_softc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX; ifp->if_ioctl = ntb_ioctl; ifp->if_start = ntb_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); create_random_local_eui48(net_softc.eaddr); ether_ifattach(ifp, net_softc.eaddr); ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU; ifp->if_capenable = ifp->if_capabilities; ntb_transport_link_up(net_softc.qp); net_softc.bufsize = ntb_transport_max_size(net_softc.qp) + sizeof(struct ether_header); return (0); } static int ntb_teardown_interface(void) { if (net_softc.qp != NULL) { ntb_transport_link_down(net_softc.qp); ntb_transport_free_queue(net_softc.qp); ntb_transport_free(&net_softc); } if (net_softc.ifp != NULL) { ether_ifdetach(net_softc.ifp); if_free(net_softc.ifp); net_softc.ifp = NULL; } return (0); } /* Network device interface */ static void ntb_net_init(void *arg) { struct ntb_transport_ctx *ntb_softc = arg; struct ifnet *ifp = ntb_softc->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_flags |= IFF_UP; if_link_state_change(ifp, LINK_STATE_UP); } static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ntb_transport_ctx *nt = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFMTU: { if (ifr->ifr_mtu > ntb_transport_max_size(nt->qp) - ETHER_HDR_LEN - ETHER_CRC_LEN) { error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void ntb_start(struct ifnet *ifp) { struct mbuf *m_head; struct ntb_transport_ctx *nt = ifp->if_softc; int rc; mtx_lock(&nt->tx_lock); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; CTR0(KTR_NTB, "TX: ntb_start"); while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); CTR1(KTR_NTB, "TX: start mbuf %p", m_head); rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head, m_length(m_head, NULL)); if (rc != 0) { CTR1(KTR_NTB, "TX: could not tx mbuf %p. 
Returning to snd q", m_head); if (rc == EAGAIN) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); callout_reset(&nt->qp->queue_full, hz / 1000, ntb_qp_full, ifp); } break; } } mtx_unlock(&nt->tx_lock); } /* Network Device Callbacks */ static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { m_freem(data); CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", data); } static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { struct mbuf *m = data; struct ifnet *ifp = qp_data; CTR0(KTR_NTB, "RX: rx handler"); (*ifp->if_input)(ifp, m); } static void ntb_net_event_handler(void *data, enum ntb_link_event status) { struct ifnet *ifp; ifp = data; (void)ifp; /* XXX The Linux driver munges with the carrier status here. */ switch (status) { case NTB_LINK_DOWN: break; case NTB_LINK_UP: break; default: panic("Bogus ntb_link_event %u\n", status); } } /* Transport Init and teardown */ static int ntb_transport_probe(struct ntb_softc *ntb) { struct ntb_transport_ctx *nt = &net_softc; struct ntb_transport_mw *mw; uint64_t qp_bitmap; int rc; unsigned i; nt->mw_count = ntb_mw_count(ntb); for (i = 0; i < nt->mw_count; i++) { mw = &nt->mw_vec[i]; rc = ntb_mw_get_range(ntb, i, &mw->phys_addr, &mw->vbase, - &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size); + &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size, + &mw->addr_limit); if (rc != 0) goto err; mw->buff_size = 0; mw->xlat_size = 0; mw->virt_addr = NULL; mw->dma_addr = 0; } qp_bitmap = ntb_db_valid_mask(ntb); nt->qp_count = flsll(qp_bitmap); KASSERT(nt->qp_count != 0, ("bogus db bitmap")); nt->qp_count -= 1; if (max_num_clients != 0 && max_num_clients < nt->qp_count) nt->qp_count = max_num_clients; else if (nt->mw_count < nt->qp_count) nt->qp_count = nt->mw_count; KASSERT(nt->qp_count <= QP_SETSIZE, ("invalid qp_count")); mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF); mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF); nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_IF, M_WAITOK | M_ZERO); for (i = 0; i < nt->qp_count; i++) { set_bit(i, &nt->qp_bitmap); set_bit(i, &nt->qp_bitmap_free); ntb_transport_init_queue(nt, i); } callout_init(&nt->link_work, 0); TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt); rc = ntb_set_ctx(ntb, nt, &ntb_transport_ops); if (rc != 0) goto err; nt->link_is_up = false; ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); ntb_link_event(ntb); callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); return (0); err: free(nt->qp_vec, M_NTB_IF); nt->qp_vec = NULL; return (rc); } static void ntb_transport_free(struct ntb_transport_ctx *nt) { struct ntb_softc *ntb = nt->ntb; struct _qpset qp_bitmap_alloc; uint8_t i; ntb_transport_link_cleanup(nt); taskqueue_drain(taskqueue_swi, &nt->link_cleanup); callout_drain(&nt->link_work); BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc); BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free); /* Verify that all the QPs are freed */ for (i = 0; i < nt->qp_count; i++) if (test_bit(i, &qp_bitmap_alloc)) ntb_transport_free_queue(&nt->qp_vec[i]); ntb_link_disable(ntb); ntb_clear_ctx(ntb); for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); free(nt->qp_vec, M_NTB_IF); } static void ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num) { struct ntb_transport_mw *mw; struct ntb_transport_qp *qp; vm_paddr_t mw_base; uint64_t mw_size, qp_offset; size_t tx_size; unsigned num_qps_mw, mw_num, mw_count; 
mw_count = nt->mw_count; mw_num = QP_TO_MW(nt, qp_num); mw = &nt->mw_vec[mw_num]; qp = &nt->qp_vec[qp_num]; qp->qp_num = qp_num; qp->transport = nt; qp->ntb = nt->ntb; qp->client_ready = false; qp->event_handler = NULL; ntb_qp_link_down_reset(qp); if (nt->qp_count % mw_count && mw_num + 1 < nt->qp_count / mw_count) num_qps_mw = nt->qp_count / mw_count + 1; else num_qps_mw = nt->qp_count / mw_count; mw_base = mw->phys_addr; mw_size = mw->phys_size; tx_size = mw_size / num_qps_mw; qp_offset = tx_size * (qp_num / mw_count); qp->tx_mw = mw->vbase + qp_offset; KASSERT(qp->tx_mw != NULL, ("uh oh?")); /* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */ qp->tx_mw_phys = mw_base + qp_offset; KASSERT(qp->tx_mw_phys != 0, ("uh oh?")); tx_size -= sizeof(struct ntb_rx_info); qp->rx_info = (void *)(qp->tx_mw + tx_size); /* Due to house-keeping, there must be at least 2 buffs */ qp->tx_max_frame = qmin(tx_size / 2, transport_mtu + sizeof(struct ntb_payload_header)); qp->tx_max_entry = tx_size / qp->tx_max_frame; callout_init(&qp->link_work, 0); callout_init(&qp->queue_full, 1); callout_init(&qp->rx_full, 1); mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN); mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN); TASK_INIT(&qp->rx_completion_task, 0, ntb_complete_rxc, qp); TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp); STAILQ_INIT(&qp->rx_post_q); STAILQ_INIT(&qp->rx_pend_q); - STAILQ_INIT(&qp->rx_free_q); STAILQ_INIT(&qp->tx_free_q); callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } static void ntb_transport_free_queue(struct ntb_transport_qp *qp) { struct ntb_queue_entry *entry; if (qp == NULL) return; callout_drain(&qp->link_work); ntb_db_set_mask(qp->ntb, 1ull << qp->qp_num); taskqueue_drain(taskqueue_swi, &qp->rxc_db_work); taskqueue_drain(taskqueue_swi, &qp->rx_completion_task); qp->cb_data = NULL; qp->rx_handler = NULL; qp->tx_handler = NULL; qp->event_handler = NULL; - while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) - free(entry, M_NTB_IF); - while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) free(entry, M_NTB_IF); while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) free(entry, M_NTB_IF); while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) free(entry, M_NTB_IF); set_bit(qp->qp_num, &qp->transport->qp_bitmap_free); } /** * ntb_transport_create_queue - Create a new NTB transport layer queue * @rx_handler: receive callback function * @tx_handler: transmit callback function * @event_handler: event callback function * * Create a new NTB transport layer queue and provide the queue with a callback * routine for both transmit and receive. The receive callback routine will be * used to pass up data when the transport has received it on the queue. The * transmit callback routine will be called when the transport has completed the * transmission of the data on the queue and the data is ready to be freed. * * RETURNS: pointer to newly created ntb_queue, NULL on error. 
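The sizing logic in ntb_transport_init_queue() above guarantees at least two frames per ring: the qp's slice of the window, minus the trailing ntb_rx_info slot, is clamped and divided into tx_max_frame-sized entries. A standalone sketch of that arithmetic with hypothetical numbers (a 1 MiB window slice, a 4-byte rx_info, a 12-byte payload header per this change's u32 fields, and a 64 KiB MTU):

#include <stdio.h>
#include <stdint.h>

static uint64_t
toy_min(uint64_t a, uint64_t b)
{
	return (a < b ? a : b);
}

int
main(void)
{
	uint64_t tx_size = 1024 * 1024;	/* hypothetical per-qp window slice */
	uint64_t mtu = 0x10000;		/* hypothetical transport MTU */
	uint64_t hdr = 12;		/* payload header, 3 x uint32_t */
	uint64_t max_frame, max_entry;

	tx_size -= 4;	/* reserve the trailing ntb_rx_info slot */
	/* The tx_size / 2 clamp is what keeps at least two frames. */
	max_frame = toy_min(tx_size / 2, mtu + hdr);
	max_entry = tx_size / max_frame;
	printf("frame=%ju entries=%ju\n", (uintmax_t)max_frame,
	    (uintmax_t)max_entry);
	return (0);
}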
*/ static struct ntb_transport_qp * ntb_transport_create_queue(void *data, struct ntb_softc *ntb, const struct ntb_queue_handlers *handlers) { struct ntb_queue_entry *entry; struct ntb_transport_qp *qp; struct ntb_transport_ctx *nt; unsigned int free_queue; int i; nt = ntb_get_ctx(ntb, NULL); KASSERT(nt != NULL, ("bogus")); free_queue = ffs_bit(&nt->qp_bitmap); if (free_queue == 0) return (NULL); /* decrement free_queue to make it zero based */ free_queue--; qp = &nt->qp_vec[free_queue]; clear_bit(qp->qp_num, &nt->qp_bitmap_free); qp->cb_data = data; qp->rx_handler = handlers->rx_handler; qp->tx_handler = handlers->tx_handler; qp->event_handler = handlers->event_handler; for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { entry = malloc(sizeof(*entry), M_NTB_IF, M_WAITOK | M_ZERO); entry->cb_data = nt->ifp; entry->buf = NULL; entry->len = transport_mtu; ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q); } for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { entry = malloc(sizeof(*entry), M_NTB_IF, M_WAITOK | M_ZERO); ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); } ntb_db_clear(ntb, 1ull << qp->qp_num); ntb_db_clear_mask(ntb, 1ull << qp->qp_num); return (qp); } /** * ntb_transport_link_up - Notify NTB transport of client readiness to use queue * @qp: NTB transport layer queue to be enabled * * Notify NTB transport layer of client readiness to use queue */ static void ntb_transport_link_up(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt; if (qp == NULL) return; qp->client_ready = true; nt = qp->transport; ntb_printf(2, "qp client ready\n"); if (qp->transport->link_is_up) callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } /* Transport Tx */ /** * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry * @qp: NTB transport layer queue the entry is to be enqueued on * @cb: per buffer pointer for callback function to use * @data: pointer to data buffer that will be sent * @len: length of the data buffer * * Enqueue a new transmit buffer onto the transport queue from which a NTB * payload will be transmitted. This assumes that a lock is being held to * serialize access to the qp. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len) { struct ntb_queue_entry *entry; int rc; if (qp == NULL || !qp->link_is_up || len == 0) { CTR0(KTR_NTB, "TX: link not up"); return (EINVAL); } entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); if (entry == NULL) { CTR0(KTR_NTB, "TX: could not get entry from tx_free_q"); qp->tx_err_no_buf++; return (EBUSY); } CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry); entry->cb_data = cb; entry->buf = data; entry->len = len; entry->flags = 0; rc = ntb_process_tx(qp, entry); if (rc != 0) { ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); CTR1(KTR_NTB, "TX: process_tx failed. 
Returning entry %p to tx_free_q", entry); } return (rc); } static int ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) { void *offset; - offset = (char *)qp->tx_mw + qp->tx_max_frame * qp->tx_index; + offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; CTR3(KTR_NTB, - "TX: process_tx: tx_pkts=%u, tx_index=%u, remote entry=%u", + "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u", qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry); if (qp->tx_index == qp->remote_rx_info->entry) { CTR0(KTR_NTB, "TX: ring full"); qp->tx_ring_full++; return (EAGAIN); } if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { if (qp->tx_handler != NULL) qp->tx_handler(qp, qp->cb_data, entry->buf, - EIO); + EIO); + else + m_freem(entry->buf); + entry->buf = NULL; ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); CTR1(KTR_NTB, "TX: frame too big. returning entry %p to tx_free_q", entry); return (0); } CTR2(KTR_NTB, "TX: copying entry %p to offset %p", entry, offset); ntb_memcpy_tx(qp, entry, offset); qp->tx_index++; qp->tx_index %= qp->tx_max_entry; qp->tx_pkts++; return (0); } static void ntb_memcpy_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset) { struct ntb_payload_header *hdr; /* This piece is from Linux' ntb_async_tx() */ hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame - sizeof(struct ntb_payload_header)); entry->x_hdr = hdr; - hdr->len = entry->len; /* TODO: replace with bus_space_write */ - hdr->ver = qp->tx_pkts; /* TODO: replace with bus_space_write */ + iowrite32(entry->len, &hdr->len); + iowrite32(qp->tx_pkts, &hdr->ver); /* This piece is ntb_memcpy_tx() */ CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset); if (entry->buf != NULL) { m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset); /* * Ensure that the data is fully copied before setting the * flags */ wmb(); } /* The rest is ntb_tx_copy_callback() */ - /* TODO: replace with bus_space_write */ - hdr->flags = entry->flags | IF_NTB_DESC_DONE_FLAG; + iowrite32(entry->flags | IF_NTB_DESC_DONE_FLAG, &hdr->flags); + CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr); ntb_peer_db_set(qp->ntb, 1ull << qp->qp_num); /* * The entry length can only be zero if the packet is intended to be a * "link down" or similar. Since no payload is being sent in these * cases, there is nothing to add to the completion queue. */ if (entry->len > 0) { qp->tx_bytes += entry->len; if (qp->tx_handler) - qp->tx_handler(qp, qp->cb_data, entry->cb_data, - entry->len); + qp->tx_handler(qp, qp->cb_data, entry->buf, + entry->len); + else + m_freem(entry->buf); + entry->buf = NULL; } - CTR2(KTR_NTB, - "TX: entry %p sent. hdr->ver = %d, Returning to tx_free_q", entry, - hdr->ver); + CTR3(KTR_NTB, + "TX: entry %p sent. 
hdr->ver = %u, hdr->flags = 0x%x, Returning " + "to tx_free_q", entry, hdr->ver, hdr->flags); ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); } static void ntb_qp_full(void *arg) { CTR0(KTR_NTB, "TX: qp_full callout"); ntb_start(arg); } /* Transport Rx */ static void ntb_transport_rxc_db(void *arg, int pending __unused) { struct ntb_transport_qp *qp = arg; ntb_q_idx_t i; int rc; /* * Limit the number of packets processed in a single interrupt to * provide fairness to others */ CTR0(KTR_NTB, "RX: transport_rx"); mtx_lock(&qp->transport->rx_lock); for (i = 0; i < qp->rx_max_entry; i++) { rc = ntb_process_rxc(qp); if (rc != 0) { CTR0(KTR_NTB, "RX: process_rxc failed"); break; } } mtx_unlock(&qp->transport->rx_lock); if (i == qp->rx_max_entry) taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); else if ((ntb_db_read(qp->ntb) & (1ull << qp->qp_num)) != 0) { /* If db is set, clear it and read it back to commit clear. */ ntb_db_clear(qp->ntb, 1ull << qp->qp_num); (void)ntb_db_read(qp->ntb); /* * An interrupt may have arrived between finishing * ntb_process_rxc and clearing the doorbell bit: there might * be some more work to do. */ taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); } } static int ntb_process_rxc(struct ntb_transport_qp *qp) { struct ntb_payload_header *hdr; struct ntb_queue_entry *entry; - void *offset; + caddr_t offset; - offset = (void *) - ((char *)qp->rx_buff + qp->rx_max_frame * qp->rx_index); - hdr = (void *) - ((char *)offset + qp->rx_max_frame - - sizeof(struct ntb_payload_header)); + offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; + hdr = (void *)(offset + qp->rx_max_frame - + sizeof(struct ntb_payload_header)); CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index); if ((hdr->flags & IF_NTB_DESC_DONE_FLAG) == 0) { CTR0(KTR_NTB, "RX: hdr not done"); qp->rx_ring_empty++; return (EAGAIN); } if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) { CTR0(KTR_NTB, "RX: link down"); ntb_qp_link_down(qp); hdr->flags = 0; return (EAGAIN); } if (hdr->ver != (uint32_t)qp->rx_pkts) { CTR2(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). " - "Returning entry %p to rx_pend_q", hdr->ver, qp->rx_pkts); + "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts); qp->rx_err_ver++; return (EIO); } entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); if (entry == NULL) { qp->rx_err_no_buf++; CTR0(KTR_NTB, "RX: No entries in rx_pend_q"); return (EAGAIN); } callout_stop(&qp->rx_full); CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry); entry->x_hdr = hdr; entry->index = qp->rx_index; if (hdr->len > entry->len) { CTR2(KTR_NTB, "RX: len too long. 
Wanted %ju got %ju", (uintmax_t)hdr->len, (uintmax_t)entry->len); qp->rx_err_oflow++; entry->len = -EIO; entry->flags |= IF_NTB_DESC_DONE_FLAG; taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task); } else { qp->rx_bytes += hdr->len; qp->rx_pkts++; CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts); entry->len = hdr->len; ntb_memcpy_rx(qp, entry, offset); } qp->rx_index++; qp->rx_index %= qp->rx_max_entry; return (0); } static void ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset) { struct ifnet *ifp = entry->cb_data; unsigned int len = entry->len; struct mbuf *m; CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset); m = m_devget(offset, len, 0, ifp, NULL); m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID; entry->buf = (void *)m; /* Ensure that the data is globally visible before clearing the flag */ wmb(); CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, m); ntb_rx_copy_callback(qp, entry); } static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data) { struct ntb_queue_entry *entry; entry = data; entry->flags |= IF_NTB_DESC_DONE_FLAG; taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task); } static void ntb_complete_rxc(void *arg, int pending) { struct ntb_transport_qp *qp = arg; struct ntb_queue_entry *entry; struct mbuf *m; unsigned len; CTR0(KTR_NTB, "RX: rx_completion_task"); mtx_lock_spin(&qp->ntb_rx_q_lock); while (!STAILQ_EMPTY(&qp->rx_post_q)) { entry = STAILQ_FIRST(&qp->rx_post_q); if ((entry->flags & IF_NTB_DESC_DONE_FLAG) == 0) break; entry->x_hdr->flags = 0; - /* XXX bus_space_write */ - qp->rx_info->entry = entry->index; + iowrite32(entry->index, &qp->rx_info->entry); + STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry); + len = entry->len; m = entry->buf; - STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry); - STAILQ_INSERT_TAIL(&qp->rx_free_q, entry, entry); + /* + * Re-initialize queue_entry for reuse; rx_handler takes + * ownership of the mbuf. 
+ */ + entry->buf = NULL; + entry->len = transport_mtu; + entry->cb_data = qp->transport->ifp; + STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry); + mtx_unlock_spin(&qp->ntb_rx_q_lock); CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m); if (qp->rx_handler != NULL && qp->client_ready) qp->rx_handler(qp, qp->cb_data, m, len); + else + m_freem(m); mtx_lock_spin(&qp->ntb_rx_q_lock); } mtx_unlock_spin(&qp->ntb_rx_q_lock); } static void ntb_transport_doorbell_callback(void *data, uint32_t vector) { struct ntb_transport_ctx *nt = data; struct ntb_transport_qp *qp; struct _qpset db_bits; uint64_t vec_mask; unsigned qp_num; BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &db_bits); BIT_NAND(QP_SETSIZE, &db_bits, &nt->qp_bitmap_free); vec_mask = ntb_db_vector_mask(nt->ntb, vector); while (vec_mask != 0) { qp_num = ffsll(vec_mask) - 1; if (test_bit(qp_num, &db_bits)) { qp = &nt->qp_vec[qp_num]; taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); } vec_mask &= ~(1ull << qp_num); } } /* Link Event handler */ static void ntb_transport_event_callback(void *data) { struct ntb_transport_ctx *nt = data; if (ntb_link_is_up(nt->ntb, NULL, NULL)) { ntb_printf(1, "HW link up\n"); callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); } else { ntb_printf(1, "HW link down\n"); taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup); } } /* Link bring up */ static void ntb_transport_link_work(void *arg) { struct ntb_transport_ctx *nt = arg; struct ntb_softc *ntb = nt->ntb; struct ntb_transport_qp *qp; uint64_t val64, size; uint32_t val; unsigned i; int rc; /* send the local info, in the opposite order of the way we read it */ for (i = 0; i < nt->mw_count; i++) { size = nt->mw_vec[i].phys_size; if (max_mw_size != 0 && size > max_mw_size) size = max_mw_size; ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), size >> 32); ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), size); } ntb_peer_spad_write(ntb, IF_NTB_NUM_MWS, nt->mw_count); ntb_peer_spad_write(ntb, IF_NTB_NUM_QPS, nt->qp_count); ntb_peer_spad_write(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION); /* Query the remote side for its info */ val = 0; ntb_spad_read(ntb, IF_NTB_VERSION, &val); if (val != NTB_TRANSPORT_VERSION) goto out; ntb_spad_read(ntb, IF_NTB_NUM_QPS, &val); if (val != nt->qp_count) goto out; ntb_spad_read(ntb, IF_NTB_NUM_MWS, &val); if (val != nt->mw_count) goto out; for (i = 0; i < nt->mw_count; i++) { ntb_spad_read(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), &val); val64 = (uint64_t)val << 32; ntb_spad_read(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), &val); val64 |= val; rc = ntb_set_mw(nt, i, val64); if (rc != 0) goto free_mws; } nt->link_is_up = true; ntb_printf(1, "transport link up\n"); for (i = 0; i < nt->qp_count; i++) { qp = &nt->qp_vec[i]; ntb_transport_setup_qp_mw(nt, i); if (qp->client_ready) callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } return; free_mws: for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); out: if (ntb_link_is_up(ntb, NULL, NULL)) callout_reset(&nt->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt); } static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; size_t xlat_size, buff_size; int rc; if (size == 0) return (EINVAL); xlat_size = roundup(size, mw->xlat_align_size); buff_size = roundup(size, mw->xlat_align); /* No need to re-setup */ if (mw->xlat_size == xlat_size) return (0); if (mw->buff_size != 0) ntb_free_mw(nt, num_mw); /* Alloc memory for receiving data. 
Must be aligned */ mw->xlat_size = xlat_size; mw->buff_size = buff_size; mw->virt_addr = contigmalloc(mw->buff_size, M_NTB_IF, M_ZERO, 0, - BUS_SPACE_MAXADDR, mw->xlat_align, 0); + mw->addr_limit, mw->xlat_align, 0); if (mw->virt_addr == NULL) { mw->xlat_size = 0; mw->buff_size = 0; printf("ntb: Unable to allocate MW buffer of size %zu\n", mw->xlat_size); return (ENOMEM); } /* TODO: replace with bus_space_* functions */ mw->dma_addr = vtophys(mw->virt_addr); /* * Ensure that the allocation from contigmalloc is aligned as * requested. XXX: This may not be needed -- brought in for parity * with the Linux driver. */ if (mw->dma_addr % mw->xlat_align != 0) { ntb_printf(0, "DMA memory 0x%jx not aligned to BAR size 0x%zx\n", (uintmax_t)mw->dma_addr, size); ntb_free_mw(nt, num_mw); return (ENOMEM); } /* Notify HW the memory location of the receive buffer */ rc = ntb_mw_set_trans(nt->ntb, num_mw, mw->dma_addr, mw->xlat_size); if (rc) { ntb_printf(0, "Unable to set mw%d translation\n", num_mw); ntb_free_mw(nt, num_mw); return (rc); } return (0); } static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; if (mw->virt_addr == NULL) return; ntb_mw_clear_trans(nt->ntb, num_mw); contigfree(mw->virt_addr, mw->xlat_size, M_NTB_IF); mw->xlat_size = 0; mw->buff_size = 0; mw->virt_addr = NULL; } static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num) { struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; struct ntb_transport_mw *mw; void *offset; ntb_q_idx_t i; size_t rx_size; unsigned num_qps_mw, mw_num, mw_count; mw_count = nt->mw_count; mw_num = QP_TO_MW(nt, qp_num); mw = &nt->mw_vec[mw_num]; if (mw->virt_addr == NULL) return (ENOMEM); if (nt->qp_count % mw_count && mw_num + 1 < nt->qp_count / mw_count) num_qps_mw = nt->qp_count / mw_count + 1; else num_qps_mw = nt->qp_count / mw_count; rx_size = mw->xlat_size / num_qps_mw; qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); rx_size -= sizeof(struct ntb_rx_info); qp->remote_rx_info = (void*)(qp->rx_buff + rx_size); /* Due to house-keeping, there must be at least 2 buffs */ qp->rx_max_frame = qmin(rx_size / 2, transport_mtu + sizeof(struct ntb_payload_header)); qp->rx_max_entry = rx_size / qp->rx_max_frame; qp->rx_index = 0; qp->remote_rx_info->entry = qp->rx_max_entry - 1; /* Set up the hdr offsets with 0s */ for (i = 0; i < qp->rx_max_entry; i++) { offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) - sizeof(struct ntb_payload_header)); memset(offset, 0, sizeof(struct ntb_payload_header)); } qp->rx_pkts = 0; qp->tx_pkts = 0; qp->tx_index = 0; return (0); } static void ntb_qp_link_work(void *arg) { struct ntb_transport_qp *qp = arg; struct ntb_softc *ntb = qp->ntb; struct ntb_transport_ctx *nt = qp->transport; uint32_t val, dummy; ntb_spad_read(ntb, IF_NTB_QP_LINKS, &val); ntb_peer_spad_write(ntb, IF_NTB_QP_LINKS, val | (1ull << qp->qp_num)); /* query remote spad for qp ready bits */ ntb_peer_spad_read(ntb, IF_NTB_QP_LINKS, &dummy); /* See if the remote side is up */ if ((val & (1ull << qp->qp_num)) != 0) { ntb_printf(2, "qp link up\n"); qp->link_is_up = true; if (qp->event_handler != NULL) qp->event_handler(qp->cb_data, NTB_LINK_UP); taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); } else if (nt->link_is_up) callout_reset(&qp->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); } /* Link down event*/ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) { struct ntb_transport_qp *qp; struct _qpset qp_bitmap_alloc; 
unsigned i; BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc); BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free); /* Pass along the info to any clients */ for (i = 0; i < nt->qp_count; i++) if (test_bit(i, &qp_bitmap_alloc)) { qp = &nt->qp_vec[i]; ntb_qp_link_cleanup(qp); callout_drain(&qp->link_work); } if (!nt->link_is_up) callout_drain(&nt->link_work); /* * The scratchpad registers keep the values if the remote side * goes down, blast them now to give them a sane value the next * time they are accessed */ for (i = 0; i < IF_NTB_MAX_SPAD; i++) ntb_spad_write(nt->ntb, i, 0); } static void ntb_transport_link_cleanup_work(void *arg, int pending __unused) { ntb_transport_link_cleanup(arg); } static void ntb_qp_link_down(struct ntb_transport_qp *qp) { ntb_qp_link_cleanup(qp); } static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) { qp->link_is_up = false; qp->tx_index = qp->rx_index = 0; qp->tx_bytes = qp->rx_bytes = 0; qp->tx_pkts = qp->rx_pkts = 0; qp->rx_ring_empty = 0; qp->tx_ring_full = 0; qp->rx_err_no_buf = qp->tx_err_no_buf = 0; qp->rx_err_oflow = qp->rx_err_ver = 0; } static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt = qp->transport; callout_drain(&qp->link_work); ntb_qp_link_down_reset(qp); if (qp->event_handler != NULL) qp->event_handler(qp->cb_data, NTB_LINK_DOWN); if (nt->link_is_up) callout_reset(&qp->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); } /* Link commanded down */ /** * ntb_transport_link_down - Notify NTB transport to no longer enqueue data * @qp: NTB transport layer queue to be disabled * * Notify NTB transport layer of client's desire to no longer receive data on * transport queue specified. It is the client's responsibility to ensure all * entries on queue are purged or otherwise handled appropriately. 
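ntb_qp_link_work() and ntb_transport_link_down() drive the qp link state through the shared IF_NTB_QP_LINKS scratchpad: each side sets its qp's bit in the peer's scratchpad on link-up and clears it on link-down. A toy model of that handshake; the spad array and function names are stand-ins for the ntb_spad_read()/ntb_peer_spad_write() calls:

#include <stdint.h>
#include <stdio.h>

static uint32_t spad[2];	/* spad[side] stands in for that side's register */

static void
qp_link_up(int side, unsigned qp_num)
{
	spad[!side] |= (1u << qp_num);	/* ntb_peer_spad_write() analogue */
}

static void
qp_link_down(int side, unsigned qp_num)
{
	spad[!side] &= ~(1u << qp_num);
}

static int
qp_link_is_up(int side, unsigned qp_num)
{
	/* ntb_spad_read() analogue: the peer wrote this bit here. */
	return ((spad[side] & (1u << qp_num)) != 0);
}

int
main(void)
{
	qp_link_up(0, 0);	/* side 0 raises qp 0 in side 1's spad */
	qp_link_up(1, 0);	/* side 1 raises qp 0 in side 0's spad */
	printf("side0 sees peer up: %d\n", qp_link_is_up(0, 0));
	qp_link_down(1, 0);
	printf("after peer link-down: %d\n", qp_link_is_up(0, 0));
	return (0);
}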
*/ static void ntb_transport_link_down(struct ntb_transport_qp *qp) { uint32_t val; if (qp == NULL) return; qp->client_ready = false; ntb_spad_read(qp->ntb, IF_NTB_QP_LINKS, &val); ntb_peer_spad_write(qp->ntb, IF_NTB_QP_LINKS, val & ~(1 << qp->qp_num)); if (qp->link_is_up) ntb_send_link_down(qp); else callout_drain(&qp->link_work); } static void ntb_send_link_down(struct ntb_transport_qp *qp) { struct ntb_queue_entry *entry; int i, rc; if (!qp->link_is_up) return; for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) { entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); if (entry != NULL) break; pause("NTB Wait for link down", hz / 10); } if (entry == NULL) return; entry->cb_data = NULL; entry->buf = NULL; entry->len = 0; entry->flags = IF_NTB_LINK_DOWN_FLAG; mtx_lock(&qp->transport->tx_lock); rc = ntb_process_tx(qp, entry); if (rc != 0) printf("ntb: Failed to send link down\n"); mtx_unlock(&qp->transport->tx_lock); ntb_qp_link_down_reset(qp); } /* List Management */ static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, struct ntb_queue_list *list) { mtx_lock_spin(lock); STAILQ_INSERT_TAIL(list, entry, entry); mtx_unlock_spin(lock); } static struct ntb_queue_entry * ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list) { struct ntb_queue_entry *entry; mtx_lock_spin(lock); if (STAILQ_EMPTY(list)) { entry = NULL; goto out; } entry = STAILQ_FIRST(list); STAILQ_REMOVE_HEAD(list, entry); out: mtx_unlock_spin(lock); return (entry); } static struct ntb_queue_entry * ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from, struct ntb_queue_list *to) { struct ntb_queue_entry *entry; mtx_lock_spin(lock); if (STAILQ_EMPTY(from)) { entry = NULL; goto out; } entry = STAILQ_FIRST(from); STAILQ_REMOVE_HEAD(from, entry); STAILQ_INSERT_TAIL(to, entry, entry); out: mtx_unlock_spin(lock); return (entry); } /* Helper functions */ /* TODO: This too should really be part of the kernel */ #define EUI48_MULTICAST 1 << 0 #define EUI48_LOCALLY_ADMINISTERED 1 << 1 static void create_random_local_eui48(u_char *eaddr) { static uint8_t counter = 0; uint32_t seed = ticks; eaddr[0] = EUI48_LOCALLY_ADMINISTERED; memcpy(&eaddr[1], &seed, sizeof(uint32_t)); eaddr[5] = counter++; } /** * ntb_transport_max_size - Query the max payload size of a qp * @qp: NTB transport layer queue to be queried * * Query the maximum payload size permissible on the given qp * * RETURNS: the max payload size of a qp */ static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) { if (qp == NULL) return (0); return (qp->tx_max_frame - sizeof(struct ntb_payload_header)); } Index: projects/powernv/dev/ntb/ntb_hw/ntb_hw.c =================================================================== --- projects/powernv/dev/ntb/ntb_hw/ntb_hw.c (revision 291050) +++ projects/powernv/dev/ntb/ntb_hw/ntb_hw.c (revision 291051) @@ -1,2760 +1,2837 @@ /*- * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "ntb_regs.h"
#include "ntb_hw.h"

/*
 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that
 * allows you to connect two systems using a PCI-e link.
 *
 * This module contains the hardware abstraction layer for the NTB. It allows
 * you to send and receive interrupts, map the memory windows, and send and
 * receive messages in the scratch-pad registers.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

#define MAX_MSIX_INTERRUPTS	MAX(XEON_DB_COUNT, ATOM_DB_COUNT)

#define NTB_HB_TIMEOUT		1 /* second */
#define ATOM_LINK_RECOVERY_TIME	500 /* ms */
+#define BAR_HIGH_MASK		(~((1ull << 12) - 1))

#define DEVICE2SOFTC(dev) ((struct ntb_softc *) device_get_softc(dev))

enum ntb_device_type {
	NTB_XEON,
	NTB_ATOM
};

/* ntb_conn_type are hardware numbers, cannot change.
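 * They are decoded from the PPD configuration register; for example,
 * ntb_detect_xeon() below does conn_type = ppd & XEON_PPD_CONN_TYPE.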
*/ enum ntb_conn_type { NTB_CONN_TRANSPARENT = 0, NTB_CONN_B2B = 1, NTB_CONN_RP = 2, }; enum ntb_b2b_direction { NTB_DEV_USD = 0, NTB_DEV_DSD = 1, }; enum ntb_bar { NTB_CONFIG_BAR = 0, NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3, NTB_MAX_BARS }; /* Device features and workarounds */ #define HAS_FEATURE(feature) \ ((ntb->features & (feature)) != 0) struct ntb_hw_info { uint32_t device_id; const char *desc; enum ntb_device_type type; uint32_t features; }; struct ntb_pci_bar_info { bus_space_tag_t pci_bus_tag; bus_space_handle_t pci_bus_handle; int pci_resource_id; struct resource *pci_resource; vm_paddr_t pbase; caddr_t vbase; vm_size_t size; + bool mapped_wc : 1; /* Configuration register offsets */ uint32_t psz_off; uint32_t ssz_off; uint32_t pbarxlat_off; }; struct ntb_int_info { struct resource *res; int rid; void *tag; }; struct ntb_vec { struct ntb_softc *ntb; uint32_t num; }; struct ntb_reg { uint32_t ntb_ctl; uint32_t lnk_sta; uint8_t db_size; unsigned mw_bar[NTB_MAX_BARS]; }; struct ntb_alt_reg { uint32_t db_bell; uint32_t db_mask; uint32_t spad; }; struct ntb_xlat_reg { uint32_t bar0_base; uint32_t bar2_base; uint32_t bar4_base; uint32_t bar5_base; uint32_t bar2_xlat; uint32_t bar4_xlat; uint32_t bar5_xlat; uint32_t bar2_limit; uint32_t bar4_limit; uint32_t bar5_limit; }; struct ntb_b2b_addr { uint64_t bar0_addr; uint64_t bar2_addr64; uint64_t bar4_addr64; uint64_t bar4_addr32; uint64_t bar5_addr32; }; struct ntb_softc { device_t device; enum ntb_device_type type; uint32_t features; struct ntb_pci_bar_info bar_info[NTB_MAX_BARS]; struct ntb_int_info int_info[MAX_MSIX_INTERRUPTS]; uint32_t allocated_interrupts; struct callout heartbeat_timer; struct callout lr_timer; void *ntb_ctx; const struct ntb_ctx_ops *ctx_ops; struct ntb_vec *msix_vec; #define CTX_LOCK(sc) mtx_lock(&(sc)->ctx_lock) #define CTX_UNLOCK(sc) mtx_unlock(&(sc)->ctx_lock) #define CTX_ASSERT(sc,f) mtx_assert(&(sc)->ctx_lock, (f)) struct mtx ctx_lock; uint32_t ppd; enum ntb_conn_type conn_type; enum ntb_b2b_direction dev_type; /* Offset of peer bar0 in B2B BAR */ uint64_t b2b_off; /* Memory window used to access peer bar0 */ #define B2B_MW_DISABLED UINT8_MAX uint8_t b2b_mw_idx; uint8_t mw_count; uint8_t spad_count; uint8_t db_count; uint8_t db_vec_count; uint8_t db_vec_shift; /* Protects local db_mask. 
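 * Writers take DB_MASK_LOCK() (defined just below) around both the cached
 * ntb->db_mask update and the db_iowrite() that pushes it to hardware;
 * ntb_db_set_mask() is the pattern:
 *
 *	DB_MASK_LOCK(ntb);
 *	ntb->db_mask |= bits;
 *	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
 *	DB_MASK_UNLOCK(ntb);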
*/ #define DB_MASK_LOCK(sc) mtx_lock_spin(&(sc)->db_mask_lock) #define DB_MASK_UNLOCK(sc) mtx_unlock_spin(&(sc)->db_mask_lock) #define DB_MASK_ASSERT(sc,f) mtx_assert(&(sc)->db_mask_lock, (f)) struct mtx db_mask_lock; volatile uint32_t ntb_ctl; volatile uint32_t lnk_sta; uint64_t db_valid_mask; uint64_t db_link_mask; uint64_t db_mask; int last_ts; /* ticks @ last irq */ const struct ntb_reg *reg; const struct ntb_alt_reg *self_reg; const struct ntb_alt_reg *peer_reg; const struct ntb_xlat_reg *xlat_reg; }; #ifdef __i386__ static __inline uint64_t bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset) { return (bus_space_read_4(tag, handle, offset) | ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); } static __inline void bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset, uint64_t val) { bus_space_write_4(tag, handle, offset, val); bus_space_write_4(tag, handle, offset + 4, val >> 32); } #endif #define ntb_bar_read(SIZE, bar, offset) \ bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ ntb->bar_info[(bar)].pci_bus_handle, (offset)) #define ntb_bar_write(SIZE, bar, offset, val) \ bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ ntb->bar_info[(bar)].pci_bus_handle, (offset), (val)) #define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset) #define ntb_reg_write(SIZE, offset, val) \ ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val) #define ntb_mw_read(SIZE, offset) \ ntb_bar_read(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), offset) #define ntb_mw_write(SIZE, offset, val) \ ntb_bar_write(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \ offset, val) static int ntb_probe(device_t device); static int ntb_attach(device_t device); static int ntb_detach(device_t device); static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *, unsigned mw); static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar); static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar, uint32_t *base, uint32_t *xlat, uint32_t *lmt); static int ntb_map_pci_bars(struct ntb_softc *ntb); static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *, const char *); static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); static int map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); static void ntb_unmap_pci_bar(struct ntb_softc *ntb); static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail); static int ntb_init_isr(struct ntb_softc *ntb); static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb); static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors); static void ntb_teardown_interrupts(struct ntb_softc *ntb); static inline uint64_t ntb_vec_mask(struct ntb_softc *, uint64_t db_vector); static void ntb_interrupt(struct ntb_softc *, uint32_t vec); static void ndev_vec_isr(void *arg); static void ndev_irq_isr(void *arg); static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff); static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t); static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t); static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors); static void ntb_free_msix_vec(struct ntb_softc *ntb); static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id); static void ntb_detect_max_mw(struct ntb_softc *ntb); static int ntb_detect_xeon(struct ntb_softc *ntb); static int ntb_detect_atom(struct ntb_softc *ntb); static int 
ntb_xeon_init_dev(struct ntb_softc *ntb); static int ntb_atom_init_dev(struct ntb_softc *ntb); static void ntb_teardown_xeon(struct ntb_softc *ntb); static void configure_atom_secondary_side_bars(struct ntb_softc *ntb); static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx, enum ntb_bar regbar); static void xeon_set_sbar_base_and_limit(struct ntb_softc *, uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar); static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr, enum ntb_bar idx); static int xeon_setup_b2b_mw(struct ntb_softc *, const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr); static inline bool link_is_up(struct ntb_softc *ntb); static inline bool atom_link_is_err(struct ntb_softc *ntb); static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *); static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *); static void atom_link_hb(void *arg); static void ntb_db_event(struct ntb_softc *ntb, uint32_t vec); static void recover_atom_link(void *arg); static bool ntb_poll_link(struct ntb_softc *ntb); static void save_bar_parameters(struct ntb_pci_bar_info *bar); static void ntb_sysctl_init(struct ntb_softc *); static int sysctl_handle_features(SYSCTL_HANDLER_ARGS); static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS); static int sysctl_handle_register(SYSCTL_HANDLER_ARGS); static unsigned g_ntb_hw_debug_level; SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose"); #define ntb_printf(lvl, ...) do { \ if ((lvl) <= g_ntb_hw_debug_level) { \ device_printf(ntb->device, __VA_ARGS__); \ } \ } while (0) +static unsigned g_ntb_enable_wc = 1; +SYSCTL_UINT(_hw_ntb, OID_AUTO, enable_writecombine, CTLFLAG_RDTUN, + &g_ntb_enable_wc, 0, "Set to 1 to map memory windows write combining"); + static struct ntb_hw_info pci_ids[] = { /* XXX: PS/SS IDs left out until they are supported. */ { 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B", NTB_ATOM, 0 }, { 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, { 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, { 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K }, { 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP }, { 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP }, { 0x00000000, NULL, NTB_ATOM, 0 } }; static const struct ntb_reg atom_reg = { .ntb_ctl = ATOM_NTBCNTL_OFFSET, .lnk_sta = ATOM_LINK_STATUS_OFFSET, .db_size = sizeof(uint64_t), .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 }, }; static const struct ntb_alt_reg atom_pri_reg = { .db_bell = ATOM_PDOORBELL_OFFSET, .db_mask = ATOM_PDBMSK_OFFSET, .spad = ATOM_SPAD_OFFSET, }; static const struct ntb_alt_reg atom_b2b_reg = { .db_bell = ATOM_B2B_DOORBELL_OFFSET, .spad = ATOM_B2B_SPAD_OFFSET, }; static const struct ntb_xlat_reg atom_sec_xlat = { #if 0 /* "FIXME" says the Linux driver. 
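 * The secondary base/limit offsets below are compiled out by the #if 0,
 * so on Atom only the two incoming xlat registers are described.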
*/ .bar0_base = ATOM_SBAR0BASE_OFFSET, .bar2_base = ATOM_SBAR2BASE_OFFSET, .bar4_base = ATOM_SBAR4BASE_OFFSET, .bar2_limit = ATOM_SBAR2LMT_OFFSET, .bar4_limit = ATOM_SBAR4LMT_OFFSET, #endif .bar2_xlat = ATOM_SBAR2XLAT_OFFSET, .bar4_xlat = ATOM_SBAR4XLAT_OFFSET, }; static const struct ntb_reg xeon_reg = { .ntb_ctl = XEON_NTBCNTL_OFFSET, .lnk_sta = XEON_LINK_STATUS_OFFSET, .db_size = sizeof(uint16_t), .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 }, }; static const struct ntb_alt_reg xeon_pri_reg = { .db_bell = XEON_PDOORBELL_OFFSET, .db_mask = XEON_PDBMSK_OFFSET, .spad = XEON_SPAD_OFFSET, }; static const struct ntb_alt_reg xeon_b2b_reg = { .db_bell = XEON_B2B_DOORBELL_OFFSET, .spad = XEON_B2B_SPAD_OFFSET, }; static const struct ntb_xlat_reg xeon_sec_xlat = { .bar0_base = XEON_SBAR0BASE_OFFSET, .bar2_base = XEON_SBAR2BASE_OFFSET, .bar4_base = XEON_SBAR4BASE_OFFSET, .bar5_base = XEON_SBAR5BASE_OFFSET, .bar2_limit = XEON_SBAR2LMT_OFFSET, .bar4_limit = XEON_SBAR4LMT_OFFSET, .bar5_limit = XEON_SBAR5LMT_OFFSET, .bar2_xlat = XEON_SBAR2XLAT_OFFSET, .bar4_xlat = XEON_SBAR4XLAT_OFFSET, .bar5_xlat = XEON_SBAR5XLAT_OFFSET, }; static struct ntb_b2b_addr xeon_b2b_usd_addr = { .bar0_addr = XEON_B2B_BAR0_ADDR, .bar2_addr64 = XEON_B2B_BAR2_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_ADDR64, .bar4_addr32 = XEON_B2B_BAR4_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_ADDR32, }; static struct ntb_b2b_addr xeon_b2b_dsd_addr = { .bar0_addr = XEON_B2B_BAR0_ADDR, .bar2_addr64 = XEON_B2B_BAR2_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_ADDR64, .bar4_addr32 = XEON_B2B_BAR4_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_ADDR32, }; SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW, 0, "B2B MW segment overrides -- MUST be the same on both sides"); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon " "hardware, use this 64-bit address on the bus between the NTB devices for " "the window at BAR2, on the upstream side of the link. MUST be the same " "address on both sides."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 " "(split-BAR mode)."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 " "(split-BAR mode)."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon " "hardware, use this 64-bit address on the bus between the NTB devices for " "the window at BAR2, on the downstream side of the link. 
MUST be the same" " address on both sides."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 " "(split-BAR mode)."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 " "(split-BAR mode)."); /* * OS <-> Driver interface structures */ MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations"); static device_method_t ntb_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ntb_probe), DEVMETHOD(device_attach, ntb_attach), DEVMETHOD(device_detach, ntb_detach), DEVMETHOD_END }; static driver_t ntb_pci_driver = { "ntb_hw", ntb_pci_methods, sizeof(struct ntb_softc), }; static devclass_t ntb_devclass; DRIVER_MODULE(ntb_hw, pci, ntb_pci_driver, ntb_devclass, NULL, NULL); MODULE_VERSION(ntb_hw, 1); SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls"); /* * OS <-> Driver linkage functions */ static int ntb_probe(device_t device) { struct ntb_hw_info *p; p = ntb_get_device_info(pci_get_devid(device)); if (p == NULL) return (ENXIO); device_set_desc(device, p->desc); return (0); } static int ntb_attach(device_t device) { struct ntb_softc *ntb; struct ntb_hw_info *p; int error; ntb = DEVICE2SOFTC(device); p = ntb_get_device_info(pci_get_devid(device)); ntb->device = device; ntb->type = p->type; ntb->features = p->features; ntb->b2b_mw_idx = B2B_MW_DISABLED; /* Heartbeat timer for NTB_ATOM since there is no link interrupt */ callout_init(&ntb->heartbeat_timer, 1); callout_init(&ntb->lr_timer, 1); mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN); mtx_init(&ntb->ctx_lock, "ntb ctx", NULL, MTX_DEF); if (ntb->type == NTB_ATOM) error = ntb_detect_atom(ntb); else error = ntb_detect_xeon(ntb); if (error != 0) goto out; ntb_detect_max_mw(ntb); pci_enable_busmaster(ntb->device); error = ntb_map_pci_bars(ntb); if (error != 0) goto out; if (ntb->type == NTB_ATOM) error = ntb_atom_init_dev(ntb); else error = ntb_xeon_init_dev(ntb); if (error != 0) goto out; ntb_poll_link(ntb); ntb_sysctl_init(ntb); out: if (error != 0) ntb_detach(device); return (error); } static int ntb_detach(device_t device) { struct ntb_softc *ntb; ntb = DEVICE2SOFTC(device); if (ntb->self_reg != NULL) ntb_db_set_mask(ntb, ntb->db_valid_mask); callout_drain(&ntb->heartbeat_timer); callout_drain(&ntb->lr_timer); pci_disable_busmaster(ntb->device); if (ntb->type == NTB_XEON) ntb_teardown_xeon(ntb); ntb_teardown_interrupts(ntb); mtx_destroy(&ntb->db_mask_lock); mtx_destroy(&ntb->ctx_lock); /* * Redetect total MWs so we unmap properly -- in case we lowered the * maximum to work around Xeon errata. */ ntb_detect_max_mw(ntb); ntb_unmap_pci_bar(ntb); return (0); } /* * Driver internal routines */ static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw) { KASSERT(mw < ntb->mw_count || (mw != B2B_MW_DISABLED && mw == ntb->b2b_mw_idx), ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count)); KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw")); return (ntb->reg->mw_bar[mw]); } static inline bool bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar) { /* XXX This assertion could be stronger. 
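 * One possibility (untested) would be to also reject the config BAR,
 * e.g.: KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS, ("bogus bar"));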
*/ KASSERT(bar < NTB_MAX_BARS, ("bogus bar")); return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(NTB_SPLIT_BAR)); } static inline void bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base, uint32_t *xlat, uint32_t *lmt) { uint32_t basev, lmtv, xlatv; switch (bar) { case NTB_B2B_BAR_1: basev = ntb->xlat_reg->bar2_base; lmtv = ntb->xlat_reg->bar2_limit; xlatv = ntb->xlat_reg->bar2_xlat; break; case NTB_B2B_BAR_2: basev = ntb->xlat_reg->bar4_base; lmtv = ntb->xlat_reg->bar4_limit; xlatv = ntb->xlat_reg->bar4_xlat; break; case NTB_B2B_BAR_3: basev = ntb->xlat_reg->bar5_base; lmtv = ntb->xlat_reg->bar5_limit; xlatv = ntb->xlat_reg->bar5_xlat; break; default: KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS, ("bad bar")); basev = lmtv = xlatv = 0; break; } if (base != NULL) *base = basev; if (xlat != NULL) *xlat = xlatv; if (lmt != NULL) *lmt = lmtv; } static int ntb_map_pci_bars(struct ntb_softc *ntb) { int rc; ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0); rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]); if (rc != 0) goto out; ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2); rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_1]); if (rc != 0) goto out; ntb->bar_info[NTB_B2B_BAR_1].psz_off = XEON_PBAR23SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_1].ssz_off = XEON_SBAR23SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off = XEON_PBAR2XLAT_OFFSET; ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4); /* XXX Are shared MW B2Bs write-combining? */ if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP) && !HAS_FEATURE(NTB_SPLIT_BAR)) rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]); else rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]); ntb->bar_info[NTB_B2B_BAR_2].psz_off = XEON_PBAR4SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET; if (!HAS_FEATURE(NTB_SPLIT_BAR)) goto out; ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5); if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]); else rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]); ntb->bar_info[NTB_B2B_BAR_3].psz_off = XEON_PBAR5SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_3].ssz_off = XEON_SBAR5SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off = XEON_PBAR5XLAT_OFFSET; out: if (rc != 0) device_printf(ntb->device, "unable to allocate pci resource\n"); return (rc); } static void print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar, const char *kind) { device_printf(ntb->device, "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind); } static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) { bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE); if (bar->pci_resource == NULL) return (ENXIO); save_bar_parameters(bar); print_map_success(ntb, bar, "mmr"); return (0); } static int map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) { int rc; uint8_t bar_size_bits = 0; bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE); if (bar->pci_resource == NULL) return (ENXIO); save_bar_parameters(bar); /* * Ivytown NTB BAR sizes are misreported by the hardware due to a * hardware issue. 
To work around this, query the size it should be * configured to by the device and modify the resource to correspond to * this new size. The BIOS on systems with this problem is required to * provide enough address space to allow the driver to make this change * safely. * * Ideally I could have just specified the size when I allocated the * resource like: * bus_alloc_resource(ntb->device, * SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul, * 1ul << bar_size_bits, RF_ACTIVE); * but the PCI driver does not honor the size in this call, so we have * to modify it after the fact. */ if (HAS_FEATURE(NTB_BAR_SIZE_4K)) { if (bar->pci_resource_id == PCIR_BAR(2)) bar_size_bits = pci_read_config(ntb->device, XEON_PBAR23SZ_OFFSET, 1); else bar_size_bits = pci_read_config(ntb->device, XEON_PBAR45SZ_OFFSET, 1); rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY, bar->pci_resource, bar->pbase, bar->pbase + (1ul << bar_size_bits) - 1); if (rc != 0) { device_printf(ntb->device, "unable to resize bar\n"); return (rc); } save_bar_parameters(bar); } + print_map_success(ntb, bar, "mw"); + if (g_ntb_enable_wc == 0) + return (0); + /* Mark bar region as write combining to improve performance. */ rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, VM_MEMATTR_WRITE_COMBINING); - print_map_success(ntb, bar, "mw"); - if (rc == 0) + if (rc == 0) { + bar->mapped_wc = true; device_printf(ntb->device, "Marked BAR%d v:[%p-%p] p:[%p-%p] as " "WRITE_COMBINING.\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1)); - else + } else device_printf(ntb->device, "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as " "WRITE_COMBINING: %d\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), rc); /* Proceed anyway */ return (0); } static void ntb_unmap_pci_bar(struct ntb_softc *ntb) { struct ntb_pci_bar_info *current_bar; int i; for (i = 0; i < NTB_MAX_BARS; i++) { current_bar = &ntb->bar_info[i]; if (current_bar->pci_resource != NULL) bus_release_resource(ntb->device, SYS_RES_MEMORY, current_bar->pci_resource_id, current_bar->pci_resource); } } static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors) { uint32_t i; int rc; for (i = 0; i < num_vectors; i++) { ntb->int_info[i].rid = i + 1; ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE); if (ntb->int_info[i].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (ENOMEM); } ntb->int_info[i].tag = NULL; ntb->allocated_interrupts++; rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr, &ntb->msix_vec[i], &ntb->int_info[i].tag); if (rc != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } } return (0); } /* * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector * cannot be allocated for each MSI-X message. JHB seems to think remapping * should be okay. This tunable should enable us to test that hypothesis * when someone gets their hands on some Xeon hardware. */ static int ntb_force_remap_mode; SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN, &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped" " to a smaller number of ithreads, even if the desired number are " "available"); /* * In case it is NOT ok, give consumers an abort button. 
*/ static int ntb_prefer_intx; SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN, &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather " "than remapping MSI-X messages over available slots (match Linux driver " "behavior)"); /* * Remap the desired number of MSI-X messages to available ithreads in a simple * round-robin fashion. */ static int ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail) { u_int *vectors; uint32_t i; int rc; if (ntb_prefer_intx != 0) return (ENXIO); vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK); for (i = 0; i < desired; i++) vectors[i] = (i % avail) + 1; rc = pci_remap_msix(dev, desired, vectors); free(vectors, M_NTB); return (rc); } static int ntb_init_isr(struct ntb_softc *ntb) { uint32_t desired_vectors, num_vectors; int rc; ntb->allocated_interrupts = 0; ntb->last_ts = ticks; /* * Mask all doorbell interrupts. */ ntb_db_set_mask(ntb, ntb->db_valid_mask); num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device), ntb->db_count); if (desired_vectors >= 1) { rc = pci_alloc_msix(ntb->device, &num_vectors); if (ntb_force_remap_mode != 0 && rc == 0 && num_vectors == desired_vectors) num_vectors--; if (rc == 0 && num_vectors < desired_vectors) { rc = ntb_remap_msix(ntb->device, desired_vectors, num_vectors); if (rc == 0) num_vectors = desired_vectors; else pci_release_msi(ntb->device); } if (rc != 0) num_vectors = 1; } else num_vectors = 1; if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) { ntb->db_vec_count = 1; ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT; rc = ntb_setup_legacy_interrupt(ntb); } else { ntb_create_msix_vec(ntb, num_vectors); rc = ntb_setup_msix(ntb, num_vectors); } if (rc != 0) { device_printf(ntb->device, "Error allocating interrupts: %d\n", rc); ntb_free_msix_vec(ntb); } return (rc); } static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb) { int rc; ntb->int_info[0].rid = 0; ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE); if (ntb->int_info[0].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (ENOMEM); } ntb->int_info[0].tag = NULL; ntb->allocated_interrupts = 1; rc = bus_setup_intr(ntb->device, ntb->int_info[0].res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr, ntb, &ntb->int_info[0].tag); if (rc != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } return (0); } static void ntb_teardown_interrupts(struct ntb_softc *ntb) { struct ntb_int_info *current_int; int i; for (i = 0; i < ntb->allocated_interrupts; i++) { current_int = &ntb->int_info[i]; if (current_int->tag != NULL) bus_teardown_intr(ntb->device, current_int->res, current_int->tag); if (current_int->res != NULL) bus_release_resource(ntb->device, SYS_RES_IRQ, rman_get_rid(current_int->res), current_int->res); } ntb_free_msix_vec(ntb); pci_release_msi(ntb->device); } /* * Doorbell register and mask are 64-bit on Atom, 16-bit on Xeon. Abstract it * out to make code clearer. 
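 * On Xeon only the low 16 bits are significant, so db_iowrite_raw()
 * truncates the value: ntb_reg_write(2, regoff, (uint16_t)val).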
*/ static inline uint64_t db_ioread(struct ntb_softc *ntb, uint64_t regoff) { if (ntb->type == NTB_ATOM) return (ntb_reg_read(8, regoff)); KASSERT(ntb->type == NTB_XEON, ("bad ntb type")); return (ntb_reg_read(2, regoff)); } static inline void db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val) { KASSERT((val & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(val & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); if (regoff == ntb->self_reg->db_mask) DB_MASK_ASSERT(ntb, MA_OWNED); db_iowrite_raw(ntb, regoff, val); } static inline void db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val) { if (ntb->type == NTB_ATOM) { ntb_reg_write(8, regoff, val); return; } KASSERT(ntb->type == NTB_XEON, ("bad ntb type")); ntb_reg_write(2, regoff, (uint16_t)val); } void ntb_db_set_mask(struct ntb_softc *ntb, uint64_t bits) { DB_MASK_LOCK(ntb); ntb->db_mask |= bits; db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); DB_MASK_UNLOCK(ntb); } void ntb_db_clear_mask(struct ntb_softc *ntb, uint64_t bits) { KASSERT((bits & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(bits & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); DB_MASK_LOCK(ntb); ntb->db_mask &= ~bits; db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); DB_MASK_UNLOCK(ntb); } uint64_t ntb_db_read(struct ntb_softc *ntb) { return (db_ioread(ntb, ntb->self_reg->db_bell)); } void ntb_db_clear(struct ntb_softc *ntb, uint64_t bits) { KASSERT((bits & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(bits & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); db_iowrite(ntb, ntb->self_reg->db_bell, bits); } static inline uint64_t ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector) { uint64_t shift, mask; shift = ntb->db_vec_shift; mask = (1ull << shift) - 1; return (mask << (shift * db_vector)); } static void ntb_interrupt(struct ntb_softc *ntb, uint32_t vec) { uint64_t vec_mask; ntb->last_ts = ticks; vec_mask = ntb_vec_mask(ntb, vec); if ((vec_mask & ntb->db_link_mask) != 0) { if (ntb_poll_link(ntb)) ntb_link_event(ntb); } if ((vec_mask & ntb->db_valid_mask) != 0) ntb_db_event(ntb, vec); } static void ndev_vec_isr(void *arg) { struct ntb_vec *nvec = arg; ntb_interrupt(nvec->ntb, nvec->num); } static void ndev_irq_isr(void *arg) { /* If we couldn't set up MSI-X, we only have the one vector. 
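 * The legacy path in ntb_init_isr() sets db_vec_count = 1 and
 * db_vec_shift = XEON_DB_TOTAL_SHIFT, so ntb_vec_mask(ntb, 0) covers
 * every doorbell bit and all events are serviced through this handler.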
*/ ntb_interrupt(arg, 0); } static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors) { uint32_t i; ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB, M_ZERO | M_WAITOK); for (i = 0; i < num_vectors; i++) { ntb->msix_vec[i].num = i; ntb->msix_vec[i].ntb = ntb; } return (0); } static void ntb_free_msix_vec(struct ntb_softc *ntb) { if (ntb->msix_vec == NULL) return; free(ntb->msix_vec, M_NTB); ntb->msix_vec = NULL; } static struct ntb_hw_info * ntb_get_device_info(uint32_t device_id) { struct ntb_hw_info *ep = pci_ids; while (ep->device_id) { if (ep->device_id == device_id) return (ep); ++ep; } return (NULL); } static void ntb_teardown_xeon(struct ntb_softc *ntb) { if (ntb->reg != NULL) ntb_link_disable(ntb); } static void ntb_detect_max_mw(struct ntb_softc *ntb) { if (ntb->type == NTB_ATOM) { ntb->mw_count = ATOM_MW_COUNT; return; } if (HAS_FEATURE(NTB_SPLIT_BAR)) ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT; else ntb->mw_count = XEON_SNB_MW_COUNT; } static int ntb_detect_xeon(struct ntb_softc *ntb) { uint8_t ppd, conn_type; ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1); ntb->ppd = ppd; if ((ppd & XEON_PPD_DEV_TYPE) != 0) ntb->dev_type = NTB_DEV_DSD; else ntb->dev_type = NTB_DEV_USD; if ((ppd & XEON_PPD_SPLIT_BAR) != 0) ntb->features |= NTB_SPLIT_BAR; /* SB01BASE_LOCKUP errata is a superset of SDOORBELL errata */ if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) ntb->features |= NTB_SDOORBELL_LOCKUP; conn_type = ppd & XEON_PPD_CONN_TYPE; switch (conn_type) { case NTB_CONN_B2B: ntb->conn_type = conn_type; break; case NTB_CONN_RP: case NTB_CONN_TRANSPARENT: default: device_printf(ntb->device, "Unsupported connection type: %u\n", (unsigned)conn_type); return (ENXIO); } return (0); } static int ntb_detect_atom(struct ntb_softc *ntb) { uint32_t ppd, conn_type; ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4); ntb->ppd = ppd; if ((ppd & ATOM_PPD_DEV_TYPE) != 0) ntb->dev_type = NTB_DEV_DSD; else ntb->dev_type = NTB_DEV_USD; conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8; switch (conn_type) { case NTB_CONN_B2B: ntb->conn_type = conn_type; break; default: device_printf(ntb->device, "Unsupported NTB configuration\n"); return (ENXIO); } return (0); } static int ntb_xeon_init_dev(struct ntb_softc *ntb) { int rc; ntb->spad_count = XEON_SPAD_COUNT; ntb->db_count = XEON_DB_COUNT; ntb->db_link_mask = XEON_DB_LINK_BIT; ntb->db_vec_count = XEON_DB_MSIX_VECTOR_COUNT; ntb->db_vec_shift = XEON_DB_MSIX_VECTOR_SHIFT; if (ntb->conn_type != NTB_CONN_B2B) { device_printf(ntb->device, "Connection type %d not supported\n", ntb->conn_type); return (ENXIO); } ntb->reg = &xeon_reg; ntb->self_reg = &xeon_pri_reg; ntb->peer_reg = &xeon_b2b_reg; ntb->xlat_reg = &xeon_sec_xlat; /* * There is a Xeon hardware errata related to writes to SDOORBELL or * B2BDOORBELL in conjunction with inbound access to NTB MMIO space, * which may hang the system. To workaround this use the second memory * window to access the interrupt and scratch pad registers on the * remote system. */ if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) /* Use the last MW for mapping remote spad */ ntb->b2b_mw_idx = ntb->mw_count - 1; else if (HAS_FEATURE(NTB_B2BDOORBELL_BIT14)) /* * HW Errata on bit 14 of b2bdoorbell register. Writes will not be * mirrored to the remote system. Shrink the number of bits by one, * since bit 14 is the last bit. * * On REGS_THRU_MW errata mode, we don't use the b2bdoorbell register * anyway. Nor for non-B2B connection types. 
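 * As a worked example (assuming XEON_DB_COUNT is 15, i.e. bits 0-14),
 * dropping bit 14 leaves db_count = 14 and a db_valid_mask of
 * (1ull << 14) - 1 = 0x3fff.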
*/ ntb->db_count = XEON_DB_COUNT - 1; ntb->db_valid_mask = (1ull << ntb->db_count) - 1; if (ntb->dev_type == NTB_DEV_USD) rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr, &xeon_b2b_usd_addr); else rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr, &xeon_b2b_dsd_addr); if (rc != 0) return (rc); /* Enable Bus Master and Memory Space on the secondary side */ ntb_reg_write(2, XEON_SPCICMD_OFFSET, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); /* * Mask all doorbell interrupts. */ ntb_db_set_mask(ntb, ntb->db_valid_mask); rc = ntb_init_isr(ntb); return (rc); } static int ntb_atom_init_dev(struct ntb_softc *ntb) { int error; KASSERT(ntb->conn_type == NTB_CONN_B2B, ("Unsupported NTB configuration (%d)\n", ntb->conn_type)); ntb->spad_count = ATOM_SPAD_COUNT; ntb->db_count = ATOM_DB_COUNT; ntb->db_vec_count = ATOM_DB_MSIX_VECTOR_COUNT; ntb->db_vec_shift = ATOM_DB_MSIX_VECTOR_SHIFT; ntb->db_valid_mask = (1ull << ntb->db_count) - 1; ntb->reg = &atom_reg; ntb->self_reg = &atom_pri_reg; ntb->peer_reg = &atom_b2b_reg; ntb->xlat_reg = &atom_sec_xlat; /* * FIXME - MSI-X bug on early Atom HW, remove once internal issue is * resolved. Mask transaction layer internal parity errors. */ pci_write_config(ntb->device, 0xFC, 0x4, 4); configure_atom_secondary_side_bars(ntb); /* Enable Bus Master and Memory Space on the secondary side */ ntb_reg_write(2, ATOM_SPCICMD_OFFSET, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); error = ntb_init_isr(ntb); if (error != 0) return (error); /* Initiate PCI-E link training */ ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb); return (0); } /* XXX: Linux driver doesn't seem to do any of this for Atom. */ static void configure_atom_secondary_side_bars(struct ntb_softc *ntb) { if (ntb->dev_type == NTB_DEV_USD) { ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_ADDR64); ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_ADDR64); ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); } else { ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_ADDR64); ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_ADDR64); ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); } } /* * When working around Xeon SDOORBELL errata by remapping remote registers in a * MW, limit the B2B MW to half a MW. By sharing a MW, half the shared MW * remains for use by a higher layer. * * Will only be used if working around SDOORBELL errata and the BIOS-configured * MW size is sufficiently large. */ static unsigned int ntb_b2b_mw_share; SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share, 0, "If enabled (non-zero), prefer to share half of the B2B peer register " "MW with higher level consumers. 
Both sides of the NTB MUST set the same " "value here."); static void xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx, enum ntb_bar regbar) { struct ntb_pci_bar_info *bar; uint8_t bar_sz; if (!HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3) return; bar = &ntb->bar_info[idx]; bar_sz = pci_read_config(ntb->device, bar->psz_off, 1); if (idx == regbar) { if (ntb->b2b_off != 0) bar_sz--; else bar_sz = 0; } pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1); bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1); (void)bar_sz; } static void xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr, enum ntb_bar idx, enum ntb_bar regbar) { uint64_t reg_val; uint32_t base_reg, lmt_reg; bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg); if (idx == regbar) bar_addr += ntb->b2b_off; if (!bar_is_64bit(ntb, idx)) { ntb_reg_write(4, base_reg, bar_addr); reg_val = ntb_reg_read(4, base_reg); (void)reg_val; ntb_reg_write(4, lmt_reg, bar_addr); reg_val = ntb_reg_read(4, lmt_reg); (void)reg_val; } else { ntb_reg_write(8, base_reg, bar_addr); reg_val = ntb_reg_read(8, base_reg); (void)reg_val; ntb_reg_write(8, lmt_reg, bar_addr); reg_val = ntb_reg_read(8, lmt_reg); (void)reg_val; } } static void xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx) { struct ntb_pci_bar_info *bar; bar = &ntb->bar_info[idx]; if (HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) { ntb_reg_write(4, bar->pbarxlat_off, base_addr); base_addr = ntb_reg_read(4, bar->pbarxlat_off); } else { ntb_reg_write(8, bar->pbarxlat_off, base_addr); base_addr = ntb_reg_read(8, bar->pbarxlat_off); } (void)base_addr; } static int xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr) { struct ntb_pci_bar_info *b2b_bar; vm_size_t bar_size; uint64_t bar_addr; enum ntb_bar b2b_bar_num, i; if (ntb->b2b_mw_idx == B2B_MW_DISABLED) { b2b_bar = NULL; b2b_bar_num = NTB_CONFIG_BAR; ntb->b2b_off = 0; } else { b2b_bar_num = ntb_mw_to_bar(ntb, ntb->b2b_mw_idx); KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS, ("invalid b2b mw bar")); b2b_bar = &ntb->bar_info[b2b_bar_num]; bar_size = b2b_bar->size; if (ntb_b2b_mw_share != 0 && (bar_size >> 1) >= XEON_B2B_MIN_SIZE) ntb->b2b_off = bar_size >> 1; else if (bar_size >= XEON_B2B_MIN_SIZE) { ntb->b2b_off = 0; ntb->mw_count--; } else { device_printf(ntb->device, "B2B bar size is too small!\n"); return (EIO); } } /* * Reset the secondary bar sizes to match the primary bar sizes. * (Except, disable or halve the size of the B2B secondary bar.) */ for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++) xeon_reset_sbar_size(ntb, i, b2b_bar_num); bar_addr = 0; if (b2b_bar_num == NTB_CONFIG_BAR) bar_addr = addr->bar0_addr; else if (b2b_bar_num == NTB_B2B_BAR_1) bar_addr = addr->bar2_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR)) bar_addr = addr->bar4_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2) bar_addr = addr->bar4_addr32; else if (b2b_bar_num == NTB_B2B_BAR_3) bar_addr = addr->bar5_addr32; else KASSERT(false, ("invalid bar")); ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr); /* * Other SBARs are normally hit by the PBAR xlat, except for the b2b * register BAR. The B2B BAR is either disabled above or configured * half-size. It starts at PBAR xlat + offset. * * Also set up incoming BAR limits == base (zero length window). 
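 * Setting limit == base yields a zero-length window, which effectively
 * blocks incoming accesses until a real translation is programmed;
 * xeon_set_sbar_base_and_limit() below writes the same bar_addr to both
 * the base and limit registers.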
*/ xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1, b2b_bar_num); if (HAS_FEATURE(NTB_SPLIT_BAR)) { xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32, NTB_B2B_BAR_2, b2b_bar_num); xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32, NTB_B2B_BAR_3, b2b_bar_num); } else xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64, NTB_B2B_BAR_2, b2b_bar_num); /* Zero incoming translation addrs */ ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0); ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0); /* Zero outgoing translation limits (whole bar size windows) */ ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0); ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0); /* Set outgoing translation offsets */ xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1); if (HAS_FEATURE(NTB_SPLIT_BAR)) { xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2); xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3); } else xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2); /* Set the translation offset for B2B registers */ bar_addr = 0; if (b2b_bar_num == NTB_CONFIG_BAR) bar_addr = peer_addr->bar0_addr; else if (b2b_bar_num == NTB_B2B_BAR_1) bar_addr = peer_addr->bar2_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR)) bar_addr = peer_addr->bar4_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2) bar_addr = peer_addr->bar4_addr32; else if (b2b_bar_num == NTB_B2B_BAR_3) bar_addr = peer_addr->bar5_addr32; else KASSERT(false, ("invalid bar")); /* * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits * at a time. */ ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff); ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32); return (0); } static inline bool link_is_up(struct ntb_softc *ntb) { if (ntb->type == NTB_XEON) { if (ntb->conn_type == NTB_CONN_TRANSPARENT) return (true); return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0); } KASSERT(ntb->type == NTB_ATOM, ("ntb type")); return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0); } static inline bool atom_link_is_err(struct ntb_softc *ntb) { uint32_t status; KASSERT(ntb->type == NTB_ATOM, ("ntb type")); status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET); if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0) return (true); status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET); return ((status & ATOM_IBIST_ERR_OFLOW) != 0); } /* Atom does not have link status interrupt, poll on that platform */ static void atom_link_hb(void *arg) { struct ntb_softc *ntb = arg; sbintime_t timo, poll_ts; timo = NTB_HB_TIMEOUT * hz; poll_ts = ntb->last_ts + timo; /* * Delay polling the link status if an interrupt was received, unless * the cached link status says the link is down. */ if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) { timo = poll_ts - ticks; goto out; } if (ntb_poll_link(ntb)) ntb_link_event(ntb); if (!link_is_up(ntb) && atom_link_is_err(ntb)) { /* Link is down with error, proceed with recovery */ callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb); return; } out: callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb); } static void atom_perform_link_restart(struct ntb_softc *ntb) { uint32_t status; /* Driver resets the NTB ModPhy lanes - magic! 
 */
	ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);

	/* Driver waits 100ms to allow the NTB ModPhy to settle */
	pause("ModPhy", hz / 10);

	/* Clear AER Errors, write to clear */
	status = ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
	status &= PCIM_AER_COR_REPLAY_ROLLOVER;
	ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);

	/* Clear unexpected electrical idle event in LTSSM, write to clear */
	status = ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
	status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
	ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);

	/* Clear DeSkew Buffer error, write to clear */
	status = ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
	status |= ATOM_DESKEWSTS_DBERR;
	ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);

	status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
	status &= ATOM_IBIST_ERR_OFLOW;
	ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);

	/* Releases the NTB state machine to allow the link to retrain */
	status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
	status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
	ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
}

/*
 * ntb_set_ctx() - associate a driver context with an ntb device
 * @ntb: NTB device context
 * @ctx: Driver context
 * @ctx_ops: Driver context operations
 *
 * Associate a driver context and operations with an ntb device. The context is
 * provided by the client driver, and the driver may associate a different
 * context with each ntb device.
 *
 * Return: Zero if the context is associated, otherwise an error number.
 */
int
ntb_set_ctx(struct ntb_softc *ntb, void *ctx, const struct ntb_ctx_ops *ops)
{

	if (ctx == NULL || ops == NULL)
		return (EINVAL);
	if (ntb->ctx_ops != NULL)
		return (EINVAL);

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL) {
		CTX_UNLOCK(ntb);
		return (EINVAL);
	}
	ntb->ntb_ctx = ctx;
	ntb->ctx_ops = ops;
	CTX_UNLOCK(ntb);

	return (0);
}

/*
 * It is expected that this will only be used from contexts where the ctx_lock
 * is not needed to protect ntb_ctx lifetime.
 */
void *
ntb_get_ctx(struct ntb_softc *ntb, const struct ntb_ctx_ops **ops)
{

	KASSERT(ntb->ntb_ctx != NULL && ntb->ctx_ops != NULL, ("bogus"));
	if (ops != NULL)
		*ops = ntb->ctx_ops;
	return (ntb->ntb_ctx);
}

/*
 * ntb_clear_ctx() - disassociate any driver context from an ntb device
 * @ntb: NTB device context
 *
 * Clear any association that may exist between a driver context and the ntb
 * device.
 */
void
ntb_clear_ctx(struct ntb_softc *ntb)
{

	CTX_LOCK(ntb);
	ntb->ntb_ctx = NULL;
	ntb->ctx_ops = NULL;
	CTX_UNLOCK(ntb);
}

/*
 * ntb_link_event() - notify driver context of a change in link status
 * @ntb: NTB device context
 *
 * Notify the driver context that the link status may have changed. The driver
 * should call ntb_link_is_up() to get the current status.
 */
void
ntb_link_event(struct ntb_softc *ntb)
{

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL && ntb->ctx_ops->link_event != NULL)
		ntb->ctx_ops->link_event(ntb->ntb_ctx);
	CTX_UNLOCK(ntb);
}

/*
 * ntb_db_event() - notify driver context of a doorbell event
 * @ntb: NTB device context
 * @vector: Interrupt vector number
 *
 * Notify the driver context of a doorbell event. If hardware supports
 * multiple interrupt vectors for doorbells, the vector number indicates which
 * vector received the interrupt. The vector number is relative to the first
 * vector used for doorbells, starting at zero, and must be less than
 * ntb_db_vector_count().
 * The driver may call ntb_db_read() to check which
 * doorbell bits need service, and ntb_db_vector_mask() to determine which of
 * those bits are associated with the vector number.
 */
static void
ntb_db_event(struct ntb_softc *ntb, uint32_t vec)
{

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL && ntb->ctx_ops->db_event != NULL)
		ntb->ctx_ops->db_event(ntb->ntb_ctx, vec);
	CTX_UNLOCK(ntb);
}

/*
 * ntb_link_enable() - enable the link on the secondary side of the ntb
 * @ntb: NTB device context
 * @max_speed: The maximum link speed expressed as PCIe generation number[0]
 * @max_width: The maximum link width expressed as the number of PCIe lanes[0]
 *
 * Enable the link on the secondary side of the ntb. This can only be done
 * from the primary side of the ntb in primary or b2b topology. The ntb device
 * should train the link to its maximum speed and width, or the requested speed
 * and width, whichever is smaller, if supported.
 *
 * Return: Zero on success, otherwise an error number.
 *
 * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed
 * and width inputs will be ignored.
 */
int
ntb_link_enable(struct ntb_softc *ntb, enum ntb_speed s __unused,
    enum ntb_width w __unused)
{
	uint32_t cntl;

	if (ntb->type == NTB_ATOM) {
		pci_write_config(ntb->device, NTB_PPD_OFFSET,
		    ntb->ppd | ATOM_PPD_INIT_LINK, 4);
		return (0);
	}

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
		ntb_link_event(ntb);
		return (0);
	}

	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
	cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
	cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
	cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
	return (0);
}

/*
 * ntb_link_disable() - disable the link on the secondary side of the ntb
 * @ntb: NTB device context
 *
 * Disable the link on the secondary side of the ntb. This can only be done
 * from the primary side of the ntb in primary or b2b topology. The ntb device
 * should disable the link. Returning from this call must indicate that a
 * barrier has passed, after which no more writes may pass in either direction
 * across the link, except if this call returns an error number.
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_link_disable(struct ntb_softc *ntb)
{
	uint32_t cntl;

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
		ntb_link_event(ntb);
		return (0);
	}

	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
	cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
	cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
	cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
	return (0);
}

static void
recover_atom_link(void *arg)
{
	struct ntb_softc *ntb = arg;
	unsigned speed, width, oldspeed, oldwidth;
	uint32_t status32;

	atom_perform_link_restart(ntb);

	/*
	 * There is a potential race between the two NTB devices recovering at
	 * the same time. If the times are the same, the link will not recover
	 * and the driver will be stuck in this loop forever. Add a random
	 * interval to the recovery time to prevent this race.
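 * With ATOM_LINK_RECOVERY_TIME == 500 (ms), the pause below lasts a
 * uniformly random 500..999 ms, making it unlikely that both sides keep
 * restarting in lockstep.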
*/ status32 = arc4random() % ATOM_LINK_RECOVERY_TIME; pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000); if (atom_link_is_err(ntb)) goto retry; status32 = ntb_reg_read(4, ntb->reg->ntb_ctl); if ((status32 & ATOM_CNTL_LINK_DOWN) != 0) goto out; status32 = ntb_reg_read(4, ntb->reg->lnk_sta); width = NTB_LNK_STA_WIDTH(status32); speed = status32 & NTB_LINK_SPEED_MASK; oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta); oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK; if (oldwidth != width || oldspeed != speed) goto retry; out: callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb, ntb); return; retry: callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link, ntb); } /* * Polls the HW link status register(s); returns true if something has changed. */ static bool ntb_poll_link(struct ntb_softc *ntb) { uint32_t ntb_cntl; uint16_t reg_val; if (ntb->type == NTB_ATOM) { ntb_cntl = ntb_reg_read(4, ntb->reg->ntb_ctl); if (ntb_cntl == ntb->ntb_ctl) return (false); ntb->ntb_ctl = ntb_cntl; ntb->lnk_sta = ntb_reg_read(4, ntb->reg->lnk_sta); } else { db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask); reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2); if (reg_val == ntb->lnk_sta) return (false); ntb->lnk_sta = reg_val; } return (true); } static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *ntb) { if (!link_is_up(ntb)) return (NTB_SPEED_NONE); return (ntb->lnk_sta & NTB_LINK_SPEED_MASK); } static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *ntb) { if (!link_is_up(ntb)) return (NTB_WIDTH_NONE); return (NTB_LNK_STA_WIDTH(ntb->lnk_sta)); } SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW, 0, "Driver state, statistics, and HW registers"); #define NTB_REGSZ_MASK (3ul << 30) #define NTB_REG_64 (1ul << 30) #define NTB_REG_32 (2ul << 30) #define NTB_REG_16 (3ul << 30) #define NTB_REG_8 (0ul << 30) #define NTB_DB_READ (1ul << 29) #define NTB_PCI_REG (1ul << 28) #define NTB_REGFLAGS_MASK (NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG) static void ntb_sysctl_init(struct ntb_softc *ntb) { struct sysctl_oid_list *tree_par, *regpar, *statpar, *errpar; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree, *tmptree; ctx = device_get_sysctl_ctx(ntb->device); tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device)), OID_AUTO, "debug_info", CTLFLAG_RD, NULL, "Driver state, statistics, and HW registers"); tree_par = SYSCTL_CHILDREN(tree); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD, &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD, &ntb->dev_type, 0, "0 - USD; 1 - DSD"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD, &ntb->ppd, 0, "Raw PPD register (cached)"); if (ntb->b2b_mw_idx != B2B_MW_DISABLED) { SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD, &ntb->b2b_mw_idx, 0, "Index of the MW used for B2B remote register access"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off", CTLFLAG_RD, &ntb->b2b_off, "If non-zero, offset of B2B register region in shared MW"); } SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features", CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_features, "A", "Features/errata of this NTB device"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD, __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0, "NTB CTL register (cached)"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD, __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0, "LNK STA register (cached)"); 
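	/*
	 * The raw-register nodes below hand sysctl_handle_register() an arg2
	 * that packs the access width and flags into the top bits and the
	 * register offset into the low bits (see NTB_REGFLAGS_MASK); e.g.
	 * NTB_REG_32 | 0x19c reads the 32-bit link-capabilities register.
	 */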
SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "link_status", CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_link_status, "A", "Link status"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD, &ntb->mw_count, 0, "MW count (excl. non-shared B2B register BAR)"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD, &ntb->spad_count, 0, "Scratchpad count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD, &ntb->db_count, 0, "Doorbell count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD, &ntb->db_vec_count, 0, "Doorbell vector count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD, &ntb->db_vec_shift, 0, "Doorbell vector shift"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD, &ntb->db_valid_mask, "Doorbell valid mask"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD, &ntb->db_link_mask, "Doorbell link mask"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD, &ntb->db_mask, "Doorbell mask (cached)"); tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers", CTLFLAG_RD, NULL, "Raw HW registers (big-endian)"); regpar = SYSCTL_CHILDREN(tmptree); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->reg->ntb_ctl, sysctl_handle_register, "IU", "NTB Control register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 0x19c, sysctl_handle_register, "IU", "NTB Link Capabilities"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 0x1a0, sysctl_handle_register, "IU", "NTB Link Control register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask, sysctl_handle_register, "QU", "Doorbell mask register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell, sysctl_handle_register, "QU", "Doorbell register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar2_xlat, sysctl_handle_register, "QU", "Incoming XLAT23 register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar4_xlat, sysctl_handle_register, "IU", "Incoming XLAT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar5_xlat, sysctl_handle_register, "IU", "Incoming XLAT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar4_xlat, sysctl_handle_register, "QU", "Incoming XLAT45 register"); } SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar2_limit, sysctl_handle_register, "QU", "Incoming LMT23 register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar4_limit, sysctl_handle_register, "IU", "Incoming LMT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar5_limit, sysctl_handle_register, "IU", "Incoming LMT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45", CTLFLAG_RD | 
CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar4_limit, sysctl_handle_register, "QU", "Incoming LMT45 register"); } if (ntb->type == NTB_ATOM) return; tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats", CTLFLAG_RD, NULL, "Xeon HW statistics"); statpar = SYSCTL_CHILDREN(tmptree); SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_16 | XEON_USMEMMISS_OFFSET, sysctl_handle_register, "SU", "Upstream Memory Miss"); tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err", CTLFLAG_RD, NULL, "Xeon HW errors"); errpar = SYSCTL_CHILDREN(tmptree); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET, sysctl_handle_register, "CU", "PPD"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET, sysctl_handle_register, "CU", "PBAR23 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET, sysctl_handle_register, "CU", "PBAR4 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET, sysctl_handle_register, "CU", "PBAR5 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET, sysctl_handle_register, "CU", "SBAR23 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_SBAR4SZ_OFFSET, sysctl_handle_register, "CU", "SBAR4 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET, sysctl_handle_register, "CU", "SBAR5 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET, sysctl_handle_register, "SU", "DEVSTS"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET, sysctl_handle_register, "SU", "LNKSTS"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET, sysctl_handle_register, "SU", "SLNKSTS"); SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET, sysctl_handle_register, "IU", "UNCERRSTS"); SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET, sysctl_handle_register, "IU", "CORERRSTS"); if (ntb->conn_type != NTB_CONN_B2B) return; SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off, sysctl_handle_register, "QU", "Outgoing XLAT23 register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, sysctl_handle_register, "IU", "Outgoing XLAT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off, sysctl_handle_register, "IU", "Outgoing XLAT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45", CTLFLAG_RD | CTLTYPE_OPAQUE, 
ntb, NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, sysctl_handle_register, "QU", "Outgoing XLAT45 register"); } SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | XEON_PBAR2LMT_OFFSET, sysctl_handle_register, "QU", "Outgoing LMT23 register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | XEON_PBAR4LMT_OFFSET, sysctl_handle_register, "IU", "Outgoing LMT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | XEON_PBAR5LMT_OFFSET, sysctl_handle_register, "IU", "Outgoing LMT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | XEON_PBAR4LMT_OFFSET, sysctl_handle_register, "QU", "Outgoing LMT45 register"); } SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar0_base, sysctl_handle_register, "QU", "Secondary BAR01 base register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar2_base, sysctl_handle_register, "QU", "Secondary BAR23 base register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar4_base, sysctl_handle_register, "IU", "Secondary BAR4 base register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar5_base, sysctl_handle_register, "IU", "Secondary BAR5 base register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar4_base, sysctl_handle_register, "QU", "Secondary BAR45 base register"); } } static int sysctl_handle_features(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb; struct sbuf sb; int error; error = 0; ntb = arg1; sbuf_new_for_sysctl(&sb, NULL, 256, req); sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR); error = sbuf_finish(&sb); sbuf_delete(&sb); if (error || !req->newptr) return (error); return (EINVAL); } static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb; struct sbuf sb; enum ntb_speed speed; enum ntb_width width; int error; error = 0; ntb = arg1; sbuf_new_for_sysctl(&sb, NULL, 32, req); if (ntb_link_is_up(ntb, &speed, &width)) sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u", (unsigned)speed, (unsigned)width); else sbuf_printf(&sb, "down"); error = sbuf_finish(&sb); sbuf_delete(&sb); if (error || !req->newptr) return (error); return (EINVAL); } static int sysctl_handle_register(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb; const void *outp; uintptr_t sz; uint64_t umv; char be[sizeof(umv)]; size_t outsz; uint32_t reg; bool db, pci; int error; ntb = arg1; reg = arg2 & ~NTB_REGFLAGS_MASK; sz = arg2 & NTB_REGSZ_MASK; db = (arg2 & NTB_DB_READ) != 0; pci = (arg2 & NTB_PCI_REG) != 0; KASSERT(!(db && pci), ("bogus")); if (db) { KASSERT(sz == NTB_REG_64, ("bogus")); umv = db_ioread(ntb, reg); outsz = sizeof(uint64_t); } else { switch (sz) { case NTB_REG_64: if (pci) umv = pci_read_config(ntb->device, reg, 8); else umv = ntb_reg_read(8, reg); outsz = sizeof(uint64_t); break; case NTB_REG_32: if (pci) umv = pci_read_config(ntb->device, reg, 4); else umv = ntb_reg_read(4, reg); outsz = sizeof(uint32_t); break; case NTB_REG_16: if (pci) umv = pci_read_config(ntb->device, reg, 2); 
else umv = ntb_reg_read(2, reg); outsz = sizeof(uint16_t); break; case NTB_REG_8: if (pci) umv = pci_read_config(ntb->device, reg, 1); else umv = ntb_reg_read(1, reg); outsz = sizeof(uint8_t); break; default: panic("bogus"); break; } } /* Encode bigendian so that sysctl -x is legible. */ be64enc(be, umv); outp = ((char *)be) + sizeof(umv) - outsz; error = SYSCTL_OUT(req, outp, outsz); if (error || !req->newptr) return (error); return (EINVAL); } /* * Public API to the rest of the OS */ /** * ntb_get_max_spads() - get the total scratch regs usable * @ntb: pointer to ntb_softc instance * * This function returns the maximum number of 32bit scratchpad registers * usable by the upper layer. * * RETURNS: total number of scratch pad registers available */ uint8_t ntb_get_max_spads(struct ntb_softc *ntb) { return (ntb->spad_count); } uint8_t ntb_mw_count(struct ntb_softc *ntb) { return (ntb->mw_count); } /** * ntb_spad_write() - write to the primary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to the scratchpad register, 0 based * @val: the data value to put into the register * * This function allows writing of a 32bit value to the indexed scratchpad * register. The register resides on the primary (internal) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) { if (idx >= ntb->spad_count) return (EINVAL); ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val); return (0); } /** * ntb_spad_read() - read from the primary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to scratchpad register, 0 based * @val: pointer to 32bit integer for storing the register value * * This function allows reading of the 32bit scratchpad register on * the primary (internal) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) { if (idx >= ntb->spad_count) return (EINVAL); *val = ntb_reg_read(4, ntb->self_reg->spad + idx * 4); return (0); } /** * ntb_peer_spad_write() - write to the secondary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to the scratchpad register, 0 based * @val: the data value to put into the register * * This function allows writing of a 32bit value to the indexed scratchpad * register. The register resides on the secondary (external) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) { if (idx >= ntb->spad_count) return (EINVAL); if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val); else ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val); return (0); } /** * ntb_peer_spad_read() - read from the secondary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to scratchpad register, 0 based * @val: pointer to 32bit integer for storing the register value * * This function allows reading of the 32bit scratchpad register on * the secondary (external) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success.
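 *
 * An illustrative sketch, not part of this change (the index, the token
 * value and the polling loop are assumptions for the example): one host
 * posts a token into its peer's scratchpad, and the peer polls its local
 * side for it:
 *
 *	host A:	(void)ntb_peer_spad_write(ntb, 0, 0x1234);
 *
 *	host B:	uint32_t v;
 *		while (ntb_spad_read(ntb, 0, &v) == 0 && v != 0x1234)
 *			DELAY(10);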
*/ int ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) { if (idx >= ntb->spad_count) return (EINVAL); if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) *val = ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4); else *val = ntb_reg_read(4, ntb->peer_reg->spad + idx * 4); return (0); } /* * ntb_mw_get_range() - get the range of a memory window * @ntb: NTB device context * @idx: Memory window number * @base: OUT - the base address for mapping the memory window * @size: OUT - the size for mapping the memory window * @align: OUT - the base alignment for translating the memory window * @align_size: OUT - the size alignment for translating the memory window + * @plimit: OUT - the highest physical address the window can translate to * * Get the range of a memory window. NULL may be given for any output * parameter if the value is not needed. The base and size may be used for * mapping the memory window, to access the peer memory. The alignment and * size may be used for translating the memory window, for the peer to access * memory on the local system. * * Return: Zero on success, otherwise an error number. */ int ntb_mw_get_range(struct ntb_softc *ntb, unsigned mw_idx, vm_paddr_t *base, - caddr_t *vbase, size_t *size, size_t *align, size_t *align_size) + caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, + bus_addr_t *plimit) { struct ntb_pci_bar_info *bar; + bus_addr_t limit; size_t bar_b2b_off; + enum ntb_bar bar_num; if (mw_idx >= ntb_mw_count(ntb)) return (EINVAL); - bar = &ntb->bar_info[ntb_mw_to_bar(ntb, mw_idx)]; + bar_num = ntb_mw_to_bar(ntb, mw_idx); + bar = &ntb->bar_info[bar_num]; bar_b2b_off = 0; if (mw_idx == ntb->b2b_mw_idx) { KASSERT(ntb->b2b_off != 0, ("user shouldn't get non-shared b2b mw")); bar_b2b_off = ntb->b2b_off; } + if (bar_is_64bit(ntb, bar_num)) + limit = BUS_SPACE_MAXADDR; + else + limit = BUS_SPACE_MAXADDR_32BIT; + if (base != NULL) *base = bar->pbase + bar_b2b_off; if (vbase != NULL) *vbase = bar->vbase + bar_b2b_off; if (size != NULL) *size = bar->size - bar_b2b_off; if (align != NULL) *align = bar->size; if (align_size != NULL) *align_size = 1; + if (plimit != NULL) + *plimit = limit; return (0); } /* * ntb_mw_set_trans() - set the translation of a memory window * @ntb: NTB device context * @idx: Memory window number * @addr: The dma address of the local memory to expose to the peer * @size: The size of the local memory to expose to the peer * * Set the translation of a memory window. The peer may access local memory * through the window starting at the address, up to the size. The address * must be aligned to the alignment specified by ntb_mw_get_range(). The size - * must be aligned to the size alignment specified by ntb_mw_get_range(). + * must be aligned to the size alignment specified by ntb_mw_get_range(). The + * address must be below the plimit specified by ntb_mw_get_range() (i.e. for + * 32-bit BARs). * * Return: Zero on success, otherwise an error number.
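 *
 * An illustrative sketch, not part of this change (window 0, the use of
 * contigmalloc and the error handling are assumptions): pairing
 * ntb_mw_get_range() with this function so the exposed buffer honors the
 * window's alignment and address limit:
 *
 *	vm_paddr_t base;
 *	caddr_t vbase;
 *	size_t size, align, align_size;
 *	bus_addr_t plimit;
 *	void *buf;
 *
 *	if (ntb_mw_get_range(ntb, 0, &base, &vbase, &size, &align,
 *	    &align_size, &plimit) != 0)
 *		return;
 *	buf = contigmalloc(size, M_DEVBUF, M_WAITOK, 0, plimit, align, 0);
 *	if (ntb_mw_set_trans(ntb, 0, vtophys(buf), size) != 0)
 *		(roll back the allocation)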
*/ int ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr, size_t size) { struct ntb_pci_bar_info *bar; uint64_t base, limit, reg_val; size_t bar_size, mw_size; uint32_t base_reg, xlat_reg, limit_reg; enum ntb_bar bar_num; if (idx >= ntb_mw_count(ntb)) return (EINVAL); bar_num = ntb_mw_to_bar(ntb, idx); bar = &ntb->bar_info[bar_num]; bar_size = bar->size; if (idx == ntb->b2b_mw_idx) mw_size = bar_size - ntb->b2b_off; else mw_size = bar_size; /* Hardware requires that addr is aligned to bar size */ if ((addr & (bar_size - 1)) != 0) return (EINVAL); if (size > mw_size) return (EINVAL); bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg); limit = 0; if (bar_is_64bit(ntb, bar_num)) { - base = ntb_reg_read(8, base_reg); + base = ntb_reg_read(8, base_reg) & BAR_HIGH_MASK; if (limit_reg != 0 && size != mw_size) limit = base + size; /* Set and verify translation address */ ntb_reg_write(8, xlat_reg, addr); - reg_val = ntb_reg_read(8, xlat_reg); + reg_val = ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK; if (reg_val != addr) { ntb_reg_write(8, xlat_reg, 0); return (EIO); } /* Set and verify the limit */ ntb_reg_write(8, limit_reg, limit); - reg_val = ntb_reg_read(8, limit_reg); + reg_val = ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK; if (reg_val != limit) { ntb_reg_write(8, limit_reg, base); ntb_reg_write(8, xlat_reg, 0); return (EIO); } } else { /* Configure 32-bit (split) BAR MW */ - if ((addr & ~UINT32_MAX) != 0) - return (EINVAL); - if (((addr + size) & ~UINT32_MAX) != 0) - return (EINVAL); + if ((addr & UINT32_MAX) != addr) + return (ERANGE); + if (((addr + size) & UINT32_MAX) != (addr + size)) + return (ERANGE); - base = ntb_reg_read(4, base_reg); + base = ntb_reg_read(4, base_reg) & BAR_HIGH_MASK; if (limit_reg != 0 && size != mw_size) limit = base + size; /* Set and verify translation address */ ntb_reg_write(4, xlat_reg, addr); - reg_val = ntb_reg_read(4, xlat_reg); + reg_val = ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK; if (reg_val != addr) { ntb_reg_write(4, xlat_reg, 0); return (EIO); } /* Set and verify the limit */ ntb_reg_write(4, limit_reg, limit); - reg_val = ntb_reg_read(4, limit_reg); + reg_val = ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK; if (reg_val != limit) { ntb_reg_write(4, limit_reg, base); ntb_reg_write(4, xlat_reg, 0); return (EIO); } } return (0); } /* * ntb_mw_clear_trans() - clear the translation of a memory window * @ntb: NTB device context * @idx: Memory window number * * Clear the translation of a memory window. The peer may no longer access * local memory through the window. * * Return: Zero on success, otherwise an error number. */ int ntb_mw_clear_trans(struct ntb_softc *ntb, unsigned mw_idx) { return (ntb_mw_set_trans(ntb, mw_idx, 0, 0)); +} + +/* + * ntb_mw_get_wc - Get the write-combine status of a memory window + * + * Returns: Zero on success, setting *wc; otherwise an error number (e.g. if + * idx is an invalid memory window). + */ +int +ntb_mw_get_wc(struct ntb_softc *ntb, unsigned idx, bool *wc) +{ + struct ntb_pci_bar_info *bar; + + if (idx >= ntb_mw_count(ntb)) + return (EINVAL); + + bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)]; + *wc = bar->mapped_wc; + return (0); +} + +/* + * ntb_mw_set_wc - Set the write-combine status of a memory window + * + * If 'wc' matches the current status, this does nothing and succeeds. + * + * Returns: Zero on success, setting the caching attribute on the virtual + * mapping of the BAR; otherwise an error number (e.g. 
if idx is an invalid + * memory window, or if changing the caching attribute fails). + */ +int +ntb_mw_set_wc(struct ntb_softc *ntb, unsigned idx, bool wc) +{ + struct ntb_pci_bar_info *bar; + vm_memattr_t attr; + int rc; + + if (idx >= ntb_mw_count(ntb)) + return (EINVAL); + + bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)]; + if (bar->mapped_wc == wc) + return (0); + + if (wc) + attr = VM_MEMATTR_WRITE_COMBINING; + else + attr = VM_MEMATTR_DEFAULT; + + rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, attr); + if (rc == 0) + bar->mapped_wc = wc; + + return (rc); } /** * ntb_peer_db_set() - Set the doorbell on the secondary/external side * @ntb: pointer to ntb_softc instance * @bit: doorbell bits to ring * * This function allows triggering of a doorbell on the secondary/external * side that will initiate an interrupt on the remote host */ void ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit) { if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) { ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit); return; } db_iowrite(ntb, ntb->peer_reg->db_bell, bit); } /* * ntb_get_peer_db_addr() - Return the address of the remote doorbell register, * as well as the size of the register (via *sz_out). * * This function allows a caller using I/OAT DMA to chain the remote doorbell * ring to its memory window write. * * Note that writing the peer doorbell via a memory window will *not* generate * an interrupt on the remote host; that must be done separately. */ bus_addr_t ntb_get_peer_db_addr(struct ntb_softc *ntb, vm_size_t *sz_out) { struct ntb_pci_bar_info *bar; uint64_t regoff; KASSERT(sz_out != NULL, ("must be non-NULL")); if (!HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) { bar = &ntb->bar_info[NTB_CONFIG_BAR]; regoff = ntb->peer_reg->db_bell; } else { KASSERT((HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 2) || (!HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 1), ("mw_count invalid after setup")); KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED, ("invalid b2b idx")); bar = &ntb->bar_info[ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)]; regoff = XEON_PDOORBELL_OFFSET; } KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh")); *sz_out = ntb->reg->db_size; /* HACK: Specific to current x86 bus implementation. */ return ((uint64_t)bar->pci_bus_handle + regoff); } /* * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb * @ntb: NTB device context * * Hardware may support a different number or arrangement of doorbell bits. * * Return: A mask of doorbell bits supported by the ntb. */ uint64_t ntb_db_valid_mask(struct ntb_softc *ntb) { return (ntb->db_valid_mask); } /* * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector * @ntb: NTB device context * @vector: Doorbell vector number * * Each interrupt vector may have a different number or arrangement of bits. * * Return: A mask of doorbell bits serviced by a vector.
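 *
 * An illustrative sketch, not part of this change (the handler shape is an
 * assumption): a per-vector interrupt handler would typically narrow the
 * pending doorbells to its own bits before acknowledging them:
 *
 *	uint64_t db;
 *
 *	db = ntb_db_read(ntb) & ntb_db_vector_mask(ntb, vector);
 *	if (db != 0)
 *		ntb_db_clear(ntb, db);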
*/ uint64_t ntb_db_vector_mask(struct ntb_softc *ntb, uint32_t vector) { if (vector > ntb->db_vec_count) return (0); return (ntb->db_valid_mask & ntb_vec_mask(ntb, vector)); } /** * ntb_link_is_up() - get the current ntb link state * @ntb: NTB device context * @speed: OUT - The link speed expressed as PCIe generation number * @width: OUT - The link width expressed as the number of PCIe lanes * * RETURNS: true or false based on the hardware link state */ bool ntb_link_is_up(struct ntb_softc *ntb, enum ntb_speed *speed, enum ntb_width *width) { if (speed != NULL) *speed = ntb_link_sta_speed(ntb); if (width != NULL) *width = ntb_link_sta_width(ntb); return (link_is_up(ntb)); } static void save_bar_parameters(struct ntb_pci_bar_info *bar) { bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); bar->pbase = rman_get_start(bar->pci_resource); bar->size = rman_get_size(bar->pci_resource); bar->vbase = rman_get_virtual(bar->pci_resource); } device_t ntb_get_device(struct ntb_softc *ntb) { return (ntb->device); } /* Export HW-specific errata information. */ bool ntb_has_feature(struct ntb_softc *ntb, uint32_t feature) { return (HAS_FEATURE(feature)); } Index: projects/powernv/dev/ntb/ntb_hw/ntb_hw.h =================================================================== --- projects/powernv/dev/ntb/ntb_hw/ntb_hw.h (revision 291050) +++ projects/powernv/dev/ntb/ntb_hw/ntb_hw.h (revision 291051) @@ -1,116 +1,120 @@ /*- * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _NTB_HW_H_ #define _NTB_HW_H_ struct ntb_softc; #define NTB_MAX_NUM_MW 3 enum ntb_speed { NTB_SPEED_AUTO = -1, NTB_SPEED_NONE = 0, NTB_SPEED_GEN1 = 1, NTB_SPEED_GEN2 = 2, NTB_SPEED_GEN3 = 3, }; enum ntb_width { NTB_WIDTH_AUTO = -1, NTB_WIDTH_NONE = 0, NTB_WIDTH_1 = 1, NTB_WIDTH_2 = 2, NTB_WIDTH_4 = 4, NTB_WIDTH_8 = 8, NTB_WIDTH_12 = 12, NTB_WIDTH_16 = 16, NTB_WIDTH_32 = 32, }; SYSCTL_DECL(_hw_ntb); typedef void (*ntb_db_callback)(void *data, uint32_t vector); typedef void (*ntb_event_callback)(void *data); struct ntb_ctx_ops { ntb_event_callback link_event; ntb_db_callback db_event; }; device_t ntb_get_device(struct ntb_softc *); bool ntb_link_is_up(struct ntb_softc *, enum ntb_speed *, enum ntb_width *); void ntb_link_event(struct ntb_softc *); int ntb_link_enable(struct ntb_softc *, enum ntb_speed, enum ntb_width); int ntb_link_disable(struct ntb_softc *); int ntb_set_ctx(struct ntb_softc *, void *, const struct ntb_ctx_ops *); void *ntb_get_ctx(struct ntb_softc *, const struct ntb_ctx_ops **); void ntb_clear_ctx(struct ntb_softc *); uint8_t ntb_mw_count(struct ntb_softc *); int ntb_mw_get_range(struct ntb_softc *, unsigned mw_idx, vm_paddr_t *base, - caddr_t *vbase, size_t *size, size_t *align, size_t *align_size); + caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, + bus_addr_t *plimit); int ntb_mw_set_trans(struct ntb_softc *, unsigned mw_idx, bus_addr_t, size_t); int ntb_mw_clear_trans(struct ntb_softc *, unsigned mw_idx); + +int ntb_mw_get_wc(struct ntb_softc *, unsigned mw_idx, bool *wc); +int ntb_mw_set_wc(struct ntb_softc *, unsigned mw_idx, bool wc); uint8_t ntb_get_max_spads(struct ntb_softc *ntb); int ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val); int ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val); int ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val); int ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val); uint64_t ntb_db_valid_mask(struct ntb_softc *); uint64_t ntb_db_vector_mask(struct ntb_softc *, uint32_t vector); bus_addr_t ntb_get_peer_db_addr(struct ntb_softc *, vm_size_t *sz_out); void ntb_db_clear(struct ntb_softc *, uint64_t bits); void ntb_db_clear_mask(struct ntb_softc *, uint64_t bits); uint64_t ntb_db_read(struct ntb_softc *); void ntb_db_set_mask(struct ntb_softc *, uint64_t bits); void ntb_peer_db_set(struct ntb_softc *, uint64_t bits); /* Hardware owns the low 16 bits of features. */ #define NTB_BAR_SIZE_4K (1 << 0) #define NTB_SDOORBELL_LOCKUP (1 << 1) #define NTB_SB01BASE_LOCKUP (1 << 2) #define NTB_B2BDOORBELL_BIT14 (1 << 3) /* Software/configuration owns the top 16 bits. */ #define NTB_SPLIT_BAR (1ull << 16) #define NTB_FEATURES_STR \ "\20\21SPLIT_BAR4\04B2B_DOORBELL_BIT14\03SB01BASE_LOCKUP" \ "\02SDOORBELL_LOCKUP\01BAR_SIZE_4K" bool ntb_has_feature(struct ntb_softc *, uint32_t); #endif /* _NTB_HW_H_ */ Index: projects/powernv/dev/uart/uart_dev_ns8250.c =================================================================== --- projects/powernv/dev/uart/uart_dev_ns8250.c (revision 291050) +++ projects/powernv/dev/uart/uart_dev_ns8250.c (revision 291051) @@ -1,971 +1,982 @@ /*- * Copyright (c) 2003 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "opt_platform.h" +#include "opt_uart.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #ifdef FDT #include #include #include #endif #include #include #ifdef FDT #include #endif #include #include #include #include "uart_if.h" #define DEFAULT_RCLK 1843200 +/* + * Set the default baudrate tolerance to 3.0%. The value is expressed in + * tenths of a percent, so 30 means 3.0%. + * + * Some embedded boards have odd reference clocks (e.g. 25 MHz) + * and we need to handle higher variances in the target baud rate. + */ +#ifndef UART_DEV_TOLERANCE_PCT +#define UART_DEV_TOLERANCE_PCT 30 +#endif /* UART_DEV_TOLERANCE_PCT */ + static int broken_txfifo = 0; SYSCTL_INT(_hw, OID_AUTO, broken_txfifo, CTLFLAG_RWTUN, &broken_txfifo, 0, "UART FIFO has QEMU emulation bug"); /* * Clear pending interrupts. THRE is cleared by reading IIR. Data * that may have been received gets lost here. */ static void ns8250_clrint(struct uart_bas *bas) { uint8_t iir, lsr; iir = uart_getreg(bas, REG_IIR); while ((iir & IIR_NOPEND) == 0) { iir &= IIR_IMASK; if (iir == IIR_RLS) { lsr = uart_getreg(bas, REG_LSR); if (lsr & (LSR_BI|LSR_FE|LSR_PE)) (void)uart_getreg(bas, REG_DATA); } else if (iir == IIR_RXRDY || iir == IIR_RXTOUT) (void)uart_getreg(bas, REG_DATA); else if (iir == IIR_MLSC) (void)uart_getreg(bas, REG_MSR); uart_barrier(bas); iir = uart_getreg(bas, REG_IIR); } } static int ns8250_delay(struct uart_bas *bas) { int divisor; u_char lcr; lcr = uart_getreg(bas, REG_LCR); uart_setreg(bas, REG_LCR, lcr | LCR_DLAB); uart_barrier(bas); divisor = uart_getreg(bas, REG_DLL) | (uart_getreg(bas, REG_DLH) << 8); uart_barrier(bas); uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); /* 1/10th the time to transmit 1 character (estimate).
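 * With the standard 16x sampling clock and roughly ten bits on the wire
 * per character, one character takes divisor * 160 / rclk seconds, so a
 * tenth of that is divisor * 16 / rclk seconds, i.e. the
 * 16000000 * divisor / rclk microseconds computed below.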
*/ if (divisor <= 134) return (16000000 * divisor / bas->rclk); return (16000 * divisor / (bas->rclk / 1000)); } static int ns8250_divisor(int rclk, int baudrate) { int actual_baud, divisor; int error; if (baudrate == 0) return (0); divisor = (rclk / (baudrate << 3) + 1) >> 1; if (divisor == 0 || divisor >= 65536) return (0); actual_baud = rclk / (divisor << 4); /* 10 times error in percent: */ error = ((actual_baud - baudrate) * 2000 / baudrate + 1) >> 1; - /* 3.0% maximum error tolerance: */ - if (error < -30 || error > 30) + /* enforce maximum error tolerance: */ + if (error < -UART_DEV_TOLERANCE_PCT || error > UART_DEV_TOLERANCE_PCT) return (0); return (divisor); } static int ns8250_drain(struct uart_bas *bas, int what) { int delay, limit; delay = ns8250_delay(bas); if (what & UART_DRAIN_TRANSMITTER) { /* * Pick an arbitrary high limit to avoid getting stuck in * an infinite loop when the hardware is broken. Make the * limit high enough to handle large FIFOs. */ limit = 10*1024; while ((uart_getreg(bas, REG_LSR) & LSR_TEMT) == 0 && --limit) DELAY(delay); if (limit == 0) { /* printf("ns8250: transmitter appears stuck... "); */ return (EIO); } } if (what & UART_DRAIN_RECEIVER) { /* * Pick an arbitrary high limit to avoid getting stuck in * an infinite loop when the hardware is broken. Make the * limit high enough to handle large FIFOs and integrated * UARTs. The HP rx2600 for example has 3 UARTs on the * management board that tend to get a lot of data sent * to them when the UART is first activated. */ limit = 10 * 4096; while ((uart_getreg(bas, REG_LSR) & LSR_RXRDY) && --limit) { (void)uart_getreg(bas, REG_DATA); uart_barrier(bas); DELAY(delay << 2); } if (limit == 0) { /* printf("ns8250: receiver appears broken... "); */ return (EIO); } } return (0); } /* * We can only flush UARTs with FIFOs. UARTs without FIFOs should be * drained. WARNING: this function clobbers the FIFO setting! */ static void ns8250_flush(struct uart_bas *bas, int what) { uint8_t fcr; fcr = FCR_ENABLE; if (what & UART_FLUSH_TRANSMITTER) fcr |= FCR_XMT_RST; if (what & UART_FLUSH_RECEIVER) fcr |= FCR_RCV_RST; uart_setreg(bas, REG_FCR, fcr); uart_barrier(bas); } static int ns8250_param(struct uart_bas *bas, int baudrate, int databits, int stopbits, int parity) { int divisor; uint8_t lcr; lcr = 0; if (databits >= 8) lcr |= LCR_8BITS; else if (databits == 7) lcr |= LCR_7BITS; else if (databits == 6) lcr |= LCR_6BITS; else lcr |= LCR_5BITS; if (stopbits > 1) lcr |= LCR_STOPB; lcr |= parity << 3; /* Set baudrate. */ if (baudrate > 0) { divisor = ns8250_divisor(bas->rclk, baudrate); if (divisor == 0) return (EINVAL); uart_setreg(bas, REG_LCR, lcr | LCR_DLAB); uart_barrier(bas); uart_setreg(bas, REG_DLL, divisor & 0xff); uart_setreg(bas, REG_DLH, (divisor >> 8) & 0xff); uart_barrier(bas); } /* Set LCR and clear DLAB. */ uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); return (0); } /* * Low-level UART interface. */ static int ns8250_probe(struct uart_bas *bas); static void ns8250_init(struct uart_bas *bas, int, int, int, int); static void ns8250_term(struct uart_bas *bas); static void ns8250_putc(struct uart_bas *bas, int); static int ns8250_rxready(struct uart_bas *bas); static int ns8250_getc(struct uart_bas *bas, struct mtx *); struct uart_ops uart_ns8250_ops = { .probe = ns8250_probe, .init = ns8250_init, .term = ns8250_term, .putc = ns8250_putc, .rxready = ns8250_rxready, .getc = ns8250_getc, }; static int ns8250_probe(struct uart_bas *bas) { u_char val; /* Check known 0 bits that don't depend on DLAB.
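 * The 0x30 mask below covers IIR bits 4 and 5, which read back as zero
 * on the devices this driver claims; the 0xa0 check on the MCR that
 * follows plays the same role for MCR bits 5 and 7.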
*/ val = uart_getreg(bas, REG_IIR); if (val & 0x30) return (ENXIO); /* * Bit 6 of the MCR (= 0x40) appears to be 1 for the Sun1699 * chip, but otherwise doesn't seem to have a function. In * other words, uart(4) works regardless. Ignore that bit so * the probe succeeds. */ val = uart_getreg(bas, REG_MCR); if (val & 0xa0) return (ENXIO); return (0); } static void ns8250_init(struct uart_bas *bas, int baudrate, int databits, int stopbits, int parity) { u_char ier; if (bas->rclk == 0) bas->rclk = DEFAULT_RCLK; ns8250_param(bas, baudrate, databits, stopbits, parity); /* Disable all interrupt sources. */ /* * We use 0xe0 instead of 0xf0 as the mask because the XScale PXA * UARTs split the receive time-out interrupt bit out separately as * 0x10. This gets handled by ier_mask and ier_rxbits below. */ ier = uart_getreg(bas, REG_IER) & 0xe0; uart_setreg(bas, REG_IER, ier); uart_barrier(bas); /* Disable the FIFO (if present). */ uart_setreg(bas, REG_FCR, 0); uart_barrier(bas); /* Set RTS & DTR. */ uart_setreg(bas, REG_MCR, MCR_IE | MCR_RTS | MCR_DTR); uart_barrier(bas); ns8250_clrint(bas); } static void ns8250_term(struct uart_bas *bas) { /* Clear RTS & DTR. */ uart_setreg(bas, REG_MCR, MCR_IE); uart_barrier(bas); } static void ns8250_putc(struct uart_bas *bas, int c) { int limit; limit = 250000; while ((uart_getreg(bas, REG_LSR) & LSR_THRE) == 0 && --limit) DELAY(4); uart_setreg(bas, REG_DATA, c); uart_barrier(bas); limit = 250000; while ((uart_getreg(bas, REG_LSR) & LSR_TEMT) == 0 && --limit) DELAY(4); } static int ns8250_rxready(struct uart_bas *bas) { return ((uart_getreg(bas, REG_LSR) & LSR_RXRDY) != 0 ? 1 : 0); } static int ns8250_getc(struct uart_bas *bas, struct mtx *hwmtx) { int c; uart_lock(hwmtx); while ((uart_getreg(bas, REG_LSR) & LSR_RXRDY) == 0) { uart_unlock(hwmtx); DELAY(4); uart_lock(hwmtx); } c = uart_getreg(bas, REG_DATA); uart_unlock(hwmtx); return (c); } static kobj_method_t ns8250_methods[] = { KOBJMETHOD(uart_attach, ns8250_bus_attach), KOBJMETHOD(uart_detach, ns8250_bus_detach), KOBJMETHOD(uart_flush, ns8250_bus_flush), KOBJMETHOD(uart_getsig, ns8250_bus_getsig), KOBJMETHOD(uart_ioctl, ns8250_bus_ioctl), KOBJMETHOD(uart_ipend, ns8250_bus_ipend), KOBJMETHOD(uart_param, ns8250_bus_param), KOBJMETHOD(uart_probe, ns8250_bus_probe), KOBJMETHOD(uart_receive, ns8250_bus_receive), KOBJMETHOD(uart_setsig, ns8250_bus_setsig), KOBJMETHOD(uart_transmit, ns8250_bus_transmit), KOBJMETHOD(uart_grab, ns8250_bus_grab), KOBJMETHOD(uart_ungrab, ns8250_bus_ungrab), { 0, 0 } }; struct uart_class uart_ns8250_class = { "ns8250", ns8250_methods, sizeof(struct ns8250_softc), .uc_ops = &uart_ns8250_ops, .uc_range = 8, .uc_rclk = DEFAULT_RCLK, .uc_rshift = 0 }; #ifdef FDT static struct ofw_compat_data compat_data[] = { {"ns16550", (uintptr_t)&uart_ns8250_class}, {NULL, (uintptr_t)NULL}, }; UART_FDT_CLASS_AND_DEVICE(compat_data); #endif #define SIGCHG(c, i, s, d) \ if (c) { \ i |= (i & s) ? s : s | d; \ } else { \ i = (i & s) ? (i & ~s) | d : i; \ } int ns8250_bus_attach(struct uart_softc *sc) { struct ns8250_softc *ns8250 = (struct ns8250_softc*)sc; struct uart_bas *bas; unsigned int ivar; #ifdef FDT phandle_t node; pcell_t cell; #endif ns8250->busy_detect = 0; #ifdef FDT /* * Check whether the uart requires reading the USR reg when IIR_BUSY is * set, and whether it has a broken txfifo. */ node = ofw_bus_get_node(sc->sc_dev); if ((OF_getencprop(node, "busy-detect", &cell, sizeof(cell))) > 0) ns8250->busy_detect = cell ? 1 : 0; if ((OF_getencprop(node, "broken-txfifo", &cell, sizeof(cell))) > 0) broken_txfifo = cell ?
1 : 0; #endif bas = &sc->sc_bas; ns8250->mcr = uart_getreg(bas, REG_MCR); ns8250->fcr = FCR_ENABLE; if (!resource_int_value("uart", device_get_unit(sc->sc_dev), "flags", &ivar)) { if (UART_FLAGS_FCR_RX_LOW(ivar)) ns8250->fcr |= FCR_RX_LOW; else if (UART_FLAGS_FCR_RX_MEDL(ivar)) ns8250->fcr |= FCR_RX_MEDL; else if (UART_FLAGS_FCR_RX_HIGH(ivar)) ns8250->fcr |= FCR_RX_HIGH; else ns8250->fcr |= FCR_RX_MEDH; } else ns8250->fcr |= FCR_RX_MEDH; /* Get IER mask */ ivar = 0xf0; resource_int_value("uart", device_get_unit(sc->sc_dev), "ier_mask", &ivar); ns8250->ier_mask = (uint8_t)(ivar & 0xff); /* Get IER RX interrupt bits */ ivar = IER_EMSC | IER_ERLS | IER_ERXRDY; resource_int_value("uart", device_get_unit(sc->sc_dev), "ier_rxbits", &ivar); ns8250->ier_rxbits = (uint8_t)(ivar & 0xff); uart_setreg(bas, REG_FCR, ns8250->fcr); uart_barrier(bas); ns8250_bus_flush(sc, UART_FLUSH_RECEIVER|UART_FLUSH_TRANSMITTER); if (ns8250->mcr & MCR_DTR) sc->sc_hwsig |= SER_DTR; if (ns8250->mcr & MCR_RTS) sc->sc_hwsig |= SER_RTS; ns8250_bus_getsig(sc); ns8250_clrint(bas); ns8250->ier = uart_getreg(bas, REG_IER) & ns8250->ier_mask; ns8250->ier |= ns8250->ier_rxbits; uart_setreg(bas, REG_IER, ns8250->ier); uart_barrier(bas); /* * Timing of the H/W access was changed with r253161 of uart_core.c. * It has been observed that an ITE IT8513E would signal a break * condition with pretty much every character it received, unless * it had enough time to settle between ns8250_bus_attach() and * ns8250_bus_ipend() -- which it accidentally had before r253161. * It's not understood why the UART chip behaves this way and it * could very well be that the DELAY makes the H/W work in the same * accidental manner as before. More analysis is warranted, but * at least now we fixed a known regression.
*/ DELAY(200); return (0); } int ns8250_bus_detach(struct uart_softc *sc) { struct ns8250_softc *ns8250; struct uart_bas *bas; u_char ier; ns8250 = (struct ns8250_softc *)sc; bas = &sc->sc_bas; ier = uart_getreg(bas, REG_IER) & ns8250->ier_mask; uart_setreg(bas, REG_IER, ier); uart_barrier(bas); ns8250_clrint(bas); return (0); } int ns8250_bus_flush(struct uart_softc *sc, int what) { struct ns8250_softc *ns8250 = (struct ns8250_softc*)sc; struct uart_bas *bas; int error; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); if (sc->sc_rxfifosz > 1) { ns8250_flush(bas, what); uart_setreg(bas, REG_FCR, ns8250->fcr); uart_barrier(bas); error = 0; } else error = ns8250_drain(bas, what); uart_unlock(sc->sc_hwmtx); return (error); } int ns8250_bus_getsig(struct uart_softc *sc) { uint32_t new, old, sig; uint8_t msr; do { old = sc->sc_hwsig; sig = old; uart_lock(sc->sc_hwmtx); msr = uart_getreg(&sc->sc_bas, REG_MSR); uart_unlock(sc->sc_hwmtx); SIGCHG(msr & MSR_DSR, sig, SER_DSR, SER_DDSR); SIGCHG(msr & MSR_CTS, sig, SER_CTS, SER_DCTS); SIGCHG(msr & MSR_DCD, sig, SER_DCD, SER_DDCD); SIGCHG(msr & MSR_RI, sig, SER_RI, SER_DRI); new = sig & ~SER_MASK_DELTA; } while (!atomic_cmpset_32(&sc->sc_hwsig, old, new)); return (sig); } int ns8250_bus_ioctl(struct uart_softc *sc, int request, intptr_t data) { struct uart_bas *bas; int baudrate, divisor, error; uint8_t efr, lcr; bas = &sc->sc_bas; error = 0; uart_lock(sc->sc_hwmtx); switch (request) { case UART_IOCTL_BREAK: lcr = uart_getreg(bas, REG_LCR); if (data) lcr |= LCR_SBREAK; else lcr &= ~LCR_SBREAK; uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); break; case UART_IOCTL_IFLOW: lcr = uart_getreg(bas, REG_LCR); uart_barrier(bas); uart_setreg(bas, REG_LCR, 0xbf); uart_barrier(bas); efr = uart_getreg(bas, REG_EFR); if (data) efr |= EFR_RTS; else efr &= ~EFR_RTS; uart_setreg(bas, REG_EFR, efr); uart_barrier(bas); uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); break; case UART_IOCTL_OFLOW: lcr = uart_getreg(bas, REG_LCR); uart_barrier(bas); uart_setreg(bas, REG_LCR, 0xbf); uart_barrier(bas); efr = uart_getreg(bas, REG_EFR); if (data) efr |= EFR_CTS; else efr &= ~EFR_CTS; uart_setreg(bas, REG_EFR, efr); uart_barrier(bas); uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); break; case UART_IOCTL_BAUD: lcr = uart_getreg(bas, REG_LCR); uart_setreg(bas, REG_LCR, lcr | LCR_DLAB); uart_barrier(bas); divisor = uart_getreg(bas, REG_DLL) | (uart_getreg(bas, REG_DLH) << 8); uart_barrier(bas); uart_setreg(bas, REG_LCR, lcr); uart_barrier(bas); baudrate = (divisor > 0) ? 
bas->rclk / divisor / 16 : 0; if (baudrate > 0) *(int*)data = baudrate; else error = ENXIO; break; default: error = EINVAL; break; } uart_unlock(sc->sc_hwmtx); return (error); } int ns8250_bus_ipend(struct uart_softc *sc) { struct uart_bas *bas; struct ns8250_softc *ns8250; int ipend; uint8_t iir, lsr; ns8250 = (struct ns8250_softc *)sc; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); iir = uart_getreg(bas, REG_IIR); if (ns8250->busy_detect && (iir & IIR_BUSY) == IIR_BUSY) { (void)uart_getreg(bas, DW_REG_USR); uart_unlock(sc->sc_hwmtx); return (0); } if (iir & IIR_NOPEND) { uart_unlock(sc->sc_hwmtx); return (0); } ipend = 0; if (iir & IIR_RXRDY) { lsr = uart_getreg(bas, REG_LSR); if (lsr & LSR_OE) ipend |= SER_INT_OVERRUN; if (lsr & LSR_BI) ipend |= SER_INT_BREAK; if (lsr & LSR_RXRDY) ipend |= SER_INT_RXREADY; } else { if (iir & IIR_TXRDY) { ipend |= SER_INT_TXIDLE; uart_setreg(bas, REG_IER, ns8250->ier); } else ipend |= SER_INT_SIGCHG; } if (ipend == 0) ns8250_clrint(bas); uart_unlock(sc->sc_hwmtx); return (ipend); } int ns8250_bus_param(struct uart_softc *sc, int baudrate, int databits, int stopbits, int parity) { struct ns8250_softc *ns8250; struct uart_bas *bas; int error, limit; ns8250 = (struct ns8250_softc*)sc; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); /* * When using a DW UART with BUSY detection it is necessary to wait * until all serial transfers are finished before manipulating the * line control. LCR will not be affected when UART is busy. */ if (ns8250->busy_detect != 0) { /* * Pick an arbitrary high limit to avoid getting stuck in * an infinite loop when the hardware is broken. */ limit = 10 * 1024; while (((uart_getreg(bas, DW_REG_USR) & USR_BUSY) != 0) && --limit) DELAY(4); if (limit <= 0) { /* UART appears to be stuck */ uart_unlock(sc->sc_hwmtx); return (EIO); } } error = ns8250_param(bas, baudrate, databits, stopbits, parity); uart_unlock(sc->sc_hwmtx); return (error); } int ns8250_bus_probe(struct uart_softc *sc) { struct ns8250_softc *ns8250; struct uart_bas *bas; int count, delay, error, limit; uint8_t lsr, mcr, ier; ns8250 = (struct ns8250_softc *)sc; bas = &sc->sc_bas; error = ns8250_probe(bas); if (error) return (error); mcr = MCR_IE; if (sc->sc_sysdev == NULL) { /* By using ns8250_init() we also set DTR and RTS. */ ns8250_init(bas, 115200, 8, 1, UART_PARITY_NONE); } else mcr |= MCR_DTR | MCR_RTS; error = ns8250_drain(bas, UART_DRAIN_TRANSMITTER); if (error) return (error); /* * Set loopback mode. This avoids having garbage on the wire and * also allows us to send and receive data. We set DTR and RTS to * avoid the possibility that automatic flow-control prevents * any data from being sent. */ uart_setreg(bas, REG_MCR, MCR_LOOPBACK | MCR_IE | MCR_DTR | MCR_RTS); uart_barrier(bas); /* * Enable FIFOs. And check that the UART has them. If not, we're * done. Since this is the first time we enable the FIFOs, we reset * them. */ uart_setreg(bas, REG_FCR, FCR_ENABLE); uart_barrier(bas); if (!(uart_getreg(bas, REG_IIR) & IIR_FIFO_MASK)) { /* * NS16450 or INS8250. We don't bother to differentiate * between them. They're too old to be interesting. */ uart_setreg(bas, REG_MCR, mcr); uart_barrier(bas); sc->sc_rxfifosz = sc->sc_txfifosz = 1; device_set_desc(sc->sc_dev, "8250 or 16450 or compatible"); return (0); } uart_setreg(bas, REG_FCR, FCR_ENABLE | FCR_XMT_RST | FCR_RCV_RST); uart_barrier(bas); count = 0; delay = ns8250_delay(bas); /* We have FIFOs. Drain the transmitter and receiver.
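 * The FIFOs must start out empty here: the size probe below counts how
 * many characters can be queued before the overrun bit (LSR_OE) trips,
 * so any stale data would skew the count.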
*/ error = ns8250_drain(bas, UART_DRAIN_RECEIVER|UART_DRAIN_TRANSMITTER); if (error) { uart_setreg(bas, REG_MCR, mcr); uart_setreg(bas, REG_FCR, 0); uart_barrier(bas); goto describe; } /* * We should have a sufficiently clean "pipe" to determine the * size of the FIFOs. We send as many characters as is reasonable * and wait for the overflow bit in the LSR register to be * asserted, counting the characters as we send them. Based on * that count we know the FIFO size. */ do { uart_setreg(bas, REG_DATA, 0); uart_barrier(bas); count++; limit = 30; lsr = 0; /* * LSR bits are cleared upon read, so we must accumulate * them to be able to test LSR_OE below. */ while (((lsr |= uart_getreg(bas, REG_LSR)) & LSR_TEMT) == 0 && --limit) DELAY(delay); if (limit == 0) { ier = uart_getreg(bas, REG_IER) & ns8250->ier_mask; uart_setreg(bas, REG_IER, ier); uart_setreg(bas, REG_MCR, mcr); uart_setreg(bas, REG_FCR, 0); uart_barrier(bas); count = 0; goto describe; } } while ((lsr & LSR_OE) == 0 && count < 130); count--; uart_setreg(bas, REG_MCR, mcr); /* Reset FIFOs. */ ns8250_flush(bas, UART_FLUSH_RECEIVER|UART_FLUSH_TRANSMITTER); describe: if (count >= 14 && count <= 16) { sc->sc_rxfifosz = 16; device_set_desc(sc->sc_dev, "16550 or compatible"); } else if (count >= 28 && count <= 32) { sc->sc_rxfifosz = 32; device_set_desc(sc->sc_dev, "16650 or compatible"); } else if (count >= 56 && count <= 64) { sc->sc_rxfifosz = 64; device_set_desc(sc->sc_dev, "16750 or compatible"); } else if (count >= 112 && count <= 128) { sc->sc_rxfifosz = 128; device_set_desc(sc->sc_dev, "16950 or compatible"); } else { sc->sc_rxfifosz = 16; device_set_desc(sc->sc_dev, "Non-standard ns8250 class UART with FIFOs"); } /* * Force the Tx FIFO size to 16 bytes for now. We don't program the * Tx trigger. Also, we assume that all data has been sent when the * interrupt happens. */ sc->sc_txfifosz = 16; #if 0 /* * XXX there are some issues related to hardware flow control and * it's likely that uart(4) is the cause. This basically needs more * investigation, but we avoid using it for hardware flow control * until then. */ /* 16650s or higher have automatic flow control. */ if (sc->sc_rxfifosz > 16) { sc->sc_hwiflow = 1; sc->sc_hwoflow = 1; } #endif return (0); } int ns8250_bus_receive(struct uart_softc *sc) { struct uart_bas *bas; int xc; uint8_t lsr; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); lsr = uart_getreg(bas, REG_LSR); while (lsr & LSR_RXRDY) { if (uart_rx_full(sc)) { sc->sc_rxbuf[sc->sc_rxput] = UART_STAT_OVERRUN; break; } xc = uart_getreg(bas, REG_DATA); if (lsr & LSR_FE) xc |= UART_STAT_FRAMERR; if (lsr & LSR_PE) xc |= UART_STAT_PARERR; uart_rx_put(sc, xc); lsr = uart_getreg(bas, REG_LSR); } /* Discard everything left in the Rx FIFO.
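 * We only reach this with data still pending when the software buffer
 * has filled up; the overrun has already been recorded above, so
 * dropping the remainder is safe.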
*/ while (lsr & LSR_RXRDY) { (void)uart_getreg(bas, REG_DATA); uart_barrier(bas); lsr = uart_getreg(bas, REG_LSR); } uart_unlock(sc->sc_hwmtx); return (0); } int ns8250_bus_setsig(struct uart_softc *sc, int sig) { struct ns8250_softc *ns8250 = (struct ns8250_softc*)sc; struct uart_bas *bas; uint32_t new, old; bas = &sc->sc_bas; do { old = sc->sc_hwsig; new = old; if (sig & SER_DDTR) { SIGCHG(sig & SER_DTR, new, SER_DTR, SER_DDTR); } if (sig & SER_DRTS) { SIGCHG(sig & SER_RTS, new, SER_RTS, SER_DRTS); } } while (!atomic_cmpset_32(&sc->sc_hwsig, old, new)); uart_lock(sc->sc_hwmtx); ns8250->mcr &= ~(MCR_DTR|MCR_RTS); if (new & SER_DTR) ns8250->mcr |= MCR_DTR; if (new & SER_RTS) ns8250->mcr |= MCR_RTS; uart_setreg(bas, REG_MCR, ns8250->mcr); uart_barrier(bas); uart_unlock(sc->sc_hwmtx); return (0); } int ns8250_bus_transmit(struct uart_softc *sc) { struct ns8250_softc *ns8250 = (struct ns8250_softc*)sc; struct uart_bas *bas; int i; bas = &sc->sc_bas; uart_lock(sc->sc_hwmtx); while ((uart_getreg(bas, REG_LSR) & LSR_THRE) == 0) ; uart_setreg(bas, REG_IER, ns8250->ier | IER_ETXRDY); uart_barrier(bas); for (i = 0; i < sc->sc_txdatasz; i++) { uart_setreg(bas, REG_DATA, sc->sc_txbuf[i]); uart_barrier(bas); } if (broken_txfifo) ns8250_drain(bas, UART_DRAIN_TRANSMITTER); else sc->sc_txbusy = 1; uart_unlock(sc->sc_hwmtx); if (broken_txfifo) uart_sched_softih(sc, SER_INT_TXIDLE); return (0); } void ns8250_bus_grab(struct uart_softc *sc) { struct uart_bas *bas = &sc->sc_bas; struct ns8250_softc *ns8250 = (struct ns8250_softc*)sc; u_char ier; /* * Turn off all interrupts to enter polling mode. Leave the * saved mask alone. We'll restore whatever it was in ungrab. * All pending interrupt signals are reset when IER is set to 0. */ uart_lock(sc->sc_hwmtx); ier = uart_getreg(bas, REG_IER); uart_setreg(bas, REG_IER, ier & ns8250->ier_mask); uart_barrier(bas); uart_unlock(sc->sc_hwmtx); } void ns8250_bus_ungrab(struct uart_softc *sc) { struct ns8250_softc *ns8250 = (struct ns8250_softc*)sc; struct uart_bas *bas = &sc->sc_bas; /* * Restore the previous interrupt mask. */ uart_lock(sc->sc_hwmtx); uart_setreg(bas, REG_IER, ns8250->ier); uart_barrier(bas); uart_unlock(sc->sc_hwmtx); } Index: projects/powernv/dev/xen/control/control.c =================================================================== --- projects/powernv/dev/xen/control/control.c (revision 291050) +++ projects/powernv/dev/xen/control/control.c (revision 291051) @@ -1,434 +1,438 @@ /*- * Copyright (c) 2010 Justin T. Gibbs, Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ /*- * PV suspend/resume support: * * Copyright (c) 2004 Christian Limpach. * Copyright (c) 2004-2006,2008 Kip Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Christian Limpach. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * HVM suspend/resume support: * * Copyright (c) 2008 Citrix Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /** * \file control.c * * \brief Device driver to respond to control domain events that impact * this VM. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include + #include #include #include #include #include #include #include #include #include #include #include #include /*--------------------------- Forward Declarations --------------------------*/ /** Function signature for shutdown event handlers. */ typedef void (xctrl_shutdown_handler_t)(void); static xctrl_shutdown_handler_t xctrl_poweroff; static xctrl_shutdown_handler_t xctrl_reboot; static xctrl_shutdown_handler_t xctrl_suspend; static xctrl_shutdown_handler_t xctrl_crash; /*-------------------------- Private Data Structures -------------------------*/ /** Element type for lookup table of event name to handler. */ struct xctrl_shutdown_reason { const char *name; xctrl_shutdown_handler_t *handler; }; /** Lookup table for shutdown event name to handler. */ static const struct xctrl_shutdown_reason xctrl_shutdown_reasons[] = { { "poweroff", xctrl_poweroff }, { "reboot", xctrl_reboot }, { "suspend", xctrl_suspend }, { "crash", xctrl_crash }, { "halt", xctrl_poweroff }, }; struct xctrl_softc { struct xs_watch xctrl_watch; }; /*------------------------------ Event Handlers ------------------------------*/ static void xctrl_poweroff() { shutdown_nice(RB_POWEROFF|RB_HALT); } static void xctrl_reboot() { shutdown_nice(0); } static void xctrl_suspend() { #ifdef SMP cpuset_t cpu_suspend_map; #endif int suspend_cancelled; EVENTHANDLER_INVOKE(power_suspend); if (smp_started) { thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); } KASSERT((PCPU_GET(cpuid) == 0), ("Not running on CPU#0")); /* * Clear our XenStore node so the toolstack knows we are * responding to the suspend request. */ xs_write(XST_NIL, "control", "shutdown", ""); /* * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE * drivers need this. */ mtx_lock(&Giant); if (DEVICE_SUSPEND(root_bus) != 0) { mtx_unlock(&Giant); printf("%s: device_suspend failed\n", __func__); return; } mtx_unlock(&Giant); #ifdef SMP CPU_ZERO(&cpu_suspend_map); /* silence gcc */ if (smp_started) { /* * Suspend other CPUs. This prevents IPIs while we * are resuming, and will allow us to reset per-cpu * vcpu_info on resume. */ cpu_suspend_map = all_cpus; CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map); if (!CPU_EMPTY(&cpu_suspend_map)) suspend_cpus(cpu_suspend_map); } #endif /* * Prevent any races with evtchn_interrupt() handler. */ disable_intr(); intr_suspend(); xen_hvm_suspend(); suspend_cancelled = HYPERVISOR_suspend(0); xen_hvm_resume(suspend_cancelled != 0); intr_resume(suspend_cancelled != 0); enable_intr(); /* * Reset grant table info. */ gnttab_resume(NULL); #ifdef SMP + /* Send an IPI_BITMAP in case there are pending bitmap IPIs. */ + lapic_ipi_vectored(IPI_BITMAP_VECTOR, APIC_IPI_DEST_ALL); if (smp_started && !CPU_EMPTY(&cpu_suspend_map)) { /* * Now that event channels have been initialized, * resume CPUs. */ resume_cpus(cpu_suspend_map); } #endif /* * FreeBSD really needs to add DEVICE_SUSPEND_CANCEL or * similar.
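 * As it stands, a cancelled suspend (suspend_cancelled != 0) still takes
 * the full DEVICE_RESUME path below, so drivers cannot tell a real
 * resume from a suspend that never happened.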
*/ mtx_lock(&Giant); DEVICE_RESUME(root_bus); mtx_unlock(&Giant); if (smp_started) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } EVENTHANDLER_INVOKE(power_resume); if (bootverbose) printf("System resumed after suspension\n"); } static void xctrl_crash() { panic("Xen directed crash"); } static void xen_pv_shutdown_final(void *arg, int howto) { /* * Inform the hypervisor that shutdown is complete. * This is not necessary in HVM domains since Xen * emulates ACPI in that mode and FreeBSD's ACPI * support will request this transition. */ if (howto & (RB_HALT | RB_POWEROFF)) HYPERVISOR_shutdown(SHUTDOWN_poweroff); else HYPERVISOR_shutdown(SHUTDOWN_reboot); } /*------------------------------ Event Reception -----------------------------*/ static void xctrl_on_watch_event(struct xs_watch *watch, const char **vec, unsigned int len) { const struct xctrl_shutdown_reason *reason; const struct xctrl_shutdown_reason *last_reason; char *result; int error; int result_len; error = xs_read(XST_NIL, "control", "shutdown", &result_len, (void **)&result); if (error != 0) return; reason = xctrl_shutdown_reasons; last_reason = reason + nitems(xctrl_shutdown_reasons); while (reason < last_reason) { if (!strcmp(result, reason->name)) { reason->handler(); break; } reason++; } free(result, M_XENSTORE); } /*------------------ Private Device Attachment Functions --------------------*/ /** * \brief Identify instances of this device type in the system. * * \param driver The driver performing this identify action. * \param parent The NewBus parent device for any devices this method adds. */ static void xctrl_identify(driver_t *driver __unused, device_t parent) { /* * A single device instance for our driver is always present * in a system operating under Xen. */ BUS_ADD_CHILD(parent, 0, driver->name, 0); } /** * \brief Probe for the existence of the Xen Control device * * \param dev NewBus device_t for this Xen control instance. * * \return Always returns BUS_PROBE_NOWILDCARD. */ static int xctrl_probe(device_t dev) { device_set_desc(dev, "Xen Control Device"); return (BUS_PROBE_NOWILDCARD); } /** * \brief Attach the Xen control device. * * \param dev NewBus device_t for this Xen control instance. * * \return On success, 0. Otherwise an errno value indicating the * type of failure. */ static int xctrl_attach(device_t dev) { struct xctrl_softc *xctrl; xctrl = device_get_softc(dev); /* Activate watch */ xctrl->xctrl_watch.node = "control/shutdown"; xctrl->xctrl_watch.callback = xctrl_on_watch_event; xctrl->xctrl_watch.callback_data = (uintptr_t)xctrl; xs_register_watch(&xctrl->xctrl_watch); if (xen_pv_domain()) EVENTHANDLER_REGISTER(shutdown_final, xen_pv_shutdown_final, NULL, SHUTDOWN_PRI_LAST); return (0); } /** * \brief Detach the Xen control device. * * \param dev NewBus device_t for this Xen control device instance. * * \return On success, 0. Otherwise an errno value indicating the * type of failure.
*/ static int xctrl_detach(device_t dev) { struct xctrl_softc *xctrl; xctrl = device_get_softc(dev); /* Release watch */ xs_unregister_watch(&xctrl->xctrl_watch); return (0); } /*-------------------- Private Device Attachment Data -----------------------*/ static device_method_t xctrl_methods[] = { /* Device interface */ DEVMETHOD(device_identify, xctrl_identify), DEVMETHOD(device_probe, xctrl_probe), DEVMETHOD(device_attach, xctrl_attach), DEVMETHOD(device_detach, xctrl_detach), DEVMETHOD_END }; DEFINE_CLASS_0(xctrl, xctrl_driver, xctrl_methods, sizeof(struct xctrl_softc)); devclass_t xctrl_devclass; DRIVER_MODULE(xctrl, xenstore, xctrl_driver, xctrl_devclass, NULL, NULL); Index: projects/powernv/geom/geom_dev.c =================================================================== --- projects/powernv/geom/geom_dev.c (revision 291050) +++ projects/powernv/geom/geom_dev.c (revision 291051) @@ -1,694 +1,711 @@ /*- * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct g_dev_softc { struct mtx sc_mtx; struct cdev *sc_dev; struct cdev *sc_alias; int sc_open; int sc_active; }; static d_open_t g_dev_open; static d_close_t g_dev_close; static d_strategy_t g_dev_strategy; static d_ioctl_t g_dev_ioctl; static struct cdevsw g_dev_cdevsw = { .d_version = D_VERSION, .d_open = g_dev_open, .d_close = g_dev_close, .d_read = physread, .d_write = physwrite, .d_ioctl = g_dev_ioctl, .d_strategy = g_dev_strategy, .d_name = "g_dev", .d_flags = D_DISK | D_TRACKCLOSE, }; static g_init_t g_dev_init; static g_fini_t g_dev_fini; static g_taste_t g_dev_taste; static g_orphan_t g_dev_orphan; static g_attrchanged_t g_dev_attrchanged; static struct g_class g_dev_class = { .name = "DEV", .version = G_VERSION, .init = g_dev_init, .fini = g_dev_fini, .taste = g_dev_taste, .orphan = g_dev_orphan, .attrchanged = g_dev_attrchanged }; /* * We target 262144 (8 x 32768) sectors by default as this significantly * increases the throughput on commonly used SSD's with a marginal * increase in non-interruptible request latency. */ static uint64_t g_dev_del_max_sectors = 262144; SYSCTL_DECL(_kern_geom); SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW, 0, "GEOM_DEV stuff"); SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW, &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single " "delete request sent to the provider. Larger requests are chunked " "so they can be interrupted. (0 = disable chunking)"); static char *dumpdev = NULL; static void g_dev_init(struct g_class *mp) { dumpdev = kern_getenv("dumpdev"); } static void g_dev_fini(struct g_class *mp) { freeenv(dumpdev); dumpdev = NULL; } static int g_dev_setdumpdev(struct cdev *dev, struct thread *td) { struct g_kerneldump kd; struct g_consumer *cp; int error, len; if (dev == NULL) return (set_dumper(NULL, NULL, td)); cp = dev->si_drv2; len = sizeof(kd); kd.offset = 0; kd.length = OFF_MAX; error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd); if (error == 0) { error = set_dumper(&kd.di, devtoname(dev), td); if (error == 0) dev->si_flags |= SI_DUMPDEV; } return (error); } -static void +static int init_dumpdev(struct cdev *dev) { + struct g_consumer *cp; const char *devprefix = "/dev/", *devname; + int error; size_t len; if (dumpdev == NULL) - return; + return (0); + len = strlen(devprefix); devname = devtoname(dev); if (strcmp(devname, dumpdev) != 0 && (strncmp(dumpdev, devprefix, len) != 0 || strcmp(devname, dumpdev + len) != 0)) - return; - if (g_dev_setdumpdev(dev, curthread) == 0) { + return (0); + + cp = (struct g_consumer *)dev->si_drv2; + error = g_access(cp, 1, 0, 0); + if (error != 0) + return (error); + + error = g_dev_setdumpdev(dev, curthread); + if (error == 0) { freeenv(dumpdev); dumpdev = NULL; } + + (void)g_access(cp, -1, 0, 0); + + return (error); } static void g_dev_destroy(void *arg, int flags __unused) { struct g_consumer *cp; struct g_geom *gp; struct g_dev_softc *sc; char buf[SPECNAMELEN + 6]; g_topology_assert(); cp = arg; gp = cp->geom; sc = cp->private; g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name); snprintf(buf, sizeof(buf), "cdev=%s", gp->name); devctl_notify_f("GEOM", "DEV", "DESTROY", buf, M_WAITOK); if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) g_access(cp, -cp->acr, -cp->acw, -cp->ace); g_detach(cp); g_destroy_consumer(cp); g_destroy_geom(gp); 
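The reworked init_dumpdev() above accepts the dumpdev tunable either as a bare device name or prefixed with "/dev/". A small userland sketch of just that comparison; dumpdev_matches() is a hypothetical helper name, not a kernel function.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
dumpdev_matches(const char *devname, const char *dumpdev)
{
	const char *devprefix = "/dev/";
	size_t len = strlen(devprefix);

	if (strcmp(devname, dumpdev) == 0)	/* bare device name */
		return (true);
	/* prefixed form: strip "/dev/" and compare the remainder */
	return (strncmp(dumpdev, devprefix, len) == 0 &&
	    strcmp(devname, dumpdev + len) == 0);
}

int
main(void)
{
	printf("%d\n", dumpdev_matches("ada0s1b", "ada0s1b"));		/* 1 */
	printf("%d\n", dumpdev_matches("ada0s1b", "/dev/ada0s1b"));	/* 1 */
	printf("%d\n", dumpdev_matches("ada0s1b", "/dev/da0"));		/* 0 */
	return (0);
}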
mtx_destroy(&sc->sc_mtx); g_free(sc); } void g_dev_print(void) { struct g_geom *gp; char const *p = ""; LIST_FOREACH(gp, &g_dev_class.geom, geom) { printf("%s%s", p, gp->name); p = " "; } printf("\n"); } static void g_dev_attrchanged(struct g_consumer *cp, const char *attr) { struct g_dev_softc *sc; struct cdev *dev; char buf[SPECNAMELEN + 6]; sc = cp->private; if (strcmp(attr, "GEOM::media") == 0) { dev = sc->sc_dev; snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name); devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK); devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK); dev = sc->sc_alias; if (dev != NULL) { snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name); devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK); devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK); } return; } if (strcmp(attr, "GEOM::physpath") != 0) return; if (g_access(cp, 1, 0, 0) == 0) { char *physpath; int error, physpath_len; physpath_len = MAXPATHLEN; physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO); error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath); g_access(cp, -1, 0, 0); if (error == 0 && strlen(physpath) != 0) { struct cdev *old_alias_dev; struct cdev **alias_devp; dev = sc->sc_dev; old_alias_dev = sc->sc_alias; alias_devp = (struct cdev **)&sc->sc_alias; make_dev_physpath_alias(MAKEDEV_WAITOK, alias_devp, dev, old_alias_dev, physpath); } else if (sc->sc_alias) { destroy_dev((struct cdev *)sc->sc_alias); sc->sc_alias = NULL; } g_free(physpath); } } struct g_provider * g_dev_getprovider(struct cdev *dev) { struct g_consumer *cp; g_topology_assert(); if (dev == NULL) return (NULL); if (dev->si_devsw != &g_dev_cdevsw) return (NULL); cp = dev->si_drv2; return (cp->provider); } static struct g_geom * g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused) { struct g_geom *gp; struct g_consumer *cp; struct g_dev_softc *sc; int error; struct cdev *dev; char buf[SPECNAMELEN + 6]; g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name); g_topology_assert(); gp = g_new_geomf(mp, "%s", pp->name); sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO); mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF); cp = g_new_consumer(gp); cp->private = sc; cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; error = g_attach(cp, pp); KASSERT(error == 0, ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error)); error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev, &g_dev_cdevsw, NULL, UID_ROOT, GID_OPERATOR, 0640, "%s", gp->name); if (error != 0) { printf("%s: make_dev_p() failed (gp->name=%s, error=%d)\n", __func__, gp->name, error); g_detach(cp); g_destroy_consumer(cp); g_destroy_geom(gp); mtx_destroy(&sc->sc_mtx); g_free(sc); return (NULL); } dev->si_flags |= SI_UNMAPPED; sc->sc_dev = dev; dev->si_iosize_max = MAXPHYS; dev->si_drv2 = cp; - init_dumpdev(dev); + error = init_dumpdev(dev); + if (error != 0) + printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n", + __func__, gp->name, error); g_dev_attrchanged(cp, "GEOM::physpath"); snprintf(buf, sizeof(buf), "cdev=%s", gp->name); devctl_notify_f("GEOM", "DEV", "CREATE", buf, M_WAITOK); return (gp); } static int g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct g_consumer *cp; struct g_dev_softc *sc; int error, r, w, e; cp = dev->si_drv2; if (cp == NULL) return (ENXIO); /* g_dev_taste() not done yet */ g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)", cp->geom->name, flags, fmt, td); r = flags & FREAD ? 1 : 0; w = flags & FWRITE ? 
1 : 0; #ifdef notyet e = flags & O_EXCL ? 1 : 0; #else e = 0; #endif /* * This happens on attempt to open a device node with O_EXEC. */ if (r + w + e == 0) return (EINVAL); if (w) { /* * When running in very secure mode, do not allow * opens for writing of any disks. */ error = securelevel_ge(td->td_ucred, 2); if (error) return (error); } g_topology_lock(); error = g_access(cp, r, w, e); g_topology_unlock(); if (error == 0) { sc = cp->private; mtx_lock(&sc->sc_mtx); if (sc->sc_open == 0 && sc->sc_active != 0) wakeup(&sc->sc_active); sc->sc_open += r + w + e; mtx_unlock(&sc->sc_mtx); } return (error); } static int g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct g_consumer *cp; struct g_dev_softc *sc; int error, r, w, e; cp = dev->si_drv2; if (cp == NULL) return (ENXIO); g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)", cp->geom->name, flags, fmt, td); r = flags & FREAD ? -1 : 0; w = flags & FWRITE ? -1 : 0; #ifdef notyet e = flags & O_EXCL ? -1 : 0; #else e = 0; #endif /* * The vgonel(9) - caused by eg. forced unmount of devfs - calls * VOP_CLOSE(9) on devfs vnode without any FREAD or FWRITE flags, * which would result in zero deltas, which in turn would cause * panic in g_access(9). * * Note that we cannot zero the counters (ie. do "r = cp->acr" * etc) instead, because the consumer might be opened in another * devfs instance. */ if (r + w + e == 0) return (EINVAL); sc = cp->private; mtx_lock(&sc->sc_mtx); sc->sc_open += r + w + e; while (sc->sc_open == 0 && sc->sc_active != 0) msleep(&sc->sc_active, &sc->sc_mtx, 0, "PRIBIO", 0); mtx_unlock(&sc->sc_mtx); g_topology_lock(); error = g_access(cp, r, w, e); g_topology_unlock(); return (error); } /* * XXX: Until we have unmessed the ioctl situation, there is a race against * XXX: a concurrent orphanization. We cannot close it by holding topology * XXX: since that would prevent us from doing our job, and stalling events * XXX: will break (actually: stall) the BSD disklabel hacks. 
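g_dev_open() and g_dev_close() translate FREAD/FWRITE into +1/-1 access-count deltas and reject a zero total delta up front, since g_access(9) panics on zero deltas. A standalone sketch of that bookkeeping; the DEMO_* flag values and access_delta() are stand-ins for illustration, not GEOM APIs.

#include <stdio.h>

#define DEMO_FREAD	0x0001
#define DEMO_FWRITE	0x0002

static int
access_delta(int flags, int sign, int *r, int *w)
{
	*r = (flags & DEMO_FREAD) ? sign : 0;
	*w = (flags & DEMO_FWRITE) ? sign : 0;
	/* exclusive count stays 0 until O_EXCL support lands ("notyet") */
	if (*r + *w == 0)
		return (-1);	/* the driver returns EINVAL here */
	return (0);
}

int
main(void)
{
	int r, w;

	if (access_delta(DEMO_FREAD | DEMO_FWRITE, +1, &r, &w) == 0)
		printf("open:  r=%+d w=%+d\n", r, w);
	if (access_delta(DEMO_FREAD | DEMO_FWRITE, -1, &r, &w) == 0)
		printf("close: r=%+d w=%+d\n", r, w);
	if (access_delta(0, -1, &r, &w) != 0)
		printf("zero delta rejected (forced devfs unmount case)\n");
	return (0);
}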
*/ static int g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct g_consumer *cp; struct g_provider *pp; off_t offset, length, chunk; int i, error; cp = dev->si_drv2; pp = cp->provider; error = 0; KASSERT(cp->acr || cp->acw, ("Consumer with zero access count in g_dev_ioctl")); i = IOCPARM_LEN(cmd); switch (cmd) { case DIOCGSECTORSIZE: *(u_int *)data = cp->provider->sectorsize; if (*(u_int *)data == 0) error = ENOENT; break; case DIOCGMEDIASIZE: *(off_t *)data = cp->provider->mediasize; if (*(off_t *)data == 0) error = ENOENT; break; case DIOCGFWSECTORS: error = g_io_getattr("GEOM::fwsectors", cp, &i, data); if (error == 0 && *(u_int *)data == 0) error = ENOENT; break; case DIOCGFWHEADS: error = g_io_getattr("GEOM::fwheads", cp, &i, data); if (error == 0 && *(u_int *)data == 0) error = ENOENT; break; case DIOCGFRONTSTUFF: error = g_io_getattr("GEOM::frontstuff", cp, &i, data); break; case DIOCSKERNELDUMP: if (*(u_int *)data == 0) error = g_dev_setdumpdev(NULL, td); else error = g_dev_setdumpdev(dev, td); break; case DIOCGFLUSH: error = g_io_flush(cp); break; case DIOCGDELETE: offset = ((off_t *)data)[0]; length = ((off_t *)data)[1]; if ((offset % cp->provider->sectorsize) != 0 || (length % cp->provider->sectorsize) != 0 || length <= 0) { printf("%s: offset=%jd length=%jd\n", __func__, offset, length); error = EINVAL; break; } while (length > 0) { chunk = length; if (g_dev_del_max_sectors != 0 && chunk > g_dev_del_max_sectors * cp->provider->sectorsize) { chunk = g_dev_del_max_sectors * cp->provider->sectorsize; } error = g_delete_data(cp, offset, chunk); length -= chunk; offset += chunk; if (error) break; /* * Since the request size can be large, the service * time can likewise be large. We make this ioctl * interruptible by checking for signals for each bio. */
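The comment above describes the chunking that keeps long DIOCGDELETE requests interruptible. A userland sketch of the same loop, where issue_delete() and signal_pending() are stand-ins for g_delete_data() and SIGPENDING(), and the limits mirror the sysctl defaults.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
signal_pending(void)	/* stand-in for SIGPENDING(td) */
{
	return (false);
}

static int
issue_delete(int64_t offset, int64_t length)	/* stand-in for g_delete_data() */
{
	printf("delete offset=%jd length=%jd\n", (intmax_t)offset,
	    (intmax_t)length);
	return (0);
}

int
main(void)
{
	const int64_t sectorsize = 512;
	const int64_t del_max_sectors = 262144;	/* default from the sysctl */
	int64_t offset = 0, length = (int64_t)1 << 31;	/* 2 GB request */
	int64_t chunk;
	int error = 0;

	/* reject misaligned or non-positive requests, as the driver does */
	if (offset % sectorsize != 0 || length % sectorsize != 0 ||
	    length <= 0)
		return (1);
	while (length > 0) {
		chunk = length;
		if (del_max_sectors != 0 &&
		    chunk > del_max_sectors * sectorsize)
			chunk = del_max_sectors * sectorsize;
		error = issue_delete(offset, chunk);
		length -= chunk;
		offset += chunk;
		if (error != 0 || signal_pending())
			break;	/* failed, or the caller got a signal */
	}
	return (error);
}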
if (SIGPENDING(td)) break; } break; case DIOCGIDENT: error = g_io_getattr("GEOM::ident", cp, &i, data); break; case DIOCGPROVIDERNAME: if (pp == NULL) return (ENOENT); strlcpy(data, pp->name, i); break; case DIOCGSTRIPESIZE: *(off_t *)data = cp->provider->stripesize; break; case DIOCGSTRIPEOFFSET: *(off_t *)data = cp->provider->stripeoffset; break; case DIOCGPHYSPATH: error = g_io_getattr("GEOM::physpath", cp, &i, data); if (error == 0 && *(char *)data == '\0') error = ENOENT; break; case DIOCGATTR: { struct diocgattr_arg *arg = (struct diocgattr_arg *)data; if (arg->len > sizeof(arg->value)) { error = EINVAL; break; } error = g_io_getattr(arg->name, cp, &arg->len, &arg->value); break; } default: if (cp->provider->geom->ioctl != NULL) { error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td); } else { error = ENOIOCTL; } } return (error); } static void g_dev_done(struct bio *bp2) { struct g_consumer *cp; struct g_dev_softc *sc; struct bio *bp; int destroy; cp = bp2->bio_from; sc = cp->private; bp = bp2->bio_parent; bp->bio_error = bp2->bio_error; bp->bio_completed = bp2->bio_completed; bp->bio_resid = bp->bio_length - bp2->bio_completed; if (bp2->bio_error != 0) { g_trace(G_T_BIO, "g_dev_done(%p) had error %d", bp2, bp2->bio_error); bp->bio_flags |= BIO_ERROR; } else { g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd", bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed); } g_destroy_bio(bp2); destroy = 0; mtx_lock(&sc->sc_mtx); if ((--sc->sc_active) == 0) { if (sc->sc_open == 0) wakeup(&sc->sc_active); if (sc->sc_dev == NULL) destroy = 1; } mtx_unlock(&sc->sc_mtx); if (destroy) g_post_event(g_dev_destroy, cp, M_NOWAIT, NULL); biodone(bp); } static void g_dev_strategy(struct bio *bp) { struct g_consumer *cp; struct bio *bp2; struct cdev *dev; struct g_dev_softc *sc; KASSERT(bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_FLUSH, ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd)); dev = bp->bio_dev; cp = dev->si_drv2; sc = cp->private; KASSERT(cp->acr || cp->acw, ("Consumer with zero access count in g_dev_strategy")); #ifdef INVARIANTS if ((bp->bio_offset % cp->provider->sectorsize) != 0 || (bp->bio_bcount % cp->provider->sectorsize) != 0) { bp->bio_resid = bp->bio_bcount; biofinish(bp, NULL, EINVAL); return; } #endif mtx_lock(&sc->sc_mtx); KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy")); sc->sc_active++; mtx_unlock(&sc->sc_mtx); for (;;) { /* * XXX: This is not an ideal solution, but I believe it to be * XXX: deadlock safe, all things considered. */ bp2 = g_clone_bio(bp); if (bp2 != NULL) break; pause("gdstrat", hz / 10); } KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place")); bp2->bio_done = g_dev_done; g_trace(G_T_BIO, "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d", bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length, bp2->bio_data, bp2->bio_cmd); g_io_request(bp2, cp); KASSERT(cp->acr || cp->acw, ("g_dev_strategy raced with g_dev_close and lost")); } /* * g_dev_callback() * * Called by devfs when asynchronous device destruction is completed. * - Mark that we have no attached device any more. * - If there are no outstanding requests, schedule geom destruction. * Otherwise destruction will be scheduled later by g_dev_done(). */
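A reduced model of the teardown handshake just described between g_dev_done() and g_dev_callback(): the last completing bio schedules destruction only if devfs has already detached the cdev. The mutex and GEOM event queue are replaced by plain variables and printfs; demo_done(), demo_callback(), and demo_softc are hypothetical names.

#include <stdbool.h>
#include <stdio.h>

struct demo_softc {
	int	sc_active;	/* bios in flight */
	int	sc_open;	/* access count */
	bool	sc_dev_gone;	/* devfs destroy callback has run */
};

static void
demo_done(struct demo_softc *sc)	/* one bio completed */
{
	if (--sc->sc_active == 0 && sc->sc_dev_gone)
		printf("last bio done after detach: destroy geom\n");
}

static void
demo_callback(struct demo_softc *sc)	/* devfs destruction completed */
{
	sc->sc_dev_gone = true;
	if (sc->sc_active == 0)
		printf("no bios in flight: destroy geom now\n");
}

int
main(void)
{
	struct demo_softc sc = { .sc_active = 2 };

	demo_done(&sc);		/* one of two bios completes */
	demo_callback(&sc);	/* device goes away while one remains */
	demo_done(&sc);		/* last bio triggers destruction */
	return (0);
}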
static void g_dev_callback(void *arg) { struct g_consumer *cp; struct g_dev_softc *sc; int destroy; cp = arg; sc = cp->private; g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name); mtx_lock(&sc->sc_mtx); sc->sc_dev = NULL; sc->sc_alias = NULL; destroy = (sc->sc_active == 0); mtx_unlock(&sc->sc_mtx); if (destroy) g_post_event(g_dev_destroy, cp, M_WAITOK, NULL); } /* * g_dev_orphan() * * Called from below when the provider orphaned us. * - Clear any dump settings. * - Request asynchronous device destruction to prevent any more requests * from coming in. The provider is already marked with an error, so * anything which comes in in the interim will be returned immediately. */ static void g_dev_orphan(struct g_consumer *cp) { struct cdev *dev; struct g_dev_softc *sc; g_topology_assert(); sc = cp->private; dev = sc->sc_dev; g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name); /* Reset any dump-area set on this device */ if (dev->si_flags & SI_DUMPDEV) (void)set_dumper(NULL, NULL, curthread); /* Destroy the struct cdev *so we get no more requests */ destroy_dev_sched_cb(dev, g_dev_callback, cp); } DECLARE_GEOM_CLASS(g_dev_class, g_dev); Index: projects/powernv/kern/imgact_elf.c =================================================================== --- projects/powernv/kern/imgact_elf.c (revision 291050) +++ projects/powernv/kern/imgact_elf.c (revision 291051) @@ -1,2296 +1,2299 @@ /*- * Copyright (c) 2000 David O'Brien * Copyright (c) 1995-1996 Søren Schmidt * Copyright (c) 1996 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include "opt_compat.h" #include "opt_gzio.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ELF_NOTE_ROUNDSIZE 4 #define OLD_EI_BRAND 8 static int __elfN(check_header)(const Elf_Ehdr *hdr); static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int interp_name_len, int32_t *osrel); static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry, size_t pagesize); static int __elfN(load_section)(struct image_params *imgp, vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot, size_t pagesize); static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel); static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel); static boolean_t __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote, int32_t *osrel); static vm_prot_t __elfN(trans_prot)(Elf_Word); static Elf_Word __elfN(untrans_prot)(vm_prot_t); SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0, ""); #define CORE_BUF_SIZE (16 * 1024) int __elfN(fallback_brand) = -1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort"); static int elf_legacy_coredump = 0; SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, &elf_legacy_coredump, 0, ""); int __elfN(nxstack) = #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ 1; #else 0; #endif SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, nxstack, CTLFLAG_RW, &__elfN(nxstack), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack"); #if __ELF_WORD_SIZE == 32 #if defined(__amd64__) int i386_read_exec = 0; SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0, "enable execution from readable segments"); #endif #endif static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; #define trunc_page_ps(va, ps) ((va) & ~(ps - 1)) #define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1)) #define aligned(a, t) (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a)) static const char FREEBSD_ABI_VENDOR[] = "FreeBSD"; Elf_Brandnote __elfN(freebsd_brandnote) = { .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR), .hdr.n_descsz = sizeof(int32_t), .hdr.n_type = 1, .vendor = FREEBSD_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = __elfN(freebsd_trans_osrel) }; static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel) { uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); *osrel = *(const int32_t *)(p); return (TRUE); } static const char GNU_ABI_VENDOR[] = "GNU"; static int GNU_KFREEBSD_ABI_DESC = 3; Elf_Brandnote __elfN(kfreebsd_brandnote) = { .hdr.n_namesz = sizeof(GNU_ABI_VENDOR), .hdr.n_descsz = 16, /* XXX at least 16 */ .hdr.n_type = 1, .vendor = GNU_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = kfreebsd_trans_osrel }; static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel) { 
const Elf32_Word *desc; uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); desc = (const Elf32_Word *)p; if (desc[0] != GNU_KFREEBSD_ABI_DESC) return (FALSE); /* * Debian GNU/kFreeBSD embed the earliest compatible kernel version * (__FreeBSD_version: Rxx) in the LSB way. */ *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3]; return (TRUE); } int __elfN(insert_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == NULL) { elf_brand_list[i] = entry; break; } } if (i == MAX_BRANDS) { printf("WARNING: %s: could not insert brandinfo entry: %p\n", __func__, entry); return (-1); } return (0); } int __elfN(remove_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == entry) { elf_brand_list[i] = NULL; break; } } if (i == MAX_BRANDS) return (-1); return (0); } int __elfN(brand_inuse)(Elf_Brandinfo *entry) { struct proc *p; int rval = FALSE; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { if (p->p_sysent == entry->sysvec) { rval = TRUE; break; } } sx_sunlock(&allproc_lock); return (rval); } static Elf_Brandinfo * __elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int interp_name_len, int32_t *osrel) { const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; Elf_Brandinfo *bi; boolean_t ret; int i; /* * We support four types of branding -- (1) the ELF EI_OSABI field * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string * branding w/in the ELF header, (3) path of the `interp_path' * field, and (4) the ".note.ABI-tag" ELF section. */ /* Look for an ".note.ABI-tag" ELF section */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL) continue; if (hdr->e_machine == bi->machine && (bi->flags & (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) { ret = __elfN(check_note)(imgp, bi->brand_note, osrel); if (ret) return (bi); } } /* If the executable has a brand, search for it in the brand list. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) continue; if (hdr->e_machine == bi->machine && (hdr->e_ident[EI_OSABI] == bi->brand || strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND], - bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0)) - return (bi); + bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0)) { + /* Looks good, but give brand a chance to veto */ + if (!bi->header_supported || bi->header_supported(imgp)) + return (bi); + } } /* No known brand, see if the header is recognized by any brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY || bi->header_supported == NULL) continue; if (hdr->e_machine == bi->machine) { ret = bi->header_supported(imgp); if (ret) return (bi); } } /* Lacking a known brand, search for a recognized interpreter. 
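Earlier in this hunk, kfreebsd_trans_osrel() combines three descriptor words into an osrel value in the __FreeBSD_version layout, major * 100000 + minor * 1000 + teeny. A tiny sketch of that decoding; the sample descriptor words are made up.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t desc[4] = { 3, 9, 1, 0 };	/* ABI tag 3, then version 9.1.0 */
	int32_t osrel;

	/* same combination as kfreebsd_trans_osrel() above */
	osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
	printf("osrel = %d\n", osrel);		/* prints 901000 */
	return (0);
}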
*/ if (interp != NULL) { for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) continue; if (hdr->e_machine == bi->machine && /* ELF image p_filesz includes terminating zero */ strlen(bi->interp_path) + 1 == interp_name_len && strncmp(interp, bi->interp_path, interp_name_len) == 0) return (bi); } } /* Lacking a recognized interpreter, try the default brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) continue; if (hdr->e_machine == bi->machine && __elfN(fallback_brand) == bi->brand) return (bi); } return (NULL); } static int __elfN(check_header)(const Elf_Ehdr *hdr) { Elf_Brandinfo *bi; int i; if (!IS_ELF(*hdr) || hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || hdr->e_ident[EI_DATA] != ELF_TARG_DATA || hdr->e_ident[EI_VERSION] != EV_CURRENT || hdr->e_phentsize != sizeof(Elf_Phdr) || hdr->e_version != ELF_TARG_VER) return (ENOEXEC); /* * Make sure we have at least one brand for this machine. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi != NULL && bi->machine == hdr->e_machine) break; } if (i == MAX_BRANDS) return (ENOEXEC); return (0); } static int __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot) { struct sf_buf *sf; int error; vm_offset_t off; /* * Create the page if it doesn't exist yet. Ignore errors. */ vm_map_lock(map); vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), VM_PROT_ALL, VM_PROT_ALL, 0); vm_map_unlock(map); /* * Find the page from the underlying object. */ if (object) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, end - start); vm_imgact_unmap_page(sf); if (error) { return (KERN_FAILURE); } } return (KERN_SUCCESS); } static int __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow) { struct sf_buf *sf; vm_offset_t off; vm_size_t sz; int error, rv; if (start != trunc_page(start)) { rv = __elfN(map_partial)(map, object, offset, start, round_page(start), prot); if (rv) return (rv); offset += round_page(start) - start; start = round_page(start); } if (end != round_page(end)) { rv = __elfN(map_partial)(map, object, offset + trunc_page(end) - start, trunc_page(end), end, prot); if (rv) return (rv); end = trunc_page(end); } if (end > start) { if (offset & PAGE_MASK) { /* * The mapping is not page aligned. This means we have * to copy the data. Sigh. 
*/ rv = vm_map_find(map, NULL, 0, &start, end - start, 0, VMFS_NO_SPACE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0); if (rv) return (rv); if (object == NULL) return (KERN_SUCCESS); for (; start < end; start += sz) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); sz = end - start; if (sz > PAGE_SIZE - off) sz = PAGE_SIZE - off; error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, sz); vm_imgact_unmap_page(sf); if (error) { return (KERN_FAILURE); } offset += sz; } rv = KERN_SUCCESS; } else { vm_object_reference(object); vm_map_lock(map); rv = vm_map_insert(map, object, offset, start, end, prot, VM_PROT_ALL, cow); vm_map_unlock(map); if (rv != KERN_SUCCESS) vm_object_deallocate(object); } return (rv); } else { return (KERN_SUCCESS); } } static int __elfN(load_section)(struct image_params *imgp, vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot, size_t pagesize) { struct sf_buf *sf; size_t map_len; vm_map_t map; vm_object_t object; vm_offset_t map_addr; int error, rv, cow; size_t copy_len; vm_offset_t file_addr; /* * It's necessary to fail if the filsz + offset taken from the * header is greater than the actual file pager object's size. * If we were to allow this, then the vm_map_find() below would * walk right off the end of the file object and into the ether. * * While I'm here, might as well check for something else that * is invalid: filsz cannot be greater than memsz. */ if ((off_t)filsz + offset > imgp->attr->va_size || filsz > memsz) { uprintf("elf_load_section: truncated ELF file\n"); return (ENOEXEC); } object = imgp->object; map = &imgp->proc->p_vmspace->vm_map; map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize); file_addr = trunc_page_ps(offset, pagesize); /* * We have two choices. We can either clear the data in the last page * of an oversized mapping, or we can start the anon mapping a page * early and copy the initialized data into that first page. We * choose the second.. */ if (memsz > filsz) map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr; else map_len = round_page_ps(offset + filsz, pagesize) - file_addr; if (map_len != 0) { /* cow flags: don't dump readonly sections in core */ cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); rv = __elfN(map_insert)(map, object, file_addr, /* file offset */ map_addr, /* virtual start */ map_addr + map_len,/* virtual end */ prot, cow); if (rv != KERN_SUCCESS) return (EINVAL); /* we can stop now if we've covered it all */ if (memsz == filsz) { return (0); } } /* * We have to get the remaining bit of the file into the first part * of the oversized map segment. This is normally because the .data * segment in the file is extended to provide bss. It's a neat idea * to try and save a page, but it's a pain in the behind to implement. */ copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize); map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize); map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) - map_addr; /* This had damn well better be true! 
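For a segment whose memsz exceeds filsz, load_section() above splits the work into a file-backed mapping of whole pages, an anonymous mapping for the remainder, and a short copy of the initialized tail into the anonymous part's first page. A sketch of just that arithmetic, with arbitrary sample numbers and the page size fixed at 4K for the demo.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE	((uintmax_t)4096)
#define TRUNC(x)	((x) & ~(DEMO_PAGE - 1))
#define ROUND(x)	(((x) + DEMO_PAGE - 1) & ~(DEMO_PAGE - 1))

int
main(void)
{
	uintmax_t offset = 0x2000, filsz = 0x5234, memsz = 0x9000;
	uintmax_t vmaddr = 0x401000;
	uintmax_t file_map_len, copy_len, anon_addr, anon_len;

	/* file-backed part: whole pages of file data only */
	file_map_len = TRUNC(offset + filsz) - TRUNC(offset);
	/* leftover initialized bytes that share a page with bss */
	copy_len = (offset + filsz) - TRUNC(offset + filsz);
	/* anonymous part: from the page holding the tail through memsz */
	anon_addr = TRUNC(vmaddr + filsz);
	anon_len = ROUND(vmaddr + memsz) - anon_addr;

	printf("file map: %#jx bytes, copy tail: %#jx bytes\n",
	    file_map_len, copy_len);
	printf("anon map: %#jx bytes @ %#jx\n", anon_len, anon_addr);
	return (0);
}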
*/ if (map_len != 0) { rv = __elfN(map_insert)(map, NULL, 0, map_addr, map_addr + map_len, VM_PROT_ALL, 0); if (rv != KERN_SUCCESS) { return (EINVAL); } } if (copy_len != 0) { vm_offset_t off; sf = vm_imgact_map_page(object, offset + filsz); if (sf == NULL) return (EIO); /* send the page fragment to user space */ off = trunc_page_ps(offset + filsz, pagesize) - trunc_page(offset + filsz); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)map_addr, copy_len); vm_imgact_unmap_page(sf); if (error) { return (error); } } /* * set it to the specified protection. * XXX had better undo the damage from pasting over the cracks here! */ vm_map_protect(map, trunc_page(map_addr), round_page(map_addr + map_len), prot, FALSE); return (0); } /* * Load the file "file" into memory. It may be either a shared object * or an executable. * * The "addr" reference parameter is in/out. On entry, it specifies * the address where a shared object should be loaded. If the file is * an executable, this value is ignored. On exit, "addr" specifies * where the file was actually loaded. * * The "entry" reference parameter is out only. On exit, it specifies * the entry point for the loaded file. */ static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry, size_t pagesize) { struct { struct nameidata nd; struct vattr attr; struct image_params image_params; } *tempdata; const Elf_Ehdr *hdr = NULL; const Elf_Phdr *phdr = NULL; struct nameidata *nd; struct vattr *attr; struct image_params *imgp; vm_prot_t prot; u_long rbase; u_long base_addr = 0; int error, i, numsegs; #ifdef CAPABILITY_MODE /* * XXXJA: This check can go away once we are sufficiently confident * that the checks in namei() are correct. */ if (IN_CAPABILITY_MODE(curthread)) return (ECAPMODE); #endif tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK); nd = &tempdata->nd; attr = &tempdata->attr; imgp = &tempdata->image_params; /* * Initialize part of the common data */ imgp->proc = p; imgp->attr = attr; imgp->firstpage = NULL; imgp->image_header = NULL; imgp->object = NULL; imgp->execlabel = NULL; NDINIT(nd, LOOKUP, LOCKLEAF | FOLLOW, UIO_SYSSPACE, file, curthread); if ((error = namei(nd)) != 0) { nd->ni_vp = NULL; goto fail; } NDFREE(nd, NDF_ONLY_PNBUF); imgp->vp = nd->ni_vp; /* * Check permissions, modes, uid, etc on the file, and "open" it. */ error = exec_check_permissions(imgp); if (error) goto fail; error = exec_map_first_page(imgp); if (error) goto fail; /* * Also make certain that the interpreter stays the same, so set * its VV_TEXT flag, too. 
*/ VOP_SET_TEXT(nd->ni_vp); imgp->object = nd->ni_vp->v_object; hdr = (const Elf_Ehdr *)imgp->image_header; if ((error = __elfN(check_header)(hdr)) != 0) goto fail; if (hdr->e_type == ET_DYN) rbase = *addr; else if (hdr->e_type == ET_EXEC) rbase = 0; else { error = ENOEXEC; goto fail; } /* Only support headers that fit within first page for now */ if ((hdr->e_phoff > PAGE_SIZE) || (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) { error = ENOEXEC; goto fail; } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { error = ENOEXEC; goto fail; } for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) { /* Loadable segment */ prot = __elfN(trans_prot)(phdr[i].p_flags); error = __elfN(load_section)(imgp, phdr[i].p_offset, (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, phdr[i].p_memsz, phdr[i].p_filesz, prot, pagesize); if (error != 0) goto fail; /* * Establish the base address if this is the * first segment. */ if (numsegs == 0) base_addr = trunc_page(phdr[i].p_vaddr + rbase); numsegs++; } } *addr = base_addr; *entry = (unsigned long)hdr->e_entry + rbase; fail: if (imgp->firstpage) exec_unmap_first_page(imgp); if (nd->ni_vp) vput(nd->ni_vp); free(tempdata, M_TEMP); return (error); } static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) { struct thread *td; const Elf_Ehdr *hdr; const Elf_Phdr *phdr; Elf_Auxargs *elf_auxargs; struct vmspace *vmspace; const char *err_str, *newinterp; char *interp, *interp_buf, *path; Elf_Brandinfo *brand_info; struct sysentvec *sv; vm_prot_t prot; u_long text_size, data_size, total_size, text_addr, data_addr; u_long seg_size, seg_addr, addr, baddr, et_dyn_addr, entry, proghdr; int32_t osrel; int error, i, n, interp_name_len, have_interp; hdr = (const Elf_Ehdr *)imgp->image_header; /* * Do we have a valid ELF header ? * * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later * if particular brand doesn't support it. */ if (__elfN(check_header)(hdr) != 0 || (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)) return (-1); /* * From here on down, we return an errno, not -1, as we've * detected an ELF file. 
*/ if ((hdr->e_phoff > PAGE_SIZE) || (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) { /* Only support headers in first page for now */ uprintf("Program headers not in the first page\n"); return (ENOEXEC); } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { uprintf("Unaligned program headers\n"); return (ENOEXEC); } n = error = 0; baddr = 0; osrel = 0; text_size = data_size = total_size = text_addr = data_addr = 0; entry = proghdr = 0; interp_name_len = 0; err_str = newinterp = NULL; interp = interp_buf = NULL; td = curthread; for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_LOAD: if (n == 0) baddr = phdr[i].p_vaddr; n++; break; case PT_INTERP: /* Path to interpreter */ if (phdr[i].p_filesz > MAXPATHLEN) { uprintf("Invalid PT_INTERP\n"); error = ENOEXEC; goto ret; } interp_name_len = phdr[i].p_filesz; if (phdr[i].p_offset > PAGE_SIZE || interp_name_len > PAGE_SIZE - phdr[i].p_offset) { VOP_UNLOCK(imgp->vp, 0); interp_buf = malloc(interp_name_len + 1, M_TEMP, M_WAITOK); vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); error = vn_rdwr(UIO_READ, imgp->vp, interp_buf, interp_name_len, phdr[i].p_offset, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, NULL, td); if (error != 0) { uprintf("i/o error PT_INTERP\n"); goto ret; } interp_buf[interp_name_len] = '\0'; interp = interp_buf; } else { interp = __DECONST(char *, imgp->image_header) + phdr[i].p_offset; } break; case PT_GNU_STACK: if (__elfN(nxstack)) imgp->stack_prot = __elfN(trans_prot)(phdr[i].p_flags); imgp->stack_sz = phdr[i].p_memsz; break; } } brand_info = __elfN(get_brandinfo)(imgp, interp, interp_name_len, &osrel); if (brand_info == NULL) { uprintf("ELF binary type \"%u\" not known.\n", hdr->e_ident[EI_OSABI]); error = ENOEXEC; goto ret; } if (hdr->e_type == ET_DYN) { if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) { uprintf("Cannot execute shared object\n"); error = ENOEXEC; goto ret; } /* * Honour the base load address from the dso if it is * non-zero for some reason. */ if (baddr == 0) et_dyn_addr = ET_DYN_LOAD_ADDR; else et_dyn_addr = 0; } else et_dyn_addr = 0; sv = brand_info->sysvec; if (interp != NULL && brand_info->interp_newpath != NULL) newinterp = brand_info->interp_newpath; /* * Avoid a possible deadlock if the current address space is destroyed * and that address space maps the locked vnode. In the common case, * the locked vnode's v_usecount is decremented but remains greater * than zero. Consequently, the vnode lock is not needed by vrele(). * However, in cases where the vnode lock is external, such as nullfs, * v_usecount may become zero. * * The VV_TEXT flag prevents modifications to the executable while * the vnode is unlocked. */ VOP_UNLOCK(imgp->vp, 0); error = exec_new_vmspace(imgp, sv); imgp->proc->p_sysent = sv; vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); if (error != 0) goto ret; for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_LOAD: /* Loadable segment */ if (phdr[i].p_memsz == 0) break; prot = __elfN(trans_prot)(phdr[i].p_flags); error = __elfN(load_section)(imgp, phdr[i].p_offset, (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr, phdr[i].p_memsz, phdr[i].p_filesz, prot, sv->sv_pagesize); if (error != 0) goto ret; /* * If this segment contains the program headers, * remember their virtual address for the AT_PHDR * aux entry. Static binaries don't usually include * a PT_PHDR entry. 
*/ if (phdr[i].p_offset == 0 && hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize <= phdr[i].p_filesz) proghdr = phdr[i].p_vaddr + hdr->e_phoff + et_dyn_addr; seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr); seg_size = round_page(phdr[i].p_memsz + phdr[i].p_vaddr + et_dyn_addr - seg_addr); /* * Make the largest executable segment the official * text segment and all others data. * * Note that obreak() assumes that data_addr + * data_size == end of data load area, and the ELF * file format expects segments to be sorted by * address. If multiple data segments exist, the * last one will be used. */ if (phdr[i].p_flags & PF_X && text_size < seg_size) { text_size = seg_size; text_addr = seg_addr; } else { data_size = seg_size; data_addr = seg_addr; } total_size += seg_size; break; case PT_PHDR: /* Program header table info */ proghdr = phdr[i].p_vaddr + et_dyn_addr; break; default: break; } } if (data_addr == 0 && data_size == 0) { data_addr = text_addr; data_size = text_size; } entry = (u_long)hdr->e_entry + et_dyn_addr; /* * Check limits. It should be safe to check the * limits after loading the segments since we do * not actually fault in all the segments pages. */ PROC_LOCK(imgp->proc); if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA)) err_str = "Data segment size exceeds process limit"; else if (text_size > maxtsiz) err_str = "Text segment size exceeds system limit"; else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM)) err_str = "Total segment size exceeds process limit"; else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0) err_str = "Data segment size exceeds resource limit"; else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0) err_str = "Total segment size exceeds resource limit"; if (err_str != NULL) { PROC_UNLOCK(imgp->proc); uprintf("%s\n", err_str); error = ENOMEM; goto ret; } vmspace = imgp->proc->p_vmspace; vmspace->vm_tsize = text_size >> PAGE_SHIFT; vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; vmspace->vm_dsize = data_size >> PAGE_SHIFT; vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr; /* * We load the dynamic linker where a userland call * to mmap(0, ...) would put it. The rationale behind this * calculation is that it leaves room for the heap to grow to * its maximum allowed size. 
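A sketch of the interpreter placement computed just below: round_page(data segment base + the RLIMIT_DATA hard limit), which leaves the heap its full growth room before the runtime linker. The sample numbers mimic a 32-bit layout and are illustrative only.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uintmax_t page_mask = 4096 - 1;
	uintmax_t daddr = 0x08100000;			/* start of data segment */
	uintmax_t rlimit_data = 512 * 1024 * 1024;	/* RLIMIT_DATA hard limit */
	uintmax_t addr;

	/* round_page(vm_daddr + lim_max(td, RLIMIT_DATA)) */
	addr = (daddr + rlimit_data + page_mask) & ~page_mask;
	printf("rtld load base: %#jx\n", addr);		/* 0x28100000 */
	return (0);
}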
*/ addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td, RLIMIT_DATA)); PROC_UNLOCK(imgp->proc); imgp->entry_addr = entry; if (interp != NULL) { have_interp = FALSE; VOP_UNLOCK(imgp->vp, 0); if (brand_info->emul_path != NULL && brand_info->emul_path[0] != '\0') { path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path, interp); error = __elfN(load_file)(imgp->proc, path, &addr, &imgp->entry_addr, sv->sv_pagesize); free(path, M_TEMP); if (error == 0) have_interp = TRUE; } if (!have_interp && newinterp != NULL) { error = __elfN(load_file)(imgp->proc, newinterp, &addr, &imgp->entry_addr, sv->sv_pagesize); if (error == 0) have_interp = TRUE; } if (!have_interp) { error = __elfN(load_file)(imgp->proc, interp, &addr, &imgp->entry_addr, sv->sv_pagesize); } vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); if (error != 0) { uprintf("ELF interpreter %s not found\n", interp); goto ret; } } else addr = et_dyn_addr; /* * Construct auxargs table (used by the fixup routine) */ elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); elf_auxargs->execfd = -1; elf_auxargs->phdr = proghdr; elf_auxargs->phent = hdr->e_phentsize; elf_auxargs->phnum = hdr->e_phnum; elf_auxargs->pagesz = PAGE_SIZE; elf_auxargs->base = addr; elf_auxargs->flags = 0; elf_auxargs->entry = entry; elf_auxargs->hdr_eflags = hdr->e_flags; imgp->auxargs = elf_auxargs; imgp->interpreted = 0; imgp->reloc_base = addr; imgp->proc->p_osrel = osrel; ret: free(interp_buf, M_TEMP); return (error); } #define suword __CONCAT(suword, __ELF_WORD_SIZE) int __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp) { Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs; Elf_Addr *base; Elf_Addr *pos; base = (Elf_Addr *)*stack_base; pos = base + (imgp->args->argc + imgp->args->envc + 2); if (args->execfd != -1) AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); AUXARGS_ENTRY(pos, AT_PHDR, args->phdr); AUXARGS_ENTRY(pos, AT_PHENT, args->phent); AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); AUXARGS_ENTRY(pos, AT_BASE, args->base); #ifdef AT_EHDRFLAGS AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags); #endif if (imgp->execpathp != 0) AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp); AUXARGS_ENTRY(pos, AT_OSRELDATE, imgp->proc->p_ucred->cr_prison->pr_osreldate); if (imgp->canary != 0) { AUXARGS_ENTRY(pos, AT_CANARY, imgp->canary); AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen); } AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus); if (imgp->pagesizes != 0) { AUXARGS_ENTRY(pos, AT_PAGESIZES, imgp->pagesizes); AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen); } if (imgp->sysent->sv_timekeep_base != 0) { AUXARGS_ENTRY(pos, AT_TIMEKEEP, imgp->sysent->sv_timekeep_base); } AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot : imgp->sysent->sv_stackprot); AUXARGS_ENTRY(pos, AT_NULL, 0); free(imgp->auxargs, M_TEMP); imgp->auxargs = NULL; base--; suword(base, (long)imgp->args->argc); *stack_base = (register_t *)base; return (0); } /* * Code for generating ELF core dumps. */ typedef void (*segment_callback)(vm_map_entry_t, void *); /* Closure for cb_put_phdr(). */ struct phdr_closure { Elf_Phdr *phdr; /* Program header to fill in */ Elf_Off offset; /* Offset of segment in core file */ }; /* Closure for cb_size_segment(). */ struct sseg_closure { int count; /* Count of writable segments. 
*/ size_t size; /* Total size of all writable segments. */ }; typedef void (*outfunc_t)(void *, struct sbuf *, size_t *); struct note_info { int type; /* Note type. */ outfunc_t outfunc; /* Output function. */ void *outarg; /* Argument for the output function. */ size_t outsize; /* Output size. */ TAILQ_ENTRY(note_info) link; /* Link to the next note info. */ }; TAILQ_HEAD(note_info_list, note_info); /* Coredump output parameters. */ struct coredump_params { off_t offset; struct ucred *active_cred; struct ucred *file_cred; struct thread *td; struct vnode *vp; struct gzio_stream *gzs; }; static void cb_put_phdr(vm_map_entry_t, void *); static void cb_size_segment(vm_map_entry_t, void *); static int core_write(struct coredump_params *, void *, size_t, off_t, enum uio_seg); static void each_writable_segment(struct thread *, segment_callback, void *); static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t, struct note_info_list *, size_t); static void __elfN(prepare_notes)(struct thread *, struct note_info_list *, size_t *); static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t); static void __elfN(putnote)(struct note_info *, struct sbuf *); static size_t register_note(struct note_info_list *, int, outfunc_t, void *); static int sbuf_drain_core_output(void *, const char *, int); static int sbuf_drain_count(void *arg, const char *data, int len); static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *); static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *); static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *); static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *); static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *); static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *); static void note_procstat_files(void *, struct sbuf *, size_t *); static void note_procstat_groups(void *, struct sbuf *, size_t *); static void note_procstat_osrel(void *, struct sbuf *, size_t *); static void note_procstat_rlimit(void *, struct sbuf *, size_t *); static void note_procstat_umask(void *, struct sbuf *, size_t *); static void note_procstat_vmmap(void *, struct sbuf *, size_t *); #ifdef GZIO extern int compress_user_cores_gzlevel; /* * Write out a core segment to the compression stream. */ static int compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len) { u_int chunk_len; int error; while (len > 0) { chunk_len = MIN(len, CORE_BUF_SIZE); copyin(base, buf, chunk_len); error = gzio_write(p->gzs, buf, chunk_len); if (error != 0) break; base += chunk_len; len -= chunk_len; } return (error); } static int core_gz_write(void *base, size_t len, off_t offset, void *arg) { return (core_write((struct coredump_params *)arg, base, len, offset, UIO_SYSSPACE)); } #endif /* GZIO */ static int core_write(struct coredump_params *p, void *base, size_t len, off_t offset, enum uio_seg seg) { return (vn_rdwr_inchunks(UIO_WRITE, p->vp, base, len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED, p->active_cred, p->file_cred, NULL, p->td)); } static int core_output(void *base, size_t len, off_t offset, struct coredump_params *p, void *tmpbuf) { #ifdef GZIO if (p->gzs != NULL) return (compress_chunk(p, base, tmpbuf, len)); #endif return (core_write(p, base, len, offset, UIO_USERSPACE)); } /* * Drain into a core file. 
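compress_chunk() above streams each large segment through a bounded staging buffer, CORE_BUF_SIZE bytes at a time. A userland sketch of the same loop; sink_write() stands in for gzio_write(), and memcpy() for copyin() since this demo runs in a single address space.

#include <stdio.h>
#include <string.h>

#define DEMO_BUF_SIZE	(16 * 1024)	/* CORE_BUF_SIZE */

static int
sink_write(const char *buf, unsigned int len)
{
	(void)buf;	/* contents not inspected in this demo */
	printf("wrote %u bytes\n", len);
	return (0);
}

static int
stream_chunks(const char *base, unsigned int len, char *buf)
{
	unsigned int chunk_len;
	int error = 0;

	while (len > 0) {
		chunk_len = len < DEMO_BUF_SIZE ? len : DEMO_BUF_SIZE;
		memcpy(buf, base, chunk_len);	/* copyin() in the kernel */
		error = sink_write(buf, chunk_len);
		if (error != 0)
			break;
		base += chunk_len;
		len -= chunk_len;
	}
	return (error);
}

int
main(void)
{
	static char src[40000], buf[DEMO_BUF_SIZE];

	/* 40000 bytes -> two full chunks plus a 7232-byte tail */
	return (stream_chunks(src, sizeof(src), buf));
}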
*/ static int sbuf_drain_core_output(void *arg, const char *data, int len) { struct coredump_params *p; int error, locked; p = (struct coredump_params *)arg; /* * Some kern_proc out routines that print to this sbuf may * call us with the process lock held. Draining with the * non-sleepable lock held is unsafe. The lock is needed for * those routines when dumping a live process. In our case we * can safely release the lock before draining and acquire * again after. */ locked = PROC_LOCKED(p->td->td_proc); if (locked) PROC_UNLOCK(p->td->td_proc); #ifdef GZIO if (p->gzs != NULL) error = gzio_write(p->gzs, __DECONST(char *, data), len); else #endif error = core_write(p, __DECONST(void *, data), len, p->offset, UIO_SYSSPACE); if (locked) PROC_LOCK(p->td->td_proc); if (error != 0) return (-error); p->offset += len; return (len); } /* * Drain into a counter. */ static int sbuf_drain_count(void *arg, const char *data __unused, int len) { size_t *sizep; sizep = (size_t *)arg; *sizep += len; return (len); } int __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) { struct ucred *cred = td->td_ucred; int error = 0; struct sseg_closure seginfo; struct note_info_list notelst; struct coredump_params params; struct note_info *ninfo; void *hdr, *tmpbuf; size_t hdrsize, notesz, coresize; #ifdef GZIO boolean_t compress; compress = (flags & IMGACT_CORE_COMPRESS) != 0; #endif hdr = NULL; tmpbuf = NULL; TAILQ_INIT(¬elst); /* Size the program segments. */ seginfo.count = 0; seginfo.size = 0; each_writable_segment(td, cb_size_segment, &seginfo); /* * Collect info about the core file header area. */ hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count); __elfN(prepare_notes)(td, ¬elst, ¬esz); coresize = round_page(hdrsize + notesz) + seginfo.size; /* Set up core dump parameters. */ params.offset = 0; params.active_cred = cred; params.file_cred = NOCRED; params.td = td; params.vp = vp; params.gzs = NULL; #ifdef RACCT if (racct_enable) { PROC_LOCK(td->td_proc); error = racct_add(td->td_proc, RACCT_CORE, coresize); PROC_UNLOCK(td->td_proc); if (error != 0) { error = EFAULT; goto done; } } #endif if (coresize >= limit) { error = EFAULT; goto done; } #ifdef GZIO /* Create a compression stream if necessary. */ if (compress) { params.gzs = gzio_init(core_gz_write, GZIO_DEFLATE, CORE_BUF_SIZE, compress_user_cores_gzlevel, ¶ms); if (params.gzs == NULL) { error = EFAULT; goto done; } tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO); } #endif /* * Allocate memory for building the header, fill it up, * and write it out following the notes. */ hdr = malloc(hdrsize, M_TEMP, M_WAITOK); if (hdr == NULL) { error = EINVAL; goto done; } error = __elfN(corehdr)(¶ms, seginfo.count, hdr, hdrsize, ¬elst, notesz); /* Write the contents of all of the writable segments. 
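The note machinery runs each emitter twice: a sizing pass whose drain (sbuf_drain_count above) only accumulates the would-be output length, then an emission pass that writes exactly that many bytes. A reduced sketch of the two-pass pattern; format_note() is a hypothetical stand-in for the kern_proc *_out() routines.

#include <stddef.h>
#include <stdio.h>

typedef int (*drain_fn)(void *arg, const char *data, int len);

static int
drain_count(void *arg, const char *data, int len)
{
	(void)data;
	*(size_t *)arg += len;	/* just measure, discard the bytes */
	return (len);
}

static int
drain_print(void *arg, const char *data, int len)
{
	(void)arg;
	fwrite(data, 1, len, stdout);
	return (len);
}

static void
format_note(drain_fn drain, void *arg)	/* hypothetical emitter */
{
	drain(arg, "demo-note:", 10);
	drain(arg, "payload\n", 8);
}

int
main(void)
{
	size_t size = 0;

	format_note(drain_count, &size);	/* pass 1: size */
	printf("note needs %zu bytes\n", size);
	format_note(drain_print, NULL);		/* pass 2: emit */
	return (0);
}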
*/ if (error == 0) { Elf_Phdr *php; off_t offset; int i; php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; offset = round_page(hdrsize + notesz); for (i = 0; i < seginfo.count; i++) { error = core_output((caddr_t)(uintptr_t)php->p_vaddr, php->p_filesz, offset, ¶ms, tmpbuf); if (error != 0) break; offset += php->p_filesz; php++; } #ifdef GZIO if (error == 0 && compress) error = gzio_flush(params.gzs); #endif } if (error) { log(LOG_WARNING, "Failed to write core file for process %s (error %d)\n", curproc->p_comm, error); } done: #ifdef GZIO if (compress) { free(tmpbuf, M_TEMP); if (params.gzs != NULL) gzio_fini(params.gzs); } #endif while ((ninfo = TAILQ_FIRST(¬elst)) != NULL) { TAILQ_REMOVE(¬elst, ninfo, link); free(ninfo, M_TEMP); } if (hdr != NULL) free(hdr, M_TEMP); return (error); } /* * A callback for each_writable_segment() to write out the segment's * program header entry. */ static void cb_put_phdr(entry, closure) vm_map_entry_t entry; void *closure; { struct phdr_closure *phc = (struct phdr_closure *)closure; Elf_Phdr *phdr = phc->phdr; phc->offset = round_page(phc->offset); phdr->p_type = PT_LOAD; phdr->p_offset = phc->offset; phdr->p_vaddr = entry->start; phdr->p_paddr = 0; phdr->p_filesz = phdr->p_memsz = entry->end - entry->start; phdr->p_align = PAGE_SIZE; phdr->p_flags = __elfN(untrans_prot)(entry->protection); phc->offset += phdr->p_filesz; phc->phdr++; } /* * A callback for each_writable_segment() to gather information about * the number of segments and their total size. */ static void cb_size_segment(entry, closure) vm_map_entry_t entry; void *closure; { struct sseg_closure *ssc = (struct sseg_closure *)closure; ssc->count++; ssc->size += entry->end - entry->start; } /* * For each writable segment in the process's memory map, call the given * function with a pointer to the map entry and some arbitrary * caller-supplied data. */ static void each_writable_segment(td, func, closure) struct thread *td; segment_callback func; void *closure; { struct proc *p = td->td_proc; vm_map_t map = &p->p_vmspace->vm_map; vm_map_entry_t entry; vm_object_t backing_object, object; boolean_t ignore_entry; vm_map_lock_read(map); for (entry = map->header.next; entry != &map->header; entry = entry->next) { /* * Don't dump inaccessible mappings, deal with legacy * coredump mode. * * Note that read-only segments related to the elf binary * are marked MAP_ENTRY_NOCOREDUMP now so we no longer * need to arbitrarily ignore such segments. */ if (elf_legacy_coredump) { if ((entry->protection & VM_PROT_RW) != VM_PROT_RW) continue; } else { if ((entry->protection & VM_PROT_ALL) == 0) continue; } /* * Dont include memory segment in the coredump if * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in * madvise(2). Do not dump submaps (i.e. parts of the * kernel map). */ if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP)) continue; if ((object = entry->object.vm_object) == NULL) continue; /* Ignore memory-mapped devices and such things. */ VM_OBJECT_RLOCK(object); while ((backing_object = object->backing_object) != NULL) { VM_OBJECT_RLOCK(backing_object); VM_OBJECT_RUNLOCK(object); object = backing_object; } ignore_entry = object->type != OBJT_DEFAULT && object->type != OBJT_SWAP && object->type != OBJT_VNODE && object->type != OBJT_PHYS; VM_OBJECT_RUNLOCK(object); if (ignore_entry) continue; (*func)(entry, closure); } vm_map_unlock_read(map); } /* * Write the core file header to the file, including padding up to * the page boundary. 
*/ static int __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr, size_t hdrsize, struct note_info_list *notelst, size_t notesz) { struct note_info *ninfo; struct sbuf *sb; int error; /* Fill in the header. */ bzero(hdr, hdrsize); __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz); sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_core_output, p); sbuf_start_section(sb, NULL); sbuf_bcat(sb, hdr, hdrsize); TAILQ_FOREACH(ninfo, notelst, link) __elfN(putnote)(ninfo, sb); /* Align up to a page boundary for the program segments. */ sbuf_end_section(sb, -1, PAGE_SIZE, 0); error = sbuf_finish(sb); sbuf_delete(sb); return (error); } static void __elfN(prepare_notes)(struct thread *td, struct note_info_list *list, size_t *sizep) { struct proc *p; struct thread *thr; size_t size; p = td->td_proc; size = 0; size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p); /* * To have the debugger select the right thread (LWP) as the initial * thread, we dump the state of the thread passed to us in td first. * This is the thread that causes the core dump and is thus likely to * be the right thread one wants to have selected in the debugger. */ thr = td; while (thr != NULL) { size += register_note(list, NT_PRSTATUS, __elfN(note_prstatus), thr); size += register_note(list, NT_FPREGSET, __elfN(note_fpregset), thr); size += register_note(list, NT_THRMISC, __elfN(note_thrmisc), thr); size += register_note(list, -1, __elfN(note_threadmd), thr); thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) : TAILQ_NEXT(thr, td_plist); if (thr == td) thr = TAILQ_NEXT(thr, td_plist); } size += register_note(list, NT_PROCSTAT_PROC, __elfN(note_procstat_proc), p); size += register_note(list, NT_PROCSTAT_FILES, note_procstat_files, p); size += register_note(list, NT_PROCSTAT_VMMAP, note_procstat_vmmap, p); size += register_note(list, NT_PROCSTAT_GROUPS, note_procstat_groups, p); size += register_note(list, NT_PROCSTAT_UMASK, note_procstat_umask, p); size += register_note(list, NT_PROCSTAT_RLIMIT, note_procstat_rlimit, p); size += register_note(list, NT_PROCSTAT_OSREL, note_procstat_osrel, p); size += register_note(list, NT_PROCSTAT_PSSTRINGS, __elfN(note_procstat_psstrings), p); size += register_note(list, NT_PROCSTAT_AUXV, __elfN(note_procstat_auxv), p); *sizep = size; } static void __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs, size_t notesz) { Elf_Ehdr *ehdr; Elf_Phdr *phdr; struct phdr_closure phc; ehdr = (Elf_Ehdr *)hdr; phdr = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)); ehdr->e_ident[EI_MAG0] = ELFMAG0; ehdr->e_ident[EI_MAG1] = ELFMAG1; ehdr->e_ident[EI_MAG2] = ELFMAG2; ehdr->e_ident[EI_MAG3] = ELFMAG3; ehdr->e_ident[EI_CLASS] = ELF_CLASS; ehdr->e_ident[EI_DATA] = ELF_DATA; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD; ehdr->e_ident[EI_ABIVERSION] = 0; ehdr->e_ident[EI_PAD] = 0; ehdr->e_type = ET_CORE; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 ehdr->e_machine = ELF_ARCH32; #else ehdr->e_machine = ELF_ARCH; #endif ehdr->e_version = EV_CURRENT; ehdr->e_entry = 0; ehdr->e_phoff = sizeof(Elf_Ehdr); ehdr->e_flags = 0; ehdr->e_ehsize = sizeof(Elf_Ehdr); ehdr->e_phentsize = sizeof(Elf_Phdr); ehdr->e_phnum = numsegs + 1; ehdr->e_shentsize = sizeof(Elf_Shdr); ehdr->e_shnum = 0; ehdr->e_shstrndx = SHN_UNDEF; /* * Fill in the program header entries. */ /* The note segment.
*/ phdr->p_type = PT_NOTE; phdr->p_offset = hdrsize; phdr->p_vaddr = 0; phdr->p_paddr = 0; phdr->p_filesz = notesz; phdr->p_memsz = 0; phdr->p_flags = PF_R; phdr->p_align = ELF_NOTE_ROUNDSIZE; phdr++; /* All the writable segments from the program. */ phc.phdr = phdr; phc.offset = round_page(hdrsize + notesz); each_writable_segment(td, cb_put_phdr, &phc); } static size_t register_note(struct note_info_list *list, int type, outfunc_t out, void *arg) { struct note_info *ninfo; size_t size, notesize; size = 0; out(arg, NULL, &size); ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK); ninfo->type = type; ninfo->outfunc = out; ninfo->outarg = arg; ninfo->outsize = size; TAILQ_INSERT_TAIL(list, ninfo, link); if (type == -1) return (size); notesize = sizeof(Elf_Note) + /* note header */ roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) + /* note name */ roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ return (notesize); } static size_t append_note_data(const void *src, void *dst, size_t len) { size_t padded_len; padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE); if (dst != NULL) { bcopy(src, dst, len); bzero((char *)dst + len, padded_len - len); } return (padded_len); } size_t __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp) { Elf_Note *note; char *buf; size_t notesize; buf = dst; if (buf != NULL) { note = (Elf_Note *)buf; note->n_namesz = sizeof(FREEBSD_ABI_VENDOR); note->n_descsz = size; note->n_type = type; buf += sizeof(*note); buf += append_note_data(FREEBSD_ABI_VENDOR, buf, sizeof(FREEBSD_ABI_VENDOR)); append_note_data(src, buf, size); if (descp != NULL) *descp = buf; } notesize = sizeof(Elf_Note) + /* note header */ roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) + /* note name */ roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */ return (notesize); } static void __elfN(putnote)(struct note_info *ninfo, struct sbuf *sb) { Elf_Note note; ssize_t old_len, sect_len; size_t new_len, descsz, i; if (ninfo->type == -1) { ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); return; } note.n_namesz = sizeof(FREEBSD_ABI_VENDOR); note.n_descsz = ninfo->outsize; note.n_type = ninfo->type; sbuf_bcat(sb, ¬e, sizeof(note)); sbuf_start_section(sb, &old_len); sbuf_bcat(sb, FREEBSD_ABI_VENDOR, sizeof(FREEBSD_ABI_VENDOR)); sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); if (note.n_descsz == 0) return; sbuf_start_section(sb, &old_len); ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize); sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0); if (sect_len < 0) return; new_len = (size_t)sect_len; descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE); if (new_len < descsz) { /* * It is expected that individual note emitters will correctly * predict their expected output size and fill up to that size * themselves, padding in a format-specific way if needed. * However, in case they don't, just do it here with zeros. */ for (i = 0; i < descsz - new_len; i++) sbuf_putc(sb, 0); } else if (new_len > descsz) { /* * We can't always truncate sb -- we may have drained some * of it already. */ KASSERT(new_len == descsz, ("%s: Note type %u changed as we " "read it (%zu > %zu). Since it is longer than " "expected, this coredump's notes are corrupt. THIS " "IS A BUG in the note_procstat routine for type %u.\n", __func__, (unsigned)note.n_type, new_len, descsz, (unsigned)note.n_type)); } } /* * Miscellaneous note out functions. 
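register_note() and populate_note() above both compute the on-disk note size as the fixed header plus the name and descriptor, each padded to ELF_NOTE_ROUNDSIZE (4). A small standalone sketch of that computation; demo_note mirrors the Elf_Note layout and the descriptor sizes are examples.

#include <stdint.h>
#include <stdio.h>

#define ROUNDSIZE	4u
#define ROUNDUP2(x)	(((x) + (ROUNDSIZE - 1)) & ~(size_t)(ROUNDSIZE - 1))

struct demo_note {		/* layout of Elf_Note */
	uint32_t n_namesz;
	uint32_t n_descsz;
	uint32_t n_type;
};

static size_t
note_size(size_t namesz, size_t descsz)
{
	return (sizeof(struct demo_note) +	/* 12-byte header */
	    ROUNDUP2(namesz) +			/* padded name */
	    ROUNDUP2(descsz));			/* padded descriptor */
}

int
main(void)
{
	size_t vendor = sizeof("FreeBSD");	/* 8, includes the NUL */

	printf("%zu\n", note_size(vendor, 4));	/* 12 + 8 + 4  = 24 */
	printf("%zu\n", note_size(vendor, 13));	/* 12 + 8 + 16 = 36 */
	return (0);
}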
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 #include typedef struct prstatus32 elf_prstatus_t; typedef struct prpsinfo32 elf_prpsinfo_t; typedef struct fpreg32 elf_prfpregset_t; typedef struct fpreg32 elf_fpregset_t; typedef struct reg32 elf_gregset_t; typedef struct thrmisc32 elf_thrmisc_t; #define ELF_KERN_PROC_MASK KERN_PROC_MASK32 typedef struct kinfo_proc32 elf_kinfo_proc_t; typedef uint32_t elf_ps_strings_t; #else typedef prstatus_t elf_prstatus_t; typedef prpsinfo_t elf_prpsinfo_t; typedef prfpregset_t elf_prfpregset_t; typedef prfpregset_t elf_fpregset_t; typedef gregset_t elf_gregset_t; typedef thrmisc_t elf_thrmisc_t; #define ELF_KERN_PROC_MASK 0 typedef struct kinfo_proc elf_kinfo_proc_t; typedef vm_offset_t elf_ps_strings_t; #endif static void __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; elf_prpsinfo_t *psinfo; p = (struct proc *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*psinfo), ("invalid size")); psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK); psinfo->pr_version = PRPSINFO_VERSION; psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t); strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname)); /* * XXX - We don't fill in the command line arguments properly * yet. */ strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs)); sbuf_bcat(sb, psinfo, sizeof(*psinfo)); free(psinfo, M_TEMP); } *sizep = sizeof(*psinfo); } static void __elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_prstatus_t *status; td = (struct thread *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*status), ("invalid size")); status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK); status->pr_version = PRSTATUS_VERSION; status->pr_statussz = sizeof(elf_prstatus_t); status->pr_gregsetsz = sizeof(elf_gregset_t); status->pr_fpregsetsz = sizeof(elf_fpregset_t); status->pr_osreldate = osreldate; status->pr_cursig = td->td_proc->p_sig; status->pr_pid = td->td_tid; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_regs32(td, &status->pr_reg); #else fill_regs(td, &status->pr_reg); #endif sbuf_bcat(sb, status, sizeof(*status)); free(status, M_TEMP); } *sizep = sizeof(*status); } static void __elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_prfpregset_t *fpregset; td = (struct thread *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*fpregset), ("invalid size")); fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_fpregs32(td, fpregset); #else fill_fpregs(td, fpregset); #endif sbuf_bcat(sb, fpregset, sizeof(*fpregset)); free(fpregset, M_TEMP); } *sizep = sizeof(*fpregset); } static void __elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; elf_thrmisc_t thrmisc; td = (struct thread *)arg; if (sb != NULL) { KASSERT(*sizep == sizeof(thrmisc), ("invalid size")); bzero(&thrmisc._pad, sizeof(thrmisc._pad)); strcpy(thrmisc.pr_tname, td->td_name); sbuf_bcat(sb, &thrmisc, sizeof(thrmisc)); } *sizep = sizeof(thrmisc); } /* * Allow for MD specific notes, as well as any MD * specific preparations for writing MI notes.
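 *
 * __elfN(dump_thread)() is run twice per thread: once with a NULL
 * buffer during the sizing pass, so it only reports how much space it
 * needs, and once with a buffer of exactly that size when the notes
 * are emitted; note_threadmd() below asserts that the two passes agree.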
*/ static void __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; void *buf; size_t size; td = (struct thread *)arg; size = *sizep; if (size != 0 && sb != NULL) buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK); else buf = NULL; size = 0; __elfN(dump_thread)(td, buf, &size); KASSERT(sb == NULL || *sizep == size, ("invalid size")); if (size != 0 && sb != NULL) sbuf_bcat(sb, buf, size); free(buf, M_TEMP); *sizep = size; } #ifdef KINFO_PROC_SIZE CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); #endif static void __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + p->p_numthreads * sizeof(elf_kinfo_proc_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(elf_kinfo_proc_t); sbuf_bcat(sb, &structsize, sizeof(structsize)); sx_slock(&proctree_lock); PROC_LOCK(p); kern_proc_out(p, sb, ELF_KERN_PROC_MASK); sx_sunlock(&proctree_lock); } *sizep = size; } #ifdef KINFO_FILE_SIZE CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); #endif static void note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size, sect_sz, i; ssize_t start_len, sect_len; int structsize, filedesc_flags; if (coredump_pack_fileinfo) filedesc_flags = KERN_FILEDESC_PACK_KINFO; else filedesc_flags = 0; p = (struct proc *)arg; structsize = sizeof(struct kinfo_file); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_count, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, -1, filedesc_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { sbuf_start_section(sb, &start_len); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize), filedesc_flags); sect_len = sbuf_end_section(sb, start_len, 0, 0); if (sect_len < 0) return; sect_sz = sect_len; KASSERT(sect_sz <= *sizep, ("kern_proc_filedesc_out did not respect maxlen; " "requested %zu, got %zu", *sizep - sizeof(structsize), sect_sz - sizeof(structsize))); for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++) sbuf_putc(sb, 0); } } #ifdef KINFO_VMENTRY_SIZE CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); #endif static void note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize, vmmap_flags; if (coredump_pack_vmmapinfo) vmmap_flags = KERN_VMMAP_PACK_KINFO; else vmmap_flags = 0; p = (struct proc *)arg; structsize = sizeof(struct kinfo_vmentry); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_count, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_vmmap_out(p, sb, -1, vmmap_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize), vmmap_flags); } } static void note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(gid_t); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups * sizeof(gid_t)); } *sizep = size; } static void 
note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(p->p_fd->fd_cmask); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_fd->fd_cmask); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_fd->fd_cmask, sizeof(p->p_fd->fd_cmask)); } *sizep = size; } static void note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; struct rlimit rlim[RLIM_NLIMITS]; size_t size; int structsize, i; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(rlim); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(rlim); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); for (i = 0; i < RLIM_NLIMITS; i++) lim_rlimit_proc(p, i, &rlim[i]); PROC_UNLOCK(p); sbuf_bcat(sb, rlim, sizeof(rlim)); } *sizep = size; } static void note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(p->p_osrel); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_osrel); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel)); } *sizep = size; } static void __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; elf_ps_strings_t ps_strings; size_t size; int structsize; p = (struct proc *)arg; size = sizeof(structsize) + sizeof(ps_strings); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(ps_strings); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 ps_strings = PTROUT(p->p_sysent->sv_psstrings); #else ps_strings = p->p_sysent->sv_psstrings; #endif sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &ps_strings, sizeof(ps_strings)); } *sizep = size; } static void __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = (struct proc *)arg; if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_count, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { structsize = sizeof(Elf_Auxinfo); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); } } static boolean_t __elfN(parse_notes)(struct image_params *imgp, Elf_Brandnote *checknote, int32_t *osrel, const Elf_Phdr *pnote) { const Elf_Note *note, *note0, *note_end; const char *note_name; char *buf; int i, error; boolean_t res; /* We need some limit, might as well use PAGE_SIZE. 
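 * The note segment either sits entirely inside the first page of the
 * image, which was already read in for header parsing and can be
 * scanned in place, or it is read separately into a temporary buffer
 * with vn_rdwr().  Capping p_filesz at PAGE_SIZE bounds both the
 * allocation and the scan when the binary is malformed or hostile.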
*/ if (pnote == NULL || pnote->p_filesz > PAGE_SIZE) return (FALSE); ASSERT_VOP_LOCKED(imgp->vp, "parse_notes"); if (pnote->p_offset > PAGE_SIZE || pnote->p_filesz > PAGE_SIZE - pnote->p_offset) { VOP_UNLOCK(imgp->vp, 0); buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK); vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY); error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz, pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED, curthread->td_ucred, NOCRED, NULL, curthread); if (error != 0) { uprintf("i/o error PT_NOTE\n"); res = FALSE; goto ret; } note = note0 = (const Elf_Note *)buf; note_end = (const Elf_Note *)(buf + pnote->p_filesz); } else { note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset); note_end = (const Elf_Note *)(imgp->image_header + pnote->p_offset + pnote->p_filesz); buf = NULL; } for (i = 0; i < 100 && note >= note0 && note < note_end; i++) { if (!aligned(note, Elf32_Addr) || (const char *)note_end - (const char *)note < sizeof(Elf_Note)) { res = FALSE; goto ret; } if (note->n_namesz != checknote->hdr.n_namesz || note->n_descsz != checknote->hdr.n_descsz || note->n_type != checknote->hdr.n_type) goto nextnote; note_name = (const char *)(note + 1); if (note_name + checknote->hdr.n_namesz >= (const char *)note_end || strncmp(checknote->vendor, note_name, checknote->hdr.n_namesz) != 0) goto nextnote; /* * Fetch the osreldate for the binary * from the ELF OSABI-note if necessary. */ if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 && checknote->trans_osrel != NULL) { res = checknote->trans_osrel(note, osrel); goto ret; } res = TRUE; goto ret; nextnote: note = (const Elf_Note *)((const char *)(note + 1) + roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) + roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE)); } res = FALSE; ret: free(buf, M_TEMP); return (res); } /* * Try to find the appropriate ABI-note section for checknote, * fetch the osreldate for the binary from the ELF OSABI-note. Only the * first page of the image is searched, the same as for headers. */ static boolean_t __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote, int32_t *osrel) { const Elf_Phdr *phdr; const Elf_Ehdr *hdr; int i; hdr = (const Elf_Ehdr *)imgp->image_header; phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp, checknote, osrel, &phdr[i])) return (TRUE); } return (FALSE); } /* * Tell kern_execve.c about it, with a little help from the linker.
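 * (EXEC_SET() arranges, via a module event handler, for exec_register()
 * to add this execsw entry to the set of image activators that the
 * exec path tries in turn when a binary is run.)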
*/ static struct execsw __elfN(execsw) = { __CONCAT(exec_, __elfN(imgact)), __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) }; EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw)); static vm_prot_t __elfN(trans_prot)(Elf_Word flags) { vm_prot_t prot; prot = 0; if (flags & PF_X) prot |= VM_PROT_EXECUTE; if (flags & PF_W) prot |= VM_PROT_WRITE; if (flags & PF_R) prot |= VM_PROT_READ; #if __ELF_WORD_SIZE == 32 #if defined(__amd64__) if (i386_read_exec && (flags & PF_R)) prot |= VM_PROT_EXECUTE; #endif #endif return (prot); } static Elf_Word __elfN(untrans_prot)(vm_prot_t prot) { Elf_Word flags; flags = 0; if (prot & VM_PROT_EXECUTE) flags |= PF_X; if (prot & VM_PROT_READ) flags |= PF_R; if (prot & VM_PROT_WRITE) flags |= PF_W; return (flags); } Index: projects/powernv/mips/conf/ALFA_HORNET_UB =================================================================== --- projects/powernv/mips/conf/ALFA_HORNET_UB (revision 291050) +++ projects/powernv/mips/conf/ALFA_HORNET_UB (revision 291051) @@ -1,57 +1,59 @@ # # Alfa Networks Hornet UB - an AR933x based SoC wifi device. # # http://www.alfa.com.tw/products_show.php?pc=99&ps=50 # # This is for the 64MB RAM/16MB flash part. They also # make various other versions with different RAM/flash # configurations. # # * AR9330 SoC # * 64MB RAM # * 16MB flash # * Integrated 1x1 2GHz wifi and 10/100 bridge # # $FreeBSD$ # +#NO_UNIVERSE + # Include the default AR933x parameters include "AR933X_BASE" ident ALFA_HORNET_UB # Override hints with board values hints "ALFA_HORNET_UB.hints" # Board memory - 64MB options AR71XX_REALMEM=(64*1024*1024) # i2c GPIO bus #device gpioiic #device iicbb #device iicbus #device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB #options MSDOSFS # Enable the uboot environment stuff rather than the # redboot stuff. options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" Index: projects/powernv/mips/conf/AP121 =================================================================== --- projects/powernv/mips/conf/AP121 (revision 291050) +++ projects/powernv/mips/conf/AP121 (revision 291051) @@ -1,51 +1,53 @@ # # AP121 - the AP121 reference board from Qualcomm Atheros includes: # # * AR9330 SoC # * 16MB RAM # * 4MB flash # * Integrated 1x1 2GHz wifi and 10/100 bridge # # $FreeBSD$ # +#NO_UNIVERSE + # Include the default AR933x parameters include "AR933X_BASE" ident AP121 # Override hints with board values hints "AP121.hints" # Force the board memory - the base AP121 only has 16MB RAM options AR71XX_REALMEM=(16*1024*1024) # i2c GPIO bus #device gpioiic #device iicbb #device iicbus #device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB #options MSDOSFS # Enable the uboot environment stuff rather than the # redboot stuff. options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # Boot off of the rootfs, as defined in the geom_map setup.
options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" Index: projects/powernv/mips/conf/AP135 =================================================================== --- projects/powernv/mips/conf/AP135 (revision 291050) +++ projects/powernv/mips/conf/AP135 (revision 291051) @@ -1,68 +1,70 @@ # # AP135 - the QCA955x SoC reference design # # This contains a QCA9558 MIPS74k SoC with on-board 3x3 2GHz wifi, # 128MiB RAM, an AR8327 5-port gigabit ethernet switch and # a QCA 11ac 5GHz AP NIC. # # The two things not currently supported are the QCA 11ac NIC and # the PCIe host controllers - there are two of them, and the existing # PCIe code here doesn't support that just yet. # # $FreeBSD$ # +#NO_UNIVERSE + # Include the default QCA955x parameters include "QCA955X_BASE" ident AP135 # Override hints with board values hints "AP135.hints" # Force the board memory - the base AP135 has 128MB RAM options AR71XX_REALMEM=(128*1024*1024) # i2c GPIO bus #device gpioiic #device iicbb #device iicbus #device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB #options MSDOSFS # Enable the uboot environment stuff rather than the # redboot stuff. options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # yes, this board has a PCIe connected atheros device # add ath_pci so it can at least attach things when there's a # ath(4) in there, rather than the 11ac chip we don't support. device pci device qca955x_pci device ath_pci options AR71XX_ATH_EEPROM device firmware # Used by the above options ATH_EEPROM_FIRMWARE # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" # Default to accept options IPFIREWALL_DEFAULT_TO_ACCEPT Index: projects/powernv/mips/conf/AP143 =================================================================== --- projects/powernv/mips/conf/AP143 (nonexistent) +++ projects/powernv/mips/conf/AP143 (revision 291051) @@ -0,0 +1,53 @@ +# +# AP143 - the AP143 reference board from Qualcomm Atheros includes: +# +# * AR9330 SoC +# * 32MB RAM +# * 4MB flash +# * Integrated 1x1 2GHz wifi and 10/100 bridge +# +# $FreeBSD$ +# + +#NO_UNIVERSE + +# Include the default QCA953x parameters +include "QCA953X_BASE" + +ident AP143 + +# Override hints with board values +hints "AP143.hints" + +# Force the board memory - the AP143 has 32MB RAM +options AR71XX_REALMEM=(32*1024*1024) + +# i2c GPIO bus +#device gpioiic +#device iicbb +#device iicbus +#device iic + +# Options required for miiproxy and mdiobus +options ARGE_MDIO # Export an MDIO bus separate from arge +device miiproxy # MDIO bus <-> MII PHY rendezvous + +device etherswitch +device arswitch + +# read MSDOS formatted disks - USB +#options MSDOSFS + +# Enable the uboot environment stuff rather than the +# redboot stuff. +options AR71XX_ENV_UBOOT + +# uzip - to boot natively from flash +device geom_uncompress +options GEOM_UNCOMPRESS + +# Used for the static uboot partition map +device geom_map + +# Boot off of the rootfs, as defined in the geom_map setup.
+options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" Property changes on: projects/powernv/mips/conf/AP143 ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: projects/powernv/mips/conf/AP143.hints =================================================================== --- projects/powernv/mips/conf/AP143.hints (nonexistent) +++ projects/powernv/mips/conf/AP143.hints (revision 291051) @@ -0,0 +1,105 @@ +# +# This file adds to the values in QCA953X_BASE.hints. +# +# $FreeBSD$ + +# Embedded Atheros Switch +hint.arswitch.0.at="mdio1" +hint.arswitch.0.is_7240=0 +hint.arswitch.0.is_9340=1 +hint.arswitch.0.numphys=4 +hint.arswitch.0.phy4cpu=1 # phy 4 is a "CPU" separate PHY +hint.arswitch.0.is_rgmii=0 +hint.arswitch.0.is_gmii=1 # arge1 <-> switch PHY is GMII + +# arge0 - MII, autoneg, phy(4) +# MAC for arge0 is the first 6 bytes of the ART +hint.arge.0.eeprommac=0x1fff0000 +hint.arge.0.phymask=0x10 # PHY4 +hint.arge.0.mdio=mdioproxy2 # .. off of the switch mdiobus + +# arge1 - GMII, 1000/full +hint.arge.1.eeprommac=0x1fff0006 +hint.arge.1.phymask=0x0 # No directly mapped PHYs +hint.arge.1.media=1000 +hint.arge.1.fduplex=1 + +# The AP121 4MB flash layout: +# +# bootargs=console=ttyS0,115200 root=31:02 rootfstype=squashfs +# init=/sbin/init mtdparts=ar7240-nor0:256k(u-boot),64k(u-boot-env), +# 2752k(rootfs),896k(uImage),64k(NVRAM),64k(ART) +# +# So: +# 256k: uboot +# 64: uboot-env +# 2752k: rootfs +# 896k: kernel +# 64k: config +# 64k: ART + +hint.map.0.at="flash/spi0" +hint.map.0.start=0x00000000 +hint.map.0.end=0x000040000 +hint.map.0.name="uboot" +hint.map.0.readonly=1 + +hint.map.1.at="flash/spi0" +hint.map.1.start=0x00040000 +hint.map.1.end=0x00050000 +hint.map.1.name="uboot-env" +hint.map.1.readonly=0 + +hint.map.2.at="flash/spi0" +hint.map.2.start=0x00050000 +hint.map.2.end=0x00300000 +hint.map.2.name="rootfs" +hint.map.2.readonly=0 + +hint.map.3.at="flash/spi0" +hint.map.3.start=0x00300000 +hint.map.3.end=0x003e0000 +hint.map.3.name="kernel" +hint.map.3.readonly=0 + +hint.map.4.at="flash/spi0" +hint.map.4.start=0x003e0000 +hint.map.4.end=0x003f0000 +hint.map.4.name="cfg" +hint.map.4.readonly=0 + +# This is radio calibration section. It is (or should be!) unique +# for each board, to take into account thermal and electrical differences +# as well as the regulatory compliance data. +# +hint.map.5.at="flash/spi0" +hint.map.5.start=0x003f0000 +hint.map.5.end=0x00400000 +hint.map.5.name="art" +hint.map.5.readonly=1 + +# GPIO specific configuration block + +# Don't flip on anything that isn't already enabled. +# This includes leaving the SPI CS1/CS2 pins as GPIO pins as they're +# not used here. +hint.gpio.0.function_set=0x00000000 +hint.gpio.0.function_clear=0x00000000 + +# These are the GPIO LEDs and buttons which can be software controlled. 
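+# (The pinmask hint selects which GPIO pins the gpio driver exposes on
+# gpiobus; the candidate masks below are commented out, so no pins are
+# claimed by default.)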
+#hint.gpio.0.pinmask=0x001c02ae +# hint.gpio.0.pinmask=0x00001803 + +# gpio0 - WLAN LED +# gpio1 - USB LED +# gpio11 - Jumpstart button +# gpio12 - Reset button + +# LEDs are configured separately and driven by the LED device +#hint.gpioled.0.at="gpiobus0" +#hint.gpioled.0.name="wlan" +#hint.gpioled.0.pins=0x0001 + +#hint.gpioled.1.at="gpiobus0" +#hint.gpioled.1.name="usb" +#hint.gpioled.1.pins=0x0002 Property changes on: projects/powernv/mips/conf/AP143.hints ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: projects/powernv/mips/conf/AP91 =================================================================== --- projects/powernv/mips/conf/AP91 (revision 291050) +++ projects/powernv/mips/conf/AP91 (revision 291051) @@ -1,64 +1,66 @@ # # Specific board setup for the Atheros AP91 reference board. # # The AP91 has the following hardware: # # + AR7241 CPU SoC # + AR9287 Wifi # + Integrated switch (XXX speed?) # + 4MB flash # + 16MB RAM # + uboot environment # $FreeBSD$ +#NO_UNIVERSE + include "AR724X_BASE" ident "AP91" hints "AP91.hints" options AR71XX_REALMEM=16*1024*1024 options AR71XX_ENV_UBOOT # Limit inlines makeoptions INLINE_LIMIT=768 # We bite the performance overhead for now; the kernel won't # fit if the mutexes are inlined. options MUTEX_NOINLINE options RWLOCK_NOINLINE options SX_NOINLINE # There's no need to enable swapping on this platform. options NO_SWAPPING # For DOS - enable if required # options MSDOSFS # uncompress - to boot read-only lzma natively from flash device geom_uncompress options GEOM_UNCOMPRESS options ROOTDEVNAME=\"ufs:/dev/map/rootfs.uncompress\" # Not enough space for these.. nooptions INVARIANTS nooptions INVARIANT_SUPPORT nooptions WITNESS nooptions WITNESS_SKIPSPIN nooptions DEBUG_REDZONE nooptions DEBUG_MEMGUARD # Used for the static uboot partition map device geom_map # Options needed for the EEPROM based calibration/PCI configuration data. options AR71XX_ATH_EEPROM # Fetch EEPROM/PCI config from flash options ATH_EEPROM_FIRMWARE # Use EEPROM from flash device firmware # Used by the above # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch Index: projects/powernv/mips/conf/AP93 =================================================================== --- projects/powernv/mips/conf/AP93 (revision 291050) +++ projects/powernv/mips/conf/AP93 (revision 291051) @@ -1,44 +1,46 @@ # # Specific board setup for the Atheros AP93 reference board. # # The AP93 has the following hardware: # # + AR7240 CPU SoC # + AR9280 Wifi # + Integrated switch (XXX speed?) # + 16MB flash # + 64MB RAM # + uboot environment # $FreeBSD$ +#NO_UNIVERSE + include "AR724X_BASE" ident "AP93" hints "AP93.hints" options AR71XX_REALMEM=64*1024*1024 options AR71XX_ENV_UBOOT # For DOS - enable if required options MSDOSFS # uncompress - to boot read-only lzma natively from flash device geom_uncompress options GEOM_UNCOMPRESS options ROOTDEVNAME=\"ufs:/dev/map/rootfs.uncompress\" # Used for the static uboot partition map device geom_map # Options needed for the EEPROM based calibration/PCI configuration data.
options AR71XX_ATH_EEPROM # Fetch EEPROM/PCI config from flash options ATH_EEPROM_FIRMWARE # Use EEPROM from flash device firmware # Used by the above # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch Index: projects/powernv/mips/conf/AP94 =================================================================== --- projects/powernv/mips/conf/AP94 (revision 291050) +++ projects/powernv/mips/conf/AP94 (revision 291051) @@ -1,35 +1,37 @@ # # Specific board setup for the Atheros AP94 reference board. # # The AP94 has the following hardware: # # + AR7161 CPU SoC # + AR9223 2.4GHz 11n # + AR9220 5GHz 11n # + AR8216 10/100 switch # + m25p64 based 8MB flash # + 32MB RAM # + uboot environment # $FreeBSD$ +#NO_UNIVERSE + include "AR71XX_BASE" ident "AP94" hints "AP94.hints" # GEOM modules device geom_redboot # to get access to the SPI flash partitions device geom_uzip # compressed in-memory filesystem hackery! options GEOM_UZIP options ROOTDEVNAME=\"ufs:md0.uzip\" options AR71XX_REALMEM=32*1024*1024 options AR71XX_ENV_UBOOT # options MD_ROOT # options MD_ROOT_SIZE="6144" options AR71XX_ATH_EEPROM # Fetch EEPROM/PCI config from flash options ATH_EEPROM_FIRMWARE # Use EEPROM from flash device firmware # Used by the above Index: projects/powernv/mips/conf/AP96 =================================================================== --- projects/powernv/mips/conf/AP96 (revision 291050) +++ projects/powernv/mips/conf/AP96 (revision 291051) @@ -1,45 +1,47 @@ # # Specific board setup for the Atheros AP96 reference board. # # The AP96 has the following hardware: # # + AR7161 CPU SoC # + AR9223 2.4GHz 11n # + AR9220 5GHz 11n # + AR8316 10/100/1000 switch # + m25p64 based 8MB flash # + 64MB RAM # + uboot environment # $FreeBSD$ +#NO_UNIVERSE + include "AR71XX_BASE" ident "AP96" hints "AP96.hints" options AR71XX_REALMEM=64*1024*1024 options AR71XX_ENV_UBOOT # For DOS - enable if required options MSDOSFS # uncompress - to boot read-only lzma natively from flash device geom_uncompress options GEOM_UNCOMPRESS options ROOTDEVNAME=\"ufs:/dev/map/rootfs.uncompress\" # Used for the static uboot partition map device geom_map # Options needed for the EEPROM based calibration/PCI configuration data. options AR71XX_ATH_EEPROM # Fetch EEPROM/PCI config from flash options ATH_EEPROM_FIRMWARE # Use EEPROM from flash device firmware # Used by the above # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch Index: projects/powernv/mips/conf/BERI_DE4_MDROOT =================================================================== --- projects/powernv/mips/conf/BERI_DE4_MDROOT (revision 291050) +++ projects/powernv/mips/conf/BERI_DE4_MDROOT (revision 291051) @@ -1,19 +1,21 @@ # # BERI_DE4_MDROOT -- Kernel for the SRI/Cambridge "BERI" (Bluespec Extensible # RISC # Implementation) FPGA soft core, as configured in its Terasic DE-4 # reference configuration. # # $FreeBSD$ # +#NO_UNIVERSE + include "BERI_DE4_BASE" ident BERI_DE4_MDROOT # # This kernel configuration uses an embedded 25.5MB memory root file system. # Adjust the following path based on local requirements.
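# (MD_ROOT_SIZE below is specified in kilobytes: 26112kB / 1024 = 25.5MB.)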
# options MD_ROOT # MD is a potential root device options MD_ROOT_SIZE=26112 # 25.5MB options ROOTDEVNAME=\"ufs:md0\" Index: projects/powernv/mips/conf/BERI_DE4_SDROOT =================================================================== --- projects/powernv/mips/conf/BERI_DE4_SDROOT (revision 291050) +++ projects/powernv/mips/conf/BERI_DE4_SDROOT (revision 291051) @@ -1,16 +1,18 @@ # # BERI_DE4_SDROOT -- Kernel for the SRI/Cambridge "BERI" (Bluespec Extensible # RISC Implementation) FPGA soft core, as configured in its Terasic DE-4 # reference configuration. # # $FreeBSD$ # +#NO_UNIVERSE + include "BERI_DE4_BASE" ident BERI_DE4_SDROOT # # This kernel expects to find its root filesystem on the SD Card. # options ROOTDEVNAME=\"ufs:/dev/altera_sdcard0\" Index: projects/powernv/mips/conf/BERI_SIM_MDROOT =================================================================== --- projects/powernv/mips/conf/BERI_SIM_MDROOT (revision 291050) +++ projects/powernv/mips/conf/BERI_SIM_MDROOT (revision 291051) @@ -1,18 +1,20 @@ # # BERI_SIM_MDROOT -- Kernel for the SRI/Cambridge "BERI" (Bluespec Extensible # RISC Implementation) FPGA soft core, as configured for simulation. # # $FreeBSD$ # +#NO_UNIVERSE + include "BERI_SIM_BASE" ident BERI_SIM_MDROOT # # This kernel configuration uses an embedded memory root file system. # Adjust the following path based on local requirements. # options MD_ROOT # MD is a potential root device options MD_ROOT_SIZE=26112 # 25.5MB options ROOTDEVNAME=\"ufs:md0\" Index: projects/powernv/mips/conf/BERI_SIM_SDROOT =================================================================== --- projects/powernv/mips/conf/BERI_SIM_SDROOT (revision 291050) +++ projects/powernv/mips/conf/BERI_SIM_SDROOT (revision 291051) @@ -1,15 +1,17 @@ # # BERI_SIM_SDROOT -- Kernel for the SRI/Cambridge "BERI" (Bluespec Extensible # RISC Implementation) FPGA soft core, as configured for simulation. # # $FreeBSD$ # +#NO_UNIVERSE + include "BERI_SIM_BASE" ident BERI_SIM_SDROOT # # This kernel expects to find its root filesystem on the SD Card. # options ROOTDEVNAME=\"ufs:/dev/altera_sdcard0\" Index: projects/powernv/mips/conf/BERI_SIM_VIRTIO =================================================================== --- projects/powernv/mips/conf/BERI_SIM_VIRTIO (revision 291050) +++ projects/powernv/mips/conf/BERI_SIM_VIRTIO (revision 291051) @@ -1,20 +1,22 @@ # # BERI_SIM_VIRTIO -- Kernel for the SRI/Cambridge "BERI" (Bluespec Extensible # RISC Implementation) FPGA soft core, as configured for simulation. # # $FreeBSD$ # +#NO_UNIVERSE + include "BERI_SIM_BASE" ident BERI_SIM_VIRTIO device virtio device virtio_blk device virtio_mmio device altera_pio # # This kernel expects to find its root filesystem on a VirtIO block device. # options ROOTDEVNAME=\"ufs:/dev/vtbd0\" Index: projects/powernv/mips/conf/CARAMBOLA2 =================================================================== --- projects/powernv/mips/conf/CARAMBOLA2 (revision 291050) +++ projects/powernv/mips/conf/CARAMBOLA2 (revision 291051) @@ -1,53 +1,55 @@ # # Carambola 2 - an AR933x based SoC wifi device.
# # http://shop.8devices.com/wifi4things/carambola2 # # * AR9330 SoC # * 64MB RAM # * 16MB flash # * Integrated 1x1 2GHz wifi and 10/100 bridge # # $FreeBSD$ # +#NO_UNIVERSE + # Include the default AR933x parameters include "AR933X_BASE" ident CARAMBOLA2 # Override hints with board values hints "CARAMBOLA2.hints" # Board memory - 64MB options AR71XX_REALMEM=(64*1024*1024) # i2c GPIO bus #device gpioiic #device iicbb #device iicbus #device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB #options MSDOSFS # Enable the uboot environment stuff rather than the # redboot stuff. options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" Index: projects/powernv/mips/conf/DB120 =================================================================== --- projects/powernv/mips/conf/DB120 (revision 291050) +++ projects/powernv/mips/conf/DB120 (revision 291051) @@ -1,52 +1,54 @@ # # DB120 - the AR9344 SoC reference design # # $FreeBSD$ # # Include the default AR934x parameters include "AR934X_BASE" +#NO_UNIVERSE + ident DB120 # Override hints with board values hints "DB120.hints" # Force the board memory - the base DB120 has 128MB RAM options AR71XX_REALMEM=(128*1024*1024) # i2c GPIO bus #device gpioiic #device iicbb #device iicbus #device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB #options MSDOSFS # Enable the uboot environment stuff rather than the # redboot stuff. options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # yes, this board has a PCI connected atheros device device ath_pci options AR71XX_ATH_EEPROM device firmware # Used by the above options ATH_EEPROM_FIRMWARE # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" Index: projects/powernv/mips/conf/DIR-655A1 =================================================================== --- projects/powernv/mips/conf/DIR-655A1 (revision 291050) +++ projects/powernv/mips/conf/DIR-655A1 (revision 291051) @@ -1,56 +1,58 @@ # # DIR-655A1 - 3x3 2GHz D-Link AP # # This contains a QCA9558 MIPS74k SoC with on-board 3x3 2GHz wifi, # 128MiB RAM, an AR8327 5-port gigabit ethernet switch. # # $FreeBSD$ # +#NO_UNIVERSE + # Include the default QCA955x parameters include "QCA955X_BASE" ident DIR-655A1 # Override hints with board values hints "DIR-655A1.hints" # Force the board memory - the base AP135 has 128MB RAM options AR71XX_REALMEM=(128*1024*1024) # i2c GPIO bus #device gpioiic #device iicbb #device iicbus #device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB #options MSDOSFS # Enable the uboot environment stuff rather than the # redboot stuff.
options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map options AR71XX_ATH_EEPROM device firmware # Used by the above options ATH_EEPROM_FIRMWARE # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" # Default to accept options IPFIREWALL_DEFAULT_TO_ACCEPT Index: projects/powernv/mips/conf/DIR-825B1 =================================================================== --- projects/powernv/mips/conf/DIR-825B1 (revision 291050) +++ projects/powernv/mips/conf/DIR-825B1 (revision 291051) @@ -1,71 +1,73 @@ # # Specific board setup for the D-Link DIR-825B1 router. # # The DIR-825B1 has the following hardware: # # + AR7161 CPU SoC # + AR9223 2.4GHz 11n # + AR9220 5GHz 11n # + RealTek RTL8366S Gigabit switch # + m25p64 based 8MB flash # + 64MB RAM # + uboot environment # $FreeBSD$ +#NO_UNIVERSE + include "AR71XX_BASE" ident "DIR-825B1" hints "DIR-825B1.hints" # Since the kernel image must fit inside 1024KiB, we have to build almost # everything as modules. # nodevice random nodevice gpio nodevice gpioled nodevice gif nodevice gre nodevice if_bridge nodevice usb nodevice ehci nodevice wlan nodevice wlan_xauth nodevice wlan_acl nodevice wlan_wep nodevice wlan_tkip nodevice wlan_ccmp nodevice wlan_rssadapt nodevice wlan_amrr nodevice ath nodevice ath_pci nodevice ath_hal nodevice umass nodevice ath_rate_sample nooptions INET6 # Don't include the SCSI/CAM strings in the default build options SCSI_NO_SENSE_STRINGS options SCSI_NO_OP_STRINGS # .. And no sysctl strings options NO_SYSCTL_DESCR # GEOM modules device geom_map # to get access to the SPI flash partitions device geom_uncompress # compressed in-memory filesystem hackery! options GEOM_UNCOMPRESS options GEOM_PART_GPT options ROOTDEVNAME=\"ufs:/dev/map/rootfs.uncompress\" options AR71XX_REALMEM=64*1024*1024 options AR71XX_ENV_UBOOT # options MSDOSFS # Read MSDOS filesystems; useful for USB/CF # options MD_ROOT # options MD_ROOT_SIZE="6144" options AR71XX_ATH_EEPROM # Fetch EEPROM/PCI config from flash options ATH_EEPROM_FIRMWARE # Use EEPROM from flash device firmware # Used by the above Index: projects/powernv/mips/conf/DIR-825C1 =================================================================== --- projects/powernv/mips/conf/DIR-825C1 (revision 291050) +++ projects/powernv/mips/conf/DIR-825C1 (revision 291051) @@ -1,66 +1,68 @@ # $FreeBSD$ # # Specific board setup for the D-Link DIR-825C1 router. # # The DIR-825C1 has the following hardware: # # + AR9344 CPU SoC 74k MIPS # + ARxxx 2.4GHz 11n # + ARXXX 5GHz 11n # + AR8327 Gigabit switch # + m25p80 based 16MB flash # + 128MB RAM # + uboot environment +#NO_UNIVERSE + # Include the default AR934x parameters include "AR934X_BASE" ident DIR825C1 # Override hints with board values hints "DIR-825C1.hints" # Force the board memory - the base DB120 has 128MB RAM options AR71XX_REALMEM=(128*1024*1024) # i2c GPIO bus device gpioiic device iicbb device iicbus device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB options MSDOSFS # Enable the uboot environment stuff rather than the # redboot stuff.
options AR71XX_ENV_UBOOT # Used for the static uboot partition map device geom_map # uzip - to boot natively from flash options GEOM_UNCOMPRESS options GEOM_PART_GPT # yes, this board has a PCI connected atheros device device ath_pci options AR71XX_ATH_EEPROM device firmware # Used by the above options ATH_EEPROM_FIRMWARE # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" # In order to netboot, you have to build the mfsroot into the kernel # 19443712 or 19M is the biggest rootfs via netboot this thing supports #options MD_ROOT # md device usable as a potential root device #options MD_ROOT_SIZE=19444 #makeoptions MFS_IMAGE=/tftpboot/mfsroot-dir825c1.img.ulzma #options ROOTDEVNAME=\"ufs:md0.uncompress\" Index: projects/powernv/mips/conf/ENH200 =================================================================== --- projects/powernv/mips/conf/ENH200 (revision 291050) +++ projects/powernv/mips/conf/ENH200 (revision 291051) @@ -1,44 +1,46 @@ # # Specific board setup for the Engenius ENH-200 802.11bgn mesh node. # # The Engenius ENH-200 has the following hardware: # # + AR7240 CPU SoC # + AR9285 Wifi # + Integrated switch # + 8MB flash # + 32MB RAM # + uboot environment # $FreeBSD$ +#NO_UNIVERSE + include "AR724X_BASE" ident "ENH200" hints "ENH200.hints" options AR71XX_REALMEM=32*1024*1024 options AR71XX_ENV_UBOOT # For DOS - enable if required options MSDOSFS # uncompress - to boot read-only lzma natively from flash device geom_uncompress options GEOM_UNCOMPRESS options ROOTDEVNAME=\"ufs:/dev/map/rootfs.uncompress\" # Used for the static uboot partition map device geom_map # Options needed for the EEPROM based calibration/PCI configuration data. options AR71XX_ATH_EEPROM # Fetch EEPROM/PCI config from flash options ATH_EEPROM_FIRMWARE # Use EEPROM from flash device firmware # Used by the above # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch Index: projects/powernv/mips/conf/ONIONOMEGA =================================================================== --- projects/powernv/mips/conf/ONIONOMEGA (nonexistent) +++ projects/powernv/mips/conf/ONIONOMEGA (revision 291051) @@ -0,0 +1,55 @@ +# +# Onion Omega - an AR9330 based SoC +# +# https://onion.io/omega/ +# +# * AR9330 SoC +# * 64MB RAM +# * 16MB flash +# * Integrated 1x1 2GHz wifi and optional 10/100 ethernet +# +# $FreeBSD$ +# + +#NO_UNIVERSE + +# Include the default AR933x parameters +include "AR933X_BASE" + +ident ONIONOMEGA + +# Override hints with board values +hints "ONIONOMEGA.hints" + +# Board memory - 64MB +options AR71XX_REALMEM=(64*1024*1024) + +# i2c GPIO bus +#device gpioiic +#device iicbb +#device iicbus +#device iic + +# Options required for miiproxy and mdiobus +options ARGE_MDIO # Export an MDIO bus separate from arge +device miiproxy # MDIO bus <-> MII PHY rendezvous + +device etherswitch +device arswitch + +# read MSDOS formatted disks - USB +#options MSDOSFS + +# Enable the uboot environment stuff rather than the +# redboot stuff. +options AR71XX_ENV_UBOOT + +# uzip - to boot natively from flash +device geom_uncompress +options GEOM_UNCOMPRESS + +# Used for the static uboot partition map +device geom_map + +# Boot off of the rootfs, as defined in the geom_map setup.
+options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" Property changes on: projects/powernv/mips/conf/ONIONOMEGA ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/powernv/mips/conf/ONIONOMEGA.hints =================================================================== --- projects/powernv/mips/conf/ONIONOMEGA.hints (nonexistent) +++ projects/powernv/mips/conf/ONIONOMEGA.hints (revision 291051) @@ -0,0 +1,94 @@ +# +# This file adds to the values in AR933X_BASE.hints. +# +# $FreeBSD$ + +# mdiobus on arge1 +hint.argemdio.0.at="nexus0" +hint.argemdio.0.maddr=0x1a000000 +hint.argemdio.0.msize=0x1000 +hint.argemdio.0.order=0 + +# Embedded Atheros Switch +hint.arswitch.0.at="mdio0" + +# XXX this should really say it's an AR933x switch, as there +# are some vlan specific differences here! +hint.arswitch.0.is_7240=1 +hint.arswitch.0.numphys=4 +hint.arswitch.0.phy4cpu=1 # phy 4 is a "CPU" separate PHY +hint.arswitch.0.is_rgmii=0 +hint.arswitch.0.is_gmii=1 # arge1 <-> switch PHY is GMII + +# arge0 - MII, autoneg, phy(4) +hint.arge.0.phymask=0x10 # PHY4 +hint.arge.0.mdio=mdioproxy1 # .. off of the switch mdiobus +hint.arge.0.eeprommac=0x1fff0000 + +# arge1 - GMII, 1000/full +hint.arge.1.phymask=0x0 # No directly mapped PHYs +hint.arge.1.media=1000 +hint.arge.1.fduplex=1 +hint.arge.1.eeprommac=0x1fff0006 + +# 16MB flash layout: +# [ 0.510000] 5 tp-link partitions found on MTD device spi0.0 +# [ 0.510000] Creating 5 MTD partitions on "spi0.0": +# [ 0.520000] 0x000000000000-0x000000020000 : "u-boot" +# [ 0.520000] 0x000000020000-0x000000136468 : "kernel" +# [ 0.530000] 0x000000136468-0x000000ff0000 : "rootfs" +# [ 0.530000] mtd: device 2 (rootfs) set to be root filesystem +# [ 0.540000] 1 squashfs-split partitions found on MTD device rootfs +# [ 0.540000] 0x000000730000-0x000000fe0000 : "rootfs_data" +# [ 0.540000] 0x000000fe0000-0x000000ff0000 : "nvram" +# [ 0.550000] 0x000000ff0000-0x000001000000 : "art" +# [ 0.560000] 0x000000020000-0x000000fe0000 : "firmware" + +# 64KiB uboot +hint.map.0.at="flash/spi0" +hint.map.0.start=0x00000000 +hint.map.0.end=0x00010000 +hint.map.0.name="u-boot" +hint.map.0.readonly=1 + +# 64KiB uboot +hint.map.1.at="flash/spi0" +hint.map.1.start=0x00010000 +hint.map.1.end=0x00020000 +hint.map.1.name="uboot-env" +hint.map.1.readonly=1 + +# kernel +hint.map.2.at="flash/spi0" +hint.map.2.start=0x00020000 +hint.map.2.end="search:0x00020000:0x10000:.!/bin/sh" +hint.map.2.name="kernel" +hint.map.2.readonly=1 + +# rootfs ulzma +hint.map.3.at="flash/spi0" +hint.map.3.start="search:0x00020000:0x10000:.!/bin/sh" +hint.map.3.end=0x00fe0000 +hint.map.3.name="rootfs" +hint.map.3.readonly=1 + +# 64KiB cfg +hint.map.4.at="flash/spi0" +hint.map.4.start=0x00fe0000 +hint.map.4.end=0x00ff0000 +hint.map.4.name="cfg" +hint.map.4.readonly=0 + +# all firmware 16000KiB +hint.map.5.at="flash/spi0" +hint.map.5.start=0x00020000 +hint.map.5.end=0x00ff0000 +hint.map.5.name="firmware" +hint.map.5.readonly=0 + +# 64KiB ART +hint.map.6.at="flash/spi0" +hint.map.6.start=0x00ff0000 +hint.map.6.end=0x01000000 +hint.map.6.name="ART" +hint.map.6.readonly=1 Property changes on: projects/powernv/mips/conf/ONIONOMEGA.hints ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of 
property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: projects/powernv/mips/conf/PB47 =================================================================== --- projects/powernv/mips/conf/PB47 (revision 291050) +++ projects/powernv/mips/conf/PB47 (revision 291051) @@ -1,40 +1,42 @@ # # Atheros PB47 reference board. # # * one MiniPCI+ slot (modified to allow two idsel lines # on the one slot, for a specific kind of internal-only # NIC); # * one XMII slot # * One ethernet PHY # * Akros Silicon AS1834 # * 8MB NOR SPI flash # * 64MB RAM # # $FreeBSD$ # +#NO_UNIVERSE + include "AR71XX_BASE" ident "PB47" hints "PB47.hints" # Enable the uboot environment stuff rather than the # redboot stuff. options AR71XX_ENV_UBOOT # XXX TODO: add uboot boot parameter parsing to extract MAC, RAM. # Right now it will just detect 32MB out of 64MB, as well as # return a garbage MAC address. options AR71XX_REALMEM=64*1024*1024 # For DOS - enable if required options MSDOSFS # uncompress - to boot read-only lzma natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" Index: projects/powernv/mips/conf/PICOSTATION_M2HP =================================================================== --- projects/powernv/mips/conf/PICOSTATION_M2HP (revision 291050) +++ projects/powernv/mips/conf/PICOSTATION_M2HP (revision 291051) @@ -1,68 +1,70 @@ # # Specific board setup for the Picostation M2 HP board. # # This board has the following hardware: # # + AR7241 CPU SoC # + AR9287 Wifi # + Integrated switch (XXX speed?) # + 8MB flash # + 32MB RAM # + uboot environment # $FreeBSD$ +#NO_UNIVERSE + include "AR724X_BASE" ident "PICOSTATION_M2HP" hints "PICOSTATION_M2HP.hints" options AR71XX_REALMEM=32*1024*1024 options AR71XX_ENV_UBOOT # Limit inlines makeoptions INLINE_LIMIT=768 # We bite the performance overhead for now; the kernel won't # fit if the mutexes are inlined. options MUTEX_NOINLINE options RWLOCK_NOINLINE options SX_NOINLINE # There's no need to enable swapping on this platform. options NO_SWAPPING # For DOS - enable if required # options MSDOSFS # uncompress - to boot read-only lzma natively from flash device geom_uncompress options GEOM_UNCOMPRESS options ROOTDEVNAME=\"ufs:/dev/map/rootfs.uncompress\" # Not enough space for these.. nooptions INVARIANTS nooptions INVARIANT_SUPPORT nooptions WITNESS nooptions WITNESS_SKIPSPIN nooptions DEBUG_REDZONE nooptions DEBUG_MEMGUARD # Used for the static uboot partition map device geom_map # Options needed for the EEPROM based calibration/PCI configuration data. options AR71XX_ATH_EEPROM # Fetch EEPROM/PCI config from flash options ATH_EEPROM_FIRMWARE # Use EEPROM from flash device firmware # Used by the above # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # Enable GPIO device gpio device gpioled Index: projects/powernv/mips/conf/QCA953X_BASE =================================================================== --- projects/powernv/mips/conf/QCA953X_BASE (nonexistent) +++ projects/powernv/mips/conf/QCA953X_BASE (revision 291051) @@ -0,0 +1,138 @@ +# +# QCA953x -- Kernel configuration base file for the Qualcomm Atheros QCA953x SoC.
+# +# This file (and the hints file accompanying it) are not designed to be +# used by themselves. Instead, users of this file should create a kernel +# config file which includes this file (which gets the basic hints), then +# override the default options (adding devices as needed) and adding +# hints as needed (for example, the GPIO and LAN PHY.) +# +# $FreeBSD$ +# + +machine mips mips +ident QCA953X_BASE +cpu CPU_MIPS4KC +makeoptions KERNLOADADDR=0x80050000 +options HZ=1000 + +files "../atheros/files.ar71xx" +hints "QCA953X_BASE.hints" + +makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols +makeoptions MODULES_OVERRIDE="gpio ar71xx if_gif if_vlan if_gre if_bridge bridgestp usb wlan wlan_xauth wlan_acl wlan_wep wlan_tkip wlan_ccmp wlan_rssadapt wlan_amrr hwpmc ipfw" + +options DDB +options KDB +options ALQ +options BREAK_TO_DEBUGGER +options ALT_BREAK_TO_DEBUGGER + +options SCHED_4BSD #4BSD scheduler +options INET #InterNETworking +#options INET6 #InterNETworking +#options NFSCL #Network Filesystem Client +options PSEUDOFS #Pseudo-filesystem framework +options _KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions + +# Don't include the SCSI/CAM strings in the default build +options SCSI_NO_SENSE_STRINGS +options SCSI_NO_OP_STRINGS + +# .. And no sysctl strings +options NO_SYSCTL_DESCR + +# For small memory footprints +options VM_KMEM_SIZE_SCALE=1 + +# Limit IO size +options NBUF=128 + +# Limit UMTX hash size +# options UMTX_NUM_CHAINS=64 + +#options UMA_DEBUG_ALLOC + +# PMC +options HWPMC_HOOKS +#options HWPMC_MIPS_BACKTRACE +device hwpmc +device hwpmc_mips24k + +options ARGE_DEBUG + +# options NFS_LEGACYRPC +# Debugging for use in -current +options INVARIANTS +options INVARIANT_SUPPORT +options WITNESS +options WITNESS_SKIPSPIN +options FFS #Berkeley Fast Filesystem +#options SOFTUPDATES #Enable FFS soft updates support +#options UFS_ACL #Support for access control lists +#options UFS_DIRHASH #Improve performance on big directories +options NO_FFS_SNAPSHOT # We don't require snapshot support + +options IPFIREWALL_DEFAULT_TO_ACCEPT + +# Wireless NIC cards +options IEEE80211_DEBUG +options IEEE80211_SUPPORT_MESH +options IEEE80211_SUPPORT_TDMA +options IEEE80211_SUPPORT_SUPERG +options IEEE80211_ALQ # 802.11 ALQ logging support +device wlan # 802.11 support +device wlan_wep # 802.11 WEP support +device wlan_ccmp # 802.11 CCMP support +device wlan_tkip # 802.11 TKIP support +device wlan_xauth # 802.11 hostap support + +# ath(4) +device ath # Atheros network device +device ath_rate_sample +device ath_ahb # Atheros host bus glue +options ATH_DEBUG +options ATH_DIAGAPI +option ATH_ENABLE_11N +option AH_DEBUG_ALQ + +#device ath_hal +device ath_ar9300 # AR9330 HAL; no need for the others +option AH_DEBUG +option AH_SUPPORT_AR5416 # 11n HAL support +option AH_SUPPORT_AR9330 # Chipset support +option AH_AR5416_INTERRUPT_MITIGATION + +device mii +device arge + +device usb +options USB_EHCI_BIG_ENDIAN_DESC # handle big-endian byte order +options USB_DEBUG +options USB_HOST_ALIGN=32 # AR71XX (MIPS in general?) requires this +device ehci + +device scbus +device umass +device da + +device spibus +device ar71xx_spi +device mx25l +device ar71xx_wdog + +# Handle 25MHz refclock by allowing a higher baudrate error tolerance. 
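+# (A 25MHz reference clock cannot divide down to the standard baud
+# rates exactly - e.g. 25MHz / 16 / 115200 is 13.56, and the nearest
+# integer divisor lands about 3% off - so the match needs a wider
+# error tolerance than the default.)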
+device uart +device uart_ar71xx +options UART_DEV_TOLERANCE_PCT=50 + +device ar71xx_apb + +device loop +device ether +device md +device bpf +device random +device if_bridge +device gpio +device gpioled Property changes on: projects/powernv/mips/conf/QCA953X_BASE ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: projects/powernv/mips/conf/QCA953X_BASE.hints =================================================================== --- projects/powernv/mips/conf/QCA953X_BASE.hints (nonexistent) +++ projects/powernv/mips/conf/QCA953X_BASE.hints (revision 291051) @@ -0,0 +1,75 @@ +# This file (and the kernel config file accompanying it) are not designed +# to be used by themselves. Instead, users of this file should create a +# kernel # config file which includes this file (which gets the basic hints), +# then override the default options (adding devices as needed) and adding +# hints as needed (for example, the GPIO and LAN PHY.) + +# $FreeBSD$ + +hint.apb.0.at="nexus0" +hint.apb.0.irq=4 + +# mdiobus on arge0 - required to bring up arge0 +hint.argemdio.0.at="nexus0" +hint.argemdio.0.maddr=0x19000000 +hint.argemdio.0.msize=0x1000 +hint.argemdio.0.order=0 + +# mdiobus on arge1 - this is what the internal switch is hooked into. +hint.argemdio.1.at="nexus0" +hint.argemdio.1.maddr=0x1a000000 +hint.argemdio.1.msize=0x1000 +hint.argemdio.1.order=0 + +# uart0 +hint.uart.0.at="apb0" +# see atheros/uart_cpu_ar71xx.c why +3 +hint.uart.0.maddr=0x18020003 +hint.uart.0.msize=0x18 +hint.uart.0.irq=3 + +# ehci - on IP3 +hint.ehci.0.at="nexus0" +hint.ehci.0.maddr=0x1b000100 +hint.ehci.0.msize=0x00ffff00 +hint.ehci.0.irq=1 + +hint.arge.0.at="nexus0" +hint.arge.0.maddr=0x19000000 +hint.arge.0.msize=0x1000 +hint.arge.0.irq=2 + +hint.arge.1.at="nexus0" +hint.arge.1.maddr=0x1a000000 +hint.arge.1.msize=0x1000 +hint.arge.1.irq=3 + +# XXX The ath device hangs off of the AHB, rather than the Nexus. +hint.ath.0.at="nexus0" +hint.ath.0.maddr=0x18100000 +hint.ath.0.msize=0x20000 +hint.ath.0.irq=0 +hint.ath.0.vendor_id=0x168c +# XXX for now this is 0x00ff because there's no HAL support +# yet for honeybee. +hint.ath.0.device_id=0x00ff +# Set this to define where the ath calibration data +# should be fetched from in physical memory. +# hint.ath.0.eepromaddr=0x1fff1000 + +# SPI flash +hint.spi.0.at="nexus0" +hint.spi.0.maddr=0x1f000000 +hint.spi.0.msize=0x10 + +hint.mx25l.0.at="spibus0" +hint.mx25l.0.cs=0 + +# Watchdog +hint.ar71xx_wdog.0.at="nexus0" + +# The GPIO function and pin mask is configured per-board +hint.gpio.0.at="apb0" +hint.gpio.0.maddr=0x18040000 +hint.gpio.0.msize=0x1000 +hint.gpio.0.irq=2 Property changes on: projects/powernv/mips/conf/QCA953X_BASE.hints ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: projects/powernv/mips/conf/ROUTERSTATION =================================================================== --- projects/powernv/mips/conf/ROUTERSTATION (revision 291050) +++ projects/powernv/mips/conf/ROUTERSTATION (revision 291051) @@ -1,28 +1,30 @@ # # Ubiquiti Routerstation: Boot from onboard flash # # $FreeBSD$ # +#NO_UNIVERSE + include "AR71XX_BASE" ident "ROUTERSTATION" hints "ROUTERSTATION.hints" # XXX Is there an RTC on the RS? 
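# (geom_redboot works by parsing the RedBoot FIS directory stored in
# the SPI flash and exposing each FIS entry as a partition device.)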
# GEOM modules device geom_redboot # to get access to the SPI flash partitions device geom_uncompress # compressed in-memory filesystem support options GEOM_UNCOMPRESS # For DOS options MSDOSFS # Etherswitch support options ARGE_MDIO device miiproxy device etherswitch device ukswitch # Boot path - redboot MFS options ROOTDEVNAME=\"ufs:redboot/rootfs.uncompress\" Index: projects/powernv/mips/conf/ROUTERSTATION_MFS =================================================================== --- projects/powernv/mips/conf/ROUTERSTATION_MFS (revision 291050) +++ projects/powernv/mips/conf/ROUTERSTATION_MFS (revision 291051) @@ -1,19 +1,21 @@ # # Ubiquiti Routerstation: boot from MFS # # $FreeBSD$ # +#NO_UNIVERSE + include "AR71XX_BASE" ident "ROUTERSTATION_MFS" hints "ROUTERSTATION.hints" # GEOM modules device geom_redboot # to get access to the SPI flash partitions device geom_uzip # compressed in-memory filesystem hackery! options GEOM_UZIP options ROOTDEVNAME=\"ufs:md0.uzip\" options MD_ROOT options MD_ROOT_SIZE="6144" Index: projects/powernv/mips/conf/RSPRO =================================================================== --- projects/powernv/mips/conf/RSPRO (revision 291050) +++ projects/powernv/mips/conf/RSPRO (revision 291051) @@ -1,30 +1,32 @@ # # Routerstation Pro: boot from on-board flash # # $FreeBSD$ # +#NO_UNIVERSE + include "AR71XX_BASE" ident "RSPRO" hints "RSPRO.hints" # RTC - requires hackery in the spibus code to work device pcf2123_rtc # GEOM modules device geom_redboot # to get access to the SPI flash partitions device geom_uncompress # compressed in-memory filesystem support options GEOM_UNCOMPRESS # For DOS options MSDOSFS # For etherswitch support options ARGE_MDIO device miiproxy device etherswitch device arswitch # Boot off of flash options ROOTDEVNAME=\"ufs:redboot/rootfs.uncompress\" Index: projects/powernv/mips/conf/RSPRO_MFS =================================================================== --- projects/powernv/mips/conf/RSPRO_MFS (revision 291050) +++ projects/powernv/mips/conf/RSPRO_MFS (revision 291051) @@ -1,23 +1,25 @@ # # Ubiquiti Routerstation Pro: boot from MFS # # $FreeBSD$ # +#NO_UNIVERSE + include "AR71XX_BASE" ident "RSPRO_MFS" hints "RSPRO.hints" # RTC - requires hackery in the spibus code to work device pcf2123_rtc # GEOM modules device geom_redboot # to get access to the SPI flash partitions device geom_uzip # compressed in-memory filesystem hackery! options GEOM_UZIP # Boot from the first MFS uzip options ROOTDEVNAME=\"ufs:md0.uzip\" options MD_ROOT options MD_ROOT_SIZE="6144" Index: projects/powernv/mips/conf/TL-ARCHERC7V2 =================================================================== --- projects/powernv/mips/conf/TL-ARCHERC7V2 (revision 291050) +++ projects/powernv/mips/conf/TL-ARCHERC7V2 (revision 291051) @@ -1,68 +1,70 @@ # # TP-Link Archer C7 - based on the AP135 reference design. # # This contains a QCA9558 MIPS74k SoC with on-board 3x3 2GHz wifi, # 128MiB RAM, an AR8327 5-port gigabit ethernet switch and # a QCA 11ac 5GHz AP NIC. # # The two things not currently supported are the QCA 11ac NIC and # the PCIe host controllers - there are two of them, and the existing # PCIe code here doesn't support that just yet.
# # $FreeBSD$ # +#NO_UNIVERSE + # Include the default QCA955x parameters include "QCA955X_BASE" ident TL-ARCHERC7V2 # Override hints with board values hints "TL-ARCHERC7V2.hints" # Force the board memory - this has 128MiB RAM options AR71XX_REALMEM=(128*1024*1024) # i2c GPIO bus #device gpioiic #device iicbb #device iicbus #device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB options MSDOSFS # Enable the uboot environment stuff rather then the # redboot stuff. options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # yes, this board has a PCIe connected atheros device # add ath_pci so it can at least attach things when there's a # ath(4) in there, rather than the 11ac chip we don't support. device pci device qca955x_pci device ath_pci options AR71XX_ATH_EEPROM device firmware # Used by the above options ATH_EEPROM_FIRMWARE # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" # Default to accept options IPFIREWALL_DEFAULT_TO_ACCEPT Index: projects/powernv/mips/conf/TL-WDR4300 =================================================================== --- projects/powernv/mips/conf/TL-WDR4300 (revision 291050) +++ projects/powernv/mips/conf/TL-WDR4300 (revision 291051) @@ -1,52 +1,54 @@ # # TPLink TL-WDR4300 - AR9344 based dual-band 2x2 wifi # # $FreeBSD$ # +#NO_UNIVERSE + # Include the default AR934x parameters include "AR934X_BASE" ident TL-WDR4300 # Override hints with board values hints "TL-WDR4300.hints" # Force the board memory - the base DB120 has 128MB RAM options AR71XX_REALMEM=(128*1024*1024) # i2c GPIO bus #device gpioiic #device iicbb #device iicbus #device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB options MSDOSFS # Enable the uboot environment stuff rather then the # redboot stuff. options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # yes, this board has a PCI connected atheros device device ath_pci options AR71XX_ATH_EEPROM device firmware # Used by the above options ATH_EEPROM_FIRMWARE # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" Index: projects/powernv/mips/conf/TL-WR1043NDv2 =================================================================== --- projects/powernv/mips/conf/TL-WR1043NDv2 (revision 291050) +++ projects/powernv/mips/conf/TL-WR1043NDv2 (revision 291051) @@ -1,51 +1,53 @@ # # TP-Link TL-WR1043nd v2 - based on the AP135 reference design. # # This contains a QCA9558 MIPS74k SoC with on-board 3x3 2GHz wifi, # 64MiB RAM and an AR8327 5-port gigabit ethernet switch. 
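Several of the configs above pair AR71XX_ATH_EEPROM and ATH_EEPROM_FIRMWARE with device firmware so that ath(4) calibration data can be served out of flash through firmware(9). A minimal consumer-side sketch, assuming the calibration blob was registered under the name "ath0.eeprom" (that name and myath_read_caldata() are illustrative; firmware_get()/firmware_put() are the real interfaces):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/firmware.h>

static int
myath_read_caldata(void *dst, size_t len)
{
    const struct firmware *fw;

    fw = firmware_get("ath0.eeprom");   /* image name is an assumption */
    if (fw == NULL)
        return (ENOENT);
    if (fw->datasize < len) {
        firmware_put(fw, FIRMWARE_UNLOAD);
        return (EINVAL);
    }
    memcpy(dst, fw->data, len);
    /* Done with the image; allow it to be unloaded. */
    firmware_put(fw, FIRMWARE_UNLOAD);
    return (0);
}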
# # $FreeBSD$ # +#NO_UNIVERSE + # Include the default QCA955x parameters include "QCA955X_BASE" ident TL-WR1043NDv2 # Override hints with board values hints "TL-WR1043NDv2.hints" options AR71XX_REALMEM=(64*1024*1024) # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB options MSDOSFS # Enable the uboot environment stuff rather then the # redboot stuff. options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # yes, this board has a PCI connected atheros device #device ath_pci #options AR71XX_ATH_EEPROM #device firmware # Used by the above #options ATH_EEPROM_FIRMWARE # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" # Default to accept options IPFIREWALL_DEFAULT_TO_ACCEPT Index: projects/powernv/mips/conf/TL-WR740Nv4 =================================================================== --- projects/powernv/mips/conf/TL-WR740Nv4 (revision 291050) +++ projects/powernv/mips/conf/TL-WR740Nv4 (revision 291051) @@ -1,53 +1,55 @@ # # TP-Link WR740N v4 # # * AR9330 SoC # * 32MB RAM # * 4MB flash # * Integrated 1x1 2GHz wifi and 10/100 bridge # # $FreeBSD$ # +#NO_UNIVERSE + # Include the default AR933x parameters include "AR933X_BASE" ident TL-WR740Nv4 # Override hints with board values hints "TL-WR740Nv4.hints" # Board memory - 32MB options AR71XX_REALMEM=(32*1024*1024) # i2c GPIO bus #device gpioiic #device iicbb #device iicbus #device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # read MSDOS formatted disks - USB #options MSDOSFS # Enable the uboot environment stuff rather then the # redboot stuff. options AR71XX_ENV_UBOOT # uzip - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # Boot off of the rootfs, as defined in the geom_map setup. # options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" # Note: we don't fit in 4MB flash, so the rootfs must be on USB for now options ROOTDEVNAME=\"ufs:da0\" Index: projects/powernv/mips/conf/TP-MR3020 =================================================================== --- projects/powernv/mips/conf/TP-MR3020 (revision 291050) +++ projects/powernv/mips/conf/TP-MR3020 (revision 291051) @@ -1,55 +1,57 @@ # # TP Link MR3020 - an AR9331 based SoC wifi device. # # This is for the 32 RAM/4 flash part. There is little to no # chance that this will ever boot FreeBSD directly from the 3.5MB # of flash. The kernel can fit into the space, but userland is just # too big even when stripped down to its limits. 
# # * AR9331 SoC # * 32MB RAM # * 4MB flash # * Integrated 1x1 2GHz wifi and 10/100 bridge # * USB powered # * USB storage # # $FreeBSD$ # +#NO_UNIVERSE + # Include the default AR933x parameters include "AR933X_BASE" ident TP-MR3020 # Override hints with board values hints "TP-MR3020.hints" # Board memory - 32MB options AR71XX_REALMEM=(32*1024*1024) # Disable support for paging options NO_SWAPPING # i2c GPIO bus device gpioiic device iicbb device iicbus device iic # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # Enable the uboot environment stuff rather then the # redboot stuff. options AR71XX_ENV_UBOOT # Used for the static uboot partition map device geom_map # With only 4MB of flash, we are stuck using USB # for the rootfs. options ROOTDEVNAME=\"ufs:da0\" Index: projects/powernv/mips/conf/TP-WN1043ND =================================================================== --- projects/powernv/mips/conf/TP-WN1043ND (revision 291050) +++ projects/powernv/mips/conf/TP-WN1043ND (revision 291051) @@ -1,59 +1,61 @@ # # TP-1043ND -- Kernel configuration file for the TP-Link WR-1043ND # # $FreeBSD$ # +#NO_UNIVERSE + # Include the default AR913x parameters common to all AR913x SoC users. include "AR91XX_BASE" ident TP-WN1043ND # Override hints with board values hints "TP-WN1043ND.hints" # Force the board memory - 32mb options AR71XX_REALMEM=32*1024*1024 # i2c GPIO bus device gpioiic device iicbb device iicbus device iic # ethernet switch device device etherswitch # RTL8366RB support device rtl8366rb # read MSDOS formatted disks - USB options MSDOSFS # Enable the uboot environment stuff rather then the # redboot stuff. options AR71XX_ENV_UBOOT # uncompress - to boot natively from flash device geom_uncompress options GEOM_UNCOMPRESS # Used for the static uboot partition map device geom_map # Boot off of the rootfs, as defined in the geom_map setup. options ROOTDEVNAME=\"ufs:map/rootfs.uncompress\" # We bite the performance overhead for now; the kernel won't # fit if the mutexes are inlined. options MUTEX_NOINLINE options RWLOCK_NOINLINE options SX_NOINLINE # Remove everything we don't need. We need a _really_ small kernel! nooptions INVARIANTS nooptions INVARIANT_SUPPORT nooptions WITNESS nooptions WITNESS_SKIPSPIN nooptions DEBUG_REDZONE nooptions DEBUG_MEMGUARD Index: projects/powernv/mips/conf/WZR-300HP =================================================================== --- projects/powernv/mips/conf/WZR-300HP (revision 291050) +++ projects/powernv/mips/conf/WZR-300HP (revision 291051) @@ -1,49 +1,51 @@ # # Specific board setup for the Buffalo Airstation WZR-300HP # # The WZR-300HP has the following hardware: # # + AR7242 CPU SoC # + AR9280 5GHz 11n # + AR8136 Gigabit switch # + 2 m25ll128 based 16MB flash # + 64MB RAM # + uboot environment # $FreeBSD$ +#NO_UNIVERSE + include "AR724X_BASE" ident "WZR-300HP" hints "WZR-300HP.hints" options AR71XX_REALMEM=64*1024*1024 options AR71XX_ENV_UBOOT options BOOTVERBOSE # GEOM modules device geom_map # to get access to the SPI flash partitions device geom_uncompress # compressed in-memory filesystem hackery! 
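The TP-WN1043ND kernel above accepts MUTEX_NOINLINE, RWLOCK_NOINLINE and SX_NOINLINE precisely because inlined lock fast paths get duplicated at every call site. A simplified, self-contained sketch of what that duplication looks like (illustrative C only, not the real sys/mutex.h):

#include <stdint.h>

struct my_mtx {
    volatile uintptr_t mtx_lock;    /* 0 == unowned */
};

void my_mtx_lock_slow(struct my_mtx *m, uintptr_t tid);  /* out of line */

/*
 * Without MUTEX_NOINLINE, a compare-and-set fast path much like this
 * is expanded at every mtx_lock() call site; with the option each
 * site collapses to a single function call, which matters when the
 * kernel has to fit in a few MB of flash.
 */
static inline void
my_mtx_lock(struct my_mtx *m, uintptr_t tid)
{
    if (!__sync_bool_compare_and_swap(&m->mtx_lock, 0, tid))
        my_mtx_lock_slow(m, tid);   /* contended: take the slow path */
}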
options ROOTDEVNAME=\"ufs:/dev/map/rootfs.uncompress\" options AR71XX_ATH_EEPROM # Fetch EEPROM/PCI config from flash options ATH_EEPROM_FIRMWARE # Use EEPROM from flash device firmware # Used by the above # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # hwpmc device hwpmc_mips24k device hwpmc # load these via modules, shrink kernel nodevice if_bridge nodevice bridgestp nodevice random Index: projects/powernv/mips/conf/WZR-HPAG300H =================================================================== --- projects/powernv/mips/conf/WZR-HPAG300H (revision 291050) +++ projects/powernv/mips/conf/WZR-HPAG300H (revision 291051) @@ -1,49 +1,51 @@ # # Specific board setup for the Buffalo Airstation WZR-HPAG300H # # The WZR-HPAG300H has the following hardware: # # + AR7161 CPU SoC # + 2x AR9280 5GHz 11n # + AR8136 Gigabit switch # + 2 m25ll128(really w25q128) based 16MB flash # + 128MB RAM # + uboot environment # $FreeBSD$ +#NO_UNIVERSE + include "AR71XX_BASE" ident "WZR-HPAG300H" hints "WZR-HPAG300H.hints" options AR71XX_REALMEM=128*1024*1024 options AR71XX_ENV_UBOOT options BOOTVERBOSE # GEOM modules device geom_uncompress # compressed in-memory filesystem hackery! device geom_map # to get access to the SPI flash partitions options ROOTDEVNAME=\"ufs:/dev/map/rootfs.uncompress\" options AR71XX_ATH_EEPROM # Fetch EEPROM/PCI config from flash options ATH_EEPROM_FIRMWARE # Use EEPROM from flash device firmware # Used by the above # Options required for miiproxy and mdiobus options ARGE_MDIO # Export an MDIO bus separate from arge device miiproxy # MDIO bus <-> MII PHY rendezvous device etherswitch device arswitch # hwpmc device hwpmc_mips24k device hwpmc # load these via modules, shrink kernel nodevice if_bridge nodevice bridgestp nodevice random Index: projects/powernv/modules/tests/framework/Makefile =================================================================== --- projects/powernv/modules/tests/framework/Makefile (revision 291050) +++ projects/powernv/modules/tests/framework/Makefile (revision 291051) @@ -1,15 +1,16 @@ # # $FreeBSD$ # .PATH: ${.CURDIR}/../../../tests/framework KMOD= kern_testfrwk SRCS= kern_testfrwk.c +SRCS+= bus_if.h device_if.h # # Enable full debugging # #CFLAGS += -g .include Index: projects/powernv/netinet/in_mcast.c =================================================================== --- projects/powernv/netinet/in_mcast.c (revision 291050) +++ projects/powernv/netinet/in_mcast.c (revision 291051) @@ -1,3012 +1,3012 @@ /*- * Copyright (c) 2007-2009 Bruce Simpson. * Copyright (c) 2005 Robert N. M. Watson. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior written * permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * IPv4 multicast socket, group, and socket option processing module. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef KTR_IGMPV3 #define KTR_IGMPV3 KTR_INET #endif #ifndef __SOCKUNION_DECLARED union sockunion { struct sockaddr_storage ss; struct sockaddr sa; struct sockaddr_dl sdl; struct sockaddr_in sin; }; typedef union sockunion sockunion_t; #define __SOCKUNION_DECLARED #endif /* __SOCKUNION_DECLARED */ static MALLOC_DEFINE(M_INMFILTER, "in_mfilter", "IPv4 multicast PCB-layer source filter"); static MALLOC_DEFINE(M_IPMADDR, "in_multi", "IPv4 multicast group"); static MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "IPv4 multicast options"); static MALLOC_DEFINE(M_IPMSOURCE, "ip_msource", "IPv4 multicast IGMP-layer source filter"); /* * Locking: * - Lock order is: Giant, INP_WLOCK, IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK. * - The IF_ADDR_LOCK is implicitly taken by inm_lookup() earlier, however * it can be taken by code in net/if.c also. * - ip_moptions and in_mfilter are covered by the INP_WLOCK. * * struct in_multi is covered by IN_MULTI_LOCK. There isn't strictly * any need for in_multi itself to be virtualized -- it is bound to an ifp * anyway no matter what happens. */ struct mtx in_multi_mtx; MTX_SYSINIT(in_multi_mtx, &in_multi_mtx, "in_multi_mtx", MTX_DEF); /* * Functions with non-static linkage defined in this file should be * declared in in_var.h: * imo_multi_filter() * in_addmulti() * in_delmulti() * in_joingroup() * in_joingroup_locked() * in_leavegroup() * in_leavegroup_locked() * and ip_var.h: * inp_freemoptions() * inp_getmoptions() * inp_setmoptions() * * XXX: Both carp and pf need to use the legacy (*,G) KPIs in_addmulti() * and in_delmulti(). 
*/ static void imf_commit(struct in_mfilter *); static int imf_get_source(struct in_mfilter *imf, const struct sockaddr_in *psin, struct in_msource **); static struct in_msource * imf_graft(struct in_mfilter *, const uint8_t, const struct sockaddr_in *); static void imf_leave(struct in_mfilter *); static int imf_prune(struct in_mfilter *, const struct sockaddr_in *); static void imf_purge(struct in_mfilter *); static void imf_rollback(struct in_mfilter *); static void imf_reap(struct in_mfilter *); static int imo_grow(struct ip_moptions *); static size_t imo_match_group(const struct ip_moptions *, const struct ifnet *, const struct sockaddr *); static struct in_msource * imo_match_source(const struct ip_moptions *, const size_t, const struct sockaddr *); static void ims_merge(struct ip_msource *ims, const struct in_msource *lims, const int rollback); static int in_getmulti(struct ifnet *, const struct in_addr *, struct in_multi **); static int inm_get_source(struct in_multi *inm, const in_addr_t haddr, const int noalloc, struct ip_msource **pims); #ifdef KTR static int inm_is_ifp_detached(const struct in_multi *); #endif static int inm_merge(struct in_multi *, /*const*/ struct in_mfilter *); static void inm_purge(struct in_multi *); static void inm_reap(struct in_multi *); static struct ip_moptions * inp_findmoptions(struct inpcb *); static void inp_freemoptions_internal(struct ip_moptions *); static void inp_gcmoptions(void *, int); static int inp_get_source_filters(struct inpcb *, struct sockopt *); static int inp_join_group(struct inpcb *, struct sockopt *); static int inp_leave_group(struct inpcb *, struct sockopt *); static struct ifnet * inp_lookup_mcast_ifp(const struct inpcb *, const struct sockaddr_in *, const struct in_addr); static int inp_block_unblock_source(struct inpcb *, struct sockopt *); static int inp_set_multicast_if(struct inpcb *, struct sockopt *); static int inp_set_source_filters(struct inpcb *, struct sockopt *); static int sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS); static SYSCTL_NODE(_net_inet_ip, OID_AUTO, mcast, CTLFLAG_RW, 0, "IPv4 multicast"); static u_long in_mcast_maxgrpsrc = IP_MAX_GROUP_SRC_FILTER; SYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxgrpsrc, CTLFLAG_RWTUN, &in_mcast_maxgrpsrc, 0, "Max source filters per group"); static u_long in_mcast_maxsocksrc = IP_MAX_SOCK_SRC_FILTER; SYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxsocksrc, CTLFLAG_RWTUN, &in_mcast_maxsocksrc, 0, "Max source filters per socket"); int in_mcast_loop = IP_DEFAULT_MULTICAST_LOOP; SYSCTL_INT(_net_inet_ip_mcast, OID_AUTO, loop, CTLFLAG_RWTUN, &in_mcast_loop, 0, "Loopback multicast datagrams by default"); static SYSCTL_NODE(_net_inet_ip_mcast, OID_AUTO, filters, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip_mcast_filters, "Per-interface stack-wide source filters"); static STAILQ_HEAD(, ip_moptions) imo_gc_list = STAILQ_HEAD_INITIALIZER(imo_gc_list); static struct task imo_gc_task = TASK_INITIALIZER(0, inp_gcmoptions, NULL); #ifdef KTR /* * Inline function which wraps assertions for a valid ifp. * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp * is detached. */ static int __inline inm_is_ifp_detached(const struct in_multi *inm) { struct ifnet *ifp; KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__)); ifp = inm->inm_ifma->ifma_ifp; if (ifp != NULL) { /* * Sanity check that netinet's notion of ifp is the * same as net's. 
*/ KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__)); } return (ifp == NULL); } #endif /* * Initialize an in_mfilter structure to a known state at t0, t1 * with an empty source filter list. */ static __inline void imf_init(struct in_mfilter *imf, const int st0, const int st1) { memset(imf, 0, sizeof(struct in_mfilter)); RB_INIT(&imf->imf_sources); imf->imf_st[0] = st0; imf->imf_st[1] = st1; } /* * Function for looking up an in_multi record for an IPv4 multicast address * on a given interface. ifp must be valid. If no record found, return NULL. * The IN_MULTI_LOCK and IF_ADDR_LOCK on ifp must be held. */ struct in_multi * inm_lookup_locked(struct ifnet *ifp, const struct in_addr ina) { struct ifmultiaddr *ifma; struct in_multi *inm; IN_MULTI_LOCK_ASSERT(); IF_ADDR_LOCK_ASSERT(ifp); inm = NULL; TAILQ_FOREACH(ifma, &((ifp)->if_multiaddrs), ifma_link) { if (ifma->ifma_addr->sa_family == AF_INET) { inm = (struct in_multi *)ifma->ifma_protospec; if (inm->inm_addr.s_addr == ina.s_addr) break; inm = NULL; } } return (inm); } /* * Wrapper for inm_lookup_locked(). * The IF_ADDR_LOCK will be taken on ifp and released on return. */ struct in_multi * inm_lookup(struct ifnet *ifp, const struct in_addr ina) { struct in_multi *inm; IN_MULTI_LOCK_ASSERT(); IF_ADDR_RLOCK(ifp); inm = inm_lookup_locked(ifp, ina); IF_ADDR_RUNLOCK(ifp); return (inm); } /* * Resize the ip_moptions vector to the next power-of-two minus 1. * May be called with locks held; do not sleep. */ static int imo_grow(struct ip_moptions *imo) { struct in_multi **nmships; struct in_multi **omships; struct in_mfilter *nmfilters; struct in_mfilter *omfilters; size_t idx; size_t newmax; size_t oldmax; nmships = NULL; nmfilters = NULL; omships = imo->imo_membership; omfilters = imo->imo_mfilters; oldmax = imo->imo_max_memberships; newmax = ((oldmax + 1) * 2) - 1; if (newmax <= IP_MAX_MEMBERSHIPS) { nmships = (struct in_multi **)realloc(omships, sizeof(struct in_multi *) * newmax, M_IPMOPTS, M_NOWAIT); nmfilters = (struct in_mfilter *)realloc(omfilters, sizeof(struct in_mfilter) * newmax, M_INMFILTER, M_NOWAIT); if (nmships != NULL && nmfilters != NULL) { /* Initialize newly allocated source filter heads. */ for (idx = oldmax; idx < newmax; idx++) { imf_init(&nmfilters[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); } imo->imo_max_memberships = newmax; imo->imo_membership = nmships; imo->imo_mfilters = nmfilters; } } if (nmships == NULL || nmfilters == NULL) { if (nmships != NULL) free(nmships, M_IPMOPTS); if (nmfilters != NULL) free(nmfilters, M_INMFILTER); return (ETOOMANYREFS); } return (0); } /* * Find an IPv4 multicast group entry for this ip_moptions instance * which matches the specified group, and optionally an interface. * Return its index into the array, or -1 if not found. */ static size_t imo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp, const struct sockaddr *group) { const struct sockaddr_in *gsin; struct in_multi **pinm; int idx; int nmships; gsin = (const struct sockaddr_in *)group; /* The imo_membership array may be lazy allocated. 
*/ if (imo->imo_membership == NULL || imo->imo_num_memberships == 0) return (-1); nmships = imo->imo_num_memberships; pinm = &imo->imo_membership[0]; for (idx = 0; idx < nmships; idx++, pinm++) { if (*pinm == NULL) continue; if ((ifp == NULL || ((*pinm)->inm_ifp == ifp)) && in_hosteq((*pinm)->inm_addr, gsin->sin_addr)) { break; } } if (idx >= nmships) idx = -1; return (idx); } /* * Find an IPv4 multicast source entry for this imo which matches * the given group index for this socket, and source address. * * NOTE: This does not check if the entry is in-mode, merely if * it exists, which may not be the desired behaviour. */ static struct in_msource * imo_match_source(const struct ip_moptions *imo, const size_t gidx, const struct sockaddr *src) { struct ip_msource find; struct in_mfilter *imf; struct ip_msource *ims; const sockunion_t *psa; KASSERT(src->sa_family == AF_INET, ("%s: !AF_INET", __func__)); KASSERT(gidx != -1 && gidx < imo->imo_num_memberships, ("%s: invalid index %d\n", __func__, (int)gidx)); /* The imo_mfilters array may be lazy allocated. */ if (imo->imo_mfilters == NULL) return (NULL); imf = &imo->imo_mfilters[gidx]; /* Source trees are keyed in host byte order. */ psa = (const sockunion_t *)src; find.ims_haddr = ntohl(psa->sin.sin_addr.s_addr); ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); return ((struct in_msource *)ims); } /* * Perform filtering for multicast datagrams on a socket by group and source. * * Returns 0 if a datagram should be allowed through, or various error codes * if the socket was not a member of the group, or the source was muted, etc. */ int imo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp, const struct sockaddr *group, const struct sockaddr *src) { size_t gidx; struct in_msource *ims; int mode; KASSERT(ifp != NULL, ("%s: null ifp", __func__)); gidx = imo_match_group(imo, ifp, group); if (gidx == -1) return (MCAST_NOTGMEMBER); /* * Check if the source was included in an (S,G) join. * Allow reception on exclusive memberships by default, * reject reception on inclusive memberships by default. * Exclude source only if an in-mode exclude filter exists. * Include source only if an in-mode include filter exists. * NOTE: We are comparing group state here at IGMP t1 (now) * with socket-layer t0 (since last downcall). */ mode = imo->imo_mfilters[gidx].imf_st[1]; ims = imo_match_source(imo, gidx, src); if ((ims == NULL && mode == MCAST_INCLUDE) || (ims != NULL && ims->imsl_st[0] != mode)) return (MCAST_NOTSMEMBER); return (MCAST_PASS); } /* * Find and return a reference to an in_multi record for (ifp, group), * and bump its reference count. * If one does not exist, try to allocate it, and update link-layer multicast * filters on ifp to listen for group. * Assumes the IN_MULTI lock is held across the call. * Return 0 if successful, otherwise return an appropriate error code. */ static int in_getmulti(struct ifnet *ifp, const struct in_addr *group, struct in_multi **pinm) { struct sockaddr_in gsin; struct ifmultiaddr *ifma; struct in_ifinfo *ii; struct in_multi *inm; int error; IN_MULTI_LOCK_ASSERT(); ii = (struct in_ifinfo *)ifp->if_afdata[AF_INET]; inm = inm_lookup(ifp, *group); if (inm != NULL) { /* * If we already joined this group, just bump the * refcount and return it. 
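The growth rule in imo_grow() above keeps the membership and filter arrays on a power-of-two-minus-one schedule so both can be reallocated in lockstep. A standalone demonstration of the ((n + 1) * 2) - 1 progression (the starting value and cap below are illustrative, not the kernel's constants):

#include <stdio.h>

int
main(void)
{
    unsigned n = 15;

    while (n < 4096) {
        printf("%u\n", n);      /* 15, 31, 63, 127, ... 4095 */
        n = ((n + 1) * 2) - 1;  /* next power of two, minus one */
    }
    return (0);
}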
*/ KASSERT(inm->inm_refcount >= 1, ("%s: bad refcount %d", __func__, inm->inm_refcount)); ++inm->inm_refcount; *pinm = inm; return (0); } memset(&gsin, 0, sizeof(gsin)); gsin.sin_family = AF_INET; gsin.sin_len = sizeof(struct sockaddr_in); gsin.sin_addr = *group; /* * Check if a link-layer group is already associated * with this network-layer group on the given ifnet. */ error = if_addmulti(ifp, (struct sockaddr *)&gsin, &ifma); if (error != 0) return (error); /* XXX ifma_protospec must be covered by IF_ADDR_LOCK */ IF_ADDR_WLOCK(ifp); /* * If something other than netinet is occupying the link-layer * group, print a meaningful error message and back out of * the allocation. * Otherwise, bump the refcount on the existing network-layer * group association and return it. */ if (ifma->ifma_protospec != NULL) { inm = (struct in_multi *)ifma->ifma_protospec; #ifdef INVARIANTS KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr", __func__)); KASSERT(ifma->ifma_addr->sa_family == AF_INET, ("%s: ifma not AF_INET", __func__)); KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__)); if (inm->inm_ifma != ifma || inm->inm_ifp != ifp || !in_hosteq(inm->inm_addr, *group)) panic("%s: ifma %p is inconsistent with %p (%s)", __func__, ifma, inm, inet_ntoa(*group)); #endif ++inm->inm_refcount; *pinm = inm; IF_ADDR_WUNLOCK(ifp); return (0); } IF_ADDR_WLOCK_ASSERT(ifp); /* * A new in_multi record is needed; allocate and initialize it. * We DO NOT perform an IGMP join as the in_ layer may need to * push an initial source list down to IGMP to support SSM. * * The initial source filter state is INCLUDE, {} as per the RFC. */ inm = malloc(sizeof(*inm), M_IPMADDR, M_NOWAIT | M_ZERO); if (inm == NULL) { - if_delmulti_ifma(ifma); IF_ADDR_WUNLOCK(ifp); + if_delmulti_ifma(ifma); return (ENOMEM); } inm->inm_addr = *group; inm->inm_ifp = ifp; inm->inm_igi = ii->ii_igmp; inm->inm_ifma = ifma; inm->inm_refcount = 1; inm->inm_state = IGMP_NOT_MEMBER; mbufq_init(&inm->inm_scq, IGMP_MAX_STATE_CHANGES); inm->inm_st[0].iss_fmode = MCAST_UNDEFINED; inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; RB_INIT(&inm->inm_srcs); ifma->ifma_protospec = inm; *pinm = inm; IF_ADDR_WUNLOCK(ifp); return (0); } /* * Drop a reference to an in_multi record. * * If the refcount drops to 0, free the in_multi record and * delete the underlying link-layer membership. */ void inm_release_locked(struct in_multi *inm) { struct ifmultiaddr *ifma; IN_MULTI_LOCK_ASSERT(); CTR2(KTR_IGMPV3, "%s: refcount is %d", __func__, inm->inm_refcount); if (--inm->inm_refcount > 0) { CTR2(KTR_IGMPV3, "%s: refcount is now %d", __func__, inm->inm_refcount); return; } CTR2(KTR_IGMPV3, "%s: freeing inm %p", __func__, inm); ifma = inm->inm_ifma; /* XXX this access is not covered by IF_ADDR_LOCK */ CTR2(KTR_IGMPV3, "%s: purging ifma %p", __func__, ifma); KASSERT(ifma->ifma_protospec == inm, ("%s: ifma_protospec != inm", __func__)); ifma->ifma_protospec = NULL; inm_purge(inm); free(inm, M_IPMADDR); if_delmulti_ifma(ifma); } /* * Clear recorded source entries for a group. * Used by the IGMP code. Caller must hold the IN_MULTI lock. * FIXME: Should reap. */ void inm_clear_recorded(struct in_multi *inm) { struct ip_msource *ims; IN_MULTI_LOCK_ASSERT(); RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) { if (ims->ims_stp) { ims->ims_stp = 0; --inm->inm_st[1].iss_rec; } } KASSERT(inm->inm_st[1].iss_rec == 0, ("%s: iss_rec %d not 0", __func__, inm->inm_st[1].iss_rec)); } /* * Record a source as pending for a Source-Group IGMPv3 query. * This lives here as it modifies the shared tree. 
* * inm is the group descriptor. * naddr is the address of the source to record in network-byte order. * * If the net.inet.igmp.sgalloc sysctl is non-zero, we will * lazy-allocate a source node in response to an SG query. * Otherwise, no allocation is performed. This saves some memory * with the trade-off that the source will not be reported to the * router if joined in the window between the query response and * the group actually being joined on the local host. * * VIMAGE: XXX: Currently the igmp_sgalloc feature has been removed. * This turns off the allocation of a recorded source entry if * the group has not been joined. * * Return 0 if the source didn't exist or was already marked as recorded. * Return 1 if the source was marked as recorded by this function. * Return <0 if any error occured (negated errno code). */ int inm_record_source(struct in_multi *inm, const in_addr_t naddr) { struct ip_msource find; struct ip_msource *ims, *nims; IN_MULTI_LOCK_ASSERT(); find.ims_haddr = ntohl(naddr); ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find); if (ims && ims->ims_stp) return (0); if (ims == NULL) { if (inm->inm_nsrc == in_mcast_maxgrpsrc) return (-ENOSPC); nims = malloc(sizeof(struct ip_msource), M_IPMSOURCE, M_NOWAIT | M_ZERO); if (nims == NULL) return (-ENOMEM); nims->ims_haddr = find.ims_haddr; RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); ++inm->inm_nsrc; ims = nims; } /* * Mark the source as recorded and update the recorded * source count. */ ++ims->ims_stp; ++inm->inm_st[1].iss_rec; return (1); } /* * Return a pointer to an in_msource owned by an in_mfilter, * given its source address. * Lazy-allocate if needed. If this is a new entry its filter state is * undefined at t0. * * imf is the filter set being modified. * haddr is the source address in *host* byte-order. * * SMPng: May be called with locks held; malloc must not block. */ static int imf_get_source(struct in_mfilter *imf, const struct sockaddr_in *psin, struct in_msource **plims) { struct ip_msource find; struct ip_msource *ims, *nims; struct in_msource *lims; int error; error = 0; ims = NULL; lims = NULL; /* key is host byte order */ find.ims_haddr = ntohl(psin->sin_addr.s_addr); ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); lims = (struct in_msource *)ims; if (lims == NULL) { if (imf->imf_nsrc == in_mcast_maxsocksrc) return (ENOSPC); nims = malloc(sizeof(struct in_msource), M_INMFILTER, M_NOWAIT | M_ZERO); if (nims == NULL) return (ENOMEM); lims = (struct in_msource *)nims; lims->ims_haddr = find.ims_haddr; lims->imsl_st[0] = MCAST_UNDEFINED; RB_INSERT(ip_msource_tree, &imf->imf_sources, nims); ++imf->imf_nsrc; } *plims = lims; return (error); } /* * Graft a source entry into an existing socket-layer filter set, * maintaining any required invariants and checking allocations. * * The source is marked as being in the new filter mode at t1. * * Return the pointer to the new node, otherwise return NULL. 
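Both imo_match_source() and imf_get_source() key their red-black trees with ntohl()'d addresses, per the "key is host byte order" comments above. A tiny userland illustration of why: host-order comparison yields one numeric ordering on every platform, which a tree comparator requires.

#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdio.h>

int
main(void)
{
    struct in_addr a, b;

    inet_pton(AF_INET, "10.0.0.2", &a);
    inet_pton(AF_INET, "9.255.255.255", &b);

    /*
     * In host order 0x0a000002 > 0x09ffffff everywhere.  Comparing
     * the raw network-order words instead would invert the answer on
     * a little-endian machine, breaking the tree's ordering invariant.
     */
    printf("%d\n", ntohl(a.s_addr) > ntohl(b.s_addr)); /* prints 1 */
    return (0);
}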
*/ static struct in_msource * imf_graft(struct in_mfilter *imf, const uint8_t st1, const struct sockaddr_in *psin) { struct ip_msource *nims; struct in_msource *lims; nims = malloc(sizeof(struct in_msource), M_INMFILTER, M_NOWAIT | M_ZERO); if (nims == NULL) return (NULL); lims = (struct in_msource *)nims; lims->ims_haddr = ntohl(psin->sin_addr.s_addr); lims->imsl_st[0] = MCAST_UNDEFINED; lims->imsl_st[1] = st1; RB_INSERT(ip_msource_tree, &imf->imf_sources, nims); ++imf->imf_nsrc; return (lims); } /* * Prune a source entry from an existing socket-layer filter set, * maintaining any required invariants and checking allocations. * * The source is marked as being left at t1, it is not freed. * * Return 0 if no error occurred, otherwise return an errno value. */ static int imf_prune(struct in_mfilter *imf, const struct sockaddr_in *psin) { struct ip_msource find; struct ip_msource *ims; struct in_msource *lims; /* key is host byte order */ find.ims_haddr = ntohl(psin->sin_addr.s_addr); ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); if (ims == NULL) return (ENOENT); lims = (struct in_msource *)ims; lims->imsl_st[1] = MCAST_UNDEFINED; return (0); } /* * Revert socket-layer filter set deltas at t1 to t0 state. */ static void imf_rollback(struct in_mfilter *imf) { struct ip_msource *ims, *tims; struct in_msource *lims; RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { lims = (struct in_msource *)ims; if (lims->imsl_st[0] == lims->imsl_st[1]) { /* no change at t1 */ continue; } else if (lims->imsl_st[0] != MCAST_UNDEFINED) { /* revert change to existing source at t1 */ lims->imsl_st[1] = lims->imsl_st[0]; } else { /* revert source added t1 */ CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims); free(ims, M_INMFILTER); imf->imf_nsrc--; } } imf->imf_st[1] = imf->imf_st[0]; } /* * Mark socket-layer filter set as INCLUDE {} at t1. */ static void imf_leave(struct in_mfilter *imf) { struct ip_msource *ims; struct in_msource *lims; RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { lims = (struct in_msource *)ims; lims->imsl_st[1] = MCAST_UNDEFINED; } imf->imf_st[1] = MCAST_INCLUDE; } /* * Mark socket-layer filter set deltas as committed. */ static void imf_commit(struct in_mfilter *imf) { struct ip_msource *ims; struct in_msource *lims; RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { lims = (struct in_msource *)ims; lims->imsl_st[0] = lims->imsl_st[1]; } imf->imf_st[0] = imf->imf_st[1]; } /* * Reap unreferenced sources from socket-layer filter set. */ static void imf_reap(struct in_mfilter *imf) { struct ip_msource *ims, *tims; struct in_msource *lims; RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { lims = (struct in_msource *)ims; if ((lims->imsl_st[0] == MCAST_UNDEFINED) && (lims->imsl_st[1] == MCAST_UNDEFINED)) { CTR2(KTR_IGMPV3, "%s: free lims %p", __func__, ims); RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims); free(ims, M_INMFILTER); imf->imf_nsrc--; } } } /* * Purge socket-layer filter set. */ static void imf_purge(struct in_mfilter *imf) { struct ip_msource *ims, *tims; RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims); free(ims, M_INMFILTER); imf->imf_nsrc--; } imf->imf_st[0] = imf->imf_st[1] = MCAST_UNDEFINED; KASSERT(RB_EMPTY(&imf->imf_sources), ("%s: imf_sources not empty", __func__)); } /* * Look up a source filter entry for a multicast group. 
* * inm is the group descriptor to work with. * haddr is the host-byte-order IPv4 address to look up. * noalloc may be non-zero to suppress allocation of sources. * *pims will be set to the address of the retrieved or allocated source. * * SMPng: NOTE: may be called with locks held. * Return 0 if successful, otherwise return a non-zero error code. */ static int inm_get_source(struct in_multi *inm, const in_addr_t haddr, const int noalloc, struct ip_msource **pims) { struct ip_msource find; struct ip_msource *ims, *nims; #ifdef KTR struct in_addr ia; #endif find.ims_haddr = haddr; ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find); if (ims == NULL && !noalloc) { if (inm->inm_nsrc == in_mcast_maxgrpsrc) return (ENOSPC); nims = malloc(sizeof(struct ip_msource), M_IPMSOURCE, M_NOWAIT | M_ZERO); if (nims == NULL) return (ENOMEM); nims->ims_haddr = haddr; RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); ++inm->inm_nsrc; ims = nims; #ifdef KTR ia.s_addr = htonl(haddr); CTR3(KTR_IGMPV3, "%s: allocated %s as %p", __func__, inet_ntoa(ia), ims); #endif } *pims = ims; return (0); } /* * Merge socket-layer source into IGMP-layer source. * If rollback is non-zero, perform the inverse of the merge. */ static void ims_merge(struct ip_msource *ims, const struct in_msource *lims, const int rollback) { int n = rollback ? -1 : 1; #ifdef KTR struct in_addr ia; ia.s_addr = htonl(ims->ims_haddr); #endif if (lims->imsl_st[0] == MCAST_EXCLUDE) { CTR3(KTR_IGMPV3, "%s: t1 ex -= %d on %s", __func__, n, inet_ntoa(ia)); ims->ims_st[1].ex -= n; } else if (lims->imsl_st[0] == MCAST_INCLUDE) { CTR3(KTR_IGMPV3, "%s: t1 in -= %d on %s", __func__, n, inet_ntoa(ia)); ims->ims_st[1].in -= n; } if (lims->imsl_st[1] == MCAST_EXCLUDE) { CTR3(KTR_IGMPV3, "%s: t1 ex += %d on %s", __func__, n, inet_ntoa(ia)); ims->ims_st[1].ex += n; } else if (lims->imsl_st[1] == MCAST_INCLUDE) { CTR3(KTR_IGMPV3, "%s: t1 in += %d on %s", __func__, n, inet_ntoa(ia)); ims->ims_st[1].in += n; } } /* * Atomically update the global in_multi state, when a membership's * filter list is being updated in any way. * * imf is the per-inpcb-membership group filter pointer. * A fake imf may be passed for in-kernel consumers. * * XXX This is a candidate for a set-symmetric-difference style loop * which would eliminate the repeated lookup from root of ims nodes, * as they share the same key space. * * If any error occurred this function will back out of refcounts * and return a non-zero value. */ static int inm_merge(struct in_multi *inm, /*const*/ struct in_mfilter *imf) { struct ip_msource *ims, *nims; struct in_msource *lims; int schanged, error; int nsrc0, nsrc1; schanged = 0; error = 0; nsrc1 = nsrc0 = 0; /* * Update the source filters first, as this may fail. * Maintain count of in-mode filters at t0, t1. These are * used to work out if we transition into ASM mode or not. * Maintain a count of source filters whose state was * actually modified by this operation. 
*/ RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { lims = (struct in_msource *)ims; if (lims->imsl_st[0] == imf->imf_st[0]) nsrc0++; if (lims->imsl_st[1] == imf->imf_st[1]) nsrc1++; if (lims->imsl_st[0] == lims->imsl_st[1]) continue; error = inm_get_source(inm, lims->ims_haddr, 0, &nims); ++schanged; if (error) break; ims_merge(nims, lims, 0); } if (error) { struct ip_msource *bims; RB_FOREACH_REVERSE_FROM(ims, ip_msource_tree, nims) { lims = (struct in_msource *)ims; if (lims->imsl_st[0] == lims->imsl_st[1]) continue; (void)inm_get_source(inm, lims->ims_haddr, 1, &bims); if (bims == NULL) continue; ims_merge(bims, lims, 1); } goto out_reap; } CTR3(KTR_IGMPV3, "%s: imf filters in-mode: %d at t0, %d at t1", __func__, nsrc0, nsrc1); /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */ if (imf->imf_st[0] == imf->imf_st[1] && imf->imf_st[1] == MCAST_INCLUDE) { if (nsrc1 == 0) { CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__); --inm->inm_st[1].iss_in; } } /* Handle filter mode transition on socket. */ if (imf->imf_st[0] != imf->imf_st[1]) { CTR3(KTR_IGMPV3, "%s: imf transition %d to %d", __func__, imf->imf_st[0], imf->imf_st[1]); if (imf->imf_st[0] == MCAST_EXCLUDE) { CTR1(KTR_IGMPV3, "%s: --ex on inm at t1", __func__); --inm->inm_st[1].iss_ex; } else if (imf->imf_st[0] == MCAST_INCLUDE) { CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__); --inm->inm_st[1].iss_in; } if (imf->imf_st[1] == MCAST_EXCLUDE) { CTR1(KTR_IGMPV3, "%s: ex++ on inm at t1", __func__); inm->inm_st[1].iss_ex++; } else if (imf->imf_st[1] == MCAST_INCLUDE && nsrc1 > 0) { CTR1(KTR_IGMPV3, "%s: in++ on inm at t1", __func__); inm->inm_st[1].iss_in++; } } /* * Track inm filter state in terms of listener counts. * If there are any exclusive listeners, stack-wide * membership is exclusive. * Otherwise, if only inclusive listeners, stack-wide is inclusive. * If no listeners remain, state is undefined at t1, * and the IGMP lifecycle for this group should finish. */ if (inm->inm_st[1].iss_ex > 0) { CTR1(KTR_IGMPV3, "%s: transition to EX", __func__); inm->inm_st[1].iss_fmode = MCAST_EXCLUDE; } else if (inm->inm_st[1].iss_in > 0) { CTR1(KTR_IGMPV3, "%s: transition to IN", __func__); inm->inm_st[1].iss_fmode = MCAST_INCLUDE; } else { CTR1(KTR_IGMPV3, "%s: transition to UNDEF", __func__); inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; } /* Decrement ASM listener count on transition out of ASM mode. */ if (imf->imf_st[0] == MCAST_EXCLUDE && nsrc0 == 0) { if ((imf->imf_st[1] != MCAST_EXCLUDE) || (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) CTR1(KTR_IGMPV3, "%s: --asm on inm at t1", __func__); --inm->inm_st[1].iss_asm; } /* Increment ASM listener count on transition to ASM mode. */ if (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 == 0) { CTR1(KTR_IGMPV3, "%s: asm++ on inm at t1", __func__); inm->inm_st[1].iss_asm++; } CTR3(KTR_IGMPV3, "%s: merged imf %p to inm %p", __func__, imf, inm); inm_print(inm); out_reap: if (schanged > 0) { CTR1(KTR_IGMPV3, "%s: sources changed; reaping", __func__); inm_reap(inm); } return (error); } /* * Mark an in_multi's filter set deltas as committed. * Called by IGMP after a state change has been enqueued. */ void inm_commit(struct in_multi *inm) { struct ip_msource *ims; CTR2(KTR_IGMPV3, "%s: commit inm %p", __func__, inm); CTR1(KTR_IGMPV3, "%s: pre commit:", __func__); inm_print(inm); RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) { ims->ims_st[0] = ims->ims_st[1]; } inm->inm_st[0] = inm->inm_st[1]; } /* * Reap unreferenced nodes from an in_multi's filter set. 
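The listener-count bookkeeping at the end of inm_merge() above reduces to a three-way rule. Restated as a standalone function (a distillation for illustration, not the kernel code itself):

#include <netinet/in.h>
#include <stdio.h>

static int
stackwide_fmode(int iss_ex, int iss_in)
{
    if (iss_ex > 0)
        return (MCAST_EXCLUDE);     /* any exclusive listener wins */
    if (iss_in > 0)
        return (MCAST_INCLUDE);
    return (MCAST_UNDEFINED);       /* IGMP lifecycle may now finish */
}

int
main(void)
{
    printf("%d %d %d\n", stackwide_fmode(1, 3), stackwide_fmode(0, 2),
        stackwide_fmode(0, 0));     /* prints 2 1 0 */
    return (0);
}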
*/ static void inm_reap(struct in_multi *inm) { struct ip_msource *ims, *tims; RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) { if (ims->ims_st[0].ex > 0 || ims->ims_st[0].in > 0 || ims->ims_st[1].ex > 0 || ims->ims_st[1].in > 0 || ims->ims_stp != 0) continue; CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims); free(ims, M_IPMSOURCE); inm->inm_nsrc--; } } /* * Purge all source nodes from an in_multi's filter set. */ static void inm_purge(struct in_multi *inm) { struct ip_msource *ims, *tims; RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) { CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims); free(ims, M_IPMSOURCE); inm->inm_nsrc--; } } /* * Join a multicast group; unlocked entry point. * * SMPng: XXX: in_joingroup() is called from in_control() when Giant * is not held. Fortunately, ifp is unlikely to have been detached * at this point, so we assume it's OK to recurse. */ int in_joingroup(struct ifnet *ifp, const struct in_addr *gina, /*const*/ struct in_mfilter *imf, struct in_multi **pinm) { int error; IN_MULTI_LOCK(); error = in_joingroup_locked(ifp, gina, imf, pinm); IN_MULTI_UNLOCK(); return (error); } /* * Join a multicast group; real entry point. * * Only preserves atomicity at inm level. * NOTE: imf argument cannot be const due to sys/tree.h limitations. * * If the IGMP downcall fails, the group is not joined, and an error * code is returned. */ int in_joingroup_locked(struct ifnet *ifp, const struct in_addr *gina, /*const*/ struct in_mfilter *imf, struct in_multi **pinm) { struct in_mfilter timf; struct in_multi *inm; int error; IN_MULTI_LOCK_ASSERT(); CTR4(KTR_IGMPV3, "%s: join %s on %p(%s))", __func__, inet_ntoa(*gina), ifp, ifp->if_xname); error = 0; inm = NULL; /* * If no imf was specified (i.e. kernel consumer), * fake one up and assume it is an ASM join. */ if (imf == NULL) { imf_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE); imf = &timf; } error = in_getmulti(ifp, gina, &inm); if (error) { CTR1(KTR_IGMPV3, "%s: in_getmulti() failure", __func__); return (error); } CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); error = inm_merge(inm, imf); if (error) { CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); goto out_inm_release; } CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); error = igmp_change_state(inm); if (error) { CTR1(KTR_IGMPV3, "%s: failed to update source", __func__); goto out_inm_release; } out_inm_release: if (error) { CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm); inm_release_locked(inm); } else { *pinm = inm; } return (error); } /* * Leave a multicast group; unlocked entry point. */ int in_leavegroup(struct in_multi *inm, /*const*/ struct in_mfilter *imf) { int error; IN_MULTI_LOCK(); error = in_leavegroup_locked(inm, imf); IN_MULTI_UNLOCK(); return (error); } /* * Leave a multicast group; real entry point. * All source filters will be expunged. * * Only preserves atomicity at inm level. * * Holding the write lock for the INP which contains imf * is highly advisable. We can't assert for it as imf does not * contain a back-pointer to the owning inp. * * Note: This is not the same as inm_release(*) as this function also * makes a state change downcall into IGMP. 
*/ int in_leavegroup_locked(struct in_multi *inm, /*const*/ struct in_mfilter *imf) { struct in_mfilter timf; int error; error = 0; IN_MULTI_LOCK_ASSERT(); CTR5(KTR_IGMPV3, "%s: leave inm %p, %s/%s, imf %p", __func__, inm, inet_ntoa(inm->inm_addr), (inm_is_ifp_detached(inm) ? "null" : inm->inm_ifp->if_xname), imf); /* * If no imf was specified (i.e. kernel consumer), * fake one up and assume it is an ASM join. */ if (imf == NULL) { imf_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED); imf = &timf; } /* * Begin state merge transaction at IGMP layer. * * As this particular invocation should not cause any memory * to be allocated, and there is no opportunity to roll back * the transaction, it MUST NOT fail. */ CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); error = inm_merge(inm, imf); KASSERT(error == 0, ("%s: failed to merge inm state", __func__)); CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); CURVNET_SET(inm->inm_ifp->if_vnet); error = igmp_change_state(inm); CURVNET_RESTORE(); if (error) CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm); inm_release_locked(inm); return (error); } /*#ifndef BURN_BRIDGES*/ /* * Join an IPv4 multicast group in (*,G) exclusive mode. * The group must be a 224.0.0.0/24 link-scope group. * This KPI is for legacy kernel consumers only. */ struct in_multi * in_addmulti(struct in_addr *ap, struct ifnet *ifp) { struct in_multi *pinm; int error; KASSERT(IN_LOCAL_GROUP(ntohl(ap->s_addr)), ("%s: %s not in 224.0.0.0/24", __func__, inet_ntoa(*ap))); error = in_joingroup(ifp, ap, NULL, &pinm); if (error != 0) pinm = NULL; return (pinm); } /* * Leave an IPv4 multicast group, assumed to be in exclusive (*,G) mode. * This KPI is for legacy kernel consumers only. */ void in_delmulti(struct in_multi *inm) { (void)in_leavegroup(inm, NULL); } /*#endif*/ /* * Block or unblock an ASM multicast source on an inpcb. * This implements the delta-based API described in RFC 3678. * * The delta-based API applies only to exclusive-mode memberships. * An IGMP downcall will be performed. * * SMPng: NOTE: Must take Giant as a join may create a new ifma. * * Return 0 if successful, otherwise return an appropriate error code. 
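From userland, the delta-based RFC 3678 API that inp_block_unblock_source() implements below is driven with setsockopt(). A minimal sketch; the group and source addresses are examples, and the socket is assumed to have already joined the group via IP_ADD_MEMBERSHIP:

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    struct ip_mreq_source mreqs;
    int s = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&mreqs, 0, sizeof(mreqs));
    inet_pton(AF_INET, "239.1.1.1", &mreqs.imr_multiaddr);
    inet_pton(AF_INET, "192.0.2.7", &mreqs.imr_sourceaddr);
    mreqs.imr_interface.s_addr = htonl(INADDR_ANY);

    /* Mute one source on an existing exclusive-mode membership. */
    if (setsockopt(s, IPPROTO_IP, IP_BLOCK_SOURCE, &mreqs,
        sizeof(mreqs)) == -1)
        perror("IP_BLOCK_SOURCE");
    close(s);
    return (0);
}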
*/ static int inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) { struct group_source_req gsr; sockunion_t *gsa, *ssa; struct ifnet *ifp; struct in_mfilter *imf; struct ip_moptions *imo; struct in_msource *ims; struct in_multi *inm; size_t idx; uint16_t fmode; int error, doblock; ifp = NULL; error = 0; doblock = 0; memset(&gsr, 0, sizeof(struct group_source_req)); gsa = (sockunion_t *)&gsr.gsr_group; ssa = (sockunion_t *)&gsr.gsr_source; switch (sopt->sopt_name) { case IP_BLOCK_SOURCE: case IP_UNBLOCK_SOURCE: { struct ip_mreq_source mreqs; error = sooptcopyin(sopt, &mreqs, sizeof(struct ip_mreq_source), sizeof(struct ip_mreq_source)); if (error) return (error); gsa->sin.sin_family = AF_INET; gsa->sin.sin_len = sizeof(struct sockaddr_in); gsa->sin.sin_addr = mreqs.imr_multiaddr; ssa->sin.sin_family = AF_INET; ssa->sin.sin_len = sizeof(struct sockaddr_in); ssa->sin.sin_addr = mreqs.imr_sourceaddr; if (!in_nullhost(mreqs.imr_interface)) INADDR_TO_IFP(mreqs.imr_interface, ifp); if (sopt->sopt_name == IP_BLOCK_SOURCE) doblock = 1; CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p", __func__, inet_ntoa(mreqs.imr_interface), ifp); break; } case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: error = sooptcopyin(sopt, &gsr, sizeof(struct group_source_req), sizeof(struct group_source_req)); if (error) return (error); if (gsa->sin.sin_family != AF_INET || gsa->sin.sin_len != sizeof(struct sockaddr_in)) return (EINVAL); if (ssa->sin.sin_family != AF_INET || ssa->sin.sin_len != sizeof(struct sockaddr_in)) return (EINVAL); if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) return (EADDRNOTAVAIL); ifp = ifnet_byindex(gsr.gsr_interface); if (sopt->sopt_name == MCAST_BLOCK_SOURCE) doblock = 1; break; default: CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d", __func__, sopt->sopt_name); return (EOPNOTSUPP); break; } if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) return (EINVAL); /* * Check if we are actually a member of this group. */ imo = inp_findmoptions(inp); idx = imo_match_group(imo, ifp, &gsa->sa); if (idx == -1 || imo->imo_mfilters == NULL) { error = EADDRNOTAVAIL; goto out_inp_locked; } KASSERT(imo->imo_mfilters != NULL, ("%s: imo_mfilters not allocated", __func__)); imf = &imo->imo_mfilters[idx]; inm = imo->imo_membership[idx]; /* * Attempting to use the delta-based API on an * non exclusive-mode membership is an error. */ fmode = imf->imf_st[0]; if (fmode != MCAST_EXCLUDE) { error = EINVAL; goto out_inp_locked; } /* * Deal with error cases up-front: * Asked to block, but already blocked; or * Asked to unblock, but nothing to unblock. * If adding a new block entry, allocate it. */ ims = imo_match_source(imo, idx, &ssa->sa); if ((ims != NULL && doblock) || (ims == NULL && !doblock)) { CTR3(KTR_IGMPV3, "%s: source %s %spresent", __func__, inet_ntoa(ssa->sin.sin_addr), doblock ? "" : "not "); error = EADDRNOTAVAIL; goto out_inp_locked; } INP_WLOCK_ASSERT(inp); /* * Begin state merge transaction at socket layer. */ if (doblock) { CTR2(KTR_IGMPV3, "%s: %s source", __func__, "block"); ims = imf_graft(imf, fmode, &ssa->sin); if (ims == NULL) error = ENOMEM; } else { CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow"); error = imf_prune(imf, &ssa->sin); } if (error) { CTR1(KTR_IGMPV3, "%s: merge imf state failed", __func__); goto out_imf_rollback; } /* * Begin state merge transaction at IGMP layer. 
*/ IN_MULTI_LOCK(); CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); error = inm_merge(inm, imf); if (error) { CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); goto out_in_multi_locked; } CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); error = igmp_change_state(inm); if (error) CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); out_in_multi_locked: IN_MULTI_UNLOCK(); out_imf_rollback: if (error) imf_rollback(imf); else imf_commit(imf); imf_reap(imf); out_inp_locked: INP_WUNLOCK(inp); return (error); } /* * Given an inpcb, return its multicast options structure pointer. Accepts * an unlocked inpcb pointer, but will return it locked. May sleep. * * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. * SMPng: NOTE: Returns with the INP write lock held. */ static struct ip_moptions * inp_findmoptions(struct inpcb *inp) { struct ip_moptions *imo; struct in_multi **immp; struct in_mfilter *imfp; size_t idx; INP_WLOCK(inp); if (inp->inp_moptions != NULL) return (inp->inp_moptions); INP_WUNLOCK(inp); imo = malloc(sizeof(*imo), M_IPMOPTS, M_WAITOK); immp = malloc(sizeof(*immp) * IP_MIN_MEMBERSHIPS, M_IPMOPTS, M_WAITOK | M_ZERO); imfp = malloc(sizeof(struct in_mfilter) * IP_MIN_MEMBERSHIPS, M_INMFILTER, M_WAITOK); imo->imo_multicast_ifp = NULL; imo->imo_multicast_addr.s_addr = INADDR_ANY; imo->imo_multicast_vif = -1; imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; imo->imo_multicast_loop = in_mcast_loop; imo->imo_num_memberships = 0; imo->imo_max_memberships = IP_MIN_MEMBERSHIPS; imo->imo_membership = immp; /* Initialize per-group source filters. */ for (idx = 0; idx < IP_MIN_MEMBERSHIPS; idx++) imf_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); imo->imo_mfilters = imfp; INP_WLOCK(inp); if (inp->inp_moptions != NULL) { free(imfp, M_INMFILTER); free(immp, M_IPMOPTS); free(imo, M_IPMOPTS); return (inp->inp_moptions); } inp->inp_moptions = imo; return (imo); } /* * Discard the IP multicast options (and source filters). To minimize * the amount of work done while holding locks such as the INP's * pcbinfo lock (which is used in the receive path), the free * operation is performed asynchronously in a separate task. * * SMPng: NOTE: assumes INP write lock is held. */ void inp_freemoptions(struct ip_moptions *imo) { KASSERT(imo != NULL, ("%s: ip_moptions is NULL", __func__)); IN_MULTI_LOCK(); STAILQ_INSERT_TAIL(&imo_gc_list, imo, imo_link); IN_MULTI_UNLOCK(); taskqueue_enqueue(taskqueue_thread, &imo_gc_task); } static void inp_freemoptions_internal(struct ip_moptions *imo) { struct in_mfilter *imf; size_t idx, nmships; nmships = imo->imo_num_memberships; for (idx = 0; idx < nmships; ++idx) { imf = imo->imo_mfilters ? &imo->imo_mfilters[idx] : NULL; if (imf) imf_leave(imf); (void)in_leavegroup(imo->imo_membership[idx], imf); if (imf) imf_purge(imf); } if (imo->imo_mfilters) free(imo->imo_mfilters, M_INMFILTER); free(imo->imo_membership, M_IPMOPTS); free(imo, M_IPMOPTS); } static void inp_gcmoptions(void *context, int pending) { struct ip_moptions *imo; IN_MULTI_LOCK(); while (!STAILQ_EMPTY(&imo_gc_list)) { imo = STAILQ_FIRST(&imo_gc_list); STAILQ_REMOVE_HEAD(&imo_gc_list, imo_link); IN_MULTI_UNLOCK(); inp_freemoptions_internal(imo); IN_MULTI_LOCK(); } IN_MULTI_UNLOCK(); } /* * Atomically get source filters on a socket for an IPv4 multicast group. * Called with INP lock held; returns with lock released. 
*/ static int inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) { struct __msfilterreq msfr; sockunion_t *gsa; struct ifnet *ifp; struct ip_moptions *imo; struct in_mfilter *imf; struct ip_msource *ims; struct in_msource *lims; struct sockaddr_in *psin; struct sockaddr_storage *ptss; struct sockaddr_storage *tss; int error; size_t idx, nsrcs, ncsrcs; INP_WLOCK_ASSERT(inp); imo = inp->inp_moptions; KASSERT(imo != NULL, ("%s: null ip_moptions", __func__)); INP_WUNLOCK(inp); error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), sizeof(struct __msfilterreq)); if (error) return (error); if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) return (EINVAL); ifp = ifnet_byindex(msfr.msfr_ifindex); if (ifp == NULL) return (EINVAL); INP_WLOCK(inp); /* * Lookup group on the socket. */ gsa = (sockunion_t *)&msfr.msfr_group; idx = imo_match_group(imo, ifp, &gsa->sa); if (idx == -1 || imo->imo_mfilters == NULL) { INP_WUNLOCK(inp); return (EADDRNOTAVAIL); } imf = &imo->imo_mfilters[idx]; /* * Ignore memberships which are in limbo. */ if (imf->imf_st[1] == MCAST_UNDEFINED) { INP_WUNLOCK(inp); return (EAGAIN); } msfr.msfr_fmode = imf->imf_st[1]; /* * If the user specified a buffer, copy out the source filter * entries to userland gracefully. * We only copy out the number of entries which userland * has asked for, but we always tell userland how big the * buffer really needs to be. */ if (msfr.msfr_nsrcs > in_mcast_maxsocksrc) msfr.msfr_nsrcs = in_mcast_maxsocksrc; tss = NULL; if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) { tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, M_TEMP, M_NOWAIT | M_ZERO); if (tss == NULL) { INP_WUNLOCK(inp); return (ENOBUFS); } } /* * Count number of sources in-mode at t0. * If buffer space exists and remains, copy out source entries. */ nsrcs = msfr.msfr_nsrcs; ncsrcs = 0; ptss = tss; RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { lims = (struct in_msource *)ims; if (lims->imsl_st[0] == MCAST_UNDEFINED || lims->imsl_st[0] != imf->imf_st[0]) continue; ++ncsrcs; if (tss != NULL && nsrcs > 0) { psin = (struct sockaddr_in *)ptss; psin->sin_family = AF_INET; psin->sin_len = sizeof(struct sockaddr_in); psin->sin_addr.s_addr = htonl(lims->ims_haddr); psin->sin_port = 0; ++ptss; --nsrcs; } } INP_WUNLOCK(inp); if (tss != NULL) { error = copyout(tss, msfr.msfr_srcs, sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); free(tss, M_TEMP); if (error) return (error); } msfr.msfr_nsrcs = ncsrcs; error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq)); return (error); } /* * Return the IP multicast options in response to user getsockopt(). */ int inp_getmoptions(struct inpcb *inp, struct sockopt *sopt) { struct rm_priotracker in_ifa_tracker; struct ip_mreqn mreqn; struct ip_moptions *imo; struct ifnet *ifp; struct in_ifaddr *ia; int error, optval; u_char coptval; INP_WLOCK(inp); imo = inp->inp_moptions; /* * If socket is neither of type SOCK_RAW or SOCK_DGRAM, * or is a divert socket, reject it. 
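The sizing contract in inp_get_source_filters() above (it "always tell[s] userland how big the buffer really needs to be") is what lets callers probe with a zero-length source list before allocating. A hedged userland sketch via the getsourcefilter(3) wrapper; the interface name and group are illustrative:

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    struct sockaddr_in grp;
    uint32_t fmode, nsrcs = 0;
    int s = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&grp, 0, sizeof(grp));
    grp.sin_family = AF_INET;
    grp.sin_len = sizeof(grp);          /* BSD sockaddr convention */
    inet_pton(AF_INET, "239.1.1.1", &grp.sin_addr);

    /*
     * With nsrcs == 0 nothing is copied out, but the kernel still
     * reports the true count, so a correctly sized second call can
     * follow.
     */
    if (getsourcefilter(s, if_nametoindex("ue0"), (struct sockaddr *)&grp,
        sizeof(grp), &fmode, &nsrcs, NULL) == 0)
        printf("fmode %u, %u sources\n", fmode, nsrcs);
    else
        perror("getsourcefilter");
    return (0);
}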
*/ if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || (inp->inp_socket->so_proto->pr_type != SOCK_RAW && inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) { INP_WUNLOCK(inp); return (EOPNOTSUPP); } error = 0; switch (sopt->sopt_name) { case IP_MULTICAST_VIF: if (imo != NULL) optval = imo->imo_multicast_vif; else optval = -1; INP_WUNLOCK(inp); error = sooptcopyout(sopt, &optval, sizeof(int)); break; case IP_MULTICAST_IF: memset(&mreqn, 0, sizeof(struct ip_mreqn)); if (imo != NULL) { ifp = imo->imo_multicast_ifp; if (!in_nullhost(imo->imo_multicast_addr)) { mreqn.imr_address = imo->imo_multicast_addr; } else if (ifp != NULL) { mreqn.imr_ifindex = ifp->if_index; IFP_TO_IA(ifp, ia, &in_ifa_tracker); if (ia != NULL) { mreqn.imr_address = IA_SIN(ia)->sin_addr; ifa_free(&ia->ia_ifa); } } } INP_WUNLOCK(inp); if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) { error = sooptcopyout(sopt, &mreqn, sizeof(struct ip_mreqn)); } else { error = sooptcopyout(sopt, &mreqn.imr_address, sizeof(struct in_addr)); } break; case IP_MULTICAST_TTL: if (imo == 0) optval = coptval = IP_DEFAULT_MULTICAST_TTL; else optval = coptval = imo->imo_multicast_ttl; INP_WUNLOCK(inp); if (sopt->sopt_valsize == sizeof(u_char)) error = sooptcopyout(sopt, &coptval, sizeof(u_char)); else error = sooptcopyout(sopt, &optval, sizeof(int)); break; case IP_MULTICAST_LOOP: if (imo == 0) optval = coptval = IP_DEFAULT_MULTICAST_LOOP; else optval = coptval = imo->imo_multicast_loop; INP_WUNLOCK(inp); if (sopt->sopt_valsize == sizeof(u_char)) error = sooptcopyout(sopt, &coptval, sizeof(u_char)); else error = sooptcopyout(sopt, &optval, sizeof(int)); break; case IP_MSFILTER: if (imo == NULL) { error = EADDRNOTAVAIL; INP_WUNLOCK(inp); } else { error = inp_get_source_filters(inp, sopt); } break; default: INP_WUNLOCK(inp); error = ENOPROTOOPT; break; } INP_UNLOCK_ASSERT(inp); return (error); } /* * Look up the ifnet to use for a multicast group membership, * given the IPv4 address of an interface, and the IPv4 group address. * * This routine exists to support legacy multicast applications * which do not understand that multicast memberships are scoped to * specific physical links in the networking stack, or which need * to join link-scope groups before IPv4 addresses are configured. * * If inp is non-NULL, use this socket's current FIB number for any * required FIB lookup. * If ina is INADDR_ANY, look up the group address in the unicast FIB, * and use its ifp; usually, this points to the default next-hop. * * If the FIB lookup fails, attempt to use the first non-loopback * interface with multicast capability in the system as a * last resort. The legacy IPv4 ASM API requires that we do * this in order to allow groups to be joined when the routing * table has not yet been populated during boot. * * Returns NULL if no ifp could be found. * * SMPng: TODO: Acquire the appropriate locks for INADDR_TO_IFP. * FUTURE: Implement IPv4 source-address selection. */ static struct ifnet * inp_lookup_mcast_ifp(const struct inpcb *inp, const struct sockaddr_in *gsin, const struct in_addr ina) { struct rm_priotracker in_ifa_tracker; struct ifnet *ifp; KASSERT(gsin->sin_family == AF_INET, ("%s: not AF_INET", __func__)); KASSERT(IN_MULTICAST(ntohl(gsin->sin_addr.s_addr)), ("%s: not multicast", __func__)); ifp = NULL; if (!in_nullhost(ina)) { INADDR_TO_IFP(ina, ifp); } else { struct route ro; ro.ro_rt = NULL; memcpy(&ro.ro_dst, gsin, sizeof(struct sockaddr_in)); in_rtalloc_ign(&ro, 0, inp ? 
inp->inp_inc.inc_fibnum : 0); if (ro.ro_rt != NULL) { ifp = ro.ro_rt->rt_ifp; KASSERT(ifp != NULL, ("%s: null ifp", __func__)); RTFREE(ro.ro_rt); } else { struct in_ifaddr *ia; struct ifnet *mifp; mifp = NULL; IN_IFADDR_RLOCK(&in_ifa_tracker); TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) { mifp = ia->ia_ifp; if (!(mifp->if_flags & IFF_LOOPBACK) && (mifp->if_flags & IFF_MULTICAST)) { ifp = mifp; break; } } IN_IFADDR_RUNLOCK(&in_ifa_tracker); } } return (ifp); } /* * Join an IPv4 multicast group, possibly with a source. */ static int inp_join_group(struct inpcb *inp, struct sockopt *sopt) { struct group_source_req gsr; sockunion_t *gsa, *ssa; struct ifnet *ifp; struct in_mfilter *imf; struct ip_moptions *imo; struct in_multi *inm; struct in_msource *lims; size_t idx; int error, is_new; ifp = NULL; imf = NULL; lims = NULL; error = 0; is_new = 0; memset(&gsr, 0, sizeof(struct group_source_req)); gsa = (sockunion_t *)&gsr.gsr_group; gsa->ss.ss_family = AF_UNSPEC; ssa = (sockunion_t *)&gsr.gsr_source; ssa->ss.ss_family = AF_UNSPEC; switch (sopt->sopt_name) { case IP_ADD_MEMBERSHIP: case IP_ADD_SOURCE_MEMBERSHIP: { struct ip_mreq_source mreqs; if (sopt->sopt_name == IP_ADD_MEMBERSHIP) { error = sooptcopyin(sopt, &mreqs, sizeof(struct ip_mreq), sizeof(struct ip_mreq)); /* * Do argument switcharoo from ip_mreq into * ip_mreq_source to avoid using two instances. */ mreqs.imr_interface = mreqs.imr_sourceaddr; mreqs.imr_sourceaddr.s_addr = INADDR_ANY; } else if (sopt->sopt_name == IP_ADD_SOURCE_MEMBERSHIP) { error = sooptcopyin(sopt, &mreqs, sizeof(struct ip_mreq_source), sizeof(struct ip_mreq_source)); } if (error) return (error); gsa->sin.sin_family = AF_INET; gsa->sin.sin_len = sizeof(struct sockaddr_in); gsa->sin.sin_addr = mreqs.imr_multiaddr; if (sopt->sopt_name == IP_ADD_SOURCE_MEMBERSHIP) { ssa->sin.sin_family = AF_INET; ssa->sin.sin_len = sizeof(struct sockaddr_in); ssa->sin.sin_addr = mreqs.imr_sourceaddr; } if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) return (EINVAL); ifp = inp_lookup_mcast_ifp(inp, &gsa->sin, mreqs.imr_interface); CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p", __func__, inet_ntoa(mreqs.imr_interface), ifp); break; } case MCAST_JOIN_GROUP: case MCAST_JOIN_SOURCE_GROUP: if (sopt->sopt_name == MCAST_JOIN_GROUP) { error = sooptcopyin(sopt, &gsr, sizeof(struct group_req), sizeof(struct group_req)); } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { error = sooptcopyin(sopt, &gsr, sizeof(struct group_source_req), sizeof(struct group_source_req)); } if (error) return (error); if (gsa->sin.sin_family != AF_INET || gsa->sin.sin_len != sizeof(struct sockaddr_in)) return (EINVAL); /* * Overwrite the port field if present, as the sockaddr * being copied in may be matched with a binary comparison. 
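 * For example, a group_source_req copied in from userland may carry
 * whatever happened to be in the caller's sin_port; two otherwise
 * identical group sockaddrs would then compare unequal byte-for-byte,
 * so the port is cleared before any lookup can see it.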
*/ gsa->sin.sin_port = 0; if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { if (ssa->sin.sin_family != AF_INET || ssa->sin.sin_len != sizeof(struct sockaddr_in)) return (EINVAL); ssa->sin.sin_port = 0; } if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) return (EINVAL); if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) return (EADDRNOTAVAIL); ifp = ifnet_byindex(gsr.gsr_interface); break; default: CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d", __func__, sopt->sopt_name); return (EOPNOTSUPP); break; } if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) return (EADDRNOTAVAIL); imo = inp_findmoptions(inp); idx = imo_match_group(imo, ifp, &gsa->sa); if (idx == -1) { is_new = 1; } else { inm = imo->imo_membership[idx]; imf = &imo->imo_mfilters[idx]; if (ssa->ss.ss_family != AF_UNSPEC) { /* * MCAST_JOIN_SOURCE_GROUP on an exclusive membership * is an error. On an existing inclusive membership, * it just adds the source to the filter list. */ if (imf->imf_st[1] != MCAST_INCLUDE) { error = EINVAL; goto out_inp_locked; } /* * Throw out duplicates. * * XXX FIXME: This makes a naive assumption that * even if entries exist for *ssa in this imf, * they will be rejected as dupes, even if they * are not valid in the current mode (in-mode). * * in_msource is transactioned just as for anything * else in SSM -- but note naive use of inm_graft() * below for allocating new filter entries. * * This is only an issue if someone mixes the * full-state SSM API with the delta-based API, * which is discouraged in the relevant RFCs. */ lims = imo_match_source(imo, idx, &ssa->sa); if (lims != NULL /*&& lims->imsl_st[1] == MCAST_INCLUDE*/) { error = EADDRNOTAVAIL; goto out_inp_locked; } } else { /* * MCAST_JOIN_GROUP on an existing exclusive * membership is an error; return EADDRINUSE * to preserve 4.4BSD API idempotence, and * avoid tedious detour to code below. * NOTE: This is bending RFC 3678 a bit. * * On an existing inclusive membership, this is also * an error; if you want to change filter mode, * you must use the userland API setsourcefilter(). * XXX We don't reject this for imf in UNDEFINED * state at t1, because allocation of a filter * is atomic with allocation of a membership. */ error = EINVAL; if (imf->imf_st[1] == MCAST_EXCLUDE) error = EADDRINUSE; goto out_inp_locked; } } /* * Begin state merge transaction at socket layer. */ INP_WLOCK_ASSERT(inp); if (is_new) { if (imo->imo_num_memberships == imo->imo_max_memberships) { error = imo_grow(imo); if (error) goto out_inp_locked; } /* * Allocate the new slot upfront so we can deal with * grafting the new source filter in same code path * as for join-source on existing membership. */ idx = imo->imo_num_memberships; imo->imo_membership[idx] = NULL; imo->imo_num_memberships++; KASSERT(imo->imo_mfilters != NULL, ("%s: imf_mfilters vector was not allocated", __func__)); imf = &imo->imo_mfilters[idx]; KASSERT(RB_EMPTY(&imf->imf_sources), ("%s: imf_sources not empty", __func__)); } /* * Graft new source into filter list for this inpcb's * membership of the group. The in_multi may not have * been allocated yet if this is a new membership, however, * the in_mfilter slot will be allocated and must be initialized. * * Note: Grafting of exclusive mode filters doesn't happen * in this path. * XXX: Should check for non-NULL lims (node exists but may * not be in-mode) for interop with full-state API. 
*/ if (ssa->ss.ss_family != AF_UNSPEC) { /* Membership starts in IN mode */ if (is_new) { CTR1(KTR_IGMPV3, "%s: new join w/source", __func__); imf_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE); } else { CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow"); } lims = imf_graft(imf, MCAST_INCLUDE, &ssa->sin); if (lims == NULL) { CTR1(KTR_IGMPV3, "%s: merge imf state failed", __func__); error = ENOMEM; goto out_imo_free; } } else { /* No address specified; Membership starts in EX mode */ if (is_new) { CTR1(KTR_IGMPV3, "%s: new join w/o source", __func__); imf_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE); } } /* * Begin state merge transaction at IGMP layer. */ IN_MULTI_LOCK(); if (is_new) { error = in_joingroup_locked(ifp, &gsa->sin.sin_addr, imf, &inm); if (error) { CTR1(KTR_IGMPV3, "%s: in_joingroup_locked failed", __func__); IN_MULTI_UNLOCK(); goto out_imo_free; } imo->imo_membership[idx] = inm; } else { CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); error = inm_merge(inm, imf); if (error) { CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); goto out_in_multi_locked; } CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); error = igmp_change_state(inm); if (error) { CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); goto out_in_multi_locked; } } out_in_multi_locked: IN_MULTI_UNLOCK(); INP_WLOCK_ASSERT(inp); if (error) { imf_rollback(imf); if (is_new) imf_purge(imf); else imf_reap(imf); } else { imf_commit(imf); } out_imo_free: if (error && is_new) { imo->imo_membership[idx] = NULL; --imo->imo_num_memberships; } out_inp_locked: INP_WUNLOCK(inp); return (error); } /* * Leave an IPv4 multicast group on an inpcb, possibly with a source. */ static int inp_leave_group(struct inpcb *inp, struct sockopt *sopt) { struct group_source_req gsr; struct ip_mreq_source mreqs; sockunion_t *gsa, *ssa; struct ifnet *ifp; struct in_mfilter *imf; struct ip_moptions *imo; struct in_msource *ims; struct in_multi *inm; size_t idx; int error, is_final; ifp = NULL; error = 0; is_final = 1; memset(&gsr, 0, sizeof(struct group_source_req)); gsa = (sockunion_t *)&gsr.gsr_group; gsa->ss.ss_family = AF_UNSPEC; ssa = (sockunion_t *)&gsr.gsr_source; ssa->ss.ss_family = AF_UNSPEC; switch (sopt->sopt_name) { case IP_DROP_MEMBERSHIP: case IP_DROP_SOURCE_MEMBERSHIP: if (sopt->sopt_name == IP_DROP_MEMBERSHIP) { error = sooptcopyin(sopt, &mreqs, sizeof(struct ip_mreq), sizeof(struct ip_mreq)); /* * Swap interface and sourceaddr arguments, * as ip_mreq and ip_mreq_source are laid * out differently. */ mreqs.imr_interface = mreqs.imr_sourceaddr; mreqs.imr_sourceaddr.s_addr = INADDR_ANY; } else if (sopt->sopt_name == IP_DROP_SOURCE_MEMBERSHIP) { error = sooptcopyin(sopt, &mreqs, sizeof(struct ip_mreq_source), sizeof(struct ip_mreq_source)); } if (error) return (error); gsa->sin.sin_family = AF_INET; gsa->sin.sin_len = sizeof(struct sockaddr_in); gsa->sin.sin_addr = mreqs.imr_multiaddr; if (sopt->sopt_name == IP_DROP_SOURCE_MEMBERSHIP) { ssa->sin.sin_family = AF_INET; ssa->sin.sin_len = sizeof(struct sockaddr_in); ssa->sin.sin_addr = mreqs.imr_sourceaddr; } /* * Attempt to look up hinted ifp from interface address. * Fallthrough with null ifp iff lookup fails, to * preserve 4.4BSD mcast API idempotence. * XXX NOTE WELL: The RFC 3678 API is preferred because * using an IPv4 address as a key is racy. 
*/ if (!in_nullhost(mreqs.imr_interface)) INADDR_TO_IFP(mreqs.imr_interface, ifp); CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p", __func__, inet_ntoa(mreqs.imr_interface), ifp); break; case MCAST_LEAVE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: if (sopt->sopt_name == MCAST_LEAVE_GROUP) { error = sooptcopyin(sopt, &gsr, sizeof(struct group_req), sizeof(struct group_req)); } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { error = sooptcopyin(sopt, &gsr, sizeof(struct group_source_req), sizeof(struct group_source_req)); } if (error) return (error); if (gsa->sin.sin_family != AF_INET || gsa->sin.sin_len != sizeof(struct sockaddr_in)) return (EINVAL); if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { if (ssa->sin.sin_family != AF_INET || ssa->sin.sin_len != sizeof(struct sockaddr_in)) return (EINVAL); } if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) return (EADDRNOTAVAIL); ifp = ifnet_byindex(gsr.gsr_interface); if (ifp == NULL) return (EADDRNOTAVAIL); break; default: CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d", __func__, sopt->sopt_name); return (EOPNOTSUPP); break; } if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) return (EINVAL); /* * Find the membership in the membership array. */ imo = inp_findmoptions(inp); idx = imo_match_group(imo, ifp, &gsa->sa); if (idx == -1) { error = EADDRNOTAVAIL; goto out_inp_locked; } inm = imo->imo_membership[idx]; imf = &imo->imo_mfilters[idx]; if (ssa->ss.ss_family != AF_UNSPEC) is_final = 0; /* * Begin state merge transaction at socket layer. */ INP_WLOCK_ASSERT(inp); /* * If we were instructed only to leave a given source, do so. * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships. */ if (is_final) { imf_leave(imf); } else { if (imf->imf_st[0] == MCAST_EXCLUDE) { error = EADDRNOTAVAIL; goto out_inp_locked; } ims = imo_match_source(imo, idx, &ssa->sa); if (ims == NULL) { CTR3(KTR_IGMPV3, "%s: source %s %spresent", __func__, inet_ntoa(ssa->sin.sin_addr), "not "); error = EADDRNOTAVAIL; goto out_inp_locked; } CTR2(KTR_IGMPV3, "%s: %s source", __func__, "block"); error = imf_prune(imf, &ssa->sin); if (error) { CTR1(KTR_IGMPV3, "%s: merge imf state failed", __func__); goto out_inp_locked; } } /* * Begin state merge transaction at IGMP layer. */ IN_MULTI_LOCK(); if (is_final) { /* * Give up the multicast address record to which * the membership points. */ (void)in_leavegroup_locked(inm, imf); } else { CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); error = inm_merge(inm, imf); if (error) { CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); goto out_in_multi_locked; } CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); error = igmp_change_state(inm); if (error) { CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); } } out_in_multi_locked: IN_MULTI_UNLOCK(); if (error) imf_rollback(imf); else imf_commit(imf); imf_reap(imf); if (is_final) { /* Remove the gap in the membership and filter array. */ for (++idx; idx < imo->imo_num_memberships; ++idx) { imo->imo_membership[idx-1] = imo->imo_membership[idx]; imo->imo_mfilters[idx-1] = imo->imo_mfilters[idx]; } imo->imo_num_memberships--; } out_inp_locked: INP_WUNLOCK(inp); return (error); } /* * Select the interface for transmitting IPv4 multicast datagrams. * * Either an instance of struct in_addr or an instance of struct ip_mreqn * may be passed to this socket option. An address of INADDR_ANY or an * interface index of 0 is used to remove a previous selection. * When no interface is selected, one is chosen for every send. 
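 *
 * A minimal userland sketch of the two accepted argument forms
 * (socket s, interface index, and address are hypothetical):
 *
 *	struct ip_mreqn mreqn = { .imr_ifindex = 2 };
 *	setsockopt(s, IPPROTO_IP, IP_MULTICAST_IF, &mreqn, sizeof(mreqn));
 *
 *	struct in_addr ina = { .s_addr = inet_addr("192.0.2.1") };
 *	setsockopt(s, IPPROTO_IP, IP_MULTICAST_IF, &ina, sizeof(ina));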
*/ static int inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) { struct in_addr addr; struct ip_mreqn mreqn; struct ifnet *ifp; struct ip_moptions *imo; int error; if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) { /* * An interface index was specified using the * Linux-derived ip_mreqn structure. */ error = sooptcopyin(sopt, &mreqn, sizeof(struct ip_mreqn), sizeof(struct ip_mreqn)); if (error) return (error); if (mreqn.imr_ifindex < 0 || V_if_index < mreqn.imr_ifindex) return (EINVAL); if (mreqn.imr_ifindex == 0) { ifp = NULL; } else { ifp = ifnet_byindex(mreqn.imr_ifindex); if (ifp == NULL) return (EADDRNOTAVAIL); } } else { /* * An interface was specified by IPv4 address. * This is the traditional BSD usage. */ error = sooptcopyin(sopt, &addr, sizeof(struct in_addr), sizeof(struct in_addr)); if (error) return (error); if (in_nullhost(addr)) { ifp = NULL; } else { INADDR_TO_IFP(addr, ifp); if (ifp == NULL) return (EADDRNOTAVAIL); } CTR3(KTR_IGMPV3, "%s: ifp = %p, addr = %s", __func__, ifp, inet_ntoa(addr)); } /* Reject interfaces which do not support multicast. */ if (ifp != NULL && (ifp->if_flags & IFF_MULTICAST) == 0) return (EOPNOTSUPP); imo = inp_findmoptions(inp); imo->imo_multicast_ifp = ifp; imo->imo_multicast_addr.s_addr = INADDR_ANY; INP_WUNLOCK(inp); return (0); } /* * Atomically set source filters on a socket for an IPv4 multicast group. * * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. */ static int inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt) { struct __msfilterreq msfr; sockunion_t *gsa; struct ifnet *ifp; struct in_mfilter *imf; struct ip_moptions *imo; struct in_multi *inm; size_t idx; int error; error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), sizeof(struct __msfilterreq)); if (error) return (error); if (msfr.msfr_nsrcs > in_mcast_maxsocksrc) return (ENOBUFS); if ((msfr.msfr_fmode != MCAST_EXCLUDE && msfr.msfr_fmode != MCAST_INCLUDE)) return (EINVAL); if (msfr.msfr_group.ss_family != AF_INET || msfr.msfr_group.ss_len != sizeof(struct sockaddr_in)) return (EINVAL); gsa = (sockunion_t *)&msfr.msfr_group; if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) return (EINVAL); gsa->sin.sin_port = 0; /* ignore port */ if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) return (EADDRNOTAVAIL); ifp = ifnet_byindex(msfr.msfr_ifindex); if (ifp == NULL) return (EADDRNOTAVAIL); /* * Take the INP write lock. * Check if this socket is a member of this group. */ imo = inp_findmoptions(inp); idx = imo_match_group(imo, ifp, &gsa->sa); if (idx == -1 || imo->imo_mfilters == NULL) { error = EADDRNOTAVAIL; goto out_inp_locked; } inm = imo->imo_membership[idx]; imf = &imo->imo_mfilters[idx]; /* * Begin state merge transaction at socket layer. */ INP_WLOCK_ASSERT(inp); imf->imf_st[1] = msfr.msfr_fmode; /* * Apply any new source filters, if present. * Make a copy of the user-space source vector so * that we may copy them with a single copyin. This * allows us to deal with page faults up-front. */ if (msfr.msfr_nsrcs > 0) { struct in_msource *lims; struct sockaddr_in *psin; struct sockaddr_storage *kss, *pkss; int i; INP_WUNLOCK(inp); CTR2(KTR_IGMPV3, "%s: loading %lu source list entries", __func__, (unsigned long)msfr.msfr_nsrcs); kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, M_TEMP, M_WAITOK); error = copyin(msfr.msfr_srcs, kss, sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); if (error) { free(kss, M_TEMP); return (error); } INP_WLOCK(inp); /* * Mark all source filters as UNDEFINED at t1. 
* Restore new group filter mode, as imf_leave() * will set it to INCLUDE. */ imf_leave(imf); imf->imf_st[1] = msfr.msfr_fmode; /* * Update socket layer filters at t1, lazy-allocating * new entries. This saves a bunch of memory at the * cost of one RB_FIND() per source entry; duplicate * entries in the msfr_nsrcs vector are ignored. * If we encounter an error, rollback transaction. * * XXX This too could be replaced with a set-symmetric * difference like loop to avoid walking from root * every time, as the key space is common. */ for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) { psin = (struct sockaddr_in *)pkss; if (psin->sin_family != AF_INET) { error = EAFNOSUPPORT; break; } if (psin->sin_len != sizeof(struct sockaddr_in)) { error = EINVAL; break; } error = imf_get_source(imf, psin, &lims); if (error) break; lims->imsl_st[1] = imf->imf_st[1]; } free(kss, M_TEMP); } if (error) goto out_imf_rollback; INP_WLOCK_ASSERT(inp); IN_MULTI_LOCK(); /* * Begin state merge transaction at IGMP layer. */ CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); error = inm_merge(inm, imf); if (error) { CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); goto out_in_multi_locked; } CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); error = igmp_change_state(inm); if (error) CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); out_in_multi_locked: IN_MULTI_UNLOCK(); out_imf_rollback: if (error) imf_rollback(imf); else imf_commit(imf); imf_reap(imf); out_inp_locked: INP_WUNLOCK(inp); return (error); } /* * Set the IP multicast options in response to user setsockopt(). * * Many of the socket options handled in this function duplicate the * functionality of socket options in the regular unicast API. However, * it is not possible to merge the duplicate code, because the idempotence * of the IPv4 multicast part of the BSD Sockets API must be preserved; * the effects of these options must be treated as separate and distinct. * * SMPng: XXX: Unlocked read of inp_socket believed OK. * FUTURE: The IP_MULTICAST_VIF option may be eliminated if MROUTING * is refactored to no longer use vifs. */ int inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) { struct ip_moptions *imo; int error; error = 0; /* * If socket is neither of type SOCK_RAW or SOCK_DGRAM, * or is a divert socket, reject it. */ if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || (inp->inp_socket->so_proto->pr_type != SOCK_RAW && inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) return (EOPNOTSUPP); switch (sopt->sopt_name) { case IP_MULTICAST_VIF: { int vifi; /* * Select a multicast VIF for transmission. * Only useful if multicast forwarding is active. */ if (legal_vif_num == NULL) { error = EOPNOTSUPP; break; } error = sooptcopyin(sopt, &vifi, sizeof(int), sizeof(int)); if (error) break; if (!legal_vif_num(vifi) && (vifi != -1)) { error = EINVAL; break; } imo = inp_findmoptions(inp); imo->imo_multicast_vif = vifi; INP_WUNLOCK(inp); break; } case IP_MULTICAST_IF: error = inp_set_multicast_if(inp, sopt); break; case IP_MULTICAST_TTL: { u_char ttl; /* * Set the IP time-to-live for outgoing multicast packets. * The original multicast API required a char argument, * which is inconsistent with the rest of the socket API. * We allow either a char or an int. 
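 * For example, both of these userland calls are accepted here
 * (socket s and the TTL value are illustrative):
 *
 *	u_char cttl = 32;
 *	setsockopt(s, IPPROTO_IP, IP_MULTICAST_TTL, &cttl, sizeof(cttl));
 *
 *	int ittl = 32;
 *	setsockopt(s, IPPROTO_IP, IP_MULTICAST_TTL, &ittl, sizeof(ittl));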
*/ if (sopt->sopt_valsize == sizeof(u_char)) { error = sooptcopyin(sopt, &ttl, sizeof(u_char), sizeof(u_char)); if (error) break; } else { u_int ittl; error = sooptcopyin(sopt, &ittl, sizeof(u_int), sizeof(u_int)); if (error) break; if (ittl > 255) { error = EINVAL; break; } ttl = (u_char)ittl; } imo = inp_findmoptions(inp); imo->imo_multicast_ttl = ttl; INP_WUNLOCK(inp); break; } case IP_MULTICAST_LOOP: { u_char loop; /* * Set the loopback flag for outgoing multicast packets. * Must be zero or one. The original multicast API required a * char argument, which is inconsistent with the rest * of the socket API. We allow either a char or an int. */ if (sopt->sopt_valsize == sizeof(u_char)) { error = sooptcopyin(sopt, &loop, sizeof(u_char), sizeof(u_char)); if (error) break; } else { u_int iloop; error = sooptcopyin(sopt, &iloop, sizeof(u_int), sizeof(u_int)); if (error) break; loop = (u_char)iloop; } imo = inp_findmoptions(inp); imo->imo_multicast_loop = !!loop; INP_WUNLOCK(inp); break; } case IP_ADD_MEMBERSHIP: case IP_ADD_SOURCE_MEMBERSHIP: case MCAST_JOIN_GROUP: case MCAST_JOIN_SOURCE_GROUP: error = inp_join_group(inp, sopt); break; case IP_DROP_MEMBERSHIP: case IP_DROP_SOURCE_MEMBERSHIP: case MCAST_LEAVE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: error = inp_leave_group(inp, sopt); break; case IP_BLOCK_SOURCE: case IP_UNBLOCK_SOURCE: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: error = inp_block_unblock_source(inp, sopt); break; case IP_MSFILTER: error = inp_set_source_filters(inp, sopt); break; default: error = EOPNOTSUPP; break; } INP_UNLOCK_ASSERT(inp); return (error); } /* * Expose IGMP's multicast filter mode and source list(s) to userland, * keyed by (ifindex, group). * The filter mode is written out as a uint32_t, followed by * 0..n of struct in_addr. * For use by ifmcstat(8). * SMPng: NOTE: unlocked read of ifindex space. */ static int sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS) { struct in_addr src, group; struct ifnet *ifp; struct ifmultiaddr *ifma; struct in_multi *inm; struct ip_msource *ims; int *name; int retval; u_int namelen; uint32_t fmode, ifindex; name = (int *)arg1; namelen = arg2; if (req->newptr != NULL) return (EPERM); if (namelen != 2) return (EINVAL); ifindex = name[0]; if (ifindex <= 0 || ifindex > V_if_index) { CTR2(KTR_IGMPV3, "%s: ifindex %u out of range", __func__, ifindex); return (ENOENT); } group.s_addr = name[1]; if (!IN_MULTICAST(ntohl(group.s_addr))) { CTR2(KTR_IGMPV3, "%s: group %s is not multicast", __func__, inet_ntoa(group)); return (EINVAL); } ifp = ifnet_byindex(ifindex); if (ifp == NULL) { CTR2(KTR_IGMPV3, "%s: no ifp for ifindex %u", __func__, ifindex); return (ENOENT); } retval = sysctl_wire_old_buffer(req, sizeof(uint32_t) + (in_mcast_maxgrpsrc * sizeof(struct in_addr))); if (retval) return (retval); IN_MULTI_LOCK(); IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_INET || ifma->ifma_protospec == NULL) continue; inm = (struct in_multi *)ifma->ifma_protospec; if (!in_hosteq(inm->inm_addr, group)) continue; fmode = inm->inm_st[1].iss_fmode; retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); if (retval != 0) break; RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) { #ifdef KTR struct in_addr ina; ina.s_addr = htonl(ims->ims_haddr); CTR2(KTR_IGMPV3, "%s: visit node %s", __func__, inet_ntoa(ina)); #endif /* * Only copy-out sources which are in-mode. 
*/ if (fmode != ims_get_mode(inm, ims, 1)) { CTR1(KTR_IGMPV3, "%s: skip non-in-mode", __func__); continue; } src.s_addr = htonl(ims->ims_haddr); retval = SYSCTL_OUT(req, &src, sizeof(struct in_addr)); if (retval != 0) break; } } IF_ADDR_RUNLOCK(ifp); IN_MULTI_UNLOCK(); return (retval); } #if defined(KTR) && (KTR_COMPILE & KTR_IGMPV3) static const char *inm_modestrs[] = { "un", "in", "ex" }; static const char * inm_mode_str(const int mode) { if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) return (inm_modestrs[mode]); return ("??"); } static const char *inm_statestrs[] = { "not-member", "silent", "idle", "lazy", "sleeping", "awakening", "query-pending", "sg-query-pending", "leaving" }; static const char * inm_state_str(const int state) { if (state >= IGMP_NOT_MEMBER && state <= IGMP_LEAVING_MEMBER) return (inm_statestrs[state]); return ("??"); } /* * Dump an in_multi structure to the console. */ void inm_print(const struct in_multi *inm) { int t; if ((ktr_mask & KTR_IGMPV3) == 0) return; printf("%s: --- begin inm %p ---\n", __func__, inm); printf("addr %s ifp %p(%s) ifma %p\n", inet_ntoa(inm->inm_addr), inm->inm_ifp, inm->inm_ifp->if_xname, inm->inm_ifma); printf("timer %u state %s refcount %u scq.len %u\n", inm->inm_timer, inm_state_str(inm->inm_state), inm->inm_refcount, inm->inm_scq.mq_len); printf("igi %p nsrc %lu sctimer %u scrv %u\n", inm->inm_igi, inm->inm_nsrc, inm->inm_sctimer, inm->inm_scrv); for (t = 0; t < 2; t++) { printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t, inm_mode_str(inm->inm_st[t].iss_fmode), inm->inm_st[t].iss_asm, inm->inm_st[t].iss_ex, inm->inm_st[t].iss_in, inm->inm_st[t].iss_rec); } printf("%s: --- end inm %p ---\n", __func__, inm); } #else /* !KTR || !(KTR_COMPILE & KTR_IGMPV3) */ void inm_print(const struct in_multi *inm) { } #endif /* KTR && (KTR_COMPILE & KTR_IGMPV3) */ RB_GENERATE(ip_msource_tree, ip_msource, ims_link, ip_msource_cmp); Index: projects/powernv/netinet6/in6_mcast.c =================================================================== --- projects/powernv/netinet6/in6_mcast.c (revision 291050) +++ projects/powernv/netinet6/in6_mcast.c (revision 291051) @@ -1,2836 +1,2836 @@ /* * Copyright (c) 2009 Bruce Simpson. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * IPv6 multicast socket, group, and socket option processing module. * Normative references: RFC 2292, RFC 3492, RFC 3542, RFC 3678, RFC 3810. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef KTR_MLD #define KTR_MLD KTR_INET6 #endif #ifndef __SOCKUNION_DECLARED union sockunion { struct sockaddr_storage ss; struct sockaddr sa; struct sockaddr_dl sdl; struct sockaddr_in6 sin6; }; typedef union sockunion sockunion_t; #define __SOCKUNION_DECLARED #endif /* __SOCKUNION_DECLARED */ static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter", "IPv6 multicast PCB-layer source filter"); static MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group"); static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options"); static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource", "IPv6 multicast MLD-layer source filter"); RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp); /* * Locking: * - Lock order is: Giant, INP_WLOCK, IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK. * - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however * it can be taken by code in net/if.c also. * - ip6_moptions and in6_mfilter are covered by the INP_WLOCK. * * struct in6_multi is covered by IN6_MULTI_LOCK. There isn't strictly * any need for in6_multi itself to be virtualized -- it is bound to an ifp * anyway no matter what happens. 
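 *
 * A sketch of the acquisition sequence implied by the lock order
 * above for a socket-driven state change; each lock is optional and
 * taken only when needed, but always in this sequence:
 *
 *	INP_WLOCK(inp);
 *	IN6_MULTI_LOCK();
 *	MLD_LOCK();
 *	IF_ADDR_WLOCK(ifp);
 *	...
 *	IF_ADDR_WUNLOCK(ifp);
 *	MLD_UNLOCK();
 *	IN6_MULTI_UNLOCK();
 *	INP_WUNLOCK(inp);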
*/ struct mtx in6_multi_mtx; MTX_SYSINIT(in6_multi_mtx, &in6_multi_mtx, "in6_multi_mtx", MTX_DEF); static void im6f_commit(struct in6_mfilter *); static int im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, struct in6_msource **); static struct in6_msource * im6f_graft(struct in6_mfilter *, const uint8_t, const struct sockaddr_in6 *); static void im6f_leave(struct in6_mfilter *); static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *); static void im6f_purge(struct in6_mfilter *); static void im6f_rollback(struct in6_mfilter *); static void im6f_reap(struct in6_mfilter *); static int im6o_grow(struct ip6_moptions *); static size_t im6o_match_group(const struct ip6_moptions *, const struct ifnet *, const struct sockaddr *); static struct in6_msource * im6o_match_source(const struct ip6_moptions *, const size_t, const struct sockaddr *); static void im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims, const int rollback); static int in6_mc_get(struct ifnet *, const struct in6_addr *, struct in6_multi **); static int in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, const int noalloc, struct ip6_msource **pims); #ifdef KTR static int in6m_is_ifp_detached(const struct in6_multi *); #endif static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *); static void in6m_purge(struct in6_multi *); static void in6m_reap(struct in6_multi *); static struct ip6_moptions * in6p_findmoptions(struct inpcb *); static int in6p_get_source_filters(struct inpcb *, struct sockopt *); static int in6p_join_group(struct inpcb *, struct sockopt *); static int in6p_leave_group(struct inpcb *, struct sockopt *); static struct ifnet * in6p_lookup_mcast_ifp(const struct inpcb *, const struct sockaddr_in6 *); static int in6p_block_unblock_source(struct inpcb *, struct sockopt *); static int in6p_set_multicast_if(struct inpcb *, struct sockopt *); static int in6p_set_source_filters(struct inpcb *, struct sockopt *); static int sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS); SYSCTL_DECL(_net_inet6_ip6); /* XXX Not in any common header. */ static SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast, CTLFLAG_RW, 0, "IPv6 multicast"); static u_long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER; SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc, CTLFLAG_RWTUN, &in6_mcast_maxgrpsrc, 0, "Max source filters per group"); static u_long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER; SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc, CTLFLAG_RWTUN, &in6_mcast_maxsocksrc, 0, "Max source filters per socket"); /* TODO Virtualize this switch. */ int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP; SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RWTUN, &in6_mcast_loop, 0, "Loopback multicast datagrams by default"); static SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters, CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip6_mcast_filters, "Per-interface stack-wide source filters"); #ifdef KTR /* * Inline function which wraps assertions for a valid ifp. * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp * is detached. */ static int __inline in6m_is_ifp_detached(const struct in6_multi *inm) { struct ifnet *ifp; KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__)); ifp = inm->in6m_ifma->ifma_ifp; if (ifp != NULL) { /* * Sanity check that network-layer notion of ifp is the * same as that of link-layer. 
*/ KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__)); } return (ifp == NULL); } #endif /* * Initialize an in6_mfilter structure to a known state at t0, t1 * with an empty source filter list. */ static __inline void im6f_init(struct in6_mfilter *imf, const int st0, const int st1) { memset(imf, 0, sizeof(struct in6_mfilter)); RB_INIT(&imf->im6f_sources); imf->im6f_st[0] = st0; imf->im6f_st[1] = st1; } /* * Resize the ip6_moptions vector to the next power-of-two minus 1. * May be called with locks held; do not sleep. */ static int im6o_grow(struct ip6_moptions *imo) { struct in6_multi **nmships; struct in6_multi **omships; struct in6_mfilter *nmfilters; struct in6_mfilter *omfilters; size_t idx; size_t newmax; size_t oldmax; nmships = NULL; nmfilters = NULL; omships = imo->im6o_membership; omfilters = imo->im6o_mfilters; oldmax = imo->im6o_max_memberships; newmax = ((oldmax + 1) * 2) - 1; if (newmax <= IPV6_MAX_MEMBERSHIPS) { nmships = (struct in6_multi **)realloc(omships, sizeof(struct in6_multi *) * newmax, M_IP6MOPTS, M_NOWAIT); nmfilters = (struct in6_mfilter *)realloc(omfilters, sizeof(struct in6_mfilter) * newmax, M_IN6MFILTER, M_NOWAIT); if (nmships != NULL && nmfilters != NULL) { /* Initialize newly allocated source filter heads. */ for (idx = oldmax; idx < newmax; idx++) { im6f_init(&nmfilters[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); } imo->im6o_max_memberships = newmax; imo->im6o_membership = nmships; imo->im6o_mfilters = nmfilters; } } if (nmships == NULL || nmfilters == NULL) { if (nmships != NULL) free(nmships, M_IP6MOPTS); if (nmfilters != NULL) free(nmfilters, M_IN6MFILTER); return (ETOOMANYREFS); } return (0); } /* * Find an IPv6 multicast group entry for this ip6_moptions instance * which matches the specified group, and optionally an interface. * Return its index into the array, or -1 if not found. */ static size_t im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp, const struct sockaddr *group) { const struct sockaddr_in6 *gsin6; struct in6_multi **pinm; int idx; int nmships; gsin6 = (const struct sockaddr_in6 *)group; /* The im6o_membership array may be lazy allocated. */ if (imo->im6o_membership == NULL || imo->im6o_num_memberships == 0) return (-1); nmships = imo->im6o_num_memberships; pinm = &imo->im6o_membership[0]; for (idx = 0; idx < nmships; idx++, pinm++) { if (*pinm == NULL) continue; if ((ifp == NULL || ((*pinm)->in6m_ifp == ifp)) && IN6_ARE_ADDR_EQUAL(&(*pinm)->in6m_addr, &gsin6->sin6_addr)) { break; } } if (idx >= nmships) idx = -1; return (idx); } /* * Find an IPv6 multicast source entry for this imo which matches * the given group index for this socket, and source address. * * XXX TODO: The scope ID, if present in src, is stripped before * any comparison. We SHOULD enforce scope/zone checks where the source * filter entry has a link scope. * * NOTE: This does not check if the entry is in-mode, merely if * it exists, which may not be the desired behaviour. */ static struct in6_msource * im6o_match_source(const struct ip6_moptions *imo, const size_t gidx, const struct sockaddr *src) { struct ip6_msource find; struct in6_mfilter *imf; struct ip6_msource *ims; const sockunion_t *psa; KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__)); KASSERT(gidx != -1 && gidx < imo->im6o_num_memberships, ("%s: invalid index %d\n", __func__, (int)gidx)); /* The im6o_mfilters array may be lazy allocated. 
*/ if (imo->im6o_mfilters == NULL) return (NULL); imf = &imo->im6o_mfilters[gidx]; psa = (const sockunion_t *)src; find.im6s_addr = psa->sin6.sin6_addr; in6_clearscope(&find.im6s_addr); /* XXX */ ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); return ((struct in6_msource *)ims); } /* * Perform filtering for multicast datagrams on a socket by group and source. * * Returns 0 if a datagram should be allowed through, or various error codes * if the socket was not a member of the group, or the source was muted, etc. */ int im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp, const struct sockaddr *group, const struct sockaddr *src) { size_t gidx; struct in6_msource *ims; int mode; KASSERT(ifp != NULL, ("%s: null ifp", __func__)); gidx = im6o_match_group(imo, ifp, group); if (gidx == -1) return (MCAST_NOTGMEMBER); /* * Check if the source was included in an (S,G) join. * Allow reception on exclusive memberships by default, * reject reception on inclusive memberships by default. * Exclude source only if an in-mode exclude filter exists. * Include source only if an in-mode include filter exists. * NOTE: We are comparing group state here at MLD t1 (now) * with socket-layer t0 (since last downcall). */ mode = imo->im6o_mfilters[gidx].im6f_st[1]; ims = im6o_match_source(imo, gidx, src); if ((ims == NULL && mode == MCAST_INCLUDE) || (ims != NULL && ims->im6sl_st[0] != mode)) return (MCAST_NOTSMEMBER); return (MCAST_PASS); } /* * Find and return a reference to an in6_multi record for (ifp, group), * and bump its reference count. * If one does not exist, try to allocate it, and update link-layer multicast * filters on ifp to listen for group. * Assumes the IN6_MULTI lock is held across the call. * Return 0 if successful, otherwise return an appropriate error code. */ static int in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, struct in6_multi **pinm) { struct sockaddr_in6 gsin6; struct ifmultiaddr *ifma; struct in6_multi *inm; int error; error = 0; /* * XXX: Accesses to ifma_protospec must be covered by IF_ADDR_LOCK; * if_addmulti() takes this mutex itself, so we must drop and * re-acquire around the call. */ IN6_MULTI_LOCK_ASSERT(); IF_ADDR_WLOCK(ifp); inm = in6m_lookup_locked(ifp, group); if (inm != NULL) { /* * If we already joined this group, just bump the * refcount and return it. */ KASSERT(inm->in6m_refcount >= 1, ("%s: bad refcount %d", __func__, inm->in6m_refcount)); ++inm->in6m_refcount; *pinm = inm; goto out_locked; } memset(&gsin6, 0, sizeof(gsin6)); gsin6.sin6_family = AF_INET6; gsin6.sin6_len = sizeof(struct sockaddr_in6); gsin6.sin6_addr = *group; /* * Check if a link-layer group is already associated * with this network-layer group on the given ifnet. */ IF_ADDR_WUNLOCK(ifp); error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma); if (error != 0) return (error); IF_ADDR_WLOCK(ifp); /* * If something other than netinet6 is occupying the link-layer * group, print a meaningful error message and back out of * the allocation. * Otherwise, bump the refcount on the existing network-layer * group association and return it. 
*/ if (ifma->ifma_protospec != NULL) { inm = (struct in6_multi *)ifma->ifma_protospec; #ifdef INVARIANTS KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr", __func__)); KASSERT(ifma->ifma_addr->sa_family == AF_INET6, ("%s: ifma not AF_INET6", __func__)); KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__)); if (inm->in6m_ifma != ifma || inm->in6m_ifp != ifp || !IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group)) panic("%s: ifma %p is inconsistent with %p (%p)", __func__, ifma, inm, group); #endif ++inm->in6m_refcount; *pinm = inm; goto out_locked; } IF_ADDR_WLOCK_ASSERT(ifp); /* * A new in6_multi record is needed; allocate and initialize it. * We DO NOT perform an MLD join as the in6_ layer may need to * push an initial source list down to MLD to support SSM. * * The initial source filter state is INCLUDE, {} as per the RFC. * Pending state-changes per group are subject to a bounds check. */ inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO); if (inm == NULL) { + IF_ADDR_WUNLOCK(ifp); if_delmulti_ifma(ifma); - error = ENOMEM; - goto out_locked; + return (ENOMEM); } inm->in6m_addr = *group; inm->in6m_ifp = ifp; inm->in6m_mli = MLD_IFINFO(ifp); inm->in6m_ifma = ifma; inm->in6m_refcount = 1; inm->in6m_state = MLD_NOT_MEMBER; mbufq_init(&inm->in6m_scq, MLD_MAX_STATE_CHANGES); inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED; inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; RB_INIT(&inm->in6m_srcs); ifma->ifma_protospec = inm; *pinm = inm; out_locked: IF_ADDR_WUNLOCK(ifp); return (error); } /* * Drop a reference to an in6_multi record. * * If the refcount drops to 0, free the in6_multi record and * delete the underlying link-layer membership. */ void in6m_release_locked(struct in6_multi *inm) { struct ifmultiaddr *ifma; IN6_MULTI_LOCK_ASSERT(); CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount); if (--inm->in6m_refcount > 0) { CTR2(KTR_MLD, "%s: refcount is now %d", __func__, inm->in6m_refcount); return; } CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm); ifma = inm->in6m_ifma; /* XXX this access is not covered by IF_ADDR_LOCK */ CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma); KASSERT(ifma->ifma_protospec == inm, ("%s: ifma_protospec != inm", __func__)); ifma->ifma_protospec = NULL; in6m_purge(inm); free(inm, M_IP6MADDR); if_delmulti_ifma(ifma); } /* * Clear recorded source entries for a group. * Used by the MLD code. Caller must hold the IN6_MULTI lock. * FIXME: Should reap. */ void in6m_clear_recorded(struct in6_multi *inm) { struct ip6_msource *ims; IN6_MULTI_LOCK_ASSERT(); RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { if (ims->im6s_stp) { ims->im6s_stp = 0; --inm->in6m_st[1].iss_rec; } } KASSERT(inm->in6m_st[1].iss_rec == 0, ("%s: iss_rec %d not 0", __func__, inm->in6m_st[1].iss_rec)); } /* * Record a source as pending for a Source-Group MLDv2 query. * This lives here as it modifies the shared tree. * * inm is the group descriptor. * naddr is the address of the source to record in network-byte order. * * If the net.inet6.mld.sgalloc sysctl is non-zero, we will * lazy-allocate a source node in response to an SG query. * Otherwise, no allocation is performed. This saves some memory * with the trade-off that the source will not be reported to the * router if joined in the window between the query response and * the group actually being joined on the local host. * * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed. * This turns off the allocation of a recorded source entry if * the group has not been joined. 
* * Return 0 if the source didn't exist or was already marked as recorded. * Return 1 if the source was marked as recorded by this function. * Return <0 if any error occurred (negated errno code). */ int in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr) { struct ip6_msource find; struct ip6_msource *ims, *nims; IN6_MULTI_LOCK_ASSERT(); find.im6s_addr = *addr; ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); if (ims && ims->im6s_stp) return (0); if (ims == NULL) { if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) return (-ENOSPC); nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, M_NOWAIT | M_ZERO); if (nims == NULL) return (-ENOMEM); nims->im6s_addr = find.im6s_addr; RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); ++inm->in6m_nsrc; ims = nims; } /* * Mark the source as recorded and update the recorded * source count. */ ++ims->im6s_stp; ++inm->in6m_st[1].iss_rec; return (1); } /* * Return a pointer to an in6_msource owned by an in6_mfilter, * given its source address. * Lazy-allocate if needed. If this is a new entry its filter state is * undefined at t0. * * imf is the filter set being modified. * addr is the source address. * * SMPng: May be called with locks held; malloc must not block. */ static int im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, struct in6_msource **plims) { struct ip6_msource find; struct ip6_msource *ims, *nims; struct in6_msource *lims; int error; error = 0; ims = NULL; lims = NULL; find.im6s_addr = psin->sin6_addr; ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); lims = (struct in6_msource *)ims; if (lims == NULL) { if (imf->im6f_nsrc == in6_mcast_maxsocksrc) return (ENOSPC); nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, M_NOWAIT | M_ZERO); if (nims == NULL) return (ENOMEM); lims = (struct in6_msource *)nims; lims->im6s_addr = find.im6s_addr; lims->im6sl_st[0] = MCAST_UNDEFINED; RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); ++imf->im6f_nsrc; } *plims = lims; return (error); } /* * Graft a source entry into an existing socket-layer filter set, * maintaining any required invariants and checking allocations. * * The source is marked as being in the new filter mode at t1. * * Return the pointer to the new node, otherwise return NULL. */ static struct in6_msource * im6f_graft(struct in6_mfilter *imf, const uint8_t st1, const struct sockaddr_in6 *psin) { struct ip6_msource *nims; struct in6_msource *lims; nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER, M_NOWAIT | M_ZERO); if (nims == NULL) return (NULL); lims = (struct in6_msource *)nims; lims->im6s_addr = psin->sin6_addr; lims->im6sl_st[0] = MCAST_UNDEFINED; lims->im6sl_st[1] = st1; RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims); ++imf->im6f_nsrc; return (lims); } /* * Prune a source entry from an existing socket-layer filter set, * maintaining any required invariants and checking allocations. * * The source is marked as being left at t1; it is not freed. * * Return 0 if no error occurred, otherwise return an errno value. */ static int im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin) { struct ip6_msource find; struct ip6_msource *ims; struct in6_msource *lims; find.im6s_addr = psin->sin6_addr; ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); if (ims == NULL) return (ENOENT); lims = (struct in6_msource *)ims; lims->im6sl_st[1] = MCAST_UNDEFINED; return (0); } /* * Revert socket-layer filter set deltas at t1 to t0 state.
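 *
 * Together with im6f_commit() below, this forms the two-phase update
 * idiom used by the socket-option handlers in this file: stage deltas
 * at t1, attempt the MLD downcall, then either roll back to t0 on
 * failure or commit t1 over t0 on success:
 *
 *	error = in6m_merge(inm, imf);
 *	...
 *	if (error)
 *		im6f_rollback(imf);
 *	else
 *		im6f_commit(imf);
 *	im6f_reap(imf);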
*/ static void im6f_rollback(struct in6_mfilter *imf) { struct ip6_msource *ims, *tims; struct in6_msource *lims; RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { lims = (struct in6_msource *)ims; if (lims->im6sl_st[0] == lims->im6sl_st[1]) { /* no change at t1 */ continue; } else if (lims->im6sl_st[0] != MCAST_UNDEFINED) { /* revert change to existing source at t1 */ lims->im6sl_st[1] = lims->im6sl_st[0]; } else { /* revert source added t1 */ CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); free(ims, M_IN6MFILTER); imf->im6f_nsrc--; } } imf->im6f_st[1] = imf->im6f_st[0]; } /* * Mark socket-layer filter set as INCLUDE {} at t1. */ static void im6f_leave(struct in6_mfilter *imf) { struct ip6_msource *ims; struct in6_msource *lims; RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { lims = (struct in6_msource *)ims; lims->im6sl_st[1] = MCAST_UNDEFINED; } imf->im6f_st[1] = MCAST_INCLUDE; } /* * Mark socket-layer filter set deltas as committed. */ static void im6f_commit(struct in6_mfilter *imf) { struct ip6_msource *ims; struct in6_msource *lims; RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { lims = (struct in6_msource *)ims; lims->im6sl_st[0] = lims->im6sl_st[1]; } imf->im6f_st[0] = imf->im6f_st[1]; } /* * Reap unreferenced sources from socket-layer filter set. */ static void im6f_reap(struct in6_mfilter *imf) { struct ip6_msource *ims, *tims; struct in6_msource *lims; RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { lims = (struct in6_msource *)ims; if ((lims->im6sl_st[0] == MCAST_UNDEFINED) && (lims->im6sl_st[1] == MCAST_UNDEFINED)) { CTR2(KTR_MLD, "%s: free lims %p", __func__, ims); RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); free(ims, M_IN6MFILTER); imf->im6f_nsrc--; } } } /* * Purge socket-layer filter set. */ static void im6f_purge(struct in6_mfilter *imf) { struct ip6_msource *ims, *tims; RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims); free(ims, M_IN6MFILTER); imf->im6f_nsrc--; } imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED; KASSERT(RB_EMPTY(&imf->im6f_sources), ("%s: im6f_sources not empty", __func__)); } /* * Look up a source filter entry for a multicast group. * * inm is the group descriptor to work with. * addr is the IPv6 address to look up. * noalloc may be non-zero to suppress allocation of sources. * *pims will be set to the address of the retrieved or allocated source. * * SMPng: NOTE: may be called with locks held. * Return 0 if successful, otherwise return a non-zero error code. */ static int in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, const int noalloc, struct ip6_msource **pims) { struct ip6_msource find; struct ip6_msource *ims, *nims; #ifdef KTR char ip6tbuf[INET6_ADDRSTRLEN]; #endif find.im6s_addr = *addr; ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); if (ims == NULL && !noalloc) { if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) return (ENOSPC); nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE, M_NOWAIT | M_ZERO); if (nims == NULL) return (ENOMEM); nims->im6s_addr = *addr; RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); ++inm->in6m_nsrc; ims = nims; CTR3(KTR_MLD, "%s: allocated %s as %p", __func__, ip6_sprintf(ip6tbuf, addr), ims); } *pims = ims; return (0); } /* * Merge socket-layer source into MLD-layer source. * If rollback is non-zero, perform the inverse of the merge. 
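 * Concretely, the source's t0 state is subtracted from the MLD-layer
 * include/exclude reference counts at t1 and its t1 state is added
 * back, so a filter-mode change moves one reference between
 * im6s_st[1].in and im6s_st[1].ex; with rollback non-zero the signs
 * flip, undoing a previous merge of the same socket-layer entry.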
*/ static void im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims, const int rollback) { int n = rollback ? -1 : 1; #ifdef KTR char ip6tbuf[INET6_ADDRSTRLEN]; ip6_sprintf(ip6tbuf, &lims->im6s_addr); #endif if (lims->im6sl_st[0] == MCAST_EXCLUDE) { CTR3(KTR_MLD, "%s: t1 ex -= %d on %s", __func__, n, ip6tbuf); ims->im6s_st[1].ex -= n; } else if (lims->im6sl_st[0] == MCAST_INCLUDE) { CTR3(KTR_MLD, "%s: t1 in -= %d on %s", __func__, n, ip6tbuf); ims->im6s_st[1].in -= n; } if (lims->im6sl_st[1] == MCAST_EXCLUDE) { CTR3(KTR_MLD, "%s: t1 ex += %d on %s", __func__, n, ip6tbuf); ims->im6s_st[1].ex += n; } else if (lims->im6sl_st[1] == MCAST_INCLUDE) { CTR3(KTR_MLD, "%s: t1 in += %d on %s", __func__, n, ip6tbuf); ims->im6s_st[1].in += n; } } /* * Atomically update the global in6_multi state, when a membership's * filter list is being updated in any way. * * imf is the per-inpcb-membership group filter pointer. * A fake imf may be passed for in-kernel consumers. * * XXX This is a candidate for a set-symmetric-difference style loop * which would eliminate the repeated lookup from root of ims nodes, * as they share the same key space. * * If any error occurred this function will back out of refcounts * and return a non-zero value. */ static int in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) { struct ip6_msource *ims, *nims; struct in6_msource *lims; int schanged, error; int nsrc0, nsrc1; schanged = 0; error = 0; nsrc1 = nsrc0 = 0; /* * Update the source filters first, as this may fail. * Maintain count of in-mode filters at t0, t1. These are * used to work out if we transition into ASM mode or not. * Maintain a count of source filters whose state was * actually modified by this operation. */ RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { lims = (struct in6_msource *)ims; if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++; if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++; if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue; error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims); ++schanged; if (error) break; im6s_merge(nims, lims, 0); } if (error) { struct ip6_msource *bims; RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) { lims = (struct in6_msource *)ims; if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue; (void)in6m_get_source(inm, &lims->im6s_addr, 1, &bims); if (bims == NULL) continue; im6s_merge(bims, lims, 1); } goto out_reap; } CTR3(KTR_MLD, "%s: imf filters in-mode: %d at t0, %d at t1", __func__, nsrc0, nsrc1); /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */ if (imf->im6f_st[0] == imf->im6f_st[1] && imf->im6f_st[1] == MCAST_INCLUDE) { if (nsrc1 == 0) { CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); --inm->in6m_st[1].iss_in; } } /* Handle filter mode transition on socket. */ if (imf->im6f_st[0] != imf->im6f_st[1]) { CTR3(KTR_MLD, "%s: imf transition %d to %d", __func__, imf->im6f_st[0], imf->im6f_st[1]); if (imf->im6f_st[0] == MCAST_EXCLUDE) { CTR1(KTR_MLD, "%s: --ex on inm at t1", __func__); --inm->in6m_st[1].iss_ex; } else if (imf->im6f_st[0] == MCAST_INCLUDE) { CTR1(KTR_MLD, "%s: --in on inm at t1", __func__); --inm->in6m_st[1].iss_in; } if (imf->im6f_st[1] == MCAST_EXCLUDE) { CTR1(KTR_MLD, "%s: ex++ on inm at t1", __func__); inm->in6m_st[1].iss_ex++; } else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) { CTR1(KTR_MLD, "%s: in++ on inm at t1", __func__); inm->in6m_st[1].iss_in++; } } /* * Track inm filter state in terms of listener counts. 
* If there are any exclusive listeners, stack-wide * membership is exclusive. * Otherwise, if only inclusive listeners, stack-wide is inclusive. * If no listeners remain, state is undefined at t1, * and the MLD lifecycle for this group should finish. */ if (inm->in6m_st[1].iss_ex > 0) { CTR1(KTR_MLD, "%s: transition to EX", __func__); inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE; } else if (inm->in6m_st[1].iss_in > 0) { CTR1(KTR_MLD, "%s: transition to IN", __func__); inm->in6m_st[1].iss_fmode = MCAST_INCLUDE; } else { CTR1(KTR_MLD, "%s: transition to UNDEF", __func__); inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; } /* Decrement ASM listener count on transition out of ASM mode. */ if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) { if ((imf->im6f_st[1] != MCAST_EXCLUDE) || (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) CTR1(KTR_MLD, "%s: --asm on inm at t1", __func__); --inm->in6m_st[1].iss_asm; } /* Increment ASM listener count on transition to ASM mode. */ if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) { CTR1(KTR_MLD, "%s: asm++ on inm at t1", __func__); inm->in6m_st[1].iss_asm++; } CTR3(KTR_MLD, "%s: merged imf %p to inm %p", __func__, imf, inm); in6m_print(inm); out_reap: if (schanged > 0) { CTR1(KTR_MLD, "%s: sources changed; reaping", __func__); in6m_reap(inm); } return (error); } /* * Mark an in6_multi's filter set deltas as committed. * Called by MLD after a state change has been enqueued. */ void in6m_commit(struct in6_multi *inm) { struct ip6_msource *ims; CTR2(KTR_MLD, "%s: commit inm %p", __func__, inm); CTR1(KTR_MLD, "%s: pre commit:", __func__); in6m_print(inm); RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { ims->im6s_st[0] = ims->im6s_st[1]; } inm->in6m_st[0] = inm->in6m_st[1]; } /* * Reap unreferenced nodes from an in6_multi's filter set. */ static void in6m_reap(struct in6_multi *inm) { struct ip6_msource *ims, *tims; RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 || ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 || ims->im6s_stp != 0) continue; CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); free(ims, M_IP6MSOURCE); inm->in6m_nsrc--; } } /* * Purge all source nodes from an in6_multi's filter set. */ static void in6m_purge(struct in6_multi *inm) { struct ip6_msource *ims, *tims; RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { CTR2(KTR_MLD, "%s: free ims %p", __func__, ims); RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); free(ims, M_IP6MSOURCE); inm->in6m_nsrc--; } /* Free state-change requests that might be queued. */ mbufq_drain(&inm->in6m_scq); } /* * Join a multicast address w/o sources. * KAME compatibility entry point. * * SMPng: Assume no mc locks held by caller. */ struct in6_multi_mship * in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr, int *errorp, int delay) { struct in6_multi_mship *imm; int error; imm = malloc(sizeof(*imm), M_IP6MADDR, M_NOWAIT); if (imm == NULL) { *errorp = ENOBUFS; return (NULL); } delay = (delay * PR_FASTHZ) / hz; error = in6_mc_join(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay); if (error) { *errorp = error; free(imm, M_IP6MADDR); return (NULL); } return (imm); } /* * Leave a multicast address w/o sources. * KAME compatibility entry point. * * SMPng: Assume no mc locks held by caller. 
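 *
 * A minimal in-kernel sketch of the paired KAME-style calls (ifp and
 * mcaddr assumed valid, other error handling elided):
 *
 *	struct in6_multi_mship *imm;
 *	int error;
 *
 *	imm = in6_joingroup(ifp, &mcaddr, &error, 0);
 *	if (imm == NULL)
 *		return (error);
 *	...
 *	in6_leavegroup(imm);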
*/ int in6_leavegroup(struct in6_multi_mship *imm) { if (imm->i6mm_maddr != NULL) in6_mc_leave(imm->i6mm_maddr, NULL); free(imm, M_IP6MADDR); return 0; } /* * Join a multicast group; unlocked entry point. * * SMPng: XXX: in6_mc_join() is called from in6_control() when upper * locks are not held. Fortunately, ifp is unlikely to have been detached * at this point, so we assume it's OK to recurse. */ int in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr, /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, const int delay) { int error; IN6_MULTI_LOCK(); error = in6_mc_join_locked(ifp, mcaddr, imf, pinm, delay); IN6_MULTI_UNLOCK(); return (error); } /* * Join a multicast group; real entry point. * * Only preserves atomicity at inm level. * NOTE: imf argument cannot be const due to sys/tree.h limitations. * * If the MLD downcall fails, the group is not joined, and an error * code is returned. */ int in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr, /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, const int delay) { struct in6_mfilter timf; struct in6_multi *inm; int error; #ifdef KTR char ip6tbuf[INET6_ADDRSTRLEN]; #endif #ifdef INVARIANTS /* * Sanity: Check scope zone ID was set for ifp, if and * only if group is scoped to an interface. */ KASSERT(IN6_IS_ADDR_MULTICAST(mcaddr), ("%s: not a multicast address", __func__)); if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) || IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) { KASSERT(mcaddr->s6_addr16[1] != 0, ("%s: scope zone ID not set", __func__)); } #endif IN6_MULTI_LOCK_ASSERT(); CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__, ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp)); error = 0; inm = NULL; /* * If no imf was specified (i.e. kernel consumer), * fake one up and assume it is an ASM join. */ if (imf == NULL) { im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE); imf = &timf; } error = in6_mc_get(ifp, mcaddr, &inm); if (error) { CTR1(KTR_MLD, "%s: in6_mc_get() failure", __func__); return (error); } CTR1(KTR_MLD, "%s: merge inm state", __func__); error = in6m_merge(inm, imf); if (error) { CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); goto out_in6m_release; } CTR1(KTR_MLD, "%s: doing mld downcall", __func__); error = mld_change_state(inm, delay); if (error) { CTR1(KTR_MLD, "%s: failed to update source", __func__); goto out_in6m_release; } out_in6m_release: if (error) { CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); in6m_release_locked(inm); } else { *pinm = inm; } return (error); } /* * Leave a multicast group; unlocked entry point. */ int in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) { struct ifnet *ifp; int error; ifp = inm->in6m_ifp; IN6_MULTI_LOCK(); error = in6_mc_leave_locked(inm, imf); IN6_MULTI_UNLOCK(); return (error); } /* * Leave a multicast group; real entry point. * All source filters will be expunged. * * Only preserves atomicity at inm level. * * Holding the write lock for the INP which contains imf * is highly advisable. We can't assert for it as imf does not * contain a back-pointer to the owning inp. * * Note: This is not the same as in6m_release(*) as this function also * makes a state change downcall into MLD. */ int in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) { struct in6_mfilter timf; int error; #ifdef KTR char ip6tbuf[INET6_ADDRSTRLEN]; #endif error = 0; IN6_MULTI_LOCK_ASSERT(); CTR5(KTR_MLD, "%s: leave inm %p, %s/%s, imf %p", __func__, inm, ip6_sprintf(ip6tbuf, &inm->in6m_addr), (in6m_is_ifp_detached(inm) ? 
"null" : if_name(inm->in6m_ifp)), imf); /* * If no imf was specified (i.e. kernel consumer), * fake one up and assume it is an ASM join. */ if (imf == NULL) { im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED); imf = &timf; } /* * Begin state merge transaction at MLD layer. * * As this particular invocation should not cause any memory * to be allocated, and there is no opportunity to roll back * the transaction, it MUST NOT fail. */ CTR1(KTR_MLD, "%s: merge inm state", __func__); error = in6m_merge(inm, imf); KASSERT(error == 0, ("%s: failed to merge inm state", __func__)); CTR1(KTR_MLD, "%s: doing mld downcall", __func__); error = mld_change_state(inm, 0); if (error) CTR1(KTR_MLD, "%s: failed mld downcall", __func__); CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm); in6m_release_locked(inm); return (error); } /* * Block or unblock an ASM multicast source on an inpcb. * This implements the delta-based API described in RFC 3678. * * The delta-based API applies only to exclusive-mode memberships. * An MLD downcall will be performed. * * SMPng: NOTE: Must take Giant as a join may create a new ifma. * * Return 0 if successful, otherwise return an appropriate error code. */ static int in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) { struct group_source_req gsr; sockunion_t *gsa, *ssa; struct ifnet *ifp; struct in6_mfilter *imf; struct ip6_moptions *imo; struct in6_msource *ims; struct in6_multi *inm; size_t idx; uint16_t fmode; int error, doblock; #ifdef KTR char ip6tbuf[INET6_ADDRSTRLEN]; #endif ifp = NULL; error = 0; doblock = 0; memset(&gsr, 0, sizeof(struct group_source_req)); gsa = (sockunion_t *)&gsr.gsr_group; ssa = (sockunion_t *)&gsr.gsr_source; switch (sopt->sopt_name) { case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: error = sooptcopyin(sopt, &gsr, sizeof(struct group_source_req), sizeof(struct group_source_req)); if (error) return (error); if (gsa->sin6.sin6_family != AF_INET6 || gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) return (EINVAL); if (ssa->sin6.sin6_family != AF_INET6 || ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) return (EINVAL); if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) return (EADDRNOTAVAIL); ifp = ifnet_byindex(gsr.gsr_interface); if (sopt->sopt_name == MCAST_BLOCK_SOURCE) doblock = 1; break; default: CTR2(KTR_MLD, "%s: unknown sopt_name %d", __func__, sopt->sopt_name); return (EOPNOTSUPP); break; } if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) return (EINVAL); (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); /* * Check if we are actually a member of this group. */ imo = in6p_findmoptions(inp); idx = im6o_match_group(imo, ifp, &gsa->sa); if (idx == -1 || imo->im6o_mfilters == NULL) { error = EADDRNOTAVAIL; goto out_in6p_locked; } KASSERT(imo->im6o_mfilters != NULL, ("%s: im6o_mfilters not allocated", __func__)); imf = &imo->im6o_mfilters[idx]; inm = imo->im6o_membership[idx]; /* * Attempting to use the delta-based API on an * non exclusive-mode membership is an error. */ fmode = imf->im6f_st[0]; if (fmode != MCAST_EXCLUDE) { error = EINVAL; goto out_in6p_locked; } /* * Deal with error cases up-front: * Asked to block, but already blocked; or * Asked to unblock, but nothing to unblock. * If adding a new block entry, allocate it. */ ims = im6o_match_source(imo, idx, &ssa->sa); if ((ims != NULL && doblock) || (ims == NULL && !doblock)) { CTR3(KTR_MLD, "%s: source %s %spresent", __func__, ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr), doblock ? 
"" : "not "); error = EADDRNOTAVAIL; goto out_in6p_locked; } INP_WLOCK_ASSERT(inp); /* * Begin state merge transaction at socket layer. */ if (doblock) { CTR2(KTR_MLD, "%s: %s source", __func__, "block"); ims = im6f_graft(imf, fmode, &ssa->sin6); if (ims == NULL) error = ENOMEM; } else { CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); error = im6f_prune(imf, &ssa->sin6); } if (error) { CTR1(KTR_MLD, "%s: merge imf state failed", __func__); goto out_im6f_rollback; } /* * Begin state merge transaction at MLD layer. */ IN6_MULTI_LOCK(); CTR1(KTR_MLD, "%s: merge inm state", __func__); error = in6m_merge(inm, imf); if (error) CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); else { CTR1(KTR_MLD, "%s: doing mld downcall", __func__); error = mld_change_state(inm, 0); if (error) CTR1(KTR_MLD, "%s: failed mld downcall", __func__); } IN6_MULTI_UNLOCK(); out_im6f_rollback: if (error) im6f_rollback(imf); else im6f_commit(imf); im6f_reap(imf); out_in6p_locked: INP_WUNLOCK(inp); return (error); } /* * Given an inpcb, return its multicast options structure pointer. Accepts * an unlocked inpcb pointer, but will return it locked. May sleep. * * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. * SMPng: NOTE: Returns with the INP write lock held. */ static struct ip6_moptions * in6p_findmoptions(struct inpcb *inp) { struct ip6_moptions *imo; struct in6_multi **immp; struct in6_mfilter *imfp; size_t idx; INP_WLOCK(inp); if (inp->in6p_moptions != NULL) return (inp->in6p_moptions); INP_WUNLOCK(inp); imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK); immp = malloc(sizeof(*immp) * IPV6_MIN_MEMBERSHIPS, M_IP6MOPTS, M_WAITOK | M_ZERO); imfp = malloc(sizeof(struct in6_mfilter) * IPV6_MIN_MEMBERSHIPS, M_IN6MFILTER, M_WAITOK); imo->im6o_multicast_ifp = NULL; imo->im6o_multicast_hlim = V_ip6_defmcasthlim; imo->im6o_multicast_loop = in6_mcast_loop; imo->im6o_num_memberships = 0; imo->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS; imo->im6o_membership = immp; /* Initialize per-group source filters. */ for (idx = 0; idx < IPV6_MIN_MEMBERSHIPS; idx++) im6f_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); imo->im6o_mfilters = imfp; INP_WLOCK(inp); if (inp->in6p_moptions != NULL) { free(imfp, M_IN6MFILTER); free(immp, M_IP6MOPTS); free(imo, M_IP6MOPTS); return (inp->in6p_moptions); } inp->in6p_moptions = imo; return (imo); } /* * Discard the IPv6 multicast options (and source filters). * * SMPng: NOTE: assumes INP write lock is held. */ void ip6_freemoptions(struct ip6_moptions *imo) { struct in6_mfilter *imf; size_t idx, nmships; KASSERT(imo != NULL, ("%s: ip6_moptions is NULL", __func__)); nmships = imo->im6o_num_memberships; for (idx = 0; idx < nmships; ++idx) { imf = imo->im6o_mfilters ? &imo->im6o_mfilters[idx] : NULL; if (imf) im6f_leave(imf); /* XXX this will thrash the lock(s) */ (void)in6_mc_leave(imo->im6o_membership[idx], imf); if (imf) im6f_purge(imf); } if (imo->im6o_mfilters) free(imo->im6o_mfilters, M_IN6MFILTER); free(imo->im6o_membership, M_IP6MOPTS); free(imo, M_IP6MOPTS); } /* * Atomically get source filters on a socket for an IPv6 multicast group. * Called with INP lock held; returns with lock released. 
*/ static int in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) { struct __msfilterreq msfr; sockunion_t *gsa; struct ifnet *ifp; struct ip6_moptions *imo; struct in6_mfilter *imf; struct ip6_msource *ims; struct in6_msource *lims; struct sockaddr_in6 *psin; struct sockaddr_storage *ptss; struct sockaddr_storage *tss; int error; size_t idx, nsrcs, ncsrcs; INP_WLOCK_ASSERT(inp); imo = inp->in6p_moptions; KASSERT(imo != NULL, ("%s: null ip6_moptions", __func__)); INP_WUNLOCK(inp); error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), sizeof(struct __msfilterreq)); if (error) return (error); if (msfr.msfr_group.ss_family != AF_INET6 || msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) return (EINVAL); gsa = (sockunion_t *)&msfr.msfr_group; if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) return (EINVAL); if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) return (EADDRNOTAVAIL); ifp = ifnet_byindex(msfr.msfr_ifindex); if (ifp == NULL) return (EADDRNOTAVAIL); (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); INP_WLOCK(inp); /* * Lookup group on the socket. */ idx = im6o_match_group(imo, ifp, &gsa->sa); if (idx == -1 || imo->im6o_mfilters == NULL) { INP_WUNLOCK(inp); return (EADDRNOTAVAIL); } imf = &imo->im6o_mfilters[idx]; /* * Ignore memberships which are in limbo. */ if (imf->im6f_st[1] == MCAST_UNDEFINED) { INP_WUNLOCK(inp); return (EAGAIN); } msfr.msfr_fmode = imf->im6f_st[1]; /* * If the user specified a buffer, copy out the source filter * entries to userland gracefully. * We only copy out the number of entries which userland * has asked for, but we always tell userland how big the * buffer really needs to be. */ if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) msfr.msfr_nsrcs = in6_mcast_maxsocksrc; tss = NULL; if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) { tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, M_TEMP, M_NOWAIT | M_ZERO); if (tss == NULL) { INP_WUNLOCK(inp); return (ENOBUFS); } } /* * Count number of sources in-mode at t0. * If buffer space exists and remains, copy out source entries. */ nsrcs = msfr.msfr_nsrcs; ncsrcs = 0; ptss = tss; RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { lims = (struct in6_msource *)ims; if (lims->im6sl_st[0] == MCAST_UNDEFINED || lims->im6sl_st[0] != imf->im6f_st[0]) continue; ++ncsrcs; if (tss != NULL && nsrcs > 0) { psin = (struct sockaddr_in6 *)ptss; psin->sin6_family = AF_INET6; psin->sin6_len = sizeof(struct sockaddr_in6); psin->sin6_addr = lims->im6s_addr; psin->sin6_port = 0; --nsrcs; ++ptss; } } INP_WUNLOCK(inp); if (tss != NULL) { error = copyout(tss, msfr.msfr_srcs, sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); free(tss, M_TEMP); if (error) return (error); } msfr.msfr_nsrcs = ncsrcs; error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq)); return (error); } /* * Return the IP multicast options in response to user getsockopt(). */ int ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt) { struct ip6_moptions *im6o; int error; u_int optval; INP_WLOCK(inp); im6o = inp->in6p_moptions; /* * If socket is neither of type SOCK_RAW or SOCK_DGRAM, * or is a divert socket, reject it. 
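 *
 * For example (sketch; a hypothetical caller), the query below succeeds
 * on a UDP socket, while the same call on a SOCK_STREAM socket is
 * rejected with EOPNOTSUPP by the check that follows:
 *
 *	int s = socket(AF_INET6, SOCK_DGRAM, 0);
 *	u_int hlim;
 *	socklen_t len = sizeof(hlim);
 *
 *	getsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &hlim, &len);
 *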
*/ if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || (inp->inp_socket->so_proto->pr_type != SOCK_RAW && inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) { INP_WUNLOCK(inp); return (EOPNOTSUPP); } error = 0; switch (sopt->sopt_name) { case IPV6_MULTICAST_IF: if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) { optval = 0; } else { optval = im6o->im6o_multicast_ifp->if_index; } INP_WUNLOCK(inp); error = sooptcopyout(sopt, &optval, sizeof(u_int)); break; case IPV6_MULTICAST_HOPS: if (im6o == NULL) optval = V_ip6_defmcasthlim; else optval = im6o->im6o_multicast_hlim; INP_WUNLOCK(inp); error = sooptcopyout(sopt, &optval, sizeof(u_int)); break; case IPV6_MULTICAST_LOOP: if (im6o == NULL) optval = in6_mcast_loop; /* XXX VIMAGE */ else optval = im6o->im6o_multicast_loop; INP_WUNLOCK(inp); error = sooptcopyout(sopt, &optval, sizeof(u_int)); break; case IPV6_MSFILTER: if (im6o == NULL) { error = EADDRNOTAVAIL; INP_WUNLOCK(inp); } else { error = in6p_get_source_filters(inp, sopt); } break; default: INP_WUNLOCK(inp); error = ENOPROTOOPT; break; } INP_UNLOCK_ASSERT(inp); return (error); } /* * Look up the ifnet to use for a multicast group membership, * given the address of an IPv6 group. * * This routine exists to support legacy IPv6 multicast applications. * * If inp is non-NULL, use this socket's current FIB number for any * required FIB lookup. Look up the group address in the unicast FIB, * and use its ifp; usually, this points to the default next-hop. * If the FIB lookup fails, return NULL. * * FUTURE: Support multiple forwarding tables for IPv6. * * Returns NULL if no ifp could be found. */ static struct ifnet * in6p_lookup_mcast_ifp(const struct inpcb *in6p, const struct sockaddr_in6 *gsin6) { struct route_in6 ro6; struct ifnet *ifp; KASSERT(in6p->inp_vflag & INP_IPV6, ("%s: not INP_IPV6 inpcb", __func__)); KASSERT(gsin6->sin6_family == AF_INET6, ("%s: not AF_INET6 group", __func__)); ifp = NULL; memset(&ro6, 0, sizeof(struct route_in6)); memcpy(&ro6.ro_dst, gsin6, sizeof(struct sockaddr_in6)); rtalloc_ign_fib((struct route *)&ro6, 0, in6p ? in6p->inp_inc.inc_fibnum : RT_DEFAULT_FIB); if (ro6.ro_rt != NULL) { ifp = ro6.ro_rt->rt_ifp; KASSERT(ifp != NULL, ("%s: null ifp", __func__)); RTFREE(ro6.ro_rt); } return (ifp); } /* * Join an IPv6 multicast group, possibly with a source. * * FIXME: The KAME use of the unspecified address (::) * to join *all* multicast groups is currently unsupported. */ static int in6p_join_group(struct inpcb *inp, struct sockopt *sopt) { struct group_source_req gsr; sockunion_t *gsa, *ssa; struct ifnet *ifp; struct in6_mfilter *imf; struct ip6_moptions *imo; struct in6_multi *inm; struct in6_msource *lims; size_t idx; int error, is_new; ifp = NULL; imf = NULL; lims = NULL; error = 0; is_new = 0; memset(&gsr, 0, sizeof(struct group_source_req)); gsa = (sockunion_t *)&gsr.gsr_group; gsa->ss.ss_family = AF_UNSPEC; ssa = (sockunion_t *)&gsr.gsr_source; ssa->ss.ss_family = AF_UNSPEC; /* * Chew everything into struct group_source_req. * Overwrite the port field if present, as the sockaddr * being copied in may be matched with a binary comparison. * Ignore passed-in scope ID. 
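 *
 * For example, an SSM join that takes the MCAST_JOIN_SOURCE_GROUP arm
 * below could be issued from userland as (sketch; 'grp6' and 'src6' are
 * hypothetical sockaddr_in6 values, "em0" a hypothetical interface):
 *
 *	struct group_source_req gsr;
 *
 *	memset(&gsr, 0, sizeof(gsr));
 *	gsr.gsr_interface = if_nametoindex("em0");
 *	memcpy(&gsr.gsr_group, &grp6, sizeof(struct sockaddr_in6));
 *	memcpy(&gsr.gsr_source, &src6, sizeof(struct sockaddr_in6));
 *	setsockopt(s, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
 *	    &gsr, sizeof(gsr));
 *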
*/ switch (sopt->sopt_name) { case IPV6_JOIN_GROUP: { struct ipv6_mreq mreq; error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), sizeof(struct ipv6_mreq)); if (error) return (error); gsa->sin6.sin6_family = AF_INET6; gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; if (mreq.ipv6mr_interface == 0) { ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6); } else { if (V_if_index < mreq.ipv6mr_interface) return (EADDRNOTAVAIL); ifp = ifnet_byindex(mreq.ipv6mr_interface); } CTR3(KTR_MLD, "%s: ipv6mr_interface = %d, ifp = %p", __func__, mreq.ipv6mr_interface, ifp); } break; case MCAST_JOIN_GROUP: case MCAST_JOIN_SOURCE_GROUP: if (sopt->sopt_name == MCAST_JOIN_GROUP) { error = sooptcopyin(sopt, &gsr, sizeof(struct group_req), sizeof(struct group_req)); } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { error = sooptcopyin(sopt, &gsr, sizeof(struct group_source_req), sizeof(struct group_source_req)); } if (error) return (error); if (gsa->sin6.sin6_family != AF_INET6 || gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) return (EINVAL); if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { if (ssa->sin6.sin6_family != AF_INET6 || ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) return (EINVAL); if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) return (EINVAL); /* * TODO: Validate embedded scope ID in source * list entry against passed-in ifp, if and only * if source list filter entry is iface or node local. */ in6_clearscope(&ssa->sin6.sin6_addr); ssa->sin6.sin6_port = 0; ssa->sin6.sin6_scope_id = 0; } if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) return (EADDRNOTAVAIL); ifp = ifnet_byindex(gsr.gsr_interface); break; default: CTR2(KTR_MLD, "%s: unknown sopt_name %d", __func__, sopt->sopt_name); return (EOPNOTSUPP); break; } if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) return (EINVAL); if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) return (EADDRNOTAVAIL); gsa->sin6.sin6_port = 0; gsa->sin6.sin6_scope_id = 0; /* * Always set the scope zone ID on memberships created from userland. * Use the passed-in ifp to do this. * XXX The in6_setscope() return value is meaningless. * XXX SCOPE6_LOCK() is taken by in6_setscope(). */ (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); imo = in6p_findmoptions(inp); idx = im6o_match_group(imo, ifp, &gsa->sa); if (idx == -1) { is_new = 1; } else { inm = imo->im6o_membership[idx]; imf = &imo->im6o_mfilters[idx]; if (ssa->ss.ss_family != AF_UNSPEC) { /* * MCAST_JOIN_SOURCE_GROUP on an exclusive membership * is an error. On an existing inclusive membership, * it just adds the source to the filter list. */ if (imf->im6f_st[1] != MCAST_INCLUDE) { error = EINVAL; goto out_in6p_locked; } /* * Throw out duplicates. * * XXX FIXME: This makes a naive assumption that * even if entries exist for *ssa in this imf, * they will be rejected as dupes, even if they * are not valid in the current mode (in-mode). * * in6_msource is transactioned just as for anything * else in SSM -- but note naive use of in6m_graft() * below for allocating new filter entries. * * This is only an issue if someone mixes the * full-state SSM API with the delta-based API, * which is discouraged in the relevant RFCs. */ lims = im6o_match_source(imo, idx, &ssa->sa); if (lims != NULL /*&& lims->im6sl_st[1] == MCAST_INCLUDE*/) { error = EADDRNOTAVAIL; goto out_in6p_locked; } } else { /* * MCAST_JOIN_GROUP alone, on any existing membership, * is rejected, to stop the same inpcb tying up * multiple refs to the in_multi. 
* On an existing inclusive membership, this is also * an error; if you want to change filter mode, * you must use the userland API setsourcefilter(). * XXX We don't reject this for imf in UNDEFINED * state at t1, because allocation of a filter * is atomic with allocation of a membership. */ error = EINVAL; goto out_in6p_locked; } } /* * Begin state merge transaction at socket layer. */ INP_WLOCK_ASSERT(inp); if (is_new) { if (imo->im6o_num_memberships == imo->im6o_max_memberships) { error = im6o_grow(imo); if (error) goto out_in6p_locked; } /* * Allocate the new slot upfront so we can deal with * grafting the new source filter in same code path * as for join-source on existing membership. */ idx = imo->im6o_num_memberships; imo->im6o_membership[idx] = NULL; imo->im6o_num_memberships++; KASSERT(imo->im6o_mfilters != NULL, ("%s: im6f_mfilters vector was not allocated", __func__)); imf = &imo->im6o_mfilters[idx]; KASSERT(RB_EMPTY(&imf->im6f_sources), ("%s: im6f_sources not empty", __func__)); } /* * Graft new source into filter list for this inpcb's * membership of the group. The in6_multi may not have * been allocated yet if this is a new membership, however, * the in_mfilter slot will be allocated and must be initialized. * * Note: Grafting of exclusive mode filters doesn't happen * in this path. * XXX: Should check for non-NULL lims (node exists but may * not be in-mode) for interop with full-state API. */ if (ssa->ss.ss_family != AF_UNSPEC) { /* Membership starts in IN mode */ if (is_new) { CTR1(KTR_MLD, "%s: new join w/source", __func__); im6f_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE); } else { CTR2(KTR_MLD, "%s: %s source", __func__, "allow"); } lims = im6f_graft(imf, MCAST_INCLUDE, &ssa->sin6); if (lims == NULL) { CTR1(KTR_MLD, "%s: merge imf state failed", __func__); error = ENOMEM; goto out_im6o_free; } } else { /* No address specified; Membership starts in EX mode */ if (is_new) { CTR1(KTR_MLD, "%s: new join w/o source", __func__); im6f_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE); } } /* * Begin state merge transaction at MLD layer. */ IN6_MULTI_LOCK(); if (is_new) { error = in6_mc_join_locked(ifp, &gsa->sin6.sin6_addr, imf, &inm, 0); if (error) { IN6_MULTI_UNLOCK(); goto out_im6o_free; } imo->im6o_membership[idx] = inm; } else { CTR1(KTR_MLD, "%s: merge inm state", __func__); error = in6m_merge(inm, imf); if (error) CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); else { CTR1(KTR_MLD, "%s: doing mld downcall", __func__); error = mld_change_state(inm, 0); if (error) CTR1(KTR_MLD, "%s: failed mld downcall", __func__); } } IN6_MULTI_UNLOCK(); INP_WLOCK_ASSERT(inp); if (error) { im6f_rollback(imf); if (is_new) im6f_purge(imf); else im6f_reap(imf); } else { im6f_commit(imf); } out_im6o_free: if (error && is_new) { imo->im6o_membership[idx] = NULL; --imo->im6o_num_memberships; } out_in6p_locked: INP_WUNLOCK(inp); return (error); } /* * Leave an IPv6 multicast group on an inpcb, possibly with a source. 
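 *
 * This is the mirror image of the join path above; from userland
 * (sketch, reusing the hypothetical 'gsr' shown there):
 *
 *	setsockopt(s, IPPROTO_IPV6, MCAST_LEAVE_SOURCE_GROUP,
 *	    &gsr, sizeof(gsr));
 *	(drops one source from an inclusive filter)
 *
 *	setsockopt(s, IPPROTO_IPV6, MCAST_LEAVE_GROUP,
 *	    &gsr, sizeof(gsr));
 *	(final leave of the whole group)
 *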
*/ static int in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) { struct ipv6_mreq mreq; struct group_source_req gsr; sockunion_t *gsa, *ssa; struct ifnet *ifp; struct in6_mfilter *imf; struct ip6_moptions *imo; struct in6_msource *ims; struct in6_multi *inm; uint32_t ifindex; size_t idx; int error, is_final; #ifdef KTR char ip6tbuf[INET6_ADDRSTRLEN]; #endif ifp = NULL; ifindex = 0; error = 0; is_final = 1; memset(&gsr, 0, sizeof(struct group_source_req)); gsa = (sockunion_t *)&gsr.gsr_group; gsa->ss.ss_family = AF_UNSPEC; ssa = (sockunion_t *)&gsr.gsr_source; ssa->ss.ss_family = AF_UNSPEC; /* * Chew everything passed in up into a struct group_source_req * as that is easier to process. * Note: Any embedded scope ID in the multicast group passed * in by userland is ignored, the interface index is the recommended * mechanism to specify an interface; see below. */ switch (sopt->sopt_name) { case IPV6_LEAVE_GROUP: error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), sizeof(struct ipv6_mreq)); if (error) return (error); gsa->sin6.sin6_family = AF_INET6; gsa->sin6.sin6_len = sizeof(struct sockaddr_in6); gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr; gsa->sin6.sin6_port = 0; gsa->sin6.sin6_scope_id = 0; ifindex = mreq.ipv6mr_interface; break; case MCAST_LEAVE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: if (sopt->sopt_name == MCAST_LEAVE_GROUP) { error = sooptcopyin(sopt, &gsr, sizeof(struct group_req), sizeof(struct group_req)); } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { error = sooptcopyin(sopt, &gsr, sizeof(struct group_source_req), sizeof(struct group_source_req)); } if (error) return (error); if (gsa->sin6.sin6_family != AF_INET6 || gsa->sin6.sin6_len != sizeof(struct sockaddr_in6)) return (EINVAL); if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { if (ssa->sin6.sin6_family != AF_INET6 || ssa->sin6.sin6_len != sizeof(struct sockaddr_in6)) return (EINVAL); if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr)) return (EINVAL); /* * TODO: Validate embedded scope ID in source * list entry against passed-in ifp, if and only * if source list filter entry is iface or node local. */ in6_clearscope(&ssa->sin6.sin6_addr); } gsa->sin6.sin6_port = 0; gsa->sin6.sin6_scope_id = 0; ifindex = gsr.gsr_interface; break; default: CTR2(KTR_MLD, "%s: unknown sopt_name %d", __func__, sopt->sopt_name); return (EOPNOTSUPP); break; } if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) return (EINVAL); /* * Validate interface index if provided. If no interface index * was provided separately, attempt to look the membership up * from the default scope as a last resort to disambiguate * the membership we are being asked to leave. * XXX SCOPE6 lock potentially taken here. */ if (ifindex != 0) { if (V_if_index < ifindex) return (EADDRNOTAVAIL); ifp = ifnet_byindex(ifindex); if (ifp == NULL) return (EADDRNOTAVAIL); (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); } else { error = sa6_embedscope(&gsa->sin6, V_ip6_use_defzone); if (error) return (EADDRNOTAVAIL); /* * Some badly behaved applications don't pass an ifindex * or a scope ID, which is an API violation. In this case, * perform a lookup as per a v6 join. * * XXX For now, stomp on zone ID for the corner case. * This is not the 'KAME way', but we need to see the ifp * directly until such time as this implementation is * refactored, assuming the scope IDs are the way to go. 
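 *
 * Worked example: a join of ff02::1 scoped to ifindex 2 is carried
 * internally as ff02:2::1 once sa6_embedscope() has run, so the
 * ntohs() read of s6_addr16[1] below recovers the 2 and lets us
 * resolve the ifp.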
 */
		ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1]);
		if (ifindex == 0) {
			CTR2(KTR_MLD, "%s: warning: no ifindex, looking up "
			    "ifp for group %s.", __func__,
			    ip6_sprintf(ip6tbuf, &gsa->sin6.sin6_addr));
			ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6);
		} else {
			ifp = ifnet_byindex(ifindex);
		}
		if (ifp == NULL)
			return (EADDRNOTAVAIL);
	}

	CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp);
	KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__));

	/*
	 * Find the membership in the membership array.
	 */
	imo = in6p_findmoptions(inp);
	idx = im6o_match_group(imo, ifp, &gsa->sa);
	if (idx == -1) {
		error = EADDRNOTAVAIL;
		goto out_in6p_locked;
	}
	inm = imo->im6o_membership[idx];
	imf = &imo->im6o_mfilters[idx];

	if (ssa->ss.ss_family != AF_UNSPEC)
		is_final = 0;

	/*
	 * Begin state merge transaction at socket layer.
	 */
	INP_WLOCK_ASSERT(inp);

	/*
	 * If we were instructed only to leave a given source, do so.
	 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
	 */
	if (is_final) {
		im6f_leave(imf);
	} else {
		if (imf->im6f_st[0] == MCAST_EXCLUDE) {
			error = EADDRNOTAVAIL;
			goto out_in6p_locked;
		}
		ims = im6o_match_source(imo, idx, &ssa->sa);
		if (ims == NULL) {
			CTR3(KTR_MLD, "%s: source %s %spresent", __func__,
			    ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
			    "not ");
			error = EADDRNOTAVAIL;
			goto out_in6p_locked;
		}
		CTR2(KTR_MLD, "%s: %s source", __func__, "block");
		error = im6f_prune(imf, &ssa->sin6);
		if (error) {
			CTR1(KTR_MLD, "%s: merge imf state failed",
			    __func__);
			goto out_in6p_locked;
		}
	}

	/*
	 * Begin state merge transaction at MLD layer.
	 */
	IN6_MULTI_LOCK();

	if (is_final) {
		/*
		 * Give up the multicast address record to which
		 * the membership points.
		 */
		(void)in6_mc_leave_locked(inm, imf);
	} else {
		CTR1(KTR_MLD, "%s: merge inm state", __func__);
		error = in6m_merge(inm, imf);
		if (error)
			CTR1(KTR_MLD, "%s: failed to merge inm state",
			    __func__);
		else {
			CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
			error = mld_change_state(inm, 0);
			if (error)
				CTR1(KTR_MLD, "%s: failed mld downcall",
				    __func__);
		}
	}

	IN6_MULTI_UNLOCK();

	if (error)
		im6f_rollback(imf);
	else
		im6f_commit(imf);

	im6f_reap(imf);

	if (is_final) {
		/* Remove the gap in the membership array. */
		for (++idx; idx < imo->im6o_num_memberships; ++idx) {
			imo->im6o_membership[idx-1] = imo->im6o_membership[idx];
			imo->im6o_mfilters[idx-1] = imo->im6o_mfilters[idx];
		}
		imo->im6o_num_memberships--;
	}

out_in6p_locked:
	INP_WUNLOCK(inp);
	return (error);
}

/*
 * Select the interface for transmitting IPv6 multicast datagrams.
 *
 * Either an instance of struct in6_addr or an instance of struct ipv6_mreqn
 * may be passed to this socket option.  An address of in6addr_any or an
 * interface index of 0 is used to remove a previous selection.
 * When no interface is selected, one is chosen for every send.
 */
static int
in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt)
{
	struct ifnet *ifp;
	struct ip6_moptions *imo;
	u_int ifindex;
	int error;

	if (sopt->sopt_valsize != sizeof(u_int))
		return (EINVAL);
	error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int));
	if (error)
		return (error);
	if (V_if_index < ifindex)
		return (EINVAL);
	if (ifindex == 0)
		ifp = NULL;
	else {
		ifp = ifnet_byindex(ifindex);
		if (ifp == NULL)
			return (EINVAL);
		if ((ifp->if_flags & IFF_MULTICAST) == 0)
			return (EADDRNOTAVAIL);
	}
	imo = in6p_findmoptions(inp);
	imo->im6o_multicast_ifp = ifp;
	INP_WUNLOCK(inp);

	return (0);
}

/*
 * Atomically set source filters on a socket for an IPv6 multicast group.
 *
 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held.
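 *
 * Reached from userland through setsockopt(2) with IPV6_MSFILTER; a
 * rough sketch of the full-state API replacing the whole filter in one
 * call (names hypothetical, checks omitted):
 *
 *	struct __msfilterreq msfr;
 *	struct sockaddr_storage ss;	(holds one AF_INET6 source)
 *
 *	memset(&msfr, 0, sizeof(msfr));
 *	msfr.msfr_ifindex = ifindex;
 *	memcpy(&msfr.msfr_group, &grp6, sizeof(struct sockaddr_in6));
 *	msfr.msfr_fmode = MCAST_INCLUDE;
 *	msfr.msfr_nsrcs = 1;
 *	msfr.msfr_srcs = &ss;
 *	setsockopt(s, IPPROTO_IPV6, IPV6_MSFILTER, &msfr, sizeof(msfr));
 *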
*/ static int in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) { struct __msfilterreq msfr; sockunion_t *gsa; struct ifnet *ifp; struct in6_mfilter *imf; struct ip6_moptions *imo; struct in6_multi *inm; size_t idx; int error; error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), sizeof(struct __msfilterreq)); if (error) return (error); if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) return (ENOBUFS); if (msfr.msfr_fmode != MCAST_EXCLUDE && msfr.msfr_fmode != MCAST_INCLUDE) return (EINVAL); if (msfr.msfr_group.ss_family != AF_INET6 || msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) return (EINVAL); gsa = (sockunion_t *)&msfr.msfr_group; if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr)) return (EINVAL); gsa->sin6.sin6_port = 0; /* ignore port */ if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) return (EADDRNOTAVAIL); ifp = ifnet_byindex(msfr.msfr_ifindex); if (ifp == NULL) return (EADDRNOTAVAIL); (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL); /* * Take the INP write lock. * Check if this socket is a member of this group. */ imo = in6p_findmoptions(inp); idx = im6o_match_group(imo, ifp, &gsa->sa); if (idx == -1 || imo->im6o_mfilters == NULL) { error = EADDRNOTAVAIL; goto out_in6p_locked; } inm = imo->im6o_membership[idx]; imf = &imo->im6o_mfilters[idx]; /* * Begin state merge transaction at socket layer. */ INP_WLOCK_ASSERT(inp); imf->im6f_st[1] = msfr.msfr_fmode; /* * Apply any new source filters, if present. * Make a copy of the user-space source vector so * that we may copy them with a single copyin. This * allows us to deal with page faults up-front. */ if (msfr.msfr_nsrcs > 0) { struct in6_msource *lims; struct sockaddr_in6 *psin; struct sockaddr_storage *kss, *pkss; int i; INP_WUNLOCK(inp); CTR2(KTR_MLD, "%s: loading %lu source list entries", __func__, (unsigned long)msfr.msfr_nsrcs); kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, M_TEMP, M_WAITOK); error = copyin(msfr.msfr_srcs, kss, sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); if (error) { free(kss, M_TEMP); return (error); } INP_WLOCK(inp); /* * Mark all source filters as UNDEFINED at t1. * Restore new group filter mode, as im6f_leave() * will set it to INCLUDE. */ im6f_leave(imf); imf->im6f_st[1] = msfr.msfr_fmode; /* * Update socket layer filters at t1, lazy-allocating * new entries. This saves a bunch of memory at the * cost of one RB_FIND() per source entry; duplicate * entries in the msfr_nsrcs vector are ignored. * If we encounter an error, rollback transaction. * * XXX This too could be replaced with a set-symmetric * difference like loop to avoid walking from root * every time, as the key space is common. */ for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) { psin = (struct sockaddr_in6 *)pkss; if (psin->sin6_family != AF_INET6) { error = EAFNOSUPPORT; break; } if (psin->sin6_len != sizeof(struct sockaddr_in6)) { error = EINVAL; break; } if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) { error = EINVAL; break; } /* * TODO: Validate embedded scope ID in source * list entry against passed-in ifp, if and only * if source list filter entry is iface or node local. */ in6_clearscope(&psin->sin6_addr); error = im6f_get_source(imf, psin, &lims); if (error) break; lims->im6sl_st[1] = imf->im6f_st[1]; } free(kss, M_TEMP); } if (error) goto out_im6f_rollback; INP_WLOCK_ASSERT(inp); IN6_MULTI_LOCK(); /* * Begin state merge transaction at MLD layer. 
*/ CTR1(KTR_MLD, "%s: merge inm state", __func__); error = in6m_merge(inm, imf); if (error) CTR1(KTR_MLD, "%s: failed to merge inm state", __func__); else { CTR1(KTR_MLD, "%s: doing mld downcall", __func__); error = mld_change_state(inm, 0); if (error) CTR1(KTR_MLD, "%s: failed mld downcall", __func__); } IN6_MULTI_UNLOCK(); out_im6f_rollback: if (error) im6f_rollback(imf); else im6f_commit(imf); im6f_reap(imf); out_in6p_locked: INP_WUNLOCK(inp); return (error); } /* * Set the IP multicast options in response to user setsockopt(). * * Many of the socket options handled in this function duplicate the * functionality of socket options in the regular unicast API. However, * it is not possible to merge the duplicate code, because the idempotence * of the IPv6 multicast part of the BSD Sockets API must be preserved; * the effects of these options must be treated as separate and distinct. * * SMPng: XXX: Unlocked read of inp_socket believed OK. */ int ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) { struct ip6_moptions *im6o; int error; error = 0; /* * If socket is neither of type SOCK_RAW or SOCK_DGRAM, * or is a divert socket, reject it. */ if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || (inp->inp_socket->so_proto->pr_type != SOCK_RAW && inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) return (EOPNOTSUPP); switch (sopt->sopt_name) { case IPV6_MULTICAST_IF: error = in6p_set_multicast_if(inp, sopt); break; case IPV6_MULTICAST_HOPS: { int hlim; if (sopt->sopt_valsize != sizeof(int)) { error = EINVAL; break; } error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int)); if (error) break; if (hlim < -1 || hlim > 255) { error = EINVAL; break; } else if (hlim == -1) { hlim = V_ip6_defmcasthlim; } im6o = in6p_findmoptions(inp); im6o->im6o_multicast_hlim = hlim; INP_WUNLOCK(inp); break; } case IPV6_MULTICAST_LOOP: { u_int loop; /* * Set the loopback flag for outgoing multicast packets. * Must be zero or one. */ if (sopt->sopt_valsize != sizeof(u_int)) { error = EINVAL; break; } error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int)); if (error) break; if (loop > 1) { error = EINVAL; break; } im6o = in6p_findmoptions(inp); im6o->im6o_multicast_loop = loop; INP_WUNLOCK(inp); break; } case IPV6_JOIN_GROUP: case MCAST_JOIN_GROUP: case MCAST_JOIN_SOURCE_GROUP: error = in6p_join_group(inp, sopt); break; case IPV6_LEAVE_GROUP: case MCAST_LEAVE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: error = in6p_leave_group(inp, sopt); break; case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: error = in6p_block_unblock_source(inp, sopt); break; case IPV6_MSFILTER: error = in6p_set_source_filters(inp, sopt); break; default: error = EOPNOTSUPP; break; } INP_UNLOCK_ASSERT(inp); return (error); } /* * Expose MLD's multicast filter mode and source list(s) to userland, * keyed by (ifindex, group). * The filter mode is written out as a uint32_t, followed by * 0..n of struct in6_addr. * For use by ifmcstat(8). * SMPng: NOTE: unlocked read of ifindex space. 
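 *
 * A rough reader sketch, as ifmcstat(8) would issue it (assuming the
 * node resolves as "net.inet6.ip6.mcast.filters"; error checks and
 * buffer sizing omitted):
 *
 *	int mib[16];
 *	size_t miblen = nitems(mib) - 5;
 *	size_t len = bufsize;
 *
 *	sysctlnametomib("net.inet6.ip6.mcast.filters", mib, &miblen);
 *	mib[miblen] = ifindex;
 *	memcpy(&mib[miblen + 1], &grp6, sizeof(struct in6_addr));
 *	sysctl(mib, miblen + 5, buf, &len, NULL, 0);
 *	(buf now starts with the uint32_t filter mode, followed by
 *	 0..n struct in6_addr entries)
 *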
*/ static int sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS) { struct in6_addr mcaddr; struct in6_addr src; struct ifnet *ifp; struct ifmultiaddr *ifma; struct in6_multi *inm; struct ip6_msource *ims; int *name; int retval; u_int namelen; uint32_t fmode, ifindex; #ifdef KTR char ip6tbuf[INET6_ADDRSTRLEN]; #endif name = (int *)arg1; namelen = arg2; if (req->newptr != NULL) return (EPERM); /* int: ifindex + 4 * 32 bits of IPv6 address */ if (namelen != 5) return (EINVAL); ifindex = name[0]; if (ifindex <= 0 || ifindex > V_if_index) { CTR2(KTR_MLD, "%s: ifindex %u out of range", __func__, ifindex); return (ENOENT); } memcpy(&mcaddr, &name[1], sizeof(struct in6_addr)); if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) { CTR2(KTR_MLD, "%s: group %s is not multicast", __func__, ip6_sprintf(ip6tbuf, &mcaddr)); return (EINVAL); } ifp = ifnet_byindex(ifindex); if (ifp == NULL) { CTR2(KTR_MLD, "%s: no ifp for ifindex %u", __func__, ifindex); return (ENOENT); } /* * Internal MLD lookups require that scope/zone ID is set. */ (void)in6_setscope(&mcaddr, ifp, NULL); retval = sysctl_wire_old_buffer(req, sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct in6_addr))); if (retval) return (retval); IN6_MULTI_LOCK(); IF_ADDR_RLOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_INET6 || ifma->ifma_protospec == NULL) continue; inm = (struct in6_multi *)ifma->ifma_protospec; if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) continue; fmode = inm->in6m_st[1].iss_fmode; retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); if (retval != 0) break; RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { CTR2(KTR_MLD, "%s: visit node %p", __func__, ims); /* * Only copy-out sources which are in-mode. */ if (fmode != im6s_get_mode(inm, ims, 1)) { CTR1(KTR_MLD, "%s: skip non-in-mode", __func__); continue; } src = ims->im6s_addr; retval = SYSCTL_OUT(req, &src, sizeof(struct in6_addr)); if (retval != 0) break; } } IF_ADDR_RUNLOCK(ifp); IN6_MULTI_UNLOCK(); return (retval); } #ifdef KTR static const char *in6m_modestrs[] = { "un", "in", "ex" }; static const char * in6m_mode_str(const int mode) { if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) return (in6m_modestrs[mode]); return ("??"); } static const char *in6m_statestrs[] = { "not-member", "silent", "idle", "lazy", "sleeping", "awakening", "query-pending", "sg-query-pending", "leaving" }; static const char * in6m_state_str(const int state) { if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) return (in6m_statestrs[state]); return ("??"); } /* * Dump an in6_multi structure to the console. 
*/ void in6m_print(const struct in6_multi *inm) { int t; char ip6tbuf[INET6_ADDRSTRLEN]; if ((ktr_mask & KTR_MLD) == 0) return; printf("%s: --- begin in6m %p ---\n", __func__, inm); printf("addr %s ifp %p(%s) ifma %p\n", ip6_sprintf(ip6tbuf, &inm->in6m_addr), inm->in6m_ifp, if_name(inm->in6m_ifp), inm->in6m_ifma); printf("timer %u state %s refcount %u scq.len %u\n", inm->in6m_timer, in6m_state_str(inm->in6m_state), inm->in6m_refcount, mbufq_len(&inm->in6m_scq)); printf("mli %p nsrc %lu sctimer %u scrv %u\n", inm->in6m_mli, inm->in6m_nsrc, inm->in6m_sctimer, inm->in6m_scrv); for (t = 0; t < 2; t++) { printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t, in6m_mode_str(inm->in6m_st[t].iss_fmode), inm->in6m_st[t].iss_asm, inm->in6m_st[t].iss_ex, inm->in6m_st[t].iss_in, inm->in6m_st[t].iss_rec); } printf("%s: --- end in6m %p ---\n", __func__, inm); } #else /* !KTR */ void in6m_print(const struct in6_multi *inm) { } #endif /* KTR */ Index: projects/powernv/netpfil/ipfw/ip_fw_dynamic.c =================================================================== --- projects/powernv/netpfil/ipfw/ip_fw_dynamic.c (revision 291050) +++ projects/powernv/netpfil/ipfw/ip_fw_dynamic.c (revision 291051) @@ -1,1605 +1,1608 @@ /*- * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define DEB(x) #define DDB(x) x /* * Dynamic rule support for ipfw */ #include "opt_ipfw.h" #include "opt_inet.h" #ifndef INET #error IPFIREWALL requires INET. #endif /* INET */ #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include /* for ETHERTYPE_IP */ #include #include #include #include #include #include /* ip_defttl */ #include #include #include #include /* IN6_ARE_ADDR_EQUAL */ #ifdef INET6 #include #include #endif #include #include /* XXX for in_cksum */ #ifdef MAC #include #endif /* * Description of dynamic rules. * * Dynamic rules are stored in lists accessed through a hash table * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can * be modified through the sysctl variable dyn_buckets which is * updated when the table becomes empty. * * XXX currently there is only one list, ipfw_dyn. 
* * When a packet is received, its address fields are first masked * with the mask defined for the rule, then hashed, then matched * against the entries in the corresponding list. * Dynamic rules can be used for different purposes: * + stateful rules; * + enforcing limits on the number of sessions; * + in-kernel NAT (not implemented yet) * * The lifetime of dynamic rules is regulated by dyn_*_lifetime, * measured in seconds and depending on the flags. * * The total number of dynamic rules is equal to UMA zone items count. * The max number of dynamic rules is dyn_max. When we reach * the maximum number of rules we do not create anymore. This is * done to avoid consuming too much memory, but also too much * time when searching on each packet (ideally, we should try instead * to put a limit on the length of the list on each bucket...). * * Each dynamic rule holds a pointer to the parent ipfw rule so * we know what action to perform. Dynamic rules are removed when * the parent rule is deleted. This can be changed by dyn_keep_states * sysctl. * * There are some limitations with dynamic rules -- we do not * obey the 'randomized match', and we do not do multiple * passes through the firewall. XXX check the latter!!! */ struct ipfw_dyn_bucket { struct mtx mtx; /* Bucket protecting lock */ ipfw_dyn_rule *head; /* Pointer to first rule */ }; /* * Static variables followed by global ones */ static VNET_DEFINE(struct ipfw_dyn_bucket *, ipfw_dyn_v); static VNET_DEFINE(u_int32_t, dyn_buckets_max); static VNET_DEFINE(u_int32_t, curr_dyn_buckets); static VNET_DEFINE(struct callout, ipfw_timeout); #define V_ipfw_dyn_v VNET(ipfw_dyn_v) #define V_dyn_buckets_max VNET(dyn_buckets_max) #define V_curr_dyn_buckets VNET(curr_dyn_buckets) #define V_ipfw_timeout VNET(ipfw_timeout) static VNET_DEFINE(uma_zone_t, ipfw_dyn_rule_zone); #define V_ipfw_dyn_rule_zone VNET(ipfw_dyn_rule_zone) #define IPFW_BUCK_LOCK_INIT(b) \ mtx_init(&(b)->mtx, "IPFW dynamic bucket", NULL, MTX_DEF) #define IPFW_BUCK_LOCK_DESTROY(b) \ mtx_destroy(&(b)->mtx) #define IPFW_BUCK_LOCK(i) mtx_lock(&V_ipfw_dyn_v[(i)].mtx) #define IPFW_BUCK_UNLOCK(i) mtx_unlock(&V_ipfw_dyn_v[(i)].mtx) #define IPFW_BUCK_ASSERT(i) mtx_assert(&V_ipfw_dyn_v[(i)].mtx, MA_OWNED) static VNET_DEFINE(int, dyn_keep_states); #define V_dyn_keep_states VNET(dyn_keep_states) /* * Timeouts for various events in handing dynamic rules. */ static VNET_DEFINE(u_int32_t, dyn_ack_lifetime); static VNET_DEFINE(u_int32_t, dyn_syn_lifetime); static VNET_DEFINE(u_int32_t, dyn_fin_lifetime); static VNET_DEFINE(u_int32_t, dyn_rst_lifetime); static VNET_DEFINE(u_int32_t, dyn_udp_lifetime); static VNET_DEFINE(u_int32_t, dyn_short_lifetime); #define V_dyn_ack_lifetime VNET(dyn_ack_lifetime) #define V_dyn_syn_lifetime VNET(dyn_syn_lifetime) #define V_dyn_fin_lifetime VNET(dyn_fin_lifetime) #define V_dyn_rst_lifetime VNET(dyn_rst_lifetime) #define V_dyn_udp_lifetime VNET(dyn_udp_lifetime) #define V_dyn_short_lifetime VNET(dyn_short_lifetime) /* * Keepalives are sent if dyn_keepalive is set. They are sent every * dyn_keepalive_period seconds, in the last dyn_keepalive_interval * seconds of lifetime of a rule. * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower * than dyn_keepalive_period. 
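 *
 * Worked example with the usual defaults (dyn_ack_lifetime 300,
 * dyn_keepalive_interval 20, dyn_keepalive_period 5): an established
 * TCP state set to expire at t+300 starts being probed once fewer than
 * 20 seconds of lifetime remain, so keepalives go out roughly at
 * t+280, t+285, t+290 and t+295; any answered probe refreshes the
 * entry and pushes the expiry forward again.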
*/ static VNET_DEFINE(u_int32_t, dyn_keepalive_interval); static VNET_DEFINE(u_int32_t, dyn_keepalive_period); static VNET_DEFINE(u_int32_t, dyn_keepalive); static VNET_DEFINE(time_t, dyn_keepalive_last); #define V_dyn_keepalive_interval VNET(dyn_keepalive_interval) #define V_dyn_keepalive_period VNET(dyn_keepalive_period) #define V_dyn_keepalive VNET(dyn_keepalive) #define V_dyn_keepalive_last VNET(dyn_keepalive_last) static VNET_DEFINE(u_int32_t, dyn_max); /* max # of dynamic rules */ #define DYN_COUNT uma_zone_get_cur(V_ipfw_dyn_rule_zone) #define V_dyn_max VNET(dyn_max) /* for userspace, we emulate the uma_zone_counter with ipfw_dyn_count */ static int ipfw_dyn_count; /* number of objects */ #ifdef USERSPACE /* emulation of UMA object counters for userspace */ #define uma_zone_get_cur(x) ipfw_dyn_count #endif /* USERSPACE */ static int last_log; /* Log ratelimiting */ static void ipfw_dyn_tick(void *vnetx); static void check_dyn_rules(struct ip_fw_chain *, ipfw_range_tlv *, int, int); #ifdef SYSCTL_NODE static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS); static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS); SYSBEGIN(f2) SYSCTL_DECL(_net_inet_ip_fw); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_buckets_max), 0, "Max number of dyn. buckets"); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0, "Current Number of dyn. buckets"); SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RD, 0, 0, sysctl_ipfw_dyn_count, "IU", "Number of dyn. rules"); SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW, 0, 0, sysctl_ipfw_dyn_max, "IU", "Max number of dyn. rules"); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0, "Lifetime of dyn. rules for acks"); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0, "Lifetime of dyn. rules for syn"); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0, "Lifetime of dyn. rules for fin"); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0, "Lifetime of dyn. rules for rst"); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0, "Lifetime of dyn. rules for UDP"); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0, "Lifetime of dyn. rules for other situations"); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0, "Enable keepalives for dyn. rules"); SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keep_states, CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keep_states), 0, "Do not flush dynamic states on rule deletion"); SYSEND #endif /* SYSCTL_NODE */ #ifdef INET6 static __inline int hash_packet6(struct ipfw_flow_id *id) { u_int32_t i; i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^ (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^ (id->src_ip6.__u6_addr.__u6_addr32[2]) ^ (id->src_ip6.__u6_addr.__u6_addr32[3]) ^ (id->dst_port) ^ (id->src_port); return i; } #endif /* * IMPORTANT: the hash function for dynamic rules must be commutative * in source and destination (ip,port), because rules are bidirectional * and we want to find both in the same bucket. 
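 *
 * For example, with the IPv4 fold in hash_packet() below, the flow
 * 10.0.0.1:1025 -> 10.0.0.2:80 and its reply direction
 * 10.0.0.2:80 -> 10.0.0.1:1025 both compute
 * src_ip ^ dst_ip ^ src_port ^ dst_port (masked by buckets - 1), and
 * XOR commutes, so the forward SYN and the returning SYN|ACK land in
 * the same bucket.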
*/ static __inline int hash_packet(struct ipfw_flow_id *id, int buckets) { u_int32_t i; #ifdef INET6 if (IS_IP6_FLOW_ID(id)) i = hash_packet6(id); else #endif /* INET6 */ i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port); i &= (buckets - 1); return i; } /** * Print customizable flow id description via log(9) facility. */ static void print_dyn_rule_flags(struct ipfw_flow_id *id, int dyn_type, int log_flags, char *prefix, char *postfix) { struct in_addr da; #ifdef INET6 char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN]; #else char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN]; #endif #ifdef INET6 if (IS_IP6_FLOW_ID(id)) { ip6_sprintf(src, &id->src_ip6); ip6_sprintf(dst, &id->dst_ip6); } else #endif { da.s_addr = htonl(id->src_ip); inet_ntop(AF_INET, &da, src, sizeof(src)); da.s_addr = htonl(id->dst_ip); inet_ntop(AF_INET, &da, dst, sizeof(dst)); } log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n", prefix, dyn_type, src, id->src_port, dst, id->dst_port, DYN_COUNT, postfix); } #define print_dyn_rule(id, dtype, prefix, postfix) \ print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix) #define TIME_LEQ(a,b) ((int)((a)-(b)) <= 0) #define TIME_LE(a,b) ((int)((a)-(b)) < 0) /* * Lookup a dynamic rule, locked version. */ static ipfw_dyn_rule * lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int i, int *match_direction, struct tcphdr *tcp) { /* * Stateful ipfw extensions. * Lookup into dynamic session queue. */ #define MATCH_REVERSE 0 #define MATCH_FORWARD 1 #define MATCH_NONE 2 #define MATCH_UNKNOWN 3 int dir = MATCH_NONE; ipfw_dyn_rule *prev, *q = NULL; IPFW_BUCK_ASSERT(i); for (prev = NULL, q = V_ipfw_dyn_v[i].head; q; prev = q, q = q->next) { if (q->dyn_type == O_LIMIT_PARENT && q->count) continue; if (pkt->proto != q->id.proto || q->dyn_type == O_LIMIT_PARENT) continue; if (IS_IP6_FLOW_ID(pkt)) { if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.src_ip6) && IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.dst_ip6) && pkt->src_port == q->id.src_port && pkt->dst_port == q->id.dst_port) { dir = MATCH_FORWARD; break; } if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.dst_ip6) && IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.src_ip6) && pkt->src_port == q->id.dst_port && pkt->dst_port == q->id.src_port) { dir = MATCH_REVERSE; break; } } else { if (pkt->src_ip == q->id.src_ip && pkt->dst_ip == q->id.dst_ip && pkt->src_port == q->id.src_port && pkt->dst_port == q->id.dst_port) { dir = MATCH_FORWARD; break; } if (pkt->src_ip == q->id.dst_ip && pkt->dst_ip == q->id.src_ip && pkt->src_port == q->id.dst_port && pkt->dst_port == q->id.src_port) { dir = MATCH_REVERSE; break; } } } if (q == NULL) goto done; /* q = NULL, not found */ if (prev != NULL) { /* found and not in front */ prev->next = q->next; q->next = V_ipfw_dyn_v[i].head; V_ipfw_dyn_v[i].head = q; } if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */ uint32_t ack; u_char flags = pkt->_flags & (TH_FIN | TH_SYN | TH_RST); #define BOTH_SYN (TH_SYN | (TH_SYN << 8)) #define BOTH_FIN (TH_FIN | (TH_FIN << 8)) #define TCP_FLAGS (TH_FLAGS | (TH_FLAGS << 8)) #define ACK_FWD 0x10000 /* fwd ack seen */ #define ACK_REV 0x20000 /* rev ack seen */ q->state |= (dir == MATCH_FORWARD) ? 
flags : (flags << 8); switch (q->state & TCP_FLAGS) { case TH_SYN: /* opening */ q->expire = time_uptime + V_dyn_syn_lifetime; break; case BOTH_SYN: /* move to established */ case BOTH_SYN | TH_FIN: /* one side tries to close */ case BOTH_SYN | (TH_FIN << 8): #define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0) if (tcp == NULL) break; ack = ntohl(tcp->th_ack); if (dir == MATCH_FORWARD) { if (q->ack_fwd == 0 || _SEQ_GE(ack, q->ack_fwd)) { q->ack_fwd = ack; q->state |= ACK_FWD; } } else { if (q->ack_rev == 0 || _SEQ_GE(ack, q->ack_rev)) { q->ack_rev = ack; q->state |= ACK_REV; } } if ((q->state & (ACK_FWD | ACK_REV)) == (ACK_FWD | ACK_REV)) { q->expire = time_uptime + V_dyn_ack_lifetime; q->state &= ~(ACK_FWD | ACK_REV); } break; case BOTH_SYN | BOTH_FIN: /* both sides closed */ if (V_dyn_fin_lifetime >= V_dyn_keepalive_period) V_dyn_fin_lifetime = V_dyn_keepalive_period - 1; q->expire = time_uptime + V_dyn_fin_lifetime; break; default: #if 0 /* * reset or some invalid combination, but can also * occur if we use keep-state the wrong way. */ if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0) printf("invalid state: 0x%x\n", q->state); #endif if (V_dyn_rst_lifetime >= V_dyn_keepalive_period) V_dyn_rst_lifetime = V_dyn_keepalive_period - 1; q->expire = time_uptime + V_dyn_rst_lifetime; break; } } else if (pkt->proto == IPPROTO_UDP) { q->expire = time_uptime + V_dyn_udp_lifetime; } else { /* other protocols */ q->expire = time_uptime + V_dyn_short_lifetime; } done: if (match_direction != NULL) *match_direction = dir; return (q); } ipfw_dyn_rule * ipfw_lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction, struct tcphdr *tcp) { ipfw_dyn_rule *q; int i; i = hash_packet(pkt, V_curr_dyn_buckets); IPFW_BUCK_LOCK(i); q = lookup_dyn_rule_locked(pkt, i, match_direction, tcp); if (q == NULL) IPFW_BUCK_UNLOCK(i); /* NB: return table locked when q is not NULL */ return q; } /* * Unlock bucket mtx * @p - pointer to dynamic rule */ void ipfw_dyn_unlock(ipfw_dyn_rule *q) { IPFW_BUCK_UNLOCK(q->bucket); } static int resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets) { int i, k, nbuckets_old; ipfw_dyn_rule *q; struct ipfw_dyn_bucket *dyn_v, *dyn_v_old; /* Check if given number is power of 2 and less than 64k */ if ((nbuckets > 65536) || (!powerof2(nbuckets))) return 1; CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__, V_curr_dyn_buckets, nbuckets); /* Allocate and initialize new hash */ dyn_v = malloc(nbuckets * sizeof(ipfw_dyn_rule), M_IPFW, M_WAITOK | M_ZERO); for (i = 0 ; i < nbuckets; i++) IPFW_BUCK_LOCK_INIT(&dyn_v[i]); /* * Call upper half lock, as get_map() do to ease * read-only access to dynamic rules hash from sysctl */ IPFW_UH_WLOCK(chain); /* * Acquire chain write lock to permit hash access * for main traffic path without additional locks */ IPFW_WLOCK(chain); /* Save old values */ nbuckets_old = V_curr_dyn_buckets; dyn_v_old = V_ipfw_dyn_v; /* Skip relinking if array is not set up */ if (V_ipfw_dyn_v == NULL) V_curr_dyn_buckets = 0; /* Re-link all dynamic states */ for (i = 0 ; i < V_curr_dyn_buckets ; i++) { while (V_ipfw_dyn_v[i].head != NULL) { /* Remove from current chain */ q = V_ipfw_dyn_v[i].head; V_ipfw_dyn_v[i].head = q->next; /* Get new hash value */ k = hash_packet(&q->id, nbuckets); q->bucket = k; /* Add to the new head */ q->next = dyn_v[k].head; dyn_v[k].head = q; } } /* Update current pointers/buckets values */ V_curr_dyn_buckets = nbuckets; V_ipfw_dyn_v = dyn_v; IPFW_WUNLOCK(chain); IPFW_UH_WUNLOCK(chain); /* Start periodic callout on initial creation */ if 
(dyn_v_old == NULL) { callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0); return (0); } /* Destroy all mutexes */ for (i = 0 ; i < nbuckets_old ; i++) IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]); /* Free old hash */ free(dyn_v_old, M_IPFW); return 0; } /** * Install state of type 'type' for a dynamic session. * The hash table contains two type of rules: * - regular rules (O_KEEP_STATE) * - rules for sessions with limited number of sess per user * (O_LIMIT). When they are created, the parent is * increased by 1, and decreased on delete. In this case, * the third parameter is the parent rule and not the chain. * - "parent" rules for the above (O_LIMIT_PARENT). */ static ipfw_dyn_rule * add_dyn_rule(struct ipfw_flow_id *id, int i, u_int8_t dyn_type, struct ip_fw *rule) { ipfw_dyn_rule *r; IPFW_BUCK_ASSERT(i); r = uma_zalloc(V_ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO); if (r == NULL) { if (last_log != time_uptime) { last_log = time_uptime; log(LOG_DEBUG, "ipfw: Cannot allocate dynamic state, " "consider increasing net.inet.ip.fw.dyn_max\n"); } return NULL; } ipfw_dyn_count++; /* * refcount on parent is already incremented, so * it is safe to use parent unlocked. */ if (dyn_type == O_LIMIT) { ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule; if ( parent->dyn_type != O_LIMIT_PARENT) panic("invalid parent"); r->parent = parent; rule = parent->rule; } r->id = *id; r->expire = time_uptime + V_dyn_syn_lifetime; r->rule = rule; r->dyn_type = dyn_type; IPFW_ZERO_DYN_COUNTER(r); r->count = 0; r->bucket = i; r->next = V_ipfw_dyn_v[i].head; V_ipfw_dyn_v[i].head = r; DEB(print_dyn_rule(id, dyn_type, "add dyn entry", "total");) return r; } /** * lookup dynamic parent rule using pkt and rule as search keys. * If the lookup fails, then install one. */ static ipfw_dyn_rule * lookup_dyn_parent(struct ipfw_flow_id *pkt, int *pindex, struct ip_fw *rule) { ipfw_dyn_rule *q; int i, is_v6; is_v6 = IS_IP6_FLOW_ID(pkt); i = hash_packet( pkt, V_curr_dyn_buckets ); *pindex = i; IPFW_BUCK_LOCK(i); for (q = V_ipfw_dyn_v[i].head ; q != NULL ; q=q->next) if (q->dyn_type == O_LIMIT_PARENT && rule== q->rule && pkt->proto == q->id.proto && pkt->src_port == q->id.src_port && pkt->dst_port == q->id.dst_port && ( (is_v6 && IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6), &(q->id.src_ip6)) && IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6), &(q->id.dst_ip6))) || (!is_v6 && pkt->src_ip == q->id.src_ip && pkt->dst_ip == q->id.dst_ip) ) ) { q->expire = time_uptime + V_dyn_short_lifetime; DEB(print_dyn_rule(pkt, q->dyn_type, "lookup_dyn_parent found", "");) return q; } /* Add virtual limiting rule */ return add_dyn_rule(pkt, i, O_LIMIT_PARENT, rule); } /** * Install dynamic state for rule type cmd->o.opcode * * Returns 1 (failure) if state is not installed because of errors or because * session limitations are enforced. */ int ipfw_install_state(struct ip_fw_chain *chain, struct ip_fw *rule, ipfw_insn_limit *cmd, struct ip_fw_args *args, uint32_t tablearg) { ipfw_dyn_rule *q; int i; DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state", "");) i = hash_packet(&args->f_id, V_curr_dyn_buckets); IPFW_BUCK_LOCK(i); q = lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL); if (q != NULL) { /* should never occur */ DEB( if (last_log != time_uptime) { last_log = time_uptime; printf("ipfw: %s: entry already present, done\n", __func__); }) IPFW_BUCK_UNLOCK(i); return (0); } /* * State limiting is done via uma(9) zone limiting. * Save pointer to newly-installed rule and reject * packet if add_dyn_rule() returned NULL. * Note q is currently set to NULL. 
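 *
 * For reference, userland rules that install state through the cases
 * below (illustrative only):
 *
 *	ipfw add allow tcp from any to me 22 setup keep-state
 *	(installs an O_KEEP_STATE entry per flow)
 *
 *	ipfw add allow tcp from any to me 80 setup limit src-addr 10
 *	(installs O_LIMIT entries beneath an O_LIMIT_PARENT keyed on
 *	 the masked source address)
 *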
     */
    switch (cmd->o.opcode) {
    case O_KEEP_STATE:  /* bidir rule */
        q = add_dyn_rule(&args->f_id, i, O_KEEP_STATE, rule);
        break;

    case O_LIMIT: {     /* limit number of sessions */
        struct ipfw_flow_id id;
        ipfw_dyn_rule *parent;
        uint32_t conn_limit;
        uint16_t limit_mask = cmd->limit_mask;
        int pindex;

        conn_limit = IP_FW_ARG_TABLEARG(chain, cmd->conn_limit, limit);

        DEB(
        if (cmd->conn_limit == IP_FW_TARG)
            printf("ipfw: %s: O_LIMIT rule, conn_limit: %u "
                "(tablearg)\n", __func__, conn_limit);
        else
            printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n",
                __func__, conn_limit);
        )

        id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
        id.proto = args->f_id.proto;
        id.addr_type = args->f_id.addr_type;
        id.fib = M_GETFIB(args->m);

        if (IS_IP6_FLOW_ID(&(args->f_id))) {
+            bzero(&id.src_ip6, sizeof(id.src_ip6));
+            bzero(&id.dst_ip6, sizeof(id.dst_ip6));
+
            if (limit_mask & DYN_SRC_ADDR)
                id.src_ip6 = args->f_id.src_ip6;
            if (limit_mask & DYN_DST_ADDR)
                id.dst_ip6 = args->f_id.dst_ip6;
        } else {
            if (limit_mask & DYN_SRC_ADDR)
                id.src_ip = args->f_id.src_ip;
            if (limit_mask & DYN_DST_ADDR)
                id.dst_ip = args->f_id.dst_ip;
        }
        if (limit_mask & DYN_SRC_PORT)
            id.src_port = args->f_id.src_port;
        if (limit_mask & DYN_DST_PORT)
            id.dst_port = args->f_id.dst_port;

        /*
         * We have to release the lock for the previous bucket to
         * avoid a possible deadlock
         */
        IPFW_BUCK_UNLOCK(i);

        if ((parent = lookup_dyn_parent(&id, &pindex, rule)) == NULL) {
            printf("ipfw: %s: add parent failed\n", __func__);
            IPFW_BUCK_UNLOCK(pindex);
            return (1);
        }

        if (parent->count >= conn_limit) {
            if (V_fw_verbose && last_log != time_uptime) {
                char sbuf[24];
                last_log = time_uptime;
                snprintf(sbuf, sizeof(sbuf),
                    "%d drop session",
                    parent->rule->rulenum);
                print_dyn_rule_flags(&args->f_id,
                    cmd->o.opcode,
                    LOG_SECURITY | LOG_DEBUG,
                    sbuf, "too many entries");
            }
            IPFW_BUCK_UNLOCK(pindex);
            return (1);
        }

        /* Increment counter on parent */
        parent->count++;
        IPFW_BUCK_UNLOCK(pindex);

        IPFW_BUCK_LOCK(i);
        q = add_dyn_rule(&args->f_id, i, O_LIMIT,
            (struct ip_fw *)parent);
        if (q == NULL) {
            /* Decrement parent counter and notify caller */
            IPFW_BUCK_UNLOCK(i);
            IPFW_BUCK_LOCK(pindex);
            parent->count--;
            IPFW_BUCK_UNLOCK(pindex);
            return (1);
        }
        break;
    }
    default:
        printf("ipfw: %s: unknown dynamic rule type %u\n",
            __func__, cmd->o.opcode);
    }

    if (q == NULL) {
        IPFW_BUCK_UNLOCK(i);
        return (1);    /* Notify caller about failure */
    }

    /* XXX just set lifetime */
    lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);

    IPFW_BUCK_UNLOCK(i);
    return (0);
}

/*
 * Generate a TCP packet, containing either a RST or a keepalive.
 * When flags & TH_RST, we are sending a RST packet because a
 * "reset" action matched the packet.
 * Otherwise we are sending a keepalive, and the TH_SYN bit in flags
 * selects the direction of the probe (see ipfw_dyn_send_ka()).
 * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
 * so that MAC can label the reply appropriately.
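 * Note that the checksum ordering in the body is deliberate: ip_len is
 * first set to just the TCP header length so that in_cksum() over the
 * whole mbuf effectively covers a TCP pseudo-header, and the remaining
 * IP header fields are only filled in after th_sum has been computed.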
 */
struct mbuf *
ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
    u_int32_t ack, int flags)
{
    struct mbuf *m = NULL;        /* stupid compiler */
    int len, dir;
    struct ip *h = NULL;          /* stupid compiler */
#ifdef INET6
    struct ip6_hdr *h6 = NULL;
#endif
    struct tcphdr *th = NULL;

    MGETHDR(m, M_NOWAIT, MT_DATA);
    if (m == NULL)
        return (NULL);

    M_SETFIB(m, id->fib);
#ifdef MAC
    if (replyto != NULL)
        mac_netinet_firewall_reply(replyto, m);
    else
        mac_netinet_firewall_send(m);
#else
    (void)replyto;        /* don't warn about unused arg */
#endif

    switch (id->addr_type) {
    case 4:
        len = sizeof(struct ip) + sizeof(struct tcphdr);
        break;
#ifdef INET6
    case 6:
        len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
        break;
#endif
    default:
        /* XXX: log me?!? */
        FREE_PKT(m);
        return (NULL);
    }
    dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);

    m->m_data += max_linkhdr;
    m->m_flags |= M_SKIP_FIREWALL;
    m->m_pkthdr.len = m->m_len = len;
    m->m_pkthdr.rcvif = NULL;
    bzero(m->m_data, len);

    switch (id->addr_type) {
    case 4:
        h = mtod(m, struct ip *);

        /* prepare for checksum */
        h->ip_p = IPPROTO_TCP;
        h->ip_len = htons(sizeof(struct tcphdr));
        if (dir) {
            h->ip_src.s_addr = htonl(id->src_ip);
            h->ip_dst.s_addr = htonl(id->dst_ip);
        } else {
            h->ip_src.s_addr = htonl(id->dst_ip);
            h->ip_dst.s_addr = htonl(id->src_ip);
        }

        th = (struct tcphdr *)(h + 1);
        break;
#ifdef INET6
    case 6:
        h6 = mtod(m, struct ip6_hdr *);

        /* prepare for checksum */
        h6->ip6_nxt = IPPROTO_TCP;
        h6->ip6_plen = htons(sizeof(struct tcphdr));
        if (dir) {
            h6->ip6_src = id->src_ip6;
            h6->ip6_dst = id->dst_ip6;
        } else {
            h6->ip6_src = id->dst_ip6;
            h6->ip6_dst = id->src_ip6;
        }

        th = (struct tcphdr *)(h6 + 1);
        break;
#endif
    }

    if (dir) {
        th->th_sport = htons(id->src_port);
        th->th_dport = htons(id->dst_port);
    } else {
        th->th_sport = htons(id->dst_port);
        th->th_dport = htons(id->src_port);
    }
    th->th_off = sizeof(struct tcphdr) >> 2;

    if (flags & TH_RST) {
        if (flags & TH_ACK) {
            th->th_seq = htonl(ack);
            th->th_flags = TH_RST;
        } else {
            if (flags & TH_SYN)
                seq++;
            th->th_ack = htonl(seq);
            th->th_flags = TH_RST | TH_ACK;
        }
    } else {
        /*
         * Keepalive - use caller provided sequence numbers
         */
        th->th_seq = htonl(seq);
        th->th_ack = htonl(ack);
        th->th_flags = TH_ACK;
    }

    switch (id->addr_type) {
    case 4:
        th->th_sum = in_cksum(m, len);

        /* finish the ip header */
        h->ip_v = 4;
        h->ip_hl = sizeof(*h) >> 2;
        h->ip_tos = IPTOS_LOWDELAY;
        h->ip_off = htons(0);
        h->ip_len = htons(len);
        h->ip_ttl = V_ip_defttl;
        h->ip_sum = 0;
        break;
#ifdef INET6
    case 6:
        th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
            sizeof(struct tcphdr));

        /* finish the ip6 header */
        h6->ip6_vfc |= IPV6_VERSION;
        h6->ip6_hlim = IPV6_DEFHLIM;
        break;
#endif
    }

    return (m);
}

/*
 * Queue keepalive packets for a given dynamic rule
 */
static struct mbuf **
ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
{
    struct mbuf *m_rev, *m_fwd;

    m_rev = (q->state & ACK_REV) ? NULL :
        ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
    m_fwd = (q->state & ACK_FWD) ? NULL :
        ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0);

    if (m_rev != NULL) {
        *mtailp = m_rev;
        mtailp = &(*mtailp)->m_nextpkt;
    }
    if (m_fwd != NULL) {
        *mtailp = m_fwd;
        mtailp = &(*mtailp)->m_nextpkt;
    }

    return (mtailp);
}

/*
 * This procedure is used to perform various maintenance
 * on the dynamic hash list. Currently it is called every second.
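 * Keepalive generation itself is rate limited: ipfw_dyn_tick() below
 * only enables the keepalive pass once every V_dyn_keepalive_period
 * seconds, and re-arms its callout with callout_reset_on() so the
 * check runs again one second (hz ticks) later.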
 */
static void
ipfw_dyn_tick(void *vnetx)
{
    struct ip_fw_chain *chain;
    int check_ka = 0;
#ifdef VIMAGE
    struct vnet *vp = vnetx;
#endif

    CURVNET_SET(vp);

    chain = &V_layer3_chain;

    /* Run keepalive checks every keepalive_period iff ka is enabled */
    if ((V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) &&
        (V_dyn_keepalive != 0)) {
        V_dyn_keepalive_last = time_uptime;
        check_ka = 1;
    }

    check_dyn_rules(chain, NULL, check_ka, 1);

    callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, vnetx, 0);

    CURVNET_RESTORE();
}

/*
 * Walk through all dynamic states performing generic maintenance:
 * 1) free expired states
 * 2) free all states based on a deleted rule / set
 * 3) send keepalives for states if needed
 *
 * @chain - pointer to current ipfw rules chain
 * @rt - delete all states matching this range if != NULL
 * @check_ka - perform checking/sending keepalives
 * @timer - indicate call from timer routine.
 *
 * Timer routine must call this function unlocked to permit
 * sending keepalives/resizing table.
 *
 * Other callers have to hold IPFW_UH_WLOCK.
 * Additionally, the function assumes that the dynamic rule/set is
 * ALREADY deleted so no new states can be generated by
 * 'deleted' rules.
 *
 * Write lock is needed to ensure that unused parent rules
 * are not freed by another instance (see stages 2 and 3)
 */
static void
check_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt,
    int check_ka, int timer)
{
    struct mbuf *m0, *m, *mnext, **mtailp;
    struct ip *h;
    int i, dyn_count, new_buckets = 0, max_buckets;
    int expired = 0, expired_limits = 0, parents = 0, total = 0;
    ipfw_dyn_rule *q, *q_prev, *q_next;
    ipfw_dyn_rule *exp_head, **exptailp;
    ipfw_dyn_rule *exp_lhead, **expltailp;

    KASSERT(V_ipfw_dyn_v != NULL, ("%s: dynamic table not allocated",
        __func__));

    /* Avoid possible LOR */
    KASSERT(!check_ka || timer, ("%s: keepalive check with lock held",
        __func__));

    /*
     * Do not perform any checks if we currently have no dynamic states
     */
    if (DYN_COUNT == 0)
        return;

    /* Expired states */
    exp_head = NULL;
    exptailp = &exp_head;

    /* Expired limit states */
    exp_lhead = NULL;
    expltailp = &exp_lhead;

    /*
     * We make a chain of packets to go out here -- sending them
     * immediately, instead of deferring until after we drop the IPFW
     * dynamic rule lock, would result in a lock order reversal with
     * the normal packet input -> ipfw call stack.
     */
    m0 = NULL;
    mtailp = &m0;

    /* Protect from hash resizing */
    if (timer != 0)
        IPFW_UH_WLOCK(chain);
    else
        IPFW_UH_WLOCK_ASSERT(chain);

#define NEXT_RULE() { q_prev = q; q = q->next; continue; }

    /* Stage 1: perform requested deletion */
    for (i = 0; i < V_curr_dyn_buckets; i++) {
        IPFW_BUCK_LOCK(i);
        for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) {
            /* account every rule */
            total++;

            /* Skip parent rules in this pass */
            if (q->dyn_type == O_LIMIT_PARENT) {
                parents++;
                NEXT_RULE();
            }

            /*
             * Remove rules which are:
             * 1) expired
             * 2) matching the deletion range
             */
            if ((TIME_LEQ(q->expire, time_uptime)) ||
                (rt != NULL && ipfw_match_range(q->rule, rt))) {
                if (TIME_LE(time_uptime, q->expire) &&
                    q->dyn_type == O_KEEP_STATE &&
                    V_dyn_keep_states != 0) {
                    /*
                     * Do not delete the state if it is
                     * not expired and dyn_keep_states
                     * is ON.
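                     * (V_dyn_keep_states is presumably
                     * the knob behind the
                     * net.inet.ip.fw.dyn_keep_states
                     * sysctl.)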
                     * However, we need to re-link it
                     * to another stable rule.
                     */
                    q->rule = chain->default_rule;
                    NEXT_RULE();
                }

                /* Unlink q from current list */
                q_next = q->next;
                if (q == V_ipfw_dyn_v[i].head)
                    V_ipfw_dyn_v[i].head = q_next;
                else
                    q_prev->next = q_next;

                q->next = NULL;

                /* queue q to expire list */
                if (q->dyn_type != O_LIMIT) {
                    *exptailp = q;
                    exptailp = &(*exptailp)->next;
                    DEB(print_dyn_rule(&q->id, q->dyn_type,
                        "unlink entry", "left");
                    )
                } else {
                    /* Separate list for limit rules */
                    *expltailp = q;
                    expltailp = &(*expltailp)->next;
                    expired_limits++;
                    DEB(print_dyn_rule(&q->id, q->dyn_type,
                        "unlink limit entry", "left");
                    )
                }

                q = q_next;
                expired++;
                continue;
            }

            /*
             * Check if we need to send a keepalive:
             * we need to ensure it is time to do KA,
             * this is an established TCP session, and
             * the expire time is within the keepalive interval
             */
            if ((check_ka != 0) && (q->id.proto == IPPROTO_TCP) &&
                ((q->state & BOTH_SYN) == BOTH_SYN) &&
                (TIME_LEQ(q->expire, time_uptime +
                 V_dyn_keepalive_interval)))
                mtailp = ipfw_dyn_send_ka(mtailp, q);

            NEXT_RULE();
        }
        IPFW_BUCK_UNLOCK(i);
    }

    /* Stage 2: decrement counters from O_LIMIT parents */
    if (expired_limits != 0) {
        /*
         * XXX: Note that deleting a set with more than one
         * heavily-used LIMIT rule can result in overwhelming
         * locking due to lack of per-hash value sorting
         *
         * We should probably think about:
         * 1) pre-allocating a hash of size, say,
         *    MAX(16, V_curr_dyn_buckets / 1024)
         * 2) checking if expired_limits is large enough
         * 3) If yes, init the hash (or part of it), re-link the
         *    current list and start the decrementing procedure in
         *    each bucket separately
         */

        /*
         * Small optimization: do not unlock the bucket until
         * we see that the next item resides in a different bucket
         */
        if (exp_lhead != NULL) {
            i = exp_lhead->parent->bucket;
            IPFW_BUCK_LOCK(i);
        }
        for (q = exp_lhead; q != NULL; q = q->next) {
            if (i != q->parent->bucket) {
                IPFW_BUCK_UNLOCK(i);
                i = q->parent->bucket;
                IPFW_BUCK_LOCK(i);
            }

            /* Decrease parent refcount */
            q->parent->count--;
        }
        if (exp_lhead != NULL)
            IPFW_BUCK_UNLOCK(i);
    }

    /*
     * We protect ourselves from unused parent deletion
     * (from the timer function) by holding the UH write lock.
     */

    /* Stage 3: remove unused parent rules */
    if ((parents != 0) && (expired != 0)) {
        for (i = 0; i < V_curr_dyn_buckets; i++) {
            IPFW_BUCK_LOCK(i);
            for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) {
                if (q->dyn_type != O_LIMIT_PARENT)
                    NEXT_RULE();

                if (q->count != 0)
                    NEXT_RULE();

                /* Parent rule without consumers */

                /* Unlink q from current list */
                q_next = q->next;
                if (q == V_ipfw_dyn_v[i].head)
                    V_ipfw_dyn_v[i].head = q_next;
                else
                    q_prev->next = q_next;

                q->next = NULL;

                /* Add to expired list */
                *exptailp = q;
                exptailp = &(*exptailp)->next;

                DEB(print_dyn_rule(&q->id, q->dyn_type,
                    "unlink parent entry", "left");
                )

                expired++;

                q = q_next;
            }
            IPFW_BUCK_UNLOCK(i);
        }
    }

#undef NEXT_RULE

    if (timer != 0) {
        /*
         * Check if we need to resize the hash:
         * if the current number of states exceeds twice the number
         * of buckets, grow the hash to the minimum power of 2 which
         * is bigger than the current state count. Limit the hash
         * size to 64k.
         */
        max_buckets = (V_dyn_buckets_max > 65536) ?
            65536 : V_dyn_buckets_max;

        dyn_count = DYN_COUNT;

        if ((dyn_count > V_curr_dyn_buckets * 2) &&
            (dyn_count < max_buckets)) {
            new_buckets = V_curr_dyn_buckets;
            while (new_buckets < dyn_count) {
                new_buckets *= 2;

                if (new_buckets >= max_buckets)
                    break;
            }
        }

        IPFW_UH_WUNLOCK(chain);
    }

    /* Finally delete old states and limits if any */
    for (q = exp_head; q != NULL; q = q_next) {
        q_next = q->next;
        uma_zfree(V_ipfw_dyn_rule_zone, q);
        ipfw_dyn_count--;
    }

    for (q = exp_lhead; q != NULL; q = q_next) {
        q_next = q->next;
        uma_zfree(V_ipfw_dyn_rule_zone, q);
        ipfw_dyn_count--;
    }

    /*
     * The rest of the code MUST be called from the timer routine only
     * without holding any locks
     */
    if (timer == 0)
        return;

    /* Send keepalive packets if any */
    for (m = m0; m != NULL; m = mnext) {
        mnext = m->m_nextpkt;
        m->m_nextpkt = NULL;
        h = mtod(m, struct ip *);
        if (h->ip_v == 4)
            ip_output(m, NULL, NULL, 0, NULL, NULL);
#ifdef INET6
        else
            ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
#endif
    }

    /* Run table resize without holding any locks */
    if (new_buckets != 0)
        resize_dynamic_table(chain, new_buckets);
}

/*
 * Deletes all dynamic states originated by rules in the matched range.
 * @chain - pointer to current ipfw rules chain
 * @rt - delete all states originated by rules in this matched range.
 *
 * The function has to be called with IPFW_UH_WLOCK held.
 * Additionally, the function assumes that the dynamic rule/set is
 * ALREADY deleted so no new states can be generated by
 * 'deleted' rules.
 */
void
ipfw_expire_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{

    check_dyn_rules(chain, rt, 0, 0);
}

/*
 * Check if a rule contains at least one dynamic opcode.
 *
 * Returns 1 if such opcode is found, 0 otherwise.
 */
int
ipfw_is_dyn_rule(struct ip_fw *rule)
{
    int cmdlen, l;
    ipfw_insn *cmd;

    l = rule->cmd_len;
    cmd = rule->cmd;
    cmdlen = 0;
    for ( ; l > 0; l -= cmdlen, cmd += cmdlen) {
        cmdlen = F_LEN(cmd);

        switch (cmd->opcode) {
        case O_LIMIT:
        case O_KEEP_STATE:
        case O_PROBE_STATE:
        case O_CHECK_STATE:
            return (1);
        }
    }

    return (0);
}

void
ipfw_dyn_init(struct ip_fw_chain *chain)
{

    V_ipfw_dyn_v = NULL;
    V_dyn_buckets_max = 256;    /* must be power of 2 */
    V_curr_dyn_buckets = 256;   /* must be power of 2 */

    V_dyn_ack_lifetime = 300;
    V_dyn_syn_lifetime = 20;
    V_dyn_fin_lifetime = 1;
    V_dyn_rst_lifetime = 1;
    V_dyn_udp_lifetime = 10;
    V_dyn_short_lifetime = 5;

    V_dyn_keepalive_interval = 20;
    V_dyn_keepalive_period = 5;
    V_dyn_keepalive = 1;        /* do send keepalives */
    V_dyn_keepalive_last = time_uptime;

    V_dyn_max = 16384;          /* max # of dynamic rules */

    V_ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule",
        sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL,
        UMA_ALIGN_PTR, 0);

    /* Enforce limit on dynamic rules */
    uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);

    callout_init(&V_ipfw_timeout, 1);

    /*
     * This can potentially be done on first dynamic rule
     * being added to chain.
     */
    resize_dynamic_table(chain, V_curr_dyn_buckets);
}

void
ipfw_dyn_uninit(int pass)
{
    int i;

    if (pass == 0) {
        callout_drain(&V_ipfw_timeout);
        return;
    }

    if (V_ipfw_dyn_v != NULL) {
        /*
         * Skip deleting all dynamic states -
         * uma_zdestroy() does this more efficiently.
         */

        /* Destroy all mutexes */
        for (i = 0; i < V_curr_dyn_buckets; i++)
            IPFW_BUCK_LOCK_DESTROY(&V_ipfw_dyn_v[i]);
        free(V_ipfw_dyn_v, M_IPFW);
        V_ipfw_dyn_v = NULL;
    }

    uma_zdestroy(V_ipfw_dyn_rule_zone);
}

#ifdef SYSCTL_NODE
/*
 * Get/set the maximum number of dynamic states in a given VNET instance.
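 * As a usage sketch (assuming the standard sysctl name, which matches the
 * one mentioned in the allocation-failure log message above), an admin
 * could raise the cap from the 16384 default set in ipfw_dyn_init() with
 *   sysctl net.inet.ip.fw.dyn_max=32768
 * the handler below then pushes the new limit into the uma(9) zone.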
*/ static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS) { int error; unsigned int nstates; nstates = V_dyn_max; error = sysctl_handle_int(oidp, &nstates, 0, req); /* Read operation or some error */ if ((error != 0) || (req->newptr == NULL)) return (error); V_dyn_max = nstates; uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max); return (0); } /* * Get current number of dynamic states in given VNET instance. */ static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS) { int error; unsigned int nstates; nstates = DYN_COUNT; error = sysctl_handle_int(oidp, &nstates, 0, req); return (error); } #endif /* * Returns size of dynamic states in legacy format */ int ipfw_dyn_len(void) { return (V_ipfw_dyn_v == NULL) ? 0 : (DYN_COUNT * sizeof(ipfw_dyn_rule)); } /* * Returns number of dynamic states. * Used by dump format v1 (current). */ int ipfw_dyn_get_count(void) { return (V_ipfw_dyn_v == NULL) ? 0 : DYN_COUNT; } static void export_dyn_rule(ipfw_dyn_rule *src, ipfw_dyn_rule *dst) { memcpy(dst, src, sizeof(*src)); memcpy(&(dst->rule), &(src->rule->rulenum), sizeof(src->rule->rulenum)); /* * store set number into high word of * dst->rule pointer. */ memcpy((char *)&dst->rule + sizeof(src->rule->rulenum), &(src->rule->set), sizeof(src->rule->set)); /* * store a non-null value in "next". * The userland code will interpret a * NULL here as a marker * for the last dynamic rule. */ memcpy(&dst->next, &dst, sizeof(dst)); dst->expire = TIME_LEQ(dst->expire, time_uptime) ? 0 : dst->expire - time_uptime; } /* * Fills int buffer given by @sd with dynamic states. * Used by dump format v1 (current). * * Returns 0 on success. */ int ipfw_dump_states(struct ip_fw_chain *chain, struct sockopt_data *sd) { ipfw_dyn_rule *p; ipfw_obj_dyntlv *dst, *last; ipfw_obj_ctlv *ctlv; int i; size_t sz; if (V_ipfw_dyn_v == NULL) return (0); IPFW_UH_RLOCK_ASSERT(chain); ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); if (ctlv == NULL) return (ENOMEM); sz = sizeof(ipfw_obj_dyntlv); ctlv->head.type = IPFW_TLV_DYNSTATE_LIST; ctlv->objsize = sz; last = NULL; for (i = 0 ; i < V_curr_dyn_buckets; i++) { IPFW_BUCK_LOCK(i); for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) { dst = (ipfw_obj_dyntlv *)ipfw_get_sopt_space(sd, sz); if (dst == NULL) { IPFW_BUCK_UNLOCK(i); return (ENOMEM); } export_dyn_rule(p, &dst->state); dst->head.length = sz; dst->head.type = IPFW_TLV_DYN_ENT; last = dst; } IPFW_BUCK_UNLOCK(i); } if (last != NULL) /* mark last dynamic rule */ last->head.flags = IPFW_DF_LAST; return (0); } /* * Fill given buffer with dynamic states (legacy format). * IPFW_UH_RLOCK has to be held while calling. */ void ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep) { ipfw_dyn_rule *p, *last = NULL; char *bp; int i; if (V_ipfw_dyn_v == NULL) return; bp = *pbp; IPFW_UH_RLOCK_ASSERT(chain); for (i = 0 ; i < V_curr_dyn_buckets; i++) { IPFW_BUCK_LOCK(i); for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) { if (bp + sizeof *p <= ep) { ipfw_dyn_rule *dst = (ipfw_dyn_rule *)bp; export_dyn_rule(p, dst); last = dst; bp += sizeof(ipfw_dyn_rule); } } IPFW_BUCK_UNLOCK(i); } if (last != NULL) /* mark last dynamic rule */ bzero(&last->next, sizeof(last)); *pbp = bp; } /* end of file */ Index: projects/powernv/netsmb/smb_conn.c =================================================================== --- projects/powernv/netsmb/smb_conn.c (revision 291050) +++ projects/powernv/netsmb/smb_conn.c (revision 291051) @@ -1,966 +1,968 @@ /*- * Copyright (c) 2000-2001 Boris Popov * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Connection engine. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct smb_connobj smb_vclist; static int smb_vcnext = 1; /* next unique id for VC */ SYSCTL_NODE(_net, OID_AUTO, smb, CTLFLAG_RW, NULL, "SMB protocol"); static MALLOC_DEFINE(M_SMBCONN, "smb_conn", "SMB connection"); static void smb_co_init(struct smb_connobj *cp, int level, char *ilockname, char *lockname); static void smb_co_done(struct smb_connobj *cp); static int smb_vc_disconnect(struct smb_vc *vcp); static void smb_vc_free(struct smb_connobj *cp); static void smb_vc_gone(struct smb_connobj *cp, struct smb_cred *scred); static smb_co_free_t smb_share_free; static smb_co_gone_t smb_share_gone; static int smb_sysctl_treedump(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_net_smb, OID_AUTO, treedump, CTLFLAG_RD | CTLTYPE_OPAQUE, NULL, 0, smb_sysctl_treedump, "S,treedump", "Requester tree"); int smb_sm_init(void) { smb_co_init(&smb_vclist, SMBL_SM, "smbsm ilock", "smbsm"); sx_xlock(&smb_vclist.co_interlock); smb_co_unlock(&smb_vclist); sx_unlock(&smb_vclist.co_interlock); return 0; } int smb_sm_done(void) { /* XXX: hold the mutex */ if (smb_vclist.co_usecount > 1) { SMBERROR("%d connections still active\n", smb_vclist.co_usecount - 1); return EBUSY; } smb_co_done(&smb_vclist); return 0; } static int smb_sm_lockvclist(void) { int error; sx_xlock(&smb_vclist.co_interlock); error = smb_co_lock(&smb_vclist); sx_unlock(&smb_vclist.co_interlock); return error; } static void smb_sm_unlockvclist(void) { sx_xlock(&smb_vclist.co_interlock); smb_co_unlock(&smb_vclist); sx_unlock(&smb_vclist.co_interlock); } static int smb_sm_lookupint(struct smb_vcspec *vcspec, struct smb_sharespec *shspec, struct smb_cred *scred, struct smb_vc **vcpp) { struct smb_connobj *scp; struct smb_vc *vcp; int exact = 1; int error; vcspec->shspec = shspec; error = ENOENT; vcp = NULL; SMBCO_FOREACH(scp, &smb_vclist) { vcp = (struct smb_vc *)scp; error = smb_vc_lock(vcp); if (error) continue; if ((vcp->obj.co_flags & SMBV_PRIVATE) || !CONNADDREQ(vcp->vc_paddr, vcspec->sap) || strcmp(vcp->vc_username, vcspec->username) != 0) goto err1; if (vcspec->owner != SMBM_ANY_OWNER) { if (vcp->vc_uid != vcspec->owner) 
goto err1; } else exact = 0; if (vcspec->group != SMBM_ANY_GROUP) { if (vcp->vc_grp != vcspec->group) goto err1; } else exact = 0; if (vcspec->mode & SMBM_EXACT) { if (!exact || (vcspec->mode & SMBM_MASK) != vcp->vc_mode) goto err1; } if (smb_vc_access(vcp, scred, vcspec->mode) != 0) goto err1; vcspec->ssp = NULL; if (shspec) { error = (int)smb_vc_lookupshare(vcp, shspec, scred, &vcspec->ssp); if (error) goto fail; } error = 0; break; err1: error = 1; fail: smb_vc_unlock(vcp); } if (vcp) { smb_vc_ref(vcp); *vcpp = vcp; } return (error); } int smb_sm_lookup(struct smb_vcspec *vcspec, struct smb_sharespec *shspec, struct smb_cred *scred, struct smb_vc **vcpp) { struct smb_vc *vcp; struct smb_share *ssp = NULL; int error; *vcpp = vcp = NULL; error = smb_sm_lockvclist(); if (error) return error; error = smb_sm_lookupint(vcspec, shspec, scred, vcpp); if (error == 0 || (vcspec->flags & SMBV_CREATE) == 0) { smb_sm_unlockvclist(); return error; } error = smb_sm_lookupint(vcspec, NULL, scred, &vcp); if (error) { error = smb_vc_create(vcspec, scred, &vcp); if (error) goto out; error = smb_vc_connect(vcp, scred); if (error) goto out; } if (shspec == NULL) goto out; error = smb_share_create(vcp, shspec, scred, &ssp); if (error) goto out; error = smb_smb_treeconnect(ssp, scred); if (error == 0) vcspec->ssp = ssp; else smb_share_put(ssp, scred); out: smb_sm_unlockvclist(); if (error == 0) *vcpp = vcp; else if (vcp) { smb_vc_lock(vcp); smb_vc_put(vcp, scred); } return error; } /* * Common code for connection object */ static void smb_co_init(struct smb_connobj *cp, int level, char *ilockname, char *lockname) { SLIST_INIT(&cp->co_children); sx_init_flags(&cp->co_interlock, ilockname, SX_RECURSE); cv_init(&cp->co_lock, "smblock"); cp->co_lockcnt = 0; cp->co_locker = NULL; cp->co_level = level; cp->co_usecount = 1; sx_xlock(&cp->co_interlock); smb_co_lock(cp); sx_unlock(&cp->co_interlock); } static void smb_co_done(struct smb_connobj *cp) { sx_destroy(&cp->co_interlock); cv_destroy(&cp->co_lock); cp->co_locker = NULL; cp->co_flags = 0; cp->co_lockcnt = 0; } static void smb_co_gone(struct smb_connobj *cp, struct smb_cred *scred) { struct smb_connobj *parent; if (cp->co_gone) cp->co_gone(cp, scred); parent = cp->co_parent; if (parent) { sx_xlock(&parent->co_interlock); smb_co_lock(parent); sx_unlock(&parent->co_interlock); SLIST_REMOVE(&parent->co_children, cp, smb_connobj, co_next); smb_co_put(parent, scred); } if (cp->co_free) cp->co_free(cp); } void smb_co_ref(struct smb_connobj *cp) { sx_xlock(&cp->co_interlock); cp->co_usecount++; sx_unlock(&cp->co_interlock); } void smb_co_rele(struct smb_connobj *cp, struct smb_cred *scred) { sx_xlock(&cp->co_interlock); smb_co_unlock(cp); if (cp->co_usecount > 1) { cp->co_usecount--; sx_unlock(&cp->co_interlock); return; } if (cp->co_usecount == 0) { SMBERROR("negative use_count for object %d", cp->co_level); sx_unlock(&cp->co_interlock); return; } cp->co_usecount--; cp->co_flags |= SMBO_GONE; sx_unlock(&cp->co_interlock); smb_co_gone(cp, scred); } int smb_co_get(struct smb_connobj *cp, struct smb_cred *scred) { int error; MPASS(sx_xholder(&cp->co_interlock) == curthread); cp->co_usecount++; error = smb_co_lock(cp); if (error) cp->co_usecount--; return error; } void smb_co_put(struct smb_connobj *cp, struct smb_cred *scred) { sx_xlock(&cp->co_interlock); if (cp->co_usecount > 1) { cp->co_usecount--; } else if (cp->co_usecount == 1) { cp->co_usecount--; cp->co_flags |= SMBO_GONE; } else { SMBERROR("negative usecount"); } smb_co_unlock(cp); sx_unlock(&cp->co_interlock); 
if ((cp->co_flags & SMBO_GONE) == 0) return; smb_co_gone(cp, scred); } int smb_co_lock(struct smb_connobj *cp) { MPASS(sx_xholder(&cp->co_interlock) == curthread); for (;;) { if (cp->co_flags & SMBO_GONE) return EINVAL; if (cp->co_locker == NULL) { cp->co_locker = curthread; return 0; } if (cp->co_locker == curthread) { cp->co_lockcnt++; return 0; } cv_wait(&cp->co_lock, &cp->co_interlock); } } void smb_co_unlock(struct smb_connobj *cp) { MPASS(sx_xholder(&cp->co_interlock) == curthread); MPASS(cp->co_locker == curthread); if (cp->co_lockcnt != 0) { cp->co_lockcnt--; return; } cp->co_locker = NULL; cv_signal(&cp->co_lock); } static void smb_co_addchild(struct smb_connobj *parent, struct smb_connobj *child) { smb_co_ref(parent); SLIST_INSERT_HEAD(&parent->co_children, child, co_next); child->co_parent = parent; } /* * Session implementation */ int smb_vc_create(struct smb_vcspec *vcspec, struct smb_cred *scred, struct smb_vc **vcpp) { struct smb_vc *vcp; struct ucred *cred = scred->scr_cred; uid_t uid = vcspec->owner; gid_t gid = vcspec->group; uid_t realuid = cred->cr_uid; char *domain = vcspec->domain; int error, isroot; isroot = smb_suser(cred) == 0; /* * Only superuser can create VCs with different uid and gid */ if (uid != SMBM_ANY_OWNER && uid != realuid && !isroot) return EPERM; if (gid != SMBM_ANY_GROUP && !groupmember(gid, cred) && !isroot) return EPERM; vcp = smb_zmalloc(sizeof(*vcp), M_SMBCONN, M_WAITOK); smb_co_init(VCTOCP(vcp), SMBL_VC, "smb_vc ilock", "smb_vc"); vcp->obj.co_free = smb_vc_free; vcp->obj.co_gone = smb_vc_gone; vcp->vc_number = smb_vcnext++; vcp->vc_timo = SMB_DEFRQTIMO; vcp->vc_smbuid = SMB_UID_UNKNOWN; vcp->vc_mode = vcspec->rights & SMBM_MASK; vcp->obj.co_flags = vcspec->flags & (SMBV_PRIVATE | SMBV_SINGLESHARE); vcp->vc_tdesc = &smb_tran_nbtcp_desc; vcp->vc_seqno = 0; vcp->vc_mackey = NULL; vcp->vc_mackeylen = 0; if (uid == SMBM_ANY_OWNER) uid = realuid; if (gid == SMBM_ANY_GROUP) gid = cred->cr_groups[0]; vcp->vc_uid = uid; vcp->vc_grp = gid; smb_sl_init(&vcp->vc_stlock, "vcstlock"); error = ENOMEM; vcp->vc_paddr = sodupsockaddr(vcspec->sap, M_WAITOK); if (vcp->vc_paddr == NULL) goto fail; vcp->vc_laddr = sodupsockaddr(vcspec->lap, M_WAITOK); if (vcp->vc_laddr == NULL) goto fail; vcp->vc_pass = smb_strdup(vcspec->pass); if (vcp->vc_pass == NULL) goto fail; vcp->vc_domain = smb_strdup((domain && domain[0]) ? 
domain : "NODOMAIN"); if (vcp->vc_domain == NULL) goto fail; vcp->vc_srvname = smb_strdup(vcspec->srvname); if (vcp->vc_srvname == NULL) goto fail; vcp->vc_username = smb_strdup(vcspec->username); if (vcp->vc_username == NULL) goto fail; error = (int)iconv_open("tolower", vcspec->localcs, &vcp->vc_tolower); if (error) goto fail; error = (int)iconv_open("toupper", vcspec->localcs, &vcp->vc_toupper); if (error) goto fail; if (vcspec->servercs[0]) { error = (int)iconv_open(vcspec->servercs, vcspec->localcs, &vcp->vc_cp_toserver); if (error) goto fail; error = (int)iconv_open(vcspec->localcs, vcspec->servercs, &vcp->vc_cp_tolocal); if (error) goto fail; vcp->vc_toserver = vcp->vc_cp_toserver; vcp->vc_tolocal = vcp->vc_cp_tolocal; iconv_add(ENCODING_UNICODE, ENCODING_UNICODE, SMB_UNICODE_NAME); iconv_add(ENCODING_UNICODE, SMB_UNICODE_NAME, ENCODING_UNICODE); error = (int)iconv_open(SMB_UNICODE_NAME, vcspec->localcs, &vcp->vc_ucs_toserver); if (!error) { error = (int)iconv_open(vcspec->localcs, SMB_UNICODE_NAME, &vcp->vc_ucs_tolocal); } if (error) { if (vcp->vc_ucs_toserver) iconv_close(vcp->vc_ucs_toserver); vcp->vc_ucs_toserver = NULL; vcp->vc_ucs_tolocal = NULL; } } error = (int)smb_iod_create(vcp); if (error) goto fail; *vcpp = vcp; smb_co_addchild(&smb_vclist, VCTOCP(vcp)); return (0); fail: smb_vc_put(vcp, scred); return (error); } static void smb_vc_free(struct smb_connobj *cp) { struct smb_vc *vcp = CPTOVC(cp); if (vcp->vc_iod) smb_iod_destroy(vcp->vc_iod); SMB_STRFREE(vcp->vc_username); SMB_STRFREE(vcp->vc_srvname); SMB_STRFREE(vcp->vc_pass); SMB_STRFREE(vcp->vc_domain); if (vcp->vc_mackey) free(vcp->vc_mackey, M_SMBTEMP); if (vcp->vc_paddr) free(vcp->vc_paddr, M_SONAME); if (vcp->vc_laddr) free(vcp->vc_laddr, M_SONAME); if (vcp->vc_tolower) iconv_close(vcp->vc_tolower); if (vcp->vc_toupper) iconv_close(vcp->vc_toupper); if (vcp->vc_tolocal) vcp->vc_tolocal = NULL; if (vcp->vc_toserver) vcp->vc_toserver = NULL; if (vcp->vc_cp_tolocal) iconv_close(vcp->vc_cp_tolocal); if (vcp->vc_cp_toserver) iconv_close(vcp->vc_cp_toserver); if (vcp->vc_ucs_tolocal) iconv_close(vcp->vc_ucs_tolocal); if (vcp->vc_ucs_toserver) iconv_close(vcp->vc_ucs_toserver); smb_co_done(VCTOCP(vcp)); smb_sl_destroy(&vcp->vc_stlock); free(vcp, M_SMBCONN); } /* * Called when use count of VC dropped to zero. 
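* The co_gone hook is invoked from smb_co_gone() before co_free, so the transport is torn down while the object is still valid.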
*/ static void smb_vc_gone(struct smb_connobj *cp, struct smb_cred *scred) { struct smb_vc *vcp = CPTOVC(cp); smb_vc_disconnect(vcp); } void smb_vc_ref(struct smb_vc *vcp) { smb_co_ref(VCTOCP(vcp)); } void smb_vc_rele(struct smb_vc *vcp, struct smb_cred *scred) { smb_co_rele(VCTOCP(vcp), scred); } int smb_vc_get(struct smb_vc *vcp, struct smb_cred *scred) { struct smb_connobj *cp; int error; cp = VCTOCP(vcp); sx_xlock(&cp->co_interlock); error = smb_co_get(cp, scred); sx_unlock(&cp->co_interlock); return error; } void smb_vc_put(struct smb_vc *vcp, struct smb_cred *scred) { smb_co_put(VCTOCP(vcp), scred); } int smb_vc_lock(struct smb_vc *vcp) { struct smb_connobj *cp; int error; cp = VCTOCP(vcp); sx_xlock(&cp->co_interlock); error = smb_co_lock(cp); sx_unlock(&cp->co_interlock); return error; } void smb_vc_unlock(struct smb_vc *vcp) { struct smb_connobj *cp; cp = VCTOCP(vcp); sx_xlock(&cp->co_interlock); smb_co_unlock(cp); sx_unlock(&cp->co_interlock); } int smb_vc_access(struct smb_vc *vcp, struct smb_cred *scred, mode_t mode) { struct ucred *cred = scred->scr_cred; if (smb_suser(cred) == 0 || cred->cr_uid == vcp->vc_uid) return 0; mode >>= 3; if (!groupmember(vcp->vc_grp, cred)) mode >>= 3; return (vcp->vc_mode & mode) == mode ? 0 : EACCES; } static int smb_vc_cmpshare(struct smb_share *ssp, struct smb_sharespec *dp) { int exact = 1; if (strcmp(ssp->ss_name, dp->name) != 0) return 1; if (dp->owner != SMBM_ANY_OWNER) { if (ssp->ss_uid != dp->owner) return 1; } else exact = 0; if (dp->group != SMBM_ANY_GROUP) { if (ssp->ss_grp != dp->group) return 1; } else exact = 0; if (dp->mode & SMBM_EXACT) { if (!exact) return 1; return (dp->mode & SMBM_MASK) == ssp->ss_mode ? 0 : 1; } if (smb_share_access(ssp, dp->scred, dp->mode) != 0) return 1; return 0; } /* * Lookup share in the given VC. Share referenced and locked on return. * VC expected to be locked on entry and will be left locked on exit. */ int smb_vc_lookupshare(struct smb_vc *vcp, struct smb_sharespec *dp, struct smb_cred *scred, struct smb_share **sspp) { struct smb_connobj *scp = NULL; struct smb_share *ssp = NULL; int error; *sspp = NULL; dp->scred = scred; SMBCO_FOREACH(scp, VCTOCP(vcp)) { ssp = (struct smb_share *)scp; error = smb_share_lock(ssp); if (error) continue; if (smb_vc_cmpshare(ssp, dp) == 0) break; smb_share_unlock(ssp); } if (ssp) { smb_share_ref(ssp); *sspp = ssp; error = 0; } else error = ENOENT; return error; } int smb_vc_connect(struct smb_vc *vcp, struct smb_cred *scred) { return smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL); } /* * Destroy VC to server, invalidate shares linked with it. * Transport should be locked on entry. 
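* Note that vcp->vc_iod may be NULL here when smb_iod_create() failed during VC setup (the failure path now clears vc_iod), which is why the disconnect request below is guarded by a NULL check.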
*/ int smb_vc_disconnect(struct smb_vc *vcp) { - smb_iod_request(vcp->vc_iod, SMBIOD_EV_DISCONNECT | SMBIOD_EV_SYNC, NULL); + if (vcp->vc_iod != NULL) + smb_iod_request(vcp->vc_iod, SMBIOD_EV_DISCONNECT | + SMBIOD_EV_SYNC, NULL); return 0; } static char smb_emptypass[] = ""; const char * smb_vc_getpass(struct smb_vc *vcp) { if (vcp->vc_pass) return vcp->vc_pass; return smb_emptypass; } static int smb_vc_getinfo(struct smb_vc *vcp, struct smb_vc_info *vip) { bzero(vip, sizeof(struct smb_vc_info)); vip->itype = SMB_INFO_VC; vip->usecount = vcp->obj.co_usecount; vip->uid = vcp->vc_uid; vip->gid = vcp->vc_grp; vip->mode = vcp->vc_mode; vip->flags = vcp->obj.co_flags; vip->sopt = vcp->vc_sopt; vip->iodstate = vcp->vc_iod->iod_state; bzero(&vip->sopt.sv_skey, sizeof(vip->sopt.sv_skey)); snprintf(vip->srvname, sizeof(vip->srvname), "%s", vcp->vc_srvname); snprintf(vip->vcname, sizeof(vip->vcname), "%s", vcp->vc_username); return 0; } u_short smb_vc_nextmid(struct smb_vc *vcp) { u_short r; sx_xlock(&vcp->obj.co_interlock); r = vcp->vc_mid++; sx_unlock(&vcp->obj.co_interlock); return r; } /* * Share implementation */ /* * Allocate share structure and attach it to the given VC * Connection expected to be locked on entry. Share will be returned * in locked state. */ int smb_share_create(struct smb_vc *vcp, struct smb_sharespec *shspec, struct smb_cred *scred, struct smb_share **sspp) { struct smb_share *ssp; struct ucred *cred = scred->scr_cred; uid_t realuid = cred->cr_uid; uid_t uid = shspec->owner; gid_t gid = shspec->group; int error, isroot; isroot = smb_suser(cred) == 0; /* * Only superuser can create shares with different uid and gid */ if (uid != SMBM_ANY_OWNER && uid != realuid && !isroot) return EPERM; if (gid != SMBM_ANY_GROUP && !groupmember(gid, cred) && !isroot) return EPERM; error = smb_vc_lookupshare(vcp, shspec, scred, &ssp); if (!error) { smb_share_put(ssp, scred); return EEXIST; } if (uid == SMBM_ANY_OWNER) uid = realuid; if (gid == SMBM_ANY_GROUP) gid = cred->cr_groups[0]; ssp = smb_zmalloc(sizeof(*ssp), M_SMBCONN, M_WAITOK); smb_co_init(SSTOCP(ssp), SMBL_SHARE, "smbss ilock", "smbss"); ssp->obj.co_free = smb_share_free; ssp->obj.co_gone = smb_share_gone; smb_sl_init(&ssp->ss_stlock, "ssstlock"); ssp->ss_name = smb_strdup(shspec->name); if (shspec->pass && shspec->pass[0]) ssp->ss_pass = smb_strdup(shspec->pass); ssp->ss_type = shspec->stype; ssp->ss_tid = SMB_TID_UNKNOWN; ssp->ss_uid = uid; ssp->ss_grp = gid; ssp->ss_mode = shspec->rights & SMBM_MASK; smb_co_addchild(VCTOCP(vcp), SSTOCP(ssp)); *sspp = ssp; return 0; } static void smb_share_free(struct smb_connobj *cp) { struct smb_share *ssp = CPTOSS(cp); SMB_STRFREE(ssp->ss_name); SMB_STRFREE(ssp->ss_pass); smb_sl_destroy(&ssp->ss_stlock); smb_co_done(SSTOCP(ssp)); free(ssp, M_SMBCONN); } static void smb_share_gone(struct smb_connobj *cp, struct smb_cred *scred) { struct smb_share *ssp = CPTOSS(cp); smb_smb_treedisconnect(ssp, scred); } void smb_share_ref(struct smb_share *ssp) { smb_co_ref(SSTOCP(ssp)); } void smb_share_rele(struct smb_share *ssp, struct smb_cred *scred) { smb_co_rele(SSTOCP(ssp), scred); } int smb_share_get(struct smb_share *ssp, struct smb_cred *scred) { struct smb_connobj *cp = SSTOCP(ssp); int error; sx_xlock(&cp->co_interlock); error = smb_co_get(cp, scred); sx_unlock(&cp->co_interlock); return error; } void smb_share_put(struct smb_share *ssp, struct smb_cred *scred) { smb_co_put(SSTOCP(ssp), scred); } int smb_share_lock(struct smb_share *ssp) { struct smb_connobj *cp; int error; cp = SSTOCP(ssp); 
sx_xlock(&cp->co_interlock); error = smb_co_lock(cp); sx_unlock(&cp->co_interlock); return error; } void smb_share_unlock(struct smb_share *ssp) { struct smb_connobj *cp; cp = SSTOCP(ssp); sx_xlock(&cp->co_interlock); smb_co_unlock(cp); sx_unlock(&cp->co_interlock); } int smb_share_access(struct smb_share *ssp, struct smb_cred *scred, mode_t mode) { struct ucred *cred = scred->scr_cred; if (smb_suser(cred) == 0 || cred->cr_uid == ssp->ss_uid) return 0; mode >>= 3; if (!groupmember(ssp->ss_grp, cred)) mode >>= 3; return (ssp->ss_mode & mode) == mode ? 0 : EACCES; } void smb_share_invalidate(struct smb_share *ssp) { ssp->ss_tid = SMB_TID_UNKNOWN; } int smb_share_valid(struct smb_share *ssp) { return ssp->ss_tid != SMB_TID_UNKNOWN && ssp->ss_vcgenid == SSTOVC(ssp)->vc_genid; } const char* smb_share_getpass(struct smb_share *ssp) { struct smb_vc *vcp; if (ssp->ss_pass) return ssp->ss_pass; vcp = SSTOVC(ssp); if (vcp->vc_pass) return vcp->vc_pass; return smb_emptypass; } static int smb_share_getinfo(struct smb_share *ssp, struct smb_share_info *sip) { bzero(sip, sizeof(struct smb_share_info)); sip->itype = SMB_INFO_SHARE; sip->usecount = ssp->obj.co_usecount; sip->tid = ssp->ss_tid; sip->type= ssp->ss_type; sip->uid = ssp->ss_uid; sip->gid = ssp->ss_grp; sip->mode= ssp->ss_mode; sip->flags = ssp->obj.co_flags; snprintf(sip->sname, sizeof(sip->sname), "%s", ssp->ss_name); return 0; } /* * Dump an entire tree into sysctl call */ static int smb_sysctl_treedump(SYSCTL_HANDLER_ARGS) { struct smb_connobj *scp1, *scp2; struct smb_vc *vcp; struct smb_share *ssp; struct smb_vc_info vci; struct smb_share_info ssi; int error, itype; error = sysctl_wire_old_buffer(req, 0); if (error) return (error); error = smb_sm_lockvclist(); if (error) return error; SMBCO_FOREACH(scp1, &smb_vclist) { vcp = (struct smb_vc *)scp1; error = smb_vc_lock(vcp); if (error) continue; smb_vc_getinfo(vcp, &vci); error = SYSCTL_OUT(req, &vci, sizeof(struct smb_vc_info)); if (error) { smb_vc_unlock(vcp); break; } SMBCO_FOREACH(scp2, VCTOCP(vcp)) { ssp = (struct smb_share *)scp2; error = smb_share_lock(ssp); if (error) { error = 0; continue; } smb_share_getinfo(ssp, &ssi); smb_share_unlock(ssp); error = SYSCTL_OUT(req, &ssi, sizeof(struct smb_share_info)); if (error) break; } smb_vc_unlock(vcp); if (error) break; } if (!error) { itype = SMB_INFO_NONE; error = SYSCTL_OUT(req, &itype, sizeof(itype)); } smb_sm_unlockvclist(); return error; } Index: projects/powernv/netsmb/smb_iod.c =================================================================== --- projects/powernv/netsmb/smb_iod.c (revision 291050) +++ projects/powernv/netsmb/smb_iod.c (revision 291051) @@ -1,717 +1,720 @@ /*- * Copyright (c) 2000-2001 Boris Popov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SMBIOD_SLEEP_TIMO 2 #define SMBIOD_PING_TIMO 60 /* seconds */ #define SMB_IOD_EVLOCKPTR(iod) (&((iod)->iod_evlock)) #define SMB_IOD_EVLOCK(iod) smb_sl_lock(&((iod)->iod_evlock)) #define SMB_IOD_EVUNLOCK(iod) smb_sl_unlock(&((iod)->iod_evlock)) #define SMB_IOD_RQLOCKPTR(iod) (&((iod)->iod_rqlock)) #define SMB_IOD_RQLOCK(iod) smb_sl_lock(&((iod)->iod_rqlock)) #define SMB_IOD_RQUNLOCK(iod) smb_sl_unlock(&((iod)->iod_rqlock)) #define smb_iod_wakeup(iod) wakeup(&(iod)->iod_flags) static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon"); static int smb_iod_next; static int smb_iod_sendall(struct smbiod *iod); static int smb_iod_disconnect(struct smbiod *iod); static void smb_iod_thread(void *); static __inline void smb_iod_rqprocessed(struct smb_rq *rqp, int error) { SMBRQ_SLOCK(rqp); rqp->sr_lerror = error; rqp->sr_rpgen++; rqp->sr_state = SMBRQ_NOTIFIED; wakeup(&rqp->sr_state); SMBRQ_SUNLOCK(rqp); } static void smb_iod_invrq(struct smbiod *iod) { struct smb_rq *rqp; /* * Invalidate all outstanding requests for this connection */ SMB_IOD_RQLOCK(iod); TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) { rqp->sr_flags |= SMBR_RESTART; smb_iod_rqprocessed(rqp, ENOTCONN); } SMB_IOD_RQUNLOCK(iod); } static void smb_iod_closetran(struct smbiod *iod) { struct smb_vc *vcp = iod->iod_vc; struct thread *td = iod->iod_td; if (vcp->vc_tdata == NULL) return; SMB_TRAN_DISCONNECT(vcp, td); SMB_TRAN_DONE(vcp, td); vcp->vc_tdata = NULL; } static void smb_iod_dead(struct smbiod *iod) { iod->iod_state = SMBIOD_ST_DEAD; smb_iod_closetran(iod); smb_iod_invrq(iod); } static int smb_iod_connect(struct smbiod *iod) { struct smb_vc *vcp = iod->iod_vc; struct thread *td = iod->iod_td; int error; SMBIODEBUG("%d\n", iod->iod_state); switch(iod->iod_state) { case SMBIOD_ST_VCACTIVE: SMBERROR("called for already opened connection\n"); return EISCONN; case SMBIOD_ST_DEAD: return ENOTCONN; /* XXX: last error code ? 
*/ default: break; } vcp->vc_genid++; error = 0; error = (int)SMB_TRAN_CREATE(vcp, td); if (error) goto fail; SMBIODEBUG("tcreate\n"); if (vcp->vc_laddr) { error = (int)SMB_TRAN_BIND(vcp, vcp->vc_laddr, td); if (error) goto fail; } SMBIODEBUG("tbind\n"); error = (int)SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td); if (error) goto fail; SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags); iod->iod_state = SMBIOD_ST_TRANACTIVE; SMBIODEBUG("tconnect\n"); /* vcp->vc_mid = 0;*/ error = (int)smb_smb_negotiate(vcp, &iod->iod_scred); if (error) goto fail; SMBIODEBUG("snegotiate\n"); error = (int)smb_smb_ssnsetup(vcp, &iod->iod_scred); if (error) goto fail; iod->iod_state = SMBIOD_ST_VCACTIVE; SMBIODEBUG("completed\n"); smb_iod_invrq(iod); return (0); fail: smb_iod_dead(iod); return (error); } static int smb_iod_disconnect(struct smbiod *iod) { struct smb_vc *vcp = iod->iod_vc; SMBIODEBUG("\n"); if (iod->iod_state == SMBIOD_ST_VCACTIVE) { smb_smb_ssnclose(vcp, &iod->iod_scred); iod->iod_state = SMBIOD_ST_TRANACTIVE; } vcp->vc_smbuid = SMB_UID_UNKNOWN; smb_iod_closetran(iod); iod->iod_state = SMBIOD_ST_NOTCONN; return 0; } static int smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp) { int error; if (iod->iod_state != SMBIOD_ST_VCACTIVE) { if (iod->iod_state != SMBIOD_ST_DEAD) return ENOTCONN; iod->iod_state = SMBIOD_ST_RECONNECT; error = smb_iod_connect(iod); if (error) return error; } SMBIODEBUG("tree reconnect\n"); SMBS_ST_LOCK(ssp); ssp->ss_flags |= SMBS_RECONNECTING; SMBS_ST_UNLOCK(ssp); error = smb_smb_treeconnect(ssp, &iod->iod_scred); SMBS_ST_LOCK(ssp); ssp->ss_flags &= ~SMBS_RECONNECTING; SMBS_ST_UNLOCK(ssp); wakeup(&ssp->ss_vcgenid); return error; } static int smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp) { struct thread *td = iod->iod_td; struct smb_vc *vcp = iod->iod_vc; struct smb_share *ssp = rqp->sr_share; struct mbuf *m; int error; SMBIODEBUG("iod_state = %d\n", iod->iod_state); switch (iod->iod_state) { case SMBIOD_ST_NOTCONN: smb_iod_rqprocessed(rqp, ENOTCONN); return 0; case SMBIOD_ST_DEAD: iod->iod_state = SMBIOD_ST_RECONNECT; return 0; case SMBIOD_ST_RECONNECT: return 0; default: break; } if (rqp->sr_sendcnt == 0) { #ifdef movedtoanotherplace if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux) return 0; #endif le16enc(rqp->sr_rqtid, ssp ? ssp->ss_tid : SMB_TID_UNKNOWN); le16enc(rqp->sr_rquid, vcp ? vcp->vc_smbuid : 0); mb_fixhdr(&rqp->sr_rq); if (vcp->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) smb_rq_sign(rqp); } if (rqp->sr_sendcnt++ > 5) { rqp->sr_flags |= SMBR_RESTART; smb_iod_rqprocessed(rqp, rqp->sr_lerror); /* * If all attempts to send a request failed, then * something is seriously hosed. 
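* Returning ENOTCONN makes the caller (smb_iod_sendall()) declare the whole connection dead via smb_iod_dead().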
*/ return ENOTCONN; } SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0); m_dumpm(rqp->sr_rq.mb_top); m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAITOK); error = rqp->sr_lerror = SMB_TRAN_SEND(vcp, m, td); if (error == 0) { getnanotime(&rqp->sr_timesent); iod->iod_lastrqsent = rqp->sr_timesent; rqp->sr_flags |= SMBR_SENT; rqp->sr_state = SMBRQ_SENT; return 0; } /* * Check for fatal errors */ if (SMB_TRAN_FATAL(vcp, error)) { /* * No further attempts should be made */ return ENOTCONN; } if (smb_rq_intr(rqp)) smb_iod_rqprocessed(rqp, EINTR); return 0; } /* * Process incoming packets */ static int smb_iod_recvall(struct smbiod *iod) { struct smb_vc *vcp = iod->iod_vc; struct thread *td = iod->iod_td; struct smb_rq *rqp; struct mbuf *m; u_char *hp; u_short mid; int error; switch (iod->iod_state) { case SMBIOD_ST_NOTCONN: case SMBIOD_ST_DEAD: case SMBIOD_ST_RECONNECT: return 0; default: break; } for (;;) { m = NULL; error = SMB_TRAN_RECV(vcp, &m, td); if (error == EWOULDBLOCK) break; if (SMB_TRAN_FATAL(vcp, error)) { smb_iod_dead(iod); break; } if (error) break; if (m == NULL) { SMBERROR("tran return NULL without error\n"); error = EPIPE; continue; } m = m_pullup(m, SMB_HDRLEN); if (m == NULL) continue; /* wait for a good packet */ /* * Now we got an entire and possibly invalid SMB packet. * Be careful while parsing it. */ m_dumpm(m); hp = mtod(m, u_char*); if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) { m_freem(m); continue; } mid = SMB_HDRMID(hp); SMBSDEBUG("mid %04x\n", (u_int)mid); SMB_IOD_RQLOCK(iod); TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) { if (rqp->sr_mid != mid) continue; SMBRQ_SLOCK(rqp); if (rqp->sr_rp.md_top == NULL) { md_initm(&rqp->sr_rp, m); } else { if (rqp->sr_flags & SMBR_MULTIPACKET) { md_append_record(&rqp->sr_rp, m); } else { SMBRQ_SUNLOCK(rqp); SMBERROR("duplicate response %d (ignored)\n", mid); break; } } SMBRQ_SUNLOCK(rqp); smb_iod_rqprocessed(rqp, 0); break; } SMB_IOD_RQUNLOCK(iod); if (rqp == NULL) { SMBERROR("drop resp with mid %d\n", (u_int)mid); /* smb_printrqlist(vcp);*/ m_freem(m); } } /* * check for interrupts */ SMB_IOD_RQLOCK(iod); TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) { if (smb_td_intr(rqp->sr_cred->scr_td)) { smb_iod_rqprocessed(rqp, EINTR); } } SMB_IOD_RQUNLOCK(iod); return 0; } int smb_iod_request(struct smbiod *iod, int event, void *ident) { struct smbiod_event *evp; int error; SMBIODEBUG("\n"); evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK); evp->ev_type = event; evp->ev_ident = ident; SMB_IOD_EVLOCK(iod); STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link); if ((event & SMBIOD_EV_SYNC) == 0) { SMB_IOD_EVUNLOCK(iod); smb_iod_wakeup(iod); return 0; } smb_iod_wakeup(iod); msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0); error = evp->ev_error; free(evp, M_SMBIOD); return error; } /* * Place request in the queue. * Request from smbiod have a high priority. 
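* Internal requests (those issued by the iod thread itself, detected by comparing sr_cred->scr_td->td_proc with iod->iod_p) go to the head of the queue and are driven synchronously, since the daemon cannot block waiting on itself; all other requests honor the vc_maxmux mux limit.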
*/ int smb_iod_addrq(struct smb_rq *rqp) { struct smb_vc *vcp = rqp->sr_vc; struct smbiod *iod = vcp->vc_iod; int error; SMBIODEBUG("\n"); if (rqp->sr_cred->scr_td != NULL && rqp->sr_cred->scr_td->td_proc == iod->iod_p) { rqp->sr_flags |= SMBR_INTERNAL; SMB_IOD_RQLOCK(iod); TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link); SMB_IOD_RQUNLOCK(iod); for (;;) { if (smb_iod_sendrq(iod, rqp) != 0) { smb_iod_dead(iod); break; } /* * we don't need to lock state field here */ if (rqp->sr_state != SMBRQ_NOTSENT) break; tsleep(&iod->iod_flags, PWAIT, "90sndw", hz); } if (rqp->sr_lerror) smb_iod_removerq(rqp); return rqp->sr_lerror; } switch (iod->iod_state) { case SMBIOD_ST_NOTCONN: return ENOTCONN; case SMBIOD_ST_DEAD: error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL); if (error) return error; return EXDEV; default: break; } SMB_IOD_RQLOCK(iod); for (;;) { if (vcp->vc_maxmux == 0) { SMBERROR("maxmux == 0\n"); break; } if (iod->iod_muxcnt < vcp->vc_maxmux) break; iod->iod_muxwant++; msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90mux", 0); } iod->iod_muxcnt++; TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link); SMB_IOD_RQUNLOCK(iod); smb_iod_wakeup(iod); return 0; } int smb_iod_removerq(struct smb_rq *rqp) { struct smb_vc *vcp = rqp->sr_vc; struct smbiod *iod = vcp->vc_iod; SMBIODEBUG("\n"); if (rqp->sr_flags & SMBR_INTERNAL) { SMB_IOD_RQLOCK(iod); TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link); SMB_IOD_RQUNLOCK(iod); return 0; } SMB_IOD_RQLOCK(iod); while (rqp->sr_flags & SMBR_XLOCK) { rqp->sr_flags |= SMBR_XLOCKWANT; msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0); } TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link); iod->iod_muxcnt--; if (iod->iod_muxwant) { iod->iod_muxwant--; wakeup(&iod->iod_muxwant); } SMB_IOD_RQUNLOCK(iod); return 0; } int smb_iod_waitrq(struct smb_rq *rqp) { struct smbiod *iod = rqp->sr_vc->vc_iod; int error; SMBIODEBUG("\n"); if (rqp->sr_flags & SMBR_INTERNAL) { for (;;) { smb_iod_sendall(iod); smb_iod_recvall(iod); if (rqp->sr_rpgen != rqp->sr_rplast) break; tsleep(&iod->iod_flags, PWAIT, "90irq", hz); } smb_iod_removerq(rqp); return rqp->sr_lerror; } SMBRQ_SLOCK(rqp); if (rqp->sr_rpgen == rqp->sr_rplast) msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0); rqp->sr_rplast++; SMBRQ_SUNLOCK(rqp); error = rqp->sr_lerror; if (rqp->sr_flags & SMBR_MULTIPACKET) { /* * If request should stay in the list, then reinsert it * at the end of queue so other waiters have chance to concur */ SMB_IOD_RQLOCK(iod); TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link); TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link); SMB_IOD_RQUNLOCK(iod); } else smb_iod_removerq(rqp); return error; } static int smb_iod_sendall(struct smbiod *iod) { struct smb_vc *vcp = iod->iod_vc; struct smb_rq *rqp; struct timespec ts, tstimeout; int herror; herror = 0; /* * Loop through the list of requests and send them if possible */ SMB_IOD_RQLOCK(iod); TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) { switch (rqp->sr_state) { case SMBRQ_NOTSENT: rqp->sr_flags |= SMBR_XLOCK; SMB_IOD_RQUNLOCK(iod); herror = smb_iod_sendrq(iod, rqp); SMB_IOD_RQLOCK(iod); rqp->sr_flags &= ~SMBR_XLOCK; if (rqp->sr_flags & SMBR_XLOCKWANT) { rqp->sr_flags &= ~SMBR_XLOCKWANT; wakeup(rqp); } break; case SMBRQ_SENT: SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout); timespecadd(&tstimeout, &tstimeout); getnanotime(&ts); timespecsub(&ts, &tstimeout); if (timespeccmp(&ts, &rqp->sr_timesent, >)) { smb_iod_rqprocessed(rqp, ETIMEDOUT); } break; default: break; } if (herror) break; } SMB_IOD_RQUNLOCK(iod); 
if (herror == ENOTCONN) smb_iod_dead(iod); return 0; } /* * "main" function for smbiod daemon */ static __inline void smb_iod_main(struct smbiod *iod) { /* struct smb_vc *vcp = iod->iod_vc;*/ struct smbiod_event *evp; /* struct timespec tsnow;*/ int error; SMBIODEBUG("\n"); error = 0; /* * Check all interesting events */ for (;;) { SMB_IOD_EVLOCK(iod); evp = STAILQ_FIRST(&iod->iod_evlist); if (evp == NULL) { SMB_IOD_EVUNLOCK(iod); break; } STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link); evp->ev_type |= SMBIOD_EV_PROCESSING; SMB_IOD_EVUNLOCK(iod); switch (evp->ev_type & SMBIOD_EV_MASK) { case SMBIOD_EV_CONNECT: iod->iod_state = SMBIOD_ST_RECONNECT; evp->ev_error = smb_iod_connect(iod); break; case SMBIOD_EV_DISCONNECT: evp->ev_error = smb_iod_disconnect(iod); break; case SMBIOD_EV_TREECONNECT: evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident); break; case SMBIOD_EV_SHUTDOWN: iod->iod_flags |= SMBIOD_SHUTDOWN; break; case SMBIOD_EV_NEWRQ: break; } if (evp->ev_type & SMBIOD_EV_SYNC) { SMB_IOD_EVLOCK(iod); wakeup(evp); SMB_IOD_EVUNLOCK(iod); } else free(evp, M_SMBIOD); } #if 0 if (iod->iod_state == SMBIOD_ST_VCACTIVE) { getnanotime(&tsnow); timespecsub(&tsnow, &iod->iod_pingtimo); if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) { smb_smb_echo(vcp, &iod->iod_scred); } } #endif smb_iod_sendall(iod); smb_iod_recvall(iod); return; } void smb_iod_thread(void *arg) { struct smbiod *iod = arg; mtx_lock(&Giant); /* * Here we assume that the thread structure will be the same * for an entire kthread (kproc, to be more precise) life. */ iod->iod_td = curthread; smb_makescred(&iod->iod_scred, iod->iod_td, NULL); while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) { smb_iod_main(iod); SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo); if (iod->iod_flags & SMBIOD_SHUTDOWN) break; tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo); } /* We can now safely destroy the mutexes and free the iod structure. */ smb_sl_destroy(&iod->iod_rqlock); smb_sl_destroy(&iod->iod_evlock); free(iod, M_SMBIOD); mtx_unlock(&Giant); kproc_exit(0); } int smb_iod_create(struct smb_vc *vcp) { struct smbiod *iod; int error; iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK); iod->iod_id = smb_iod_next++; iod->iod_state = SMBIOD_ST_NOTCONN; iod->iod_vc = vcp; iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO; iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO; getnanotime(&iod->iod_lastrqsent); vcp->vc_iod = iod; smb_sl_init(&iod->iod_rqlock, "90rql"); TAILQ_INIT(&iod->iod_rqlist); smb_sl_init(&iod->iod_evlock, "90evl"); STAILQ_INIT(&iod->iod_evlist); error = kproc_create(smb_iod_thread, iod, &iod->iod_p, RFNOWAIT, 0, "smbiod%d", iod->iod_id); if (error) { SMBERROR("can't start smbiod: %d", error); + vcp->vc_iod = NULL; + smb_sl_destroy(&iod->iod_rqlock); + smb_sl_destroy(&iod->iod_evlock); free(iod, M_SMBIOD); return error; } return 0; } int smb_iod_destroy(struct smbiod *iod) { smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL); return 0; } int smb_iod_init(void) { return 0; } int smb_iod_done(void) { return 0; } Index: projects/powernv/ofed/drivers/net/mlx4/cmd.c =================================================================== --- projects/powernv/ofed/drivers/net/mlx4/cmd.c (revision 291050) +++ projects/powernv/ofed/drivers/net/mlx4/cmd.c (revision 291051) @@ -1,2605 +1,2606 @@ /* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. 
All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include "mlx4.h" #include "fw.h" #define CMD_POLL_TOKEN 0xffff #define INBOX_MASK 0xffffffffffffff00ULL #define CMD_CHAN_VER 1 #define CMD_CHAN_IF_REV 1 enum { /* command completed successfully: */ CMD_STAT_OK = 0x00, /* Internal error (such as a bus error) occurred while processing command: */ CMD_STAT_INTERNAL_ERR = 0x01, /* Operation/command not supported or opcode modifier not supported: */ CMD_STAT_BAD_OP = 0x02, /* Parameter not supported or parameter out of range: */ CMD_STAT_BAD_PARAM = 0x03, /* System not enabled or bad system state: */ CMD_STAT_BAD_SYS_STATE = 0x04, /* Attempt to access reserved or unallocaterd resource: */ CMD_STAT_BAD_RESOURCE = 0x05, /* Requested resource is currently executing a command, or is otherwise busy: */ CMD_STAT_RESOURCE_BUSY = 0x06, /* Required capability exceeds device limits: */ CMD_STAT_EXCEED_LIM = 0x08, /* Resource is not in the appropriate state or ownership: */ CMD_STAT_BAD_RES_STATE = 0x09, /* Index out of range: */ CMD_STAT_BAD_INDEX = 0x0a, /* FW image corrupted: */ CMD_STAT_BAD_NVMEM = 0x0b, /* Error in ICM mapping (e.g. 
not enough auxiliary ICM pages to execute command): */ CMD_STAT_ICM_ERROR = 0x0c, /* Attempt to modify a QP/EE which is not in the presumed state: */ CMD_STAT_BAD_QP_STATE = 0x10, /* Bad segment parameters (Address/Size): */ CMD_STAT_BAD_SEG_PARAM = 0x20, /* Memory Region has Memory Windows bound to: */ CMD_STAT_REG_BOUND = 0x21, /* HCA local attached memory not present: */ CMD_STAT_LAM_NOT_PRE = 0x22, /* Bad management packet (silently discarded): */ CMD_STAT_BAD_PKT = 0x30, /* More outstanding CQEs in CQ than new CQ size: */ CMD_STAT_BAD_SIZE = 0x40, /* Multi Function device support required: */ CMD_STAT_MULTI_FUNC_REQ = 0x50, }; enum { HCR_IN_PARAM_OFFSET = 0x00, HCR_IN_MODIFIER_OFFSET = 0x08, HCR_OUT_PARAM_OFFSET = 0x0c, HCR_TOKEN_OFFSET = 0x14, HCR_STATUS_OFFSET = 0x18, HCR_OPMOD_SHIFT = 12, HCR_T_BIT = 21, HCR_E_BIT = 22, HCR_GO_BIT = 23 }; enum { GO_BIT_TIMEOUT_MSECS = 10000 }; enum mlx4_vlan_transition { MLX4_VLAN_TRANSITION_VST_VST = 0, MLX4_VLAN_TRANSITION_VST_VGT = 1, MLX4_VLAN_TRANSITION_VGT_VST = 2, MLX4_VLAN_TRANSITION_VGT_VGT = 3, }; struct mlx4_cmd_context { struct completion done; int result; int next; u64 out_param; u16 token; u8 fw_status; }; static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, struct mlx4_vhcr_cmd *in_vhcr); static int mlx4_status_to_errno(u8 status) { static const int trans_table[] = { [CMD_STAT_INTERNAL_ERR] = -EIO, [CMD_STAT_BAD_OP] = -EPERM, [CMD_STAT_BAD_PARAM] = -EINVAL, [CMD_STAT_BAD_SYS_STATE] = -ENXIO, [CMD_STAT_BAD_RESOURCE] = -EBADF, [CMD_STAT_RESOURCE_BUSY] = -EBUSY, [CMD_STAT_EXCEED_LIM] = -ENOMEM, [CMD_STAT_BAD_RES_STATE] = -EBADF, [CMD_STAT_BAD_INDEX] = -EBADF, [CMD_STAT_BAD_NVMEM] = -EFAULT, [CMD_STAT_ICM_ERROR] = -ENFILE, [CMD_STAT_BAD_QP_STATE] = -EINVAL, [CMD_STAT_BAD_SEG_PARAM] = -EFAULT, [CMD_STAT_REG_BOUND] = -EBUSY, [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, [CMD_STAT_BAD_PKT] = -EINVAL, [CMD_STAT_BAD_SIZE] = -ENOMEM, [CMD_STAT_MULTI_FUNC_REQ] = -EACCES, }; if (status >= ARRAY_SIZE(trans_table) || (status != CMD_STAT_OK && trans_table[status] == 0)) return -EIO; return trans_table[status]; } static const char *cmd_to_str(u16 cmd) { switch (cmd) { case MLX4_CMD_SYS_EN: return "SYS_EN"; case MLX4_CMD_SYS_DIS: return "SYS_DIS"; case MLX4_CMD_MAP_FA: return "MAP_FA"; case MLX4_CMD_UNMAP_FA: return "UNMAP_FA"; case MLX4_CMD_RUN_FW: return "RUN_FW"; case MLX4_CMD_MOD_STAT_CFG: return "MOD_STAT_CFG"; case MLX4_CMD_QUERY_DEV_CAP: return "QUERY_DEV_CAP"; case MLX4_CMD_QUERY_FW: return "QUERY_FW"; case MLX4_CMD_ENABLE_LAM: return "ENABLE_LAM"; case MLX4_CMD_DISABLE_LAM: return "DISABLE_LAM"; case MLX4_CMD_QUERY_DDR: return "QUERY_DDR"; case MLX4_CMD_QUERY_ADAPTER: return "QUERY_ADAPTER"; case MLX4_CMD_INIT_HCA: return "INIT_HCA"; case MLX4_CMD_CLOSE_HCA: return "CLOSE_HCA"; case MLX4_CMD_INIT_PORT: return "INIT_PORT"; case MLX4_CMD_CLOSE_PORT: return "CLOSE_PORT"; case MLX4_CMD_QUERY_HCA: return "QUERY_HCA"; case MLX4_CMD_QUERY_PORT: return "QUERY_PORT"; case MLX4_CMD_SENSE_PORT: return "SENSE_PORT"; case MLX4_CMD_HW_HEALTH_CHECK: return "HW_HEALTH_CHECK"; case MLX4_CMD_SET_PORT: return "SET_PORT"; case MLX4_CMD_SET_NODE: return "SET_NODE"; case MLX4_CMD_QUERY_FUNC: return "QUERY_FUNC"; case MLX4_CMD_MAP_ICM: return "MAP_ICM"; case MLX4_CMD_UNMAP_ICM: return "UNMAP_ICM"; case MLX4_CMD_MAP_ICM_AUX: return "MAP_ICM_AUX"; case MLX4_CMD_UNMAP_ICM_AUX: return "UNMAP_ICM_AUX"; case MLX4_CMD_SET_ICM_SIZE: return "SET_ICM_SIZE"; /*master notify fw on finish for slave's flr*/ case MLX4_CMD_INFORM_FLR_DONE: return "INFORM_FLR_DONE"; case 
MLX4_CMD_GET_OP_REQ: return "GET_OP_REQ"; /* TPT commands */ case MLX4_CMD_SW2HW_MPT: return "SW2HW_MPT"; case MLX4_CMD_QUERY_MPT: return "QUERY_MPT"; case MLX4_CMD_HW2SW_MPT: return "HW2SW_MPT"; case MLX4_CMD_READ_MTT: return "READ_MTT"; case MLX4_CMD_WRITE_MTT: return "WRITE_MTT"; case MLX4_CMD_SYNC_TPT: return "SYNC_TPT"; /* EQ commands */ case MLX4_CMD_MAP_EQ: return "MAP_EQ"; case MLX4_CMD_SW2HW_EQ: return "SW2HW_EQ"; case MLX4_CMD_HW2SW_EQ: return "HW2SW_EQ"; case MLX4_CMD_QUERY_EQ: return "QUERY_EQ"; /* CQ commands */ case MLX4_CMD_SW2HW_CQ: return "SW2HW_CQ"; case MLX4_CMD_HW2SW_CQ: return "HW2SW_CQ"; case MLX4_CMD_QUERY_CQ: return "QUERY_CQ:"; case MLX4_CMD_MODIFY_CQ: return "MODIFY_CQ:"; /* SRQ commands */ case MLX4_CMD_SW2HW_SRQ: return "SW2HW_SRQ"; case MLX4_CMD_HW2SW_SRQ: return "HW2SW_SRQ"; case MLX4_CMD_QUERY_SRQ: return "QUERY_SRQ"; case MLX4_CMD_ARM_SRQ: return "ARM_SRQ"; /* QP/EE commands */ case MLX4_CMD_RST2INIT_QP: return "RST2INIT_QP"; case MLX4_CMD_INIT2RTR_QP: return "INIT2RTR_QP"; case MLX4_CMD_RTR2RTS_QP: return "RTR2RTS_QP"; case MLX4_CMD_RTS2RTS_QP: return "RTS2RTS_QP"; case MLX4_CMD_SQERR2RTS_QP: return "SQERR2RTS_QP"; case MLX4_CMD_2ERR_QP: return "2ERR_QP"; case MLX4_CMD_RTS2SQD_QP: return "RTS2SQD_QP"; case MLX4_CMD_SQD2SQD_QP: return "SQD2SQD_QP"; case MLX4_CMD_SQD2RTS_QP: return "SQD2RTS_QP"; case MLX4_CMD_2RST_QP: return "2RST_QP"; case MLX4_CMD_QUERY_QP: return "QUERY_QP"; case MLX4_CMD_INIT2INIT_QP: return "INIT2INIT_QP"; case MLX4_CMD_SUSPEND_QP: return "SUSPEND_QP"; case MLX4_CMD_UNSUSPEND_QP: return "UNSUSPEND_QP"; /* special QP and management commands */ case MLX4_CMD_CONF_SPECIAL_QP: return "CONF_SPECIAL_QP"; case MLX4_CMD_MAD_IFC: return "MAD_IFC"; /* multicast commands */ case MLX4_CMD_READ_MCG: return "READ_MCG"; case MLX4_CMD_WRITE_MCG: return "WRITE_MCG"; case MLX4_CMD_MGID_HASH: return "MGID_HASH"; /* miscellaneous commands */ case MLX4_CMD_DIAG_RPRT: return "DIAG_RPRT"; case MLX4_CMD_NOP: return "NOP"; case MLX4_CMD_ACCESS_MEM: return "ACCESS_MEM"; case MLX4_CMD_SET_VEP: return "SET_VEP"; /* Ethernet specific commands */ case MLX4_CMD_SET_VLAN_FLTR: return "SET_VLAN_FLTR"; case MLX4_CMD_SET_MCAST_FLTR: return "SET_MCAST_FLTR"; case MLX4_CMD_DUMP_ETH_STATS: return "DUMP_ETH_STATS"; /* Communication channel commands */ case MLX4_CMD_ARM_COMM_CHANNEL: return "ARM_COMM_CHANNEL"; case MLX4_CMD_GEN_EQE: return "GEN_EQE"; /* virtual commands */ case MLX4_CMD_ALLOC_RES: return "ALLOC_RES"; case MLX4_CMD_FREE_RES: return "FREE_RES"; case MLX4_CMD_MCAST_ATTACH: return "MCAST_ATTACH"; case MLX4_CMD_UCAST_ATTACH: return "UCAST_ATTACH"; case MLX4_CMD_PROMISC: return "PROMISC"; case MLX4_CMD_QUERY_FUNC_CAP: return "QUERY_FUNC_CAP"; case MLX4_CMD_QP_ATTACH: return "QP_ATTACH"; /* debug commands */ case MLX4_CMD_QUERY_DEBUG_MSG: return "QUERY_DEBUG_MSG"; case MLX4_CMD_SET_DEBUG_MSG: return "SET_DEBUG_MSG"; /* statistics commands */ case MLX4_CMD_QUERY_IF_STAT: return "QUERY_IF_STAT"; case MLX4_CMD_SET_IF_STAT: return "SET_IF_STAT"; /* register/delete flow steering network rules */ case MLX4_QP_FLOW_STEERING_ATTACH: return "QP_FLOW_STEERING_ATTACH"; case MLX4_QP_FLOW_STEERING_DETACH: return "QP_FLOW_STEERING_DETACH"; case MLX4_FLOW_STEERING_IB_UC_QP_RANGE: return "FLOW_STEERING_IB_UC_QP_RANGE"; default: return "OTHER"; } } static u8 mlx4_errno_to_status(int errno) { switch (errno) { case -EPERM: return CMD_STAT_BAD_OP; case -EINVAL: return CMD_STAT_BAD_PARAM; case -ENXIO: return CMD_STAT_BAD_SYS_STATE; case -EBUSY: return CMD_STAT_RESOURCE_BUSY; case 
-ENOMEM: return CMD_STAT_EXCEED_LIM; case -ENFILE: return CMD_STAT_ICM_ERROR; default: return CMD_STAT_INTERNAL_ERR; } } static int comm_pending(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); u32 status = readl(&priv->mfunc.comm->slave_read); return (swab32(status) >> 31) != priv->cmd.comm_toggle; } static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) { struct mlx4_priv *priv = mlx4_priv(dev); u32 val; priv->cmd.comm_toggle ^= 1; val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); __raw_writel((__force u32) cpu_to_be32(val), &priv->mfunc.comm->slave_write); mmiowb(); } static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, unsigned long timeout) { struct mlx4_priv *priv = mlx4_priv(dev); unsigned long end; int err = 0; int ret_from_pending = 0; /* First, verify that the master reports correct status */ if (comm_pending(dev)) { mlx4_warn(dev, "Communication channel is not idle." "my toggle is %d (cmd:0x%x)\n", priv->cmd.comm_toggle, cmd); return -EAGAIN; } /* Write command */ down(&priv->cmd.poll_sem); mlx4_comm_cmd_post(dev, cmd, param); end = msecs_to_jiffies(timeout) + jiffies; while (comm_pending(dev) && time_before(jiffies, end)) cond_resched(); ret_from_pending = comm_pending(dev); if (ret_from_pending) { /* check if the slave is trying to boot in the middle of * FLR process. The only non-zero result in the RESET command * is MLX4_DELAY_RESET_SLAVE*/ if ((MLX4_COMM_CMD_RESET == cmd)) { mlx4_warn(dev, "Got slave FLRed from Communication" " channel (ret:0x%x)\n", ret_from_pending); err = MLX4_DELAY_RESET_SLAVE; } else { mlx4_warn(dev, "Communication channel timed out\n"); err = -ETIMEDOUT; } } up(&priv->cmd.poll_sem); return err; } static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op, u16 param, unsigned long timeout) { struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; struct mlx4_cmd_context *context; unsigned long end; int err = 0; down(&cmd->event_sem); end = msecs_to_jiffies(timeout) + jiffies; while (comm_pending(dev) && time_before(jiffies, end)) cond_resched(); if (comm_pending(dev)) { mlx4_warn(dev, "mlx4_comm_cmd_wait: Comm channel " "is not idle. 
My toggle is %d (op: 0x%x)\n", mlx4_priv(dev)->cmd.comm_toggle, op); up(&cmd->event_sem); return -EAGAIN; } spin_lock(&cmd->context_lock); BUG_ON(cmd->free_head < 0); context = &cmd->context[cmd->free_head]; context->token += cmd->token_mask + 1; cmd->free_head = context->next; spin_unlock(&cmd->context_lock); init_completion(&context->done); mlx4_comm_cmd_post(dev, op, param); /* In slave, wait unconditionally for completion */ wait_for_completion(&context->done); err = context->result; if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) { mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", op, context->fw_status); goto out; } out: /* wait for the comm channel to become ready; * this is necessary to prevent a race * when switching between event and polling mode */ end = msecs_to_jiffies(timeout) + jiffies; while (comm_pending(dev) && time_before(jiffies, end)) cond_resched(); spin_lock(&cmd->context_lock); context->next = cmd->free_head; cmd->free_head = context - cmd->context; spin_unlock(&cmd->context_lock); up(&cmd->event_sem); return err; } int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, unsigned long timeout) { if (mlx4_priv(dev)->cmd.use_events) return mlx4_comm_cmd_wait(dev, cmd, param, timeout); return mlx4_comm_cmd_poll(dev, cmd, param, timeout); } static int cmd_pending(struct mlx4_dev *dev) { u32 status; if (pci_channel_offline(dev->pdev)) return -EIO; status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); return (status & swab32(1 << HCR_GO_BIT)) || (mlx4_priv(dev)->cmd.toggle == !!(status & swab32(1 << HCR_T_BIT))); } static int get_status(struct mlx4_dev *dev, u32 *status, int *go_bit, int *t_bit) { if (pci_channel_offline(dev->pdev)) return -EIO; *status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); *t_bit = !!(*status & swab32(1 << HCR_T_BIT)); *go_bit = !!(*status & swab32(1 << HCR_GO_BIT)); return 0; } static int mlx4_cmd_post(struct mlx4_dev *dev, struct timespec *ts1, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, u16 token, int event) { struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; u32 __iomem *hcr = cmd->hcr; int ret = -EAGAIN; unsigned long end; int err, go_bit = 0, t_bit = 0; u32 status = 0; mutex_lock(&cmd->hcr_mutex); if (pci_channel_offline(dev->pdev)) { /* * Device is going through error recovery * and cannot accept commands. */ ret = -EIO; goto out; } end = jiffies; if (event) end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); while (cmd_pending(dev)) { if (pci_channel_offline(dev->pdev)) { /* * Device is going through error recovery * and cannot accept commands. */ ret = -EIO; goto out; } if (time_after_eq(jiffies, end)) { mlx4_err(dev, "%s:cmd_pending failed\n", __func__); goto out; } cond_resched(); } /* * We use writel (instead of something like memcpy_toio) * because writes of less than 32 bits to the HCR don't work * (and some architectures such as ia64 implement memcpy_toio * in terms of writeb). */ __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0); __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1); __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2); __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3); __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4); __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5); if (ts1) ktime_get_ts(ts1); /* __raw_writel may not order writes. */ wmb(); __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | (cmd->toggle << HCR_T_BIT) | (event ?
(1 << HCR_E_BIT) : 0) | (op_modifier << HCR_OPMOD_SHIFT) | op), hcr + 6); /* * Make sure that our HCR writes don't get mixed in with * writes from another CPU starting a FW command. */ mmiowb(); cmd->toggle = cmd->toggle ^ 1; ret = 0; out: if (ret) { err = get_status(dev, &status, &go_bit, &t_bit); mlx4_warn(dev, "Could not post command %s (0x%x): ret=%d, " "in_param=0x%llx, in_mod=0x%x, op_mod=0x%x, " "get_status err=%d, status_reg=0x%x, go_bit=%d, " "t_bit=%d, toggle=0x%x\n", cmd_to_str(op), op, ret, (unsigned long long) in_param, in_modifier, op_modifier, err, status, go_bit, t_bit, cmd->toggle); } mutex_unlock(&cmd->hcr_mutex); return ret; } static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr; int ret; mutex_lock(&priv->cmd.slave_cmd_mutex); vhcr->in_param = cpu_to_be64(in_param); vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0; vhcr->in_modifier = cpu_to_be32(in_modifier); vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff)); vhcr->token = cpu_to_be16(CMD_POLL_TOKEN); vhcr->status = 0; vhcr->flags = !!(priv->cmd.use_events) << 6; if (mlx4_is_master(dev)) { ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr); if (!ret) { if (out_is_imm) { if (out_param) *out_param = be64_to_cpu(vhcr->out_param); else { mlx4_err(dev, "response expected while" "output mailbox is NULL for " "command 0x%x\n", op); vhcr->status = CMD_STAT_BAD_PARAM; } } ret = mlx4_status_to_errno(vhcr->status); } } else { ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, MLX4_COMM_TIME + timeout); if (!ret) { if (out_is_imm) { if (out_param) *out_param = be64_to_cpu(vhcr->out_param); else { mlx4_err(dev, "response expected while" "output mailbox is NULL for " "command 0x%x\n", op); vhcr->status = CMD_STAT_BAD_PARAM; } } ret = mlx4_status_to_errno(vhcr->status); } else mlx4_err(dev, "failed execution of VHCR_POST command" "opcode %s (0x%x)\n", cmd_to_str(op), op); } mutex_unlock(&priv->cmd.slave_cmd_mutex); return ret; } static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { struct mlx4_priv *priv = mlx4_priv(dev); void __iomem *hcr = priv->cmd.hcr; int err = 0; unsigned long end; u32 stat; down(&priv->cmd.poll_sem); if (pci_channel_offline(dev->pdev)) { /* * Device is going through error recovery * and cannot accept commands. */ err = -EIO; goto out; } err = mlx4_cmd_post(dev, NULL, in_param, out_param ? *out_param : 0, in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); if (err) goto out; end = msecs_to_jiffies(timeout) + jiffies; while (cmd_pending(dev) && time_before(jiffies, end)) { if (pci_channel_offline(dev->pdev)) { /* * Device is going through error recovery * and cannot accept commands. 
*/ err = -EIO; goto out; } cond_resched(); } if (cmd_pending(dev)) { mlx4_warn(dev, "command %s (0x%x) timed out (go bit not cleared)\n", cmd_to_str(op), op); err = -ETIMEDOUT; goto out; } if (out_is_imm) *out_param = (u64) be32_to_cpu((__force __be32) __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 | (u64) be32_to_cpu((__force __be32) __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4)); stat = be32_to_cpu((__force __be32) __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24; err = mlx4_status_to_errno(stat); if (err) mlx4_err(dev, "command %s (0x%x) failed: fw status = 0x%x\n", cmd_to_str(op), op, stat); out: up(&priv->cmd.poll_sem); return err; } void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_context *context = &priv->cmd.context[token & priv->cmd.token_mask]; /* previously timed out command completing at long last */ if (token != context->token) return; context->fw_status = status; context->result = mlx4_status_to_errno(status); context->out_param = out_param; complete(&context->done); } static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; struct mlx4_cmd_context *context; int err = 0; int go_bit = 0, t_bit = 0, stat_err; u32 status = 0; struct timespec ts1, ts2; ktime_t t1, t2, delta; s64 ds; if (out_is_imm && !out_param) return -EINVAL; down(&cmd->event_sem); spin_lock(&cmd->context_lock); BUG_ON(cmd->free_head < 0); context = &cmd->context[cmd->free_head]; context->token += cmd->token_mask + 1; cmd->free_head = context->next; spin_unlock(&cmd->context_lock); init_completion(&context->done); err = mlx4_cmd_post(dev, &ts1, in_param, out_param ? 
*out_param : 0, in_modifier, op_modifier, op, context->token, 1); if (err) goto out; if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { stat_err = get_status(dev, &status, &go_bit, &t_bit); mlx4_warn(dev, "command %s (0x%x) timed out: in_param=0x%llx, " "in_mod=0x%x, op_mod=0x%x, get_status err=%d, " "status_reg=0x%x, go_bit=%d, t_bit=%d, toggle=0x%x\n" , cmd_to_str(op), op, (unsigned long long) in_param, in_modifier, op_modifier, stat_err, status, go_bit, t_bit, mlx4_priv(dev)->cmd.toggle); err = -EBUSY; goto out; } if (mlx4_debug_level & MLX4_DEBUG_MASK_CMD_TIME) { ktime_get_ts(&ts2); t1 = timespec_to_ktime(ts1); t2 = timespec_to_ktime(ts2); delta = ktime_sub(t2, t1); ds = ktime_to_ns(delta); pr_info("mlx4: fw exec time for %s is %lld nsec\n", cmd_to_str(op), (long long) ds); } err = context->result; if (err) { mlx4_err(dev, "command %s (0x%x) failed: in_param=0x%llx, " "in_mod=0x%x, op_mod=0x%x, fw status = 0x%x\n", cmd_to_str(op), op, (unsigned long long) in_param, in_modifier, op_modifier, context->fw_status); switch(context->fw_status) { case CMD_STAT_BAD_PARAM: mlx4_err(dev, "Parameter is not supported, " "parameter is out of range\n"); break; case CMD_STAT_EXCEED_LIM: mlx4_err(dev, "Required capability exceeded " "device limits\n"); break; default: break; } goto out; } if (out_is_imm) *out_param = context->out_param; out: spin_lock(&cmd->context_lock); context->next = cmd->free_head; cmd->free_head = context - cmd->context; spin_unlock(&cmd->context_lock); up(&cmd->event_sem); return err; } int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, u16 op, unsigned long timeout, int native) { if (pci_channel_offline(dev->pdev)) return -EIO; if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { if (mlx4_priv(dev)->cmd.use_events) return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm, in_modifier, op_modifier, op, timeout); else return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm, in_modifier, op_modifier, op, timeout); } return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm, in_modifier, op_modifier, op, timeout); } EXPORT_SYMBOL_GPL(__mlx4_cmd); static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) { return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr, int slave, u64 slave_addr, int size, int is_read) { u64 in_param; u64 out_param; if ((slave_addr & 0xfff) | (master_addr & 0xfff) | (slave & ~0x7f) | (size & 0xff)) { mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx " "master_addr:0x%llx slave_id:%d size:%d\n", (unsigned long long) slave_addr, (unsigned long long) master_addr, slave, size); return -EINVAL; } if (is_read) { in_param = (u64) slave | slave_addr; out_param = (u64) dev->caps.function | master_addr; } else { in_param = (u64) dev->caps.function | master_addr; out_param = (u64) slave | slave_addr; } return mlx4_cmd_imm(dev, in_param, &out_param, size, 0, MLX4_CMD_ACCESS_MEM, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox) { struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf); struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf); int err; int i; if (index & 0x1f) return -EINVAL; in_mad->attr_mod = cpu_to_be32(index / 32); err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, MLX4_CMD_MAD_IFC, 
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (err) return err; for (i = 0; i < 32; ++i) pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]); return err; } static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox) { int i; int err; for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) { err = query_pkey_block(dev, port, i, table + i, inbox, outbox); if (err) return err; } return 0; } #define PORT_CAPABILITY_LOCATION_IN_SMP 20 #define PORT_STATE_OFFSET 32 static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf) { if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP) return IB_PORT_ACTIVE; else return IB_PORT_DOWN; } static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { struct ib_smp *smp = inbox->buf; u32 index; u8 port; u16 *table; int err; int vidx, pidx; struct mlx4_priv *priv = mlx4_priv(dev); struct ib_smp *outsmp = outbox->buf; __be16 *outtab = (__be16 *)(outsmp->data); __be32 slave_cap_mask; __be64 slave_node_guid; port = vhcr->in_modifier; if (smp->base_version == 1 && smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && smp->class_version == 1) { if (smp->method == IB_MGMT_METHOD_GET) { if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) { index = be32_to_cpu(smp->attr_mod); if (port < 1 || port > dev->caps.num_ports) return -EINVAL; table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL); if (!table) return -ENOMEM; /* need to get the full pkey table because the paravirtualized * pkeys may be scattered among several pkey blocks. */ err = get_full_pkey_table(dev, port, table, inbox, outbox); if (!err) { for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) { pidx = priv->virt2phys_pkey[slave][port - 1][vidx]; outtab[vidx % 32] = cpu_to_be16(table[pidx]); } } kfree(table); return err; } if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) { /*get the slave specific caps:*/ /*do the command */ err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, vhcr->op_modifier, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); /* modify the response for slaves */ if (!err && slave != mlx4_master_func_num(dev)) { u8 *state = outsmp->data + PORT_STATE_OFFSET; *state = (*state & 0xf0) | vf_port_state(dev, port, slave); slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4); } return err; } if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) { /* compute slave's gid block */ smp->attr_mod = cpu_to_be32(slave / 8); /* execute cmd */ err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, vhcr->op_modifier, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (!err) { /* if needed, move slave gid to index 0 */ if (slave % 8) memcpy(outsmp->data, outsmp->data + (slave % 8) * 8, 8); /* delete all other gids */ memset(outsmp->data + 8, 0, 56); } return err; } if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) { err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, vhcr->op_modifier, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (!err) { slave_node_guid = mlx4_get_slave_node_guid(dev, slave); memcpy(outsmp->data + 12, &slave_node_guid, 8); } return err; } } } if (slave != mlx4_master_func_num(dev) && ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) || (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && smp->method == 
IB_MGMT_METHOD_SET))) { mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, " "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n", slave, smp->method, smp->mgmt_class, be16_to_cpu(smp->attr_id)); return -EPERM; } /*default:*/ return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, vhcr->op_modifier, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } static int MLX4_CMD_DIAG_RPRT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { return -EPERM; } static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { return -EPERM; } int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { u64 in_param; u64 out_param; int err; in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param; out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param; if (cmd->encode_slave_id) { in_param &= 0xffffffffffffff00ll; in_param |= slave; } err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm, vhcr->in_modifier, vhcr->op_modifier, vhcr->op, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (cmd->out_is_imm) vhcr->out_param = out_param; return err; } static struct mlx4_cmd_info cmd_info[] = { { .opcode = MLX4_CMD_QUERY_FW, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QUERY_FW_wrapper }, { .opcode = MLX4_CMD_QUERY_HCA, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = NULL }, { .opcode = MLX4_CMD_QUERY_DEV_CAP, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QUERY_DEV_CAP_wrapper }, { .opcode = MLX4_CMD_QUERY_FUNC_CAP, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QUERY_FUNC_CAP_wrapper }, { .opcode = MLX4_CMD_QUERY_ADAPTER, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = NULL }, { .opcode = MLX4_CMD_INIT_PORT, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_INIT_PORT_wrapper }, { .opcode = MLX4_CMD_CLOSE_PORT, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_CLOSE_PORT_wrapper }, { .opcode = MLX4_CMD_QUERY_PORT, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QUERY_PORT_wrapper }, { .opcode = MLX4_CMD_SET_PORT, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_SET_PORT_wrapper }, { .opcode = MLX4_CMD_MAP_EQ, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_MAP_EQ_wrapper }, { .opcode = MLX4_CMD_SW2HW_EQ, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = true, .verify = NULL, .wrapper = mlx4_SW2HW_EQ_wrapper }, { .opcode = MLX4_CMD_HW_HEALTH_CHECK, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = NULL }, { .opcode = MLX4_CMD_DIAG_RPRT, .has_inbox = 
false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .skip_err_print = true, .verify = NULL, .wrapper = MLX4_CMD_DIAG_RPRT_wrapper }, { .opcode = MLX4_CMD_NOP, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = NULL }, { .opcode = MLX4_CMD_ALLOC_RES, .has_inbox = false, .has_outbox = false, .out_is_imm = true, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_ALLOC_RES_wrapper }, { .opcode = MLX4_CMD_FREE_RES, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_FREE_RES_wrapper }, { .opcode = MLX4_CMD_SW2HW_MPT, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = true, .verify = NULL, .wrapper = mlx4_SW2HW_MPT_wrapper }, { .opcode = MLX4_CMD_QUERY_MPT, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QUERY_MPT_wrapper }, { .opcode = MLX4_CMD_HW2SW_MPT, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_HW2SW_MPT_wrapper }, { .opcode = MLX4_CMD_READ_MTT, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = NULL }, { .opcode = MLX4_CMD_WRITE_MTT, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_WRITE_MTT_wrapper }, { .opcode = MLX4_CMD_SYNC_TPT, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = NULL }, { .opcode = MLX4_CMD_HW2SW_EQ, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = true, .verify = NULL, .wrapper = mlx4_HW2SW_EQ_wrapper }, { .opcode = MLX4_CMD_QUERY_EQ, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = true, .verify = NULL, .wrapper = mlx4_QUERY_EQ_wrapper }, { .opcode = MLX4_CMD_SW2HW_CQ, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = true, .verify = NULL, .wrapper = mlx4_SW2HW_CQ_wrapper }, { .opcode = MLX4_CMD_HW2SW_CQ, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_HW2SW_CQ_wrapper }, { .opcode = MLX4_CMD_QUERY_CQ, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QUERY_CQ_wrapper }, { .opcode = MLX4_CMD_MODIFY_CQ, .has_inbox = true, .has_outbox = false, .out_is_imm = true, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_MODIFY_CQ_wrapper }, { .opcode = MLX4_CMD_SW2HW_SRQ, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = true, .verify = NULL, .wrapper = mlx4_SW2HW_SRQ_wrapper }, { .opcode = MLX4_CMD_HW2SW_SRQ, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_HW2SW_SRQ_wrapper }, { .opcode = MLX4_CMD_QUERY_SRQ, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QUERY_SRQ_wrapper }, { .opcode = MLX4_CMD_ARM_SRQ, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_ARM_SRQ_wrapper }, { .opcode = MLX4_CMD_RST2INIT_QP, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = true, .verify = NULL, .wrapper = mlx4_RST2INIT_QP_wrapper }, { .opcode = 
MLX4_CMD_INIT2INIT_QP, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_INIT2INIT_QP_wrapper }, { .opcode = MLX4_CMD_INIT2RTR_QP, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_INIT2RTR_QP_wrapper }, { .opcode = MLX4_CMD_RTR2RTS_QP, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_RTR2RTS_QP_wrapper }, { .opcode = MLX4_CMD_RTS2RTS_QP, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_RTS2RTS_QP_wrapper }, { .opcode = MLX4_CMD_SQERR2RTS_QP, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_SQERR2RTS_QP_wrapper }, { .opcode = MLX4_CMD_2ERR_QP, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_GEN_QP_wrapper }, { .opcode = MLX4_CMD_RTS2SQD_QP, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_GEN_QP_wrapper }, { .opcode = MLX4_CMD_SQD2SQD_QP, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_SQD2SQD_QP_wrapper }, { .opcode = MLX4_CMD_SQD2RTS_QP, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_SQD2RTS_QP_wrapper }, { .opcode = MLX4_CMD_2RST_QP, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_2RST_QP_wrapper }, { .opcode = MLX4_CMD_QUERY_QP, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_GEN_QP_wrapper }, { .opcode = MLX4_CMD_SUSPEND_QP, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_GEN_QP_wrapper }, { .opcode = MLX4_CMD_UNSUSPEND_QP, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_GEN_QP_wrapper }, { .opcode = MLX4_CMD_UPDATE_QP, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .skip_err_print = true, .verify = NULL, .wrapper = MLX4_CMD_UPDATE_QP_wrapper }, { .opcode = MLX4_CMD_CONF_SPECIAL_QP, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, /* XXX verify: only demux can do this */ .wrapper = NULL }, { .opcode = MLX4_CMD_MAD_IFC, .has_inbox = true, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_MAD_IFC_wrapper }, { .opcode = MLX4_CMD_QUERY_IF_STAT, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QUERY_IF_STAT_wrapper }, /* Native multicast commands are not available for guests */ { .opcode = MLX4_CMD_QP_ATTACH, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QP_ATTACH_wrapper }, { .opcode = MLX4_CMD_PROMISC, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_PROMISC_wrapper }, /* Ethernet specific commands */ { .opcode = MLX4_CMD_SET_VLAN_FLTR, .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = 
NULL, .wrapper = mlx4_SET_VLAN_FLTR_wrapper }, { .opcode = MLX4_CMD_SET_MCAST_FLTR, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_SET_MCAST_FLTR_wrapper }, { .opcode = MLX4_CMD_DUMP_ETH_STATS, .has_inbox = false, .has_outbox = true, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_DUMP_ETH_STATS_wrapper }, { .opcode = MLX4_CMD_INFORM_FLR_DONE, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = NULL }, /* flow steering commands */ { .opcode = MLX4_QP_FLOW_STEERING_ATTACH, .has_inbox = true, .has_outbox = false, .out_is_imm = true, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper }, { .opcode = MLX4_QP_FLOW_STEERING_DETACH, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper }, /* wol commands */ { .opcode = MLX4_CMD_MOD_STAT_CFG, .has_inbox = false, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .skip_err_print = true, .verify = NULL, .wrapper = mlx4_MOD_STAT_CFG_wrapper }, }; static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, struct mlx4_vhcr_cmd *in_vhcr) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cmd_info *cmd = NULL; struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr; struct mlx4_vhcr *vhcr; struct mlx4_cmd_mailbox *inbox = NULL; struct mlx4_cmd_mailbox *outbox = NULL; u64 in_param; u64 out_param; int ret = 0; int i; int err = 0; /* Create sw representation of Virtual HCR */ vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL); if (!vhcr) return -ENOMEM; /* DMA in the vHCR */ if (!in_vhcr) { ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, priv->mfunc.master.slave_state[slave].vhcr_dma, ALIGN(sizeof(struct mlx4_vhcr_cmd), MLX4_ACCESS_MEM_ALIGN), 1); if (ret) { mlx4_err(dev, "%s:Failed reading vhcr" "ret: 0x%x\n", __func__, ret); kfree(vhcr); return ret; } } /* Fill SW VHCR fields */ vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param); vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param); vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier); vhcr->token = be16_to_cpu(vhcr_cmd->token); vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff; vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12); vhcr->e_bit = vhcr_cmd->flags & (1 << 6); /* Lookup command */ for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) { if (vhcr->op == cmd_info[i].opcode) { cmd = &cmd_info[i]; break; } } if (!cmd) { mlx4_err(dev, "unparavirt command: %s (0x%x) accepted from slave:%d\n", cmd_to_str(vhcr->op), vhcr->op, slave); vhcr_cmd->status = CMD_STAT_BAD_PARAM; goto out_status; } /* Read inbox */ if (cmd->has_inbox) { vhcr->in_param &= INBOX_MASK; inbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(inbox)) { vhcr_cmd->status = CMD_STAT_BAD_SIZE; inbox = NULL; goto out_status; } if (mlx4_ACCESS_MEM(dev, inbox->dma, slave, vhcr->in_param, MLX4_MAILBOX_SIZE, 1)) { mlx4_err(dev, "%s: Failed reading inbox for cmd %s (0x%x)\n", __func__, cmd_to_str(cmd->opcode), cmd->opcode); vhcr_cmd->status = CMD_STAT_INTERNAL_ERR; goto out_status; } } /* Apply permission and bound checks if applicable */ if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { mlx4_warn(dev, "Command %s (0x%x) from slave: %d failed protection " "checks for resource_id: %d\n", cmd_to_str(vhcr->op), vhcr->op, slave, vhcr->in_modifier); vhcr_cmd->status = CMD_STAT_BAD_OP; goto 
out_status; } /* Allocate outbox */ if (cmd->has_outbox) { outbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(outbox)) { vhcr_cmd->status = CMD_STAT_BAD_SIZE; outbox = NULL; goto out_status; } } /* Execute the command! */ if (cmd->wrapper) { err = cmd->wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (cmd->out_is_imm) vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); } else { in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param; out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param; err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm, vhcr->in_modifier, vhcr->op_modifier, vhcr->op, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (cmd->out_is_imm) { vhcr->out_param = out_param; vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); } } if (err) { if (!cmd->skip_err_print) mlx4_warn(dev, "vhcr command %s (0x%x) slave:%d " "in_param 0x%llx in_mod=0x%x, op_mod=0x%x " "failed with error:%d, status %d\n", cmd_to_str(vhcr->op), vhcr->op, slave, (unsigned long long) vhcr->in_param, vhcr->in_modifier, vhcr->op_modifier, vhcr->errno, err); vhcr_cmd->status = mlx4_errno_to_status(err); goto out_status; } /* Write outbox if command completed successfully */ if (cmd->has_outbox && !vhcr_cmd->status) { ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave, vhcr->out_param, MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED); if (ret) { /* If we failed to write back the outbox after the *command was successfully executed, we must fail this * slave, as it is now in undefined state */ mlx4_err(dev, "%s: Failed writing outbox\n", __func__); goto out; } } out_status: /* DMA back vhcr result */ if (!in_vhcr) { ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, priv->mfunc.master.slave_state[slave].vhcr_dma, ALIGN(sizeof(struct mlx4_vhcr), MLX4_ACCESS_MEM_ALIGN), MLX4_CMD_WRAPPED); if (ret) mlx4_err(dev, "%s:Failed writing vhcr result\n", __func__); else if (vhcr->e_bit && mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) mlx4_warn(dev, "Failed to generate command completion " "eqe for slave %d\n", slave); } out: kfree(vhcr); mlx4_free_cmd_mailbox(dev, inbox); mlx4_free_cmd_mailbox(dev, outbox); return ret; } static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, int slave, int port) { struct mlx4_vport_oper_state *vp_oper; struct mlx4_vport_state *vp_admin; struct mlx4_vf_immed_vlan_work *work; int err; int admin_vlan_ix = NO_INDX; vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; if (vp_oper->state.default_vlan == vp_admin->default_vlan && vp_oper->state.default_qos == vp_admin->default_qos) return 0; work = kzalloc(sizeof(*work), GFP_KERNEL); if (!work) return -ENOMEM; if (vp_oper->state.default_vlan != vp_admin->default_vlan) { if (MLX4_VGT != vp_admin->default_vlan) { err = __mlx4_register_vlan(&priv->dev, port, vp_admin->default_vlan, &admin_vlan_ix); if (err) { mlx4_warn((&priv->dev), "No vlan resources slave %d, port %d\n", slave, port); + kfree(work); return err; } } else { admin_vlan_ix = NO_INDX; } work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n", (int)(vp_admin->default_vlan), admin_vlan_ix, slave, port); } /* save original vlan ix and vlan id */ work->orig_vlan_id = vp_oper->state.default_vlan; work->orig_vlan_ix = vp_oper->vlan_idx; /* handle new qos */ if (vp_oper->state.default_qos != vp_admin->default_qos) work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS; if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN) vp_oper->vlan_idx = admin_vlan_ix; 
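/*
 * The fields filled in below describe the pending VST update.  Every QP
 * owned by the slave must be modified with UPDATE_QP, which itself issues
 * FW commands, so the update is packaged into a work item and deferred to
 * the comm_wq workqueue rather than applied synchronously here.  A minimal
 * sketch of the consumer side, assuming a handler shaped like the
 * mlx4_vf_immed_vlan_work_handler passed to INIT_WORK below (the helper
 * name here is hypothetical):
 *
 *	static void vst_update_handler(struct work_struct *w)
 *	{
 *		struct mlx4_vf_immed_vlan_work *work =
 *		    container_of(w, struct mlx4_vf_immed_vlan_work, work);
 *
 *		// Walk the QPs of work->slave on work->port, issuing
 *		// UPDATE_QP with work->vlan_ix and work->qos, then release
 *		// the old vlan index saved in work->orig_vlan_ix.
 *		kfree(work);
 *	}
 */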
vp_oper->state.default_vlan = vp_admin->default_vlan; vp_oper->state.default_qos = vp_admin->default_qos; /* iterate over QPs owned by this slave, using UPDATE_QP */ work->port = port; work->slave = slave; work->qos = vp_oper->state.default_qos; work->vlan_id = vp_oper->state.default_vlan; work->vlan_ix = vp_oper->vlan_idx; work->priv = priv; INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler); queue_work(priv->mfunc.master.comm_wq, &work->work); return 0; } static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) { int port, err; struct mlx4_vport_state *vp_admin; struct mlx4_vport_oper_state *vp_oper; for (port = 1; port <= MLX4_MAX_PORTS; port++) { vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; vp_oper->state = *vp_admin; if (MLX4_VGT != vp_admin->default_vlan) { err = __mlx4_register_vlan(&priv->dev, port, vp_admin->default_vlan, &(vp_oper->vlan_idx)); if (err) { vp_oper->vlan_idx = NO_INDX; mlx4_warn((&priv->dev), "No vlan resources slave %d, port %d\n", slave, port); return err; } mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n", (int)(vp_oper->state.default_vlan), vp_oper->vlan_idx, slave, port); } if (vp_admin->spoofchk) { vp_oper->mac_idx = __mlx4_register_mac(&priv->dev, port, vp_admin->mac); if (0 > vp_oper->mac_idx) { err = vp_oper->mac_idx; vp_oper->mac_idx = NO_INDX; mlx4_warn((&priv->dev), "No mac resources slave %d, port %d\n", slave, port); return err; } mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n", (unsigned long long) vp_oper->state.mac, vp_oper->mac_idx, slave, port); } } return 0; } static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave) { int port; struct mlx4_vport_oper_state *vp_oper; for (port = 1; port <= MLX4_MAX_PORTS; port++) { vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (NO_INDX != vp_oper->vlan_idx) { __mlx4_unregister_vlan(&priv->dev, port, vp_oper->state.default_vlan); vp_oper->vlan_idx = NO_INDX; } if (NO_INDX != vp_oper->mac_idx) { __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac); vp_oper->mac_idx = NO_INDX; } } return; } static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, u16 param, u8 toggle) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; u32 reply; u8 is_going_down = 0; int i; unsigned long flags; slave_state[slave].comm_toggle ^= 1; reply = (u32) slave_state[slave].comm_toggle << 31; if (toggle != slave_state[slave].comm_toggle) { mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER " "STATE COMPROMISED ***\n", toggle, slave); goto reset_slave; } if (cmd == MLX4_COMM_CMD_RESET) { mlx4_warn(dev, "Received reset from slave:%d\n", slave); slave_state[slave].active = false; slave_state[slave].old_vlan_api = false; mlx4_master_deactivate_admin_state(priv, slave); for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { slave_state[slave].event_eq[i].eqn = -1; slave_state[slave].event_eq[i].token = 0; } /*check if we are in the middle of FLR process, if so return "retry" status to the slave*/ if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) goto inform_slave_state; mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave); /* write the version in the event field */ reply |= mlx4_comm_get_version(); goto reset_slave; } /*command from slave in the middle of FLR*/ if (cmd != MLX4_COMM_CMD_RESET && MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { mlx4_warn(dev, "slave:%d is trying to run cmd (0x%x) " "in the middle of FLR\n", slave, cmd); return; } switch (cmd) { case MLX4_COMM_CMD_VHCR0: if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET) goto reset_slave; slave_state[slave].vhcr_dma = ((u64) param) << 48; priv->mfunc.master.slave_state[slave].cookie = 0; break; case MLX4_COMM_CMD_VHCR1: if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) goto reset_slave; slave_state[slave].vhcr_dma |= ((u64) param) << 32; break; case MLX4_COMM_CMD_VHCR2: if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1) goto reset_slave; slave_state[slave].vhcr_dma |= ((u64) param) << 16; break; case MLX4_COMM_CMD_VHCR_EN: if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2) goto reset_slave; slave_state[slave].vhcr_dma |= param; if (mlx4_master_activate_admin_state(priv, slave)) goto reset_slave; slave_state[slave].active = true; mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave); break; case MLX4_COMM_CMD_VHCR_POST: if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) goto reset_slave; mutex_lock(&priv->cmd.slave_cmd_mutex); if (mlx4_master_process_vhcr(dev, slave, NULL)) { mlx4_err(dev, "Failed processing vhcr for slave: %d," " resetting slave.\n", slave); mutex_unlock(&priv->cmd.slave_cmd_mutex); goto reset_slave; } mutex_unlock(&priv->cmd.slave_cmd_mutex); break; default: mlx4_warn(dev, "Bad comm cmd: %d from slave: %d\n", cmd, slave); goto reset_slave; } spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); if (!slave_state[slave].is_slave_going_down) slave_state[slave].last_cmd = cmd; else is_going_down = 1; spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); if (is_going_down) { mlx4_warn(dev, "Slave is going down, aborting command (%d)" " executing from slave: %d\n", cmd, slave); return; } __raw_writel((__force u32) cpu_to_be32(reply), &priv->mfunc.comm[slave].slave_read); mmiowb(); return; reset_slave: /* cleanup any slave resources */ mlx4_delete_all_resources_for_slave(dev, slave); spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); if (!slave_state[slave].is_slave_going_down) slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET; spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); /*with slave in the middle of flr, no need to clean resources again.*/ inform_slave_state: __raw_writel((__force u32) cpu_to_be32(reply), &priv->mfunc.comm[slave].slave_read); wmb(); } /* master command processing */ void mlx4_master_comm_channel(struct work_struct *work) { struct mlx4_mfunc_master_ctx *master = container_of(work, struct mlx4_mfunc_master_ctx,
comm_work); struct mlx4_mfunc *mfunc = container_of(master, struct mlx4_mfunc, master); struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); struct mlx4_dev *dev = &priv->dev; __be32 *bit_vec; u32 comm_cmd; u32 vec; int i, j, slave; int toggle; int served = 0; int reported = 0; u32 slt; bit_vec = master->comm_arm_bit_vector; for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) { vec = be32_to_cpu(bit_vec[i]); for (j = 0; j < 32; j++) { if (!(vec & (1 << j))) continue; ++reported; slave = (i * 32) + j; comm_cmd = swab32(readl( &mfunc->comm[slave].slave_write)); slt = swab32(readl(&mfunc->comm[slave].slave_read)) >> 31; toggle = comm_cmd >> 31; if (toggle != slt) { if (master->slave_state[slave].comm_toggle != slt) { mlx4_info(dev, "slave %d out of sync." " read toggle %d, state toggle %d. " "Resynching.\n", slave, slt, master->slave_state[slave].comm_toggle); master->slave_state[slave].comm_toggle = slt; } mlx4_master_do_cmd(dev, slave, comm_cmd >> 16 & 0xff, comm_cmd & 0xffff, toggle); ++served; } else mlx4_err(dev, "slave %d out of sync." " read toggle %d, write toggle %d.\n", slave, slt, toggle); } } if (reported && reported != served) mlx4_warn(dev, "Got command event with bitmask from %d slaves" " but %d were served\n", reported, served); } /* master command processing */ void mlx4_master_arm_comm_channel(struct work_struct *work) { struct mlx4_mfunc_master_ctx *master = container_of(work, struct mlx4_mfunc_master_ctx, arm_comm_work); struct mlx4_mfunc *mfunc = container_of(master, struct mlx4_mfunc, master); struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); struct mlx4_dev *dev = &priv->dev; if (mlx4_ARM_COMM_CHANNEL(dev)) mlx4_warn(dev, "Failed to arm comm channel events\n"); } static int sync_toggles(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int wr_toggle; int rd_toggle; unsigned long end; wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31; end = jiffies + msecs_to_jiffies(5000); while (time_before(jiffies, end)) { rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31; if (rd_toggle == wr_toggle) { priv->cmd.comm_toggle = rd_toggle; return 0; } cond_resched(); } /* * we could reach here if for example the previous VM using this * function misbehaved and left the channel with unsynced state. 
We * should fix this here and give this VM a chance to use a properly * synced channel */ mlx4_warn(dev, "recovering from previously mis-behaved VM\n"); __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read); __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write); priv->cmd.comm_toggle = 0; return 0; } int mlx4_multi_func_init(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state; int i, j, err, port; if (mlx4_is_master(dev)) priv->mfunc.comm = ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) + priv->fw.comm_base, MLX4_COMM_PAGESIZE); else priv->mfunc.comm = ioremap(pci_resource_start(dev->pdev, 2) + MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); if (!priv->mfunc.comm) { mlx4_err(dev, "Couldn't map communication vector.\n"); goto err_vhcr; } if (mlx4_is_master(dev)) { priv->mfunc.master.slave_state = kzalloc(dev->num_slaves * sizeof(struct mlx4_slave_state), GFP_KERNEL); if (!priv->mfunc.master.slave_state) goto err_comm; priv->mfunc.master.vf_admin = kzalloc(dev->num_slaves * sizeof(struct mlx4_vf_admin_state), GFP_KERNEL); if (!priv->mfunc.master.vf_admin) goto err_comm_admin; priv->mfunc.master.vf_oper = kzalloc(dev->num_slaves * sizeof(struct mlx4_vf_oper_state), GFP_KERNEL); if (!priv->mfunc.master.vf_oper) goto err_comm_oper; for (i = 0; i < dev->num_slaves; ++i) { s_state = &priv->mfunc.master.slave_state[i]; s_state->last_cmd = MLX4_COMM_CMD_RESET; mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]); for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) s_state->event_eq[j].eqn = -1; __raw_writel((__force u32) 0, &priv->mfunc.comm[i].slave_write); __raw_writel((__force u32) 0, &priv->mfunc.comm[i].slave_read); mmiowb(); for (port = 1; port <= MLX4_MAX_PORTS; port++) { s_state->vlan_filter[port] = kzalloc(sizeof(struct mlx4_vlan_fltr), GFP_KERNEL); if (!s_state->vlan_filter[port]) { if (--port) kfree(s_state->vlan_filter[port]); goto err_slaves; } INIT_LIST_HEAD(&s_state->mcast_filters[port]); priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT; priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT; priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX; priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX; } spin_lock_init(&s_state->lock); } memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size); priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; INIT_WORK(&priv->mfunc.master.comm_work, mlx4_master_comm_channel); INIT_WORK(&priv->mfunc.master.arm_comm_work, mlx4_master_arm_comm_channel); INIT_WORK(&priv->mfunc.master.slave_event_work, mlx4_gen_slave_eqe); INIT_WORK(&priv->mfunc.master.slave_flr_event_work, mlx4_master_handle_slave_flr); spin_lock_init(&priv->mfunc.master.slave_state_lock); spin_lock_init(&priv->mfunc.master.slave_eq.event_lock); priv->mfunc.master.comm_wq = create_singlethread_workqueue("mlx4_comm"); if (!priv->mfunc.master.comm_wq) goto err_slaves; if (mlx4_init_resource_tracker(dev)) goto err_thread; err = mlx4_ARM_COMM_CHANNEL(dev); if (err) { mlx4_err(dev, " Failed to arm comm channel eq: %x\n", err); goto err_resource; } } else { err = sync_toggles(dev); if (err) { mlx4_err(dev, "Couldn't sync toggles\n"); goto err_comm; } } return 0; err_resource: mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL); err_thread: flush_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq); err_slaves: while (--i) { for (port = 1; port <= MLX4_MAX_PORTS; port++) kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); } 
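/*
 * The labels below unwind mlx4_multi_func_init() in reverse allocation
 * order: each failure site jumps to the label that frees everything
 * allocated before it.  Note the bound in the err_slaves loop above:
 * "while (--i)" stops at 1 and never frees slave 0's vlan filters, and if
 * the label is reached with i == 0 it decrements past the start of the
 * array.  A bounds-safe variant (a sketch, not the committed code):
 *
 *	for (--i; i >= 0; --i)
 *		for (port = 1; port <= MLX4_MAX_PORTS; port++)
 *			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
 */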
kfree(priv->mfunc.master.vf_oper); err_comm_oper: kfree(priv->mfunc.master.vf_admin); err_comm_admin: kfree(priv->mfunc.master.slave_state); err_comm: iounmap(priv->mfunc.comm); err_vhcr: dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, priv->mfunc.vhcr, priv->mfunc.vhcr_dma); priv->mfunc.vhcr = NULL; return -ENOMEM; } int mlx4_cmd_init(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); mutex_init(&priv->cmd.hcr_mutex); mutex_init(&priv->cmd.slave_cmd_mutex); sema_init(&priv->cmd.poll_sem, 1); priv->cmd.use_events = 0; priv->cmd.toggle = 1; priv->cmd.hcr = NULL; priv->mfunc.vhcr = NULL; if (!mlx4_is_slave(dev)) { priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE); if (!priv->cmd.hcr) { mlx4_err(dev, "Couldn't map command register.\n"); return -ENOMEM; } } if (mlx4_is_mfunc(dev)) { priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, &priv->mfunc.vhcr_dma, GFP_KERNEL); if (!priv->mfunc.vhcr) { mlx4_err(dev, "Couldn't allocate VHCR.\n"); goto err_hcr; } } priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, MLX4_MAILBOX_SIZE, MLX4_MAILBOX_SIZE, 0); if (!priv->cmd.pool) goto err_vhcr; return 0; err_vhcr: if (mlx4_is_mfunc(dev)) dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, priv->mfunc.vhcr, priv->mfunc.vhcr_dma); priv->mfunc.vhcr = NULL; err_hcr: if (!mlx4_is_slave(dev)) iounmap(priv->cmd.hcr); return -ENOMEM; } void mlx4_multi_func_cleanup(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i, port; if (mlx4_is_master(dev)) { flush_workqueue(priv->mfunc.master.comm_wq); destroy_workqueue(priv->mfunc.master.comm_wq); for (i = 0; i < dev->num_slaves; i++) { for (port = 1; port <= MLX4_MAX_PORTS; port++) kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); } kfree(priv->mfunc.master.slave_state); kfree(priv->mfunc.master.vf_admin); kfree(priv->mfunc.master.vf_oper); } iounmap(priv->mfunc.comm); } void mlx4_cmd_cleanup(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); pci_pool_destroy(priv->cmd.pool); if (!mlx4_is_slave(dev)) iounmap(priv->cmd.hcr); if (mlx4_is_mfunc(dev)) dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, priv->mfunc.vhcr, priv->mfunc.vhcr_dma); priv->mfunc.vhcr = NULL; } /* * Switch to using events to issue FW commands (can only be called * after event queue for command events has been initialized). 
*/ int mlx4_cmd_use_events(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; int err = 0; priv->cmd.context = kmalloc(priv->cmd.max_cmds * sizeof (struct mlx4_cmd_context), GFP_KERNEL); if (!priv->cmd.context) return -ENOMEM; for (i = 0; i < priv->cmd.max_cmds; ++i) { priv->cmd.context[i].token = i; priv->cmd.context[i].next = i + 1; } priv->cmd.context[priv->cmd.max_cmds - 1].next = -1; priv->cmd.free_head = 0; sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds); spin_lock_init(&priv->cmd.context_lock); for (priv->cmd.token_mask = 1; priv->cmd.token_mask < priv->cmd.max_cmds; priv->cmd.token_mask <<= 1) ; /* nothing */ --priv->cmd.token_mask; down(&priv->cmd.poll_sem); priv->cmd.use_events = 1; return err; } /* * Switch back to polling (used when shutting down the device) */ void mlx4_cmd_use_polling(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; priv->cmd.use_events = 0; for (i = 0; i < priv->cmd.max_cmds; ++i) down(&priv->cmd.event_sem); kfree(priv->cmd.context); up(&priv->cmd.poll_sem); } struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) { struct mlx4_cmd_mailbox *mailbox; mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL); if (!mailbox) return ERR_PTR(-ENOMEM); mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, &mailbox->dma); if (!mailbox->buf) { kfree(mailbox); return ERR_PTR(-ENOMEM); } memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); return mailbox; } EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox) { if (!mailbox) return; pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma); kfree(mailbox); } EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox); u32 mlx4_comm_get_version(void) { return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER; } static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) { if ((vf < 0) || (vf >= dev->num_vfs)) { mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs); return -EINVAL; } return (vf+1); } int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_vport_state *s_info; int slave; if (!mlx4_is_master(dev)) return -EPROTONOSUPPORT; slave = mlx4_get_slave_indx(dev, vf); if (slave < 0) return -EINVAL; s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->mac = mlx4_mac_to_u64(mac); mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n", vf, port, (unsigned long long) s_info->mac); return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_vport_oper_state *vf_oper; struct mlx4_vport_state *vf_admin; int slave; if ((!mlx4_is_master(dev)) || !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL)) return -EPROTONOSUPPORT; if ((vlan > 4095) || (qos > 7)) return -EINVAL; slave = mlx4_get_slave_indx(dev, vf); if (slave < 0) return -EINVAL; vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if ((0 == vlan) && (0 == qos)) vf_admin->default_vlan = MLX4_VGT; else vf_admin->default_vlan = vlan; vf_admin->default_qos = qos; if (priv->mfunc.master.slave_state[slave].active && dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { mlx4_info(dev, "updating vf %d port %d config params immediately\n", vf, port); mlx4_master_immediate_activate_vlan_qos(priv, slave, port); } return 0; }
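/*
 * mlx4_set_vf_mac() and mlx4_set_vf_vlan() above are the PF-side
 * administration entry points; they are exported so the Ethernet driver
 * can forward its ndo_set_vf_* netdev callbacks to them.  A hedged caller
 * sketch (the mlx4_en_priv layout and field names are assumptions, not
 * taken from this diff):
 *
 *	static int en_set_vf_vlan(struct net_device *netdev, int vf,
 *	    u16 vlan, u8 qos)
 *	{
 *		struct mlx4_en_priv *en_priv = netdev_priv(netdev);
 *
 *		// vlan == 0 && qos == 0 restores VGT (guest-managed
 *		// tagging); any other pair switches the VF to VST.
 *		return mlx4_set_vf_vlan(en_priv->mdev->dev, en_priv->port,
 *		    vf, vlan, qos);
 *	}
 */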
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);

/* mlx4_get_slave_default_vlan -
 * return true if VST (default vlan);
 * if VST, fill vlan & qos (if not NULL)
 */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
				 u16 *vlan, u8 *qos)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		if (vlan)
			*vlan = vp_oper->state.default_vlan;
		if (qos)
			*qos = vp_oper->state.default_qos;
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);

int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->spoofchk = setting;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);

int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	struct mlx4_vport_oper_state *vp_oper;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get link current state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}

	/* update the admin & oper state on the link state */
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	s_info->link_state = link_state;
	vp_oper->state.link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);

int mlx4_get_vf_link_state(struct mlx4_dev *dev, int port, int vf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];

	return s_info->link_state;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_link_state);

Index: projects/powernv/powerpc/include/bus.h
===================================================================
--- projects/powernv/powerpc/include/bus.h (revision 291050)
+++ projects/powernv/powerpc/include/bus.h (revision 291051)
@@ -1,460 +1,465 @@
/* $NetBSD: bus.h,v 1.11 2003/07/28 17:35:54 thorpej Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1996 Charles M. Hannum. All rights reserved. * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Christopher G. Demetriou * for the NetBSD Project. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_BUS_H_ #define _MACHINE_BUS_H_ #include #define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t) #define BUS_SPACE_MAXADDR_24BIT 0xFFFFFFUL #define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFFUL #define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFFUL #define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFFUL #ifdef __powerpc64__ #define BUS_SPACE_MAXADDR 0xFFFFFFFFFFFFFFFFUL #define BUS_SPACE_MAXSIZE 0xFFFFFFFFFFFFFFFFUL #else +#ifdef BOOKE +#define BUS_SPACE_MAXADDR 0xFFFFFFFFFFULL +#define BUS_SPACE_MAXSIZE 0xFFFFFFFFUL +#else #define BUS_SPACE_MAXADDR 0xFFFFFFFFUL #define BUS_SPACE_MAXSIZE 0xFFFFFFFFUL +#endif #endif #define BUS_SPACE_MAP_CACHEABLE 0x01 #define BUS_SPACE_MAP_LINEAR 0x02 #define BUS_SPACE_MAP_PREFETCHABLE 0x04 #define BUS_SPACE_UNRESTRICTED (~0) #define BUS_SPACE_BARRIER_READ 0x01 #define BUS_SPACE_BARRIER_WRITE 0x02 struct bus_space_access; struct bus_space { /* mapping/unmapping */ int (*bs_map)(bus_addr_t, bus_size_t, int, bus_space_handle_t *); void (*bs_unmap)(bus_size_t); int (*bs_subregion)(bus_space_handle_t, bus_size_t, bus_size_t, bus_space_handle_t *); /* allocation/deallocation */ int (*bs_alloc)(bus_addr_t, bus_addr_t, bus_size_t, bus_size_t, bus_size_t, int, bus_addr_t *, bus_space_handle_t *); void (*bs_free)(bus_space_handle_t, bus_size_t); void (*bs_barrier)(bus_space_handle_t, bus_size_t, bus_size_t, int); /* Read single. */ uint8_t (*bs_r_1)(bus_space_handle_t, bus_size_t); uint16_t (*bs_r_2)(bus_space_handle_t, bus_size_t); uint32_t (*bs_r_4)(bus_space_handle_t, bus_size_t); uint64_t (*bs_r_8)(bus_space_handle_t, bus_size_t); uint16_t (*bs_r_s_2)(bus_space_handle_t, bus_size_t); uint32_t (*bs_r_s_4)(bus_space_handle_t, bus_size_t); uint64_t (*bs_r_s_8)(bus_space_handle_t, bus_size_t); /* read multiple */ void (*bs_rm_1)(bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t); void (*bs_rm_2)(bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t); void (*bs_rm_4)(bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t); void (*bs_rm_8)(bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t); void (*bs_rm_s_2)(bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t); void (*bs_rm_s_4)(bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t); void (*bs_rm_s_8)(bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t); /* read region */ void (*bs_rr_1)(bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t); void (*bs_rr_2)(bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t); void (*bs_rr_4)(bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t); void (*bs_rr_8)(bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t); void (*bs_rr_s_2)(bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t); void (*bs_rr_s_4)(bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t); void (*bs_rr_s_8)(bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t); /* write */ void (*bs_w_1)(bus_space_handle_t, bus_size_t, uint8_t); void (*bs_w_2)(bus_space_handle_t, bus_size_t, uint16_t); void (*bs_w_4)(bus_space_handle_t, bus_size_t, uint32_t); void (*bs_w_8)(bus_space_handle_t, bus_size_t, uint64_t); void (*bs_w_s_2)(bus_space_handle_t, bus_size_t, uint16_t); void (*bs_w_s_4)(bus_space_handle_t, bus_size_t, uint32_t); void (*bs_w_s_8)(bus_space_handle_t, bus_size_t, uint64_t); /* write multiple */ void (*bs_wm_1)(bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t); void (*bs_wm_2)(bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t); void (*bs_wm_4)(bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t); void (*bs_wm_8)(bus_space_handle_t, bus_size_t, 
const uint64_t *, bus_size_t); void (*bs_wm_s_2)(bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t); void (*bs_wm_s_4)(bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t); void (*bs_wm_s_8)(bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t); /* write region */ void (*bs_wr_1)(bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t); void (*bs_wr_2)(bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t); void (*bs_wr_4)(bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t); void (*bs_wr_8)(bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t); void (*bs_wr_s_2)(bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t); void (*bs_wr_s_4)(bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t); void (*bs_wr_s_8)(bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t); /* set multiple */ void (*bs_sm_1)(bus_space_handle_t, bus_size_t, uint8_t, bus_size_t); void (*bs_sm_2)(bus_space_handle_t, bus_size_t, uint16_t, bus_size_t); void (*bs_sm_4)(bus_space_handle_t, bus_size_t, uint32_t, bus_size_t); void (*bs_sm_8)(bus_space_handle_t, bus_size_t, uint64_t, bus_size_t); void (*bs_sm_s_2)(bus_space_handle_t, bus_size_t, uint16_t, bus_size_t); void (*bs_sm_s_4)(bus_space_handle_t, bus_size_t, uint32_t, bus_size_t); void (*bs_sm_s_8)(bus_space_handle_t, bus_size_t, uint64_t, bus_size_t); /* set region */ void (*bs_sr_1)(bus_space_handle_t, bus_size_t, uint8_t, bus_size_t); void (*bs_sr_2)(bus_space_handle_t, bus_size_t, uint16_t, bus_size_t); void (*bs_sr_4)(bus_space_handle_t, bus_size_t, uint32_t, bus_size_t); void (*bs_sr_8)(bus_space_handle_t, bus_size_t, uint64_t, bus_size_t); void (*bs_sr_s_2)(bus_space_handle_t, bus_size_t, uint16_t, bus_size_t); void (*bs_sr_s_4)(bus_space_handle_t, bus_size_t, uint32_t, bus_size_t); void (*bs_sr_s_8)(bus_space_handle_t, bus_size_t, uint64_t, bus_size_t); /* copy region */ void (*bs_cr_1)(bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); void (*bs_cr_2)(bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); void (*bs_cr_4)(bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); void (*bs_cr_8)(bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); void (*bs_cr_s_2)(bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); void (*bs_cr_s_4)(bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); void (*bs_cr_s_8)(bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t); }; extern struct bus_space bs_be_tag; extern struct bus_space bs_le_tag; #define __bs_c(a,b) __CONCAT(a,b) #define __bs_opname(op,size) __bs_c(__bs_c(__bs_c(bs_,op),_),size) #define __bs_rs(sz, t, h, o) \ (*(t)->__bs_opname(r,sz))(h, o) #define __bs_ws(sz, t, h, o, v) \ (*(t)->__bs_opname(w,sz))(h, o, v) #define __bs_nonsingle(type, sz, t, h, o, a, c) \ (*(t)->__bs_opname(type,sz))(h, o, a, c) #define __bs_set(type, sz, t, h, o, v, c) \ (*(t)->__bs_opname(type,sz))(h, o, v, c) #define __bs_copy(sz, t, h1, o1, h2, o2, cnt) \ (*(t)->__bs_opname(c,sz))(h1, o1, h2, o2, cnt) /* * Mapping and unmapping operations. */ #define bus_space_map(t, a, s, c, hp) (*(t)->bs_map)(a, s, c, hp) #define bus_space_unmap(t, h, s) (*(t)->bs_unmap)(h, s) #define bus_space_subregion(t, h, o, s, hp) (*(t)->bs_subregion)(h, o, s, hp) /* * Allocation and deallocation operations. 
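 *
 * Note: these wrappers, like the accessors above, dispatch through the
 * tag's function pointers; bus_space_read_4(t, h, o), for instance,
 * expands via __bs_rs(4, t, h, o) to (*(t)->bs_r_4)(h, o). A usage
 * sketch, with a hypothetical device address, size and offset:
 *
 *	bus_space_handle_t bsh;
 *	uint32_t v;
 *
 *	if (bus_space_map(&bs_be_tag, 0xf1000000, 0x1000, 0, &bsh) == 0) {
 *		v = bus_space_read_4(&bs_be_tag, bsh, 0x10);
 *		bus_space_unmap(&bs_be_tag, bsh, 0x1000);
 *	}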
*/ #define bus_space_alloc(t, rs, re, s, a, b, c, ap, hp) \ (*(t)->bs_alloc)(rs, re, s, a, b, c, ap, hp) #define bus_space_free(t, h, s) \ (*(t)->bs_free)(h, s) /* * Bus barrier operations. */ #define bus_space_barrier(t, h, o, l, f) (*(t)->bs_barrier)(h, o, l, f) /* * Bus read (single) operations. */ #define bus_space_read_1(t, h, o) __bs_rs(1,t,h,o) #define bus_space_read_2(t, h, o) __bs_rs(2,t,h,o) #define bus_space_read_4(t, h, o) __bs_rs(4,t,h,o) #define bus_space_read_8(t, h, o) __bs_rs(8,t,h,o) #define bus_space_read_stream_1 bus_space_read_1 #define bus_space_read_stream_2(t, h, o) __bs_rs(s_2,t,h,o) #define bus_space_read_stream_4(t, h, o) __bs_rs(s_4,t,h,o) #define bus_space_read_stream_8(t, h, o) __bs_rs(s_8,t,h,o) /* * Bus read multiple operations. */ #define bus_space_read_multi_1(t, h, o, a, c) \ __bs_nonsingle(rm,1,(t),(h),(o),(a),(c)) #define bus_space_read_multi_2(t, h, o, a, c) \ __bs_nonsingle(rm,2,(t),(h),(o),(a),(c)) #define bus_space_read_multi_4(t, h, o, a, c) \ __bs_nonsingle(rm,4,(t),(h),(o),(a),(c)) #define bus_space_read_multi_8(t, h, o, a, c) \ __bs_nonsingle(rm,8,(t),(h),(o),(a),(c)) #define bus_space_read_multi_stream_1 bus_space_read_multi_1 #define bus_space_read_multi_stream_2(t, h, o, a, c) \ __bs_nonsingle(rm,s_2,(t),(h),(o),(a),(c)) #define bus_space_read_multi_stream_4(t, h, o, a, c) \ __bs_nonsingle(rm,s_4,(t),(h),(o),(a),(c)) #define bus_space_read_multi_stream_8(t, h, o, a, c) \ __bs_nonsingle(rm,s_8,(t),(h),(o),(a),(c)) /* * Bus read region operations. */ #define bus_space_read_region_1(t, h, o, a, c) \ __bs_nonsingle(rr,1,(t),(h),(o),(a),(c)) #define bus_space_read_region_2(t, h, o, a, c) \ __bs_nonsingle(rr,2,(t),(h),(o),(a),(c)) #define bus_space_read_region_4(t, h, o, a, c) \ __bs_nonsingle(rr,4,(t),(h),(o),(a),(c)) #define bus_space_read_region_8(t, h, o, a, c) \ __bs_nonsingle(rr,8,(t),(h),(o),(a),(c)) #define bus_space_read_region_stream_1 bus_space_read_region_1 #define bus_space_read_region_stream_2(t, h, o, a, c) \ __bs_nonsingle(rr,s_2,(t),(h),(o),(a),(c)) #define bus_space_read_region_stream_4(t, h, o, a, c) \ __bs_nonsingle(rr,s_4,(t),(h),(o),(a),(c)) #define bus_space_read_region_stream_8(t, h, o, a, c) \ __bs_nonsingle(rr,s_8,(t),(h),(o),(a),(c)) /* * Bus write (single) operations. */ #define bus_space_write_1(t, h, o, v) __bs_ws(1,(t),(h),(o),(v)) #define bus_space_write_2(t, h, o, v) __bs_ws(2,(t),(h),(o),(v)) #define bus_space_write_4(t, h, o, v) __bs_ws(4,(t),(h),(o),(v)) #define bus_space_write_8(t, h, o, v) __bs_ws(8,(t),(h),(o),(v)) #define bus_space_write_stream_1 bus_space_write_1 #define bus_space_write_stream_2(t, h, o, v) __bs_ws(s_2,(t),(h),(o),(v)) #define bus_space_write_stream_4(t, h, o, v) __bs_ws(s_4,(t),(h),(o),(v)) #define bus_space_write_stream_8(t, h, o, v) __bs_ws(s_8,(t),(h),(o),(v)) /* * Bus write multiple operations. 
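 *
 * As with the read accessors above, "multi" transfers count items
 * through a single device offset (e.g. a FIFO register), while
 * "region" advances the offset for each item. A sketch, where
 * FIFO_REG is a made-up register offset:
 *
 *	uint32_t buf[16];
 *
 *	bus_space_read_multi_4(t, h, FIFO_REG, buf, 16);  /* one offset */
 *	bus_space_read_region_4(t, h, 0x100, buf, 16);    /* 0x100-0x13c */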
*/ #define bus_space_write_multi_1(t, h, o, a, c) \ __bs_nonsingle(wm,1,(t),(h),(o),(a),(c)) #define bus_space_write_multi_2(t, h, o, a, c) \ __bs_nonsingle(wm,2,(t),(h),(o),(a),(c)) #define bus_space_write_multi_4(t, h, o, a, c) \ __bs_nonsingle(wm,4,(t),(h),(o),(a),(c)) #define bus_space_write_multi_8(t, h, o, a, c) \ __bs_nonsingle(wm,8,(t),(h),(o),(a),(c)) #define bus_space_write_multi_stream_1 bus_space_write_multi_1 #define bus_space_write_multi_stream_2(t, h, o, a, c) \ __bs_nonsingle(wm,s_2,(t),(h),(o),(a),(c)) #define bus_space_write_multi_stream_4(t, h, o, a, c) \ __bs_nonsingle(wm,s_4,(t),(h),(o),(a),(c)) #define bus_space_write_multi_stream_8(t, h, o, a, c) \ __bs_nonsingle(wm,s_8,(t),(h),(o),(a),(c)) /* * Bus write region operations. */ #define bus_space_write_region_1(t, h, o, a, c) \ __bs_nonsingle(wr,1,(t),(h),(o),(a),(c)) #define bus_space_write_region_2(t, h, o, a, c) \ __bs_nonsingle(wr,2,(t),(h),(o),(a),(c)) #define bus_space_write_region_4(t, h, o, a, c) \ __bs_nonsingle(wr,4,(t),(h),(o),(a),(c)) #define bus_space_write_region_8(t, h, o, a, c) \ __bs_nonsingle(wr,8,(t),(h),(o),(a),(c)) #define bus_space_write_region_stream_1 bus_space_write_region_1 #define bus_space_write_region_stream_2(t, h, o, a, c) \ __bs_nonsingle(wr,s_2,(t),(h),(o),(a),(c)) #define bus_space_write_region_stream_4(t, h, o, a, c) \ __bs_nonsingle(wr,s_4,(t),(h),(o),(a),(c)) #define bus_space_write_region_stream_8(t, h, o, a, c) \ __bs_nonsingle(wr,s_8,(t),(h),(o),(a),(c)) /* * Set multiple operations. */ #define bus_space_set_multi_1(t, h, o, v, c) \ __bs_set(sm,1,(t),(h),(o),(v),(c)) #define bus_space_set_multi_2(t, h, o, v, c) \ __bs_set(sm,2,(t),(h),(o),(v),(c)) #define bus_space_set_multi_4(t, h, o, v, c) \ __bs_set(sm,4,(t),(h),(o),(v),(c)) #define bus_space_set_multi_8(t, h, o, v, c) \ __bs_set(sm,8,(t),(h),(o),(v),(c)) #define bus_space_set_multi_stream_1 bus_space_set_multi_1 #define bus_space_set_multi_stream_2(t, h, o, v, c) \ __bs_set(sm,s_2,(t),(h),(o),(v),(c)) #define bus_space_set_multi_stream_4(t, h, o, v, c) \ __bs_set(sm,s_4,(t),(h),(o),(v),(c)) #define bus_space_set_multi_stream_8(t, h, o, v, c) \ __bs_set(sm,s_8,(t),(h),(o),(v),(c)) /* * Set region operations. */ #define bus_space_set_region_1(t, h, o, v, c) \ __bs_set(sr,1,(t),(h),(o),(v),(c)) #define bus_space_set_region_2(t, h, o, v, c) \ __bs_set(sr,2,(t),(h),(o),(v),(c)) #define bus_space_set_region_4(t, h, o, v, c) \ __bs_set(sr,4,(t),(h),(o),(v),(c)) #define bus_space_set_region_8(t, h, o, v, c) \ __bs_set(sr,8,(t),(h),(o),(v),(c)) #define bus_space_set_region_stream_1 bus_space_set_region_1 #define bus_space_set_region_stream_2(t, h, o, v, c) \ __bs_set(sr,s_2,(t),(h),(o),(v),(c)) #define bus_space_set_region_stream_4(t, h, o, v, c) \ __bs_set(sr,s_4,(t),(h),(o),(v),(c)) #define bus_space_set_region_stream_8(t, h, o, v, c) \ __bs_set(sr,s_8,(t),(h),(o),(v),(c)) #if 0 /* * Copy operations. 
*/ #define bus_space_copy_region_1(t, h1, o1, h2, o2, c) \ __bs_copy(1, t, h1, o1, h2, o2, c) #define bus_space_copy_region_2(t, h1, o1, h2, o2, c) \ __bs_copy(2, t, h1, o1, h2, o2, c) #define bus_space_copy_region_4(t, h1, o1, h2, o2, c) \ __bs_copy(4, t, h1, o1, h2, o2, c) #define bus_space_copy_region_8(t, h1, o1, h2, o2, c) \ __bs_copy(8, t, h1, o1, h2, o2, c) #define bus_space_copy_region_stream_1 bus_space_copy_region_1 #define bus_space_copy_region_stream_2(t, h1, o1, h2, o2, c) \ __bs_copy(s_2, t, h1, o1, h2, o2, c) #define bus_space_copy_region_stream_4(t, h1, o1, h2, o2, c) \ __bs_copy(s_4, t, h1, o1, h2, o2, c) #define bus_space_copy_region_stream_8(t, h1, o1, h2, o2, c) \ __bs_copy(s_8, t, h1, o1, h2, o2, c) #endif #include #endif /* _MACHINE_BUS_H_ */ Index: projects/powernv/powerpc/mpc85xx/mpc85xx.c =================================================================== --- projects/powernv/powerpc/mpc85xx/mpc85xx.c (revision 291050) +++ projects/powernv/powerpc/mpc85xx/mpc85xx.c (revision 291051) @@ -1,182 +1,251 @@ /*- * Copyright (C) 2008 Semihalf, Rafal Jaworowski * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

#include
__FBSDID("$FreeBSD$");

+#include "opt_platform.h"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
+
/*
 * MPC85xx system specific routines
 */
uint32_t
ccsr_read4(uintptr_t addr)
{
	volatile uint32_t *ptr = (void *)addr;

	return (*ptr);
}

void
ccsr_write4(uintptr_t addr, uint32_t val)
{
	volatile uint32_t *ptr = (void *)addr;

	*ptr = val;
	powerpc_iomb();
}

int
law_getmax(void)
{
	uint32_t ver;
+	int law_max;

	ver = SVR_VER(mfspr(SPR_SVR));
-	if (ver == SVR_MPC8555E || ver == SVR_MPC8555)
-		return (8);
-	if (ver == SVR_MPC8548E || ver == SVR_MPC8548 ||
-	    ver == SVR_MPC8533E || ver == SVR_MPC8533)
-		return (10);
+	switch (ver) {
+	case SVR_MPC8555:
+	case SVR_MPC8555E:
+		law_max = 8;
+		break;
+	case SVR_MPC8533:
+	case SVR_MPC8533E:
+	case SVR_MPC8548:
+	case SVR_MPC8548E:
+		law_max = 10;
+		break;
+	case SVR_P5020:
+	case SVR_P5020E:
+		law_max = 32;
+		break;
+	default:
+		law_max = 8;
+	}

-	return (12);
+	return (law_max);
}

+static inline void
+law_write(uint32_t n, uint64_t bar, uint32_t sr)
+{
+#if defined(QORIQ_DPAA)
+	ccsr_write4(OCP85XX_LAWBARH(n), bar >> 32);
+	ccsr_write4(OCP85XX_LAWBARL(n), bar);
+#else
+	ccsr_write4(OCP85XX_LAWBAR(n), bar >> 12);
+#endif
+	ccsr_write4(OCP85XX_LAWSR(n), sr);
+
+	/*
+	 * The last write to the LAWAR should be followed by a read
+	 * of the LAWAR before any device tries to use any of the
+	 * windows. Moreover, the read of the LAWAR should be followed
+	 * by an isync instruction.
+	 */
+
+	ccsr_read4(OCP85XX_LAWSR(n));
+	isync();
+}
+
+static inline void
+law_read(uint32_t n, uint64_t *bar, uint32_t *sr)
+{
+#if defined(QORIQ_DPAA)
+	*bar = (uint64_t)ccsr_read4(OCP85XX_LAWBARH(n)) << 32 |
+	    ccsr_read4(OCP85XX_LAWBARL(n));
+#else
+	*bar = (uint64_t)ccsr_read4(OCP85XX_LAWBAR(n)) << 12;
+#endif
+	*sr = ccsr_read4(OCP85XX_LAWSR(n));
+}
+
+static int
+law_find_free(void)
+{
+	uint32_t i, sr;
+	uint64_t bar;
+	int law_max;
+
+	law_max = law_getmax();
+	/* Find free LAW */
+	for (i = 0; i < law_max; i++) {
+		law_read(i, &bar, &sr);
+		if ((sr & 0x80000000) == 0)
+			break;
+	}
+
+	return (i);
+}
+
#define _LAW_SR(trgt,size) (0x80000000 | (trgt << 20) | (ffsl(size) - 2))
-#define _LAW_BAR(addr) (addr >> 12)

int
-law_enable(int trgt, u_long addr, u_long size)
+law_enable(int trgt, uint64_t bar, uint32_t size)
{
-	uint32_t bar, sr;
+	uint64_t bar_tmp;
+	uint32_t sr, sr_tmp;
	int i, law_max;

	if (size == 0)
		return (0);

	law_max = law_getmax();
-	bar = _LAW_BAR(addr);
	sr = _LAW_SR(trgt, size);

	/* Bail if already programmed. */
-	for (i = 0; i < law_max; i++)
-		if (sr == ccsr_read4(OCP85XX_LAWSR(i)) &&
-		    bar == ccsr_read4(OCP85XX_LAWBAR(i)))
+	for (i = 0; i < law_max; i++) {
+		law_read(i, &bar_tmp, &sr_tmp);
+		if (sr == sr_tmp && bar == bar_tmp)
			return (0);
+	}

	/* Find an unused access window. */
-	for (i = 0; i < law_max; i++)
-		if ((ccsr_read4(OCP85XX_LAWSR(i)) & 0x80000000) == 0)
-			break;
+	i = law_find_free();

	if (i == law_max)
		return (ENOSPC);

-	ccsr_write4(OCP85XX_LAWBAR(i), bar);
-	ccsr_write4(OCP85XX_LAWSR(i), sr);
+	law_write(i, bar, sr);
	return (0);
}

int
-law_disable(int trgt, u_long addr, u_long size)
+law_disable(int trgt, uint64_t bar, uint32_t size)
{
-	uint32_t bar, sr;
+	uint64_t bar_tmp;
+	uint32_t sr, sr_tmp;
	int i, law_max;

	law_max = law_getmax();
-	bar = _LAW_BAR(addr);
	sr = _LAW_SR(trgt, size);

	/* Find and disable requested LAW.
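	 * For illustration, _LAW_SR() above packs the enable bit, the
	 * target and the encoded window size: a 1 GB (1 << 30) window
	 * for a hypothetical target 0x10 gives
	 * 0x80000000 | (0x10 << 20) | (ffsl(1UL << 30) - 2) = 0x8100001d.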
*/ - for (i = 0; i < law_max; i++) - if (sr == ccsr_read4(OCP85XX_LAWSR(i)) && - bar == ccsr_read4(OCP85XX_LAWBAR(i))) { - ccsr_write4(OCP85XX_LAWBAR(i), 0); - ccsr_write4(OCP85XX_LAWSR(i), 0); + for (i = 0; i < law_max; i++) { + law_read(i, &bar_tmp, &sr_tmp); + if (sr == sr_tmp && bar == bar_tmp) { + law_write(i, 0, 0); return (0); } + } return (ENOENT); } int law_pci_target(struct resource *res, int *trgt_mem, int *trgt_io) { u_long start; uint32_t ver; int trgt, rv; ver = SVR_VER(mfspr(SPR_SVR)); start = rman_get_start(res) & 0xf000; rv = 0; trgt = -1; switch (start) { + case 0x0000: case 0x8000: trgt = 0; break; + case 0x1000: case 0x9000: trgt = 1; break; + case 0x2000: case 0xa000: if (ver == SVR_MPC8548E || ver == SVR_MPC8548) trgt = 3; else trgt = 2; break; + case 0x3000: case 0xb000: if (ver == SVR_MPC8548E || ver == SVR_MPC8548) rv = EINVAL; else trgt = 3; break; default: rv = ENXIO; } if (rv == 0) { *trgt_mem = trgt; *trgt_io = trgt; } return (rv); } Index: projects/powernv/powerpc/mpc85xx/mpc85xx.h =================================================================== --- projects/powernv/powerpc/mpc85xx/mpc85xx.h (revision 291050) +++ projects/powernv/powerpc/mpc85xx/mpc85xx.h (revision 291051) @@ -1,92 +1,113 @@ /*- * Copyright (C) 2008 Semihalf, Rafal Jaworowski * Copyright 2006 by Juniper Networks. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MPC85XX_H_ #define _MPC85XX_H_ #include /* * Configuration control and status registers */ extern vm_offset_t ccsrbar_va; #define CCSRBAR_VA ccsrbar_va #define OCP85XX_CCSRBAR (CCSRBAR_VA + 0x0) #define OCP85XX_BPTR (CCSRBAR_VA + 0x20) /* * E500 Coherency Module registers */ #define OCP85XX_EEBPCR (CCSRBAR_VA + 0x1010) /* * Local access registers */ -#define OCP85XX_LAWBAR(n) (CCSRBAR_VA + 0xc08 + 0x20 * (n)) -#define OCP85XX_LAWSR(n) (CCSRBAR_VA + 0xc10 + 0x20 * (n)) +#if defined(QORIQ_DPAA) +/* Write order: OCP_LAWBARH -> OCP_LAWBARL -> OCP_LAWSR */ +#define OCP85XX_LAWBARH(n) (CCSRBAR_VA + 0xc00 + 0x10 * (n)) +#define OCP85XX_LAWBARL(n) (CCSRBAR_VA + 0xc04 + 0x10 * (n)) +#define OCP85XX_LAWSR(n) (CCSRBAR_VA + 0xc08 + 0x10 * (n)) +#else +#define OCP85XX_LAWBAR(n) (CCSRBAR_VA + 0xc08 + 0x10 * (n)) +#define OCP85XX_LAWSR(n) (CCSRBAR_VA + 0xc10 + 0x10 * (n)) +#endif -#define OCP85XX_TGTIF_LBC 4 -#define OCP85XX_TGTIF_RAM_INTL 11 -#define OCP85XX_TGTIF_RIO 12 -#define OCP85XX_TGTIF_RAM1 15 -#define OCP85XX_TGTIF_RAM2 22 +/* Attribute register */ +#define OCP85XX_ENA_MASK 0x80000000 +#define OCP85XX_DIS_MASK 0x7fffffff +#if defined(QORIQ_DPAA) +#define OCP85XX_TGTIF_LBC 0x1f +#define OCP85XX_TGTIF_RAM_INTL 0x14 +#define OCP85XX_TGTIF_RAM1 0x10 +#define OCP85XX_TGTIF_RAM2 0x11 +#define OCP85XX_TGTIF_BMAN 0x18 +#define OCP85XX_TGTIF_QMAN 0x3C +#define OCP85XX_TRGT_SHIFT 20 +#else +#define OCP85XX_TGTIF_LBC 0x04 +#define OCP85XX_TGTIF_RAM_INTL 0x0b +#define OCP85XX_TGTIF_RIO 0x0c +#define OCP85XX_TGTIF_RAM1 0x0f +#define OCP85XX_TGTIF_RAM2 0x16 +#endif + /* * L2 cache registers */ #define OCP85XX_L2CTL (CCSRBAR_VA + 0x20000) /* * Power-On Reset configuration */ #define OCP85XX_PORDEVSR (CCSRBAR_VA + 0xe000c) #define OCP85XX_PORDEVSR_IO_SEL 0x00780000 #define OCP85XX_PORDEVSR_IO_SEL_SHIFT 19 #define OCP85XX_PORDEVSR2 (CCSRBAR_VA + 0xe0014) /* * Status Registers. */ #define OCP85XX_RSTCR (CCSRBAR_VA + 0xe00b0) /* * Prototypes. */ uint32_t ccsr_read4(uintptr_t addr); void ccsr_write4(uintptr_t addr, uint32_t val); -int law_enable(int trgt, u_long addr, u_long size); -int law_disable(int trgt, u_long addr, u_long size); +int law_enable(int trgt, uint64_t bar, uint32_t size); +int law_disable(int trgt, uint64_t bar, uint32_t size); int law_getmax(void); int law_pci_target(struct resource *, int *, int *); DECLARE_CLASS(mpc85xx_platform); int mpc85xx_attach(platform_t); #endif /* _MPC85XX_H_ */ Index: projects/powernv/x86/x86/intr_machdep.c =================================================================== --- projects/powernv/x86/x86/intr_machdep.c (revision 291050) +++ projects/powernv/x86/x86/intr_machdep.c (revision 291051) @@ -1,575 +1,584 @@ /*- * Copyright (c) 2003 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Machine dependent interrupt code for x86. For x86, we have to * deal with different PICs. Thus, we use the passed in vector to lookup * an interrupt source associated with that vector. The interrupt source * describes which PIC the source belongs to and includes methods to handle * that source. */ #include "opt_atpic.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #ifndef DEV_ATPIC #include #include #include #include #ifdef PC98 #include #else #include #endif #endif #define MAX_STRAY_LOG 5 typedef void (*mask_fn)(void *); static int intrcnt_index; static struct intsrc *interrupt_sources[NUM_IO_INTS]; static struct mtx intr_table_lock; static struct mtx intrcnt_lock; static TAILQ_HEAD(pics_head, pic) pics; #ifdef SMP static int assign_cpu; #endif u_long intrcnt[INTRCNT_COUNT]; char intrnames[INTRCNT_COUNT * (MAXCOMLEN + 1)]; size_t sintrcnt = sizeof(intrcnt); size_t sintrnames = sizeof(intrnames); static int intr_assign_cpu(void *arg, int cpu); static void intr_disable_src(void *arg); static void intr_init(void *__dummy); static int intr_pic_registered(struct pic *pic); static void intrcnt_setname(const char *name, int index); static void intrcnt_updatename(struct intsrc *is); static void intrcnt_register(struct intsrc *is); static int intr_pic_registered(struct pic *pic) { struct pic *p; TAILQ_FOREACH(p, &pics, pics) { if (p == pic) return (1); } return (0); } /* * Register a new interrupt controller (PIC). This is to support suspend * and resume where we suspend/resume controllers rather than individual * sources. This also allows controllers with no active sources (such as * 8259As in a system using the APICs) to participate in suspend and resume. */ int intr_register_pic(struct pic *pic) { int error; mtx_lock(&intr_table_lock); if (intr_pic_registered(pic)) error = EBUSY; else { TAILQ_INSERT_TAIL(&pics, pic, pics); error = 0; } mtx_unlock(&intr_table_lock); return (error); } /* * Register a new interrupt source with the global interrupt system. * The global interrupts need to be disabled when this function is * called. 
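 *
 * A hypothetical PIC driver (the "my_" names are illustrative only)
 * would wire itself up roughly as follows:
 *
 *	intr_register_pic(&my_pic);
 *	my_isrc.is_pic = &my_pic;
 *	intr_register_source(&my_isrc);	/* vector from pic_vector() */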
*/ int intr_register_source(struct intsrc *isrc) { int error, vector; KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC")); vector = isrc->is_pic->pic_vector(isrc); if (interrupt_sources[vector] != NULL) return (EEXIST); error = intr_event_create(&isrc->is_event, isrc, 0, vector, intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source, (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:", vector); if (error) return (error); mtx_lock(&intr_table_lock); if (interrupt_sources[vector] != NULL) { mtx_unlock(&intr_table_lock); intr_event_destroy(isrc->is_event); return (EEXIST); } intrcnt_register(isrc); interrupt_sources[vector] = isrc; isrc->is_handlers = 0; mtx_unlock(&intr_table_lock); return (0); } struct intsrc * intr_lookup_source(int vector) { return (interrupt_sources[vector]); } int intr_add_handler(const char *name, int vector, driver_filter_t filter, driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep) { struct intsrc *isrc; int error; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); error = intr_event_add_handler(isrc->is_event, name, filter, handler, arg, intr_priority(flags), flags, cookiep); if (error == 0) { mtx_lock(&intr_table_lock); intrcnt_updatename(isrc); isrc->is_handlers++; if (isrc->is_handlers == 1) { isrc->is_pic->pic_enable_intr(isrc); isrc->is_pic->pic_enable_source(isrc); } mtx_unlock(&intr_table_lock); } return (error); } int intr_remove_handler(void *cookie) { struct intsrc *isrc; - int error; + int error, mtx_owned; isrc = intr_handler_source(cookie); error = intr_event_remove_handler(cookie); if (error == 0) { - mtx_lock(&intr_table_lock); + /* + * Recursion is needed here so PICs can remove interrupts + * while resuming. It was previously not possible due to + * intr_resume holding the intr_table_lock and + * intr_remove_handler recursing on it. + */ + mtx_owned = mtx_owned(&intr_table_lock); + if (mtx_owned == 0) + mtx_lock(&intr_table_lock); isrc->is_handlers--; if (isrc->is_handlers == 0) { isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI); isrc->is_pic->pic_disable_intr(isrc); } intrcnt_updatename(isrc); - mtx_unlock(&intr_table_lock); + if (mtx_owned == 0) + mtx_unlock(&intr_table_lock); } return (error); } int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol) { struct intsrc *isrc; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); return (isrc->is_pic->pic_config_intr(isrc, trig, pol)); } static void intr_disable_src(void *arg) { struct intsrc *isrc; isrc = arg; isrc->is_pic->pic_disable_source(isrc, PIC_EOI); } void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) { struct intr_event *ie; int vector; /* * We count software interrupts when we process them. The * code here follows previous practice, but there's an * argument for counting hardware interrupts when they're * processed too. */ (*isrc->is_count)++; PCPU_INC(cnt.v_intr); ie = isrc->is_event; /* * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; /* * For stray interrupts, mask and EOI the source, bump the * stray count, and log the condition. 
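	 * For example, with MAX_STRAY_LOG == 5 the first four strays on
	 * a vector are logged at LOG_ERR; the fifth logs a final
	 * LOG_CRIT message and later strays are only counted.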
*/ if (intr_event_handle(ie, frame) != 0) { isrc->is_pic->pic_disable_source(isrc, PIC_EOI); (*isrc->is_straycount)++; if (*isrc->is_straycount < MAX_STRAY_LOG) log(LOG_ERR, "stray irq%d\n", vector); else if (*isrc->is_straycount == MAX_STRAY_LOG) log(LOG_CRIT, "too many stray irq %d's: not logging anymore\n", vector); } } void intr_resume(bool suspend_cancelled) { struct pic *pic; #ifndef DEV_ATPIC atpic_reset(); #endif mtx_lock(&intr_table_lock); TAILQ_FOREACH(pic, &pics, pics) { if (pic->pic_resume != NULL) pic->pic_resume(pic, suspend_cancelled); } mtx_unlock(&intr_table_lock); } void intr_suspend(void) { struct pic *pic; mtx_lock(&intr_table_lock); TAILQ_FOREACH_REVERSE(pic, &pics, pics_head, pics) { if (pic->pic_suspend != NULL) pic->pic_suspend(pic); } mtx_unlock(&intr_table_lock); } static int intr_assign_cpu(void *arg, int cpu) { #ifdef SMP struct intsrc *isrc; int error; /* * Don't do anything during early boot. We will pick up the * assignment once the APs are started. */ if (assign_cpu && cpu != NOCPU) { isrc = arg; mtx_lock(&intr_table_lock); error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]); mtx_unlock(&intr_table_lock); } else error = 0; return (error); #else return (EOPNOTSUPP); #endif } static void intrcnt_setname(const char *name, int index) { snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); } static void intrcnt_updatename(struct intsrc *is) { intrcnt_setname(is->is_event->ie_fullname, is->is_index); } static void intrcnt_register(struct intsrc *is) { char straystr[MAXCOMLEN + 1]; KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__)); mtx_lock_spin(&intrcnt_lock); is->is_index = intrcnt_index; intrcnt_index += 2; snprintf(straystr, MAXCOMLEN + 1, "stray irq%d", is->is_pic->pic_vector(is)); intrcnt_updatename(is); is->is_count = &intrcnt[is->is_index]; intrcnt_setname(straystr, is->is_index + 1); is->is_straycount = &intrcnt[is->is_index + 1]; mtx_unlock_spin(&intrcnt_lock); } void intrcnt_add(const char *name, u_long **countp) { mtx_lock_spin(&intrcnt_lock); *countp = &intrcnt[intrcnt_index]; intrcnt_setname(name, intrcnt_index); intrcnt_index++; mtx_unlock_spin(&intrcnt_lock); } static void intr_init(void *dummy __unused) { intrcnt_setname("???", 0); intrcnt_index = 1; TAILQ_INIT(&pics); mtx_init(&intr_table_lock, "intr sources", NULL, MTX_DEF); mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN); } SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL); #ifndef DEV_ATPIC /* Initialize the two 8259A's to a known-good shutdown state. */ void atpic_reset(void) { outb(IO_ICU1, ICW1_RESET | ICW1_IC4); outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS); outb(IO_ICU1 + ICU_IMR_OFFSET, IRQ_MASK(ICU_SLAVEID)); outb(IO_ICU1 + ICU_IMR_OFFSET, MASTER_MODE); outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff); outb(IO_ICU1, OCW3_SEL | OCW3_RR); outb(IO_ICU2, ICW1_RESET | ICW1_IC4); outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8); outb(IO_ICU2 + ICU_IMR_OFFSET, ICU_SLAVEID); outb(IO_ICU2 + ICU_IMR_OFFSET, SLAVE_MODE); outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff); outb(IO_ICU2, OCW3_SEL | OCW3_RR); } #endif /* Add a description to an active interrupt handler. 
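 * For example, with a cookie previously returned through
 * intr_add_handler() (vector and description illustrative):
 *
 *	error = intr_describe(10, cookie, "em0 rxq0");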
*/ int intr_describe(u_int vector, void *ih, const char *descr) { struct intsrc *isrc; int error; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); error = intr_event_describe_handler(isrc->is_event, ih, descr); if (error) return (error); intrcnt_updatename(isrc); return (0); } void intr_reprogram(void) { struct intsrc *is; int v; mtx_lock(&intr_table_lock); for (v = 0; v < NUM_IO_INTS; v++) { is = interrupt_sources[v]; if (is == NULL) continue; if (is->is_pic->pic_reprogram_pin != NULL) is->is_pic->pic_reprogram_pin(is); } mtx_unlock(&intr_table_lock); } #ifdef DDB /* * Dump data about interrupt handlers */ DB_SHOW_COMMAND(irqs, db_show_irqs) { struct intsrc **isrc; int i, verbose; if (strcmp(modif, "v") == 0) verbose = 1; else verbose = 0; isrc = interrupt_sources; for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++) if (*isrc != NULL) db_dump_intr_event((*isrc)->is_event, verbose); } #endif #ifdef SMP /* * Support for balancing interrupt sources across CPUs. For now we just * allocate CPUs round-robin. */ static cpuset_t intr_cpus = CPUSET_T_INITIALIZER(0x1); static int current_cpu; /* * Return the CPU that the next interrupt source should use. For now * this just returns the next local APIC according to round-robin. */ u_int intr_next_cpu(void) { u_int apic_id; /* Leave all interrupts on the BSP during boot. */ if (!assign_cpu) return (PCPU_GET(apic_id)); mtx_lock_spin(&icu_lock); apic_id = cpu_apic_ids[current_cpu]; do { current_cpu++; if (current_cpu > mp_maxid) current_cpu = 0; } while (!CPU_ISSET(current_cpu, &intr_cpus)); mtx_unlock_spin(&icu_lock); return (apic_id); } /* Attempt to bind the specified IRQ to the specified CPU. */ int intr_bind(u_int vector, u_char cpu) { struct intsrc *isrc; isrc = intr_lookup_source(vector); if (isrc == NULL) return (EINVAL); return (intr_event_bind(isrc->is_event, cpu)); } /* * Add a CPU to our mask of valid CPUs that can be destinations of * interrupts. */ void intr_add_cpu(u_int cpu) { if (cpu >= MAXCPU) panic("%s: Invalid CPU ID", __func__); if (bootverbose) printf("INTR: Adding local APIC %d as a target\n", cpu_apic_ids[cpu]); CPU_SET(cpu, &intr_cpus); } /* * Distribute all the interrupt sources among the available CPUs once the * AP's have been launched. */ static void intr_shuffle_irqs(void *arg __unused) { struct intsrc *isrc; int i; /* Don't bother on UP. */ if (mp_ncpus == 1) return; /* Round-robin assign a CPU to each enabled source. */ mtx_lock(&intr_table_lock); assign_cpu = 1; for (i = 0; i < NUM_IO_INTS; i++) { isrc = interrupt_sources[i]; if (isrc != NULL && isrc->is_handlers > 0) { /* * If this event is already bound to a CPU, * then assign the source to that CPU instead * of picking one via round-robin. Note that * this is careful to only advance the * round-robin if the CPU assignment succeeds. */ if (isrc->is_event->ie_cpu != NOCPU) (void)isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[isrc->is_event->ie_cpu]); else if (isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]) == 0) (void)intr_next_cpu(); } } mtx_unlock(&intr_table_lock); } SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, NULL); #else /* * Always route interrupts to the current processor in the UP case. 
*/ u_int intr_next_cpu(void) { return (PCPU_GET(apic_id)); } #endif Index: projects/powernv/x86/xen/hvm.c =================================================================== --- projects/powernv/x86/xen/hvm.c (revision 291050) +++ projects/powernv/x86/xen/hvm.c (revision 291051) @@ -1,427 +1,403 @@ /* * Copyright (c) 2008, 2013 Citrix Systems, Inc. * Copyright (c) 2012 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /*--------------------------- Forward Declarations ---------------------------*/ -#ifdef SMP -static void xen_hvm_cpu_resume(void); -#endif static void xen_hvm_cpu_init(void); -/*---------------------------- Extern Declarations ---------------------------*/ -/* Variables used by mp_machdep to perform the bitmap IPI */ -extern volatile u_int cpu_ipi_pending[MAXCPU]; - /*-------------------------------- Local Types -------------------------------*/ enum xen_hvm_init_type { XEN_HVM_INIT_COLD, XEN_HVM_INIT_CANCELLED_SUSPEND, XEN_HVM_INIT_RESUME }; /*-------------------------------- Global Data -------------------------------*/ enum xen_domain_type xen_domain_type = XEN_NATIVE; #ifdef SMP struct cpu_ops xen_hvm_cpu_ops = { .cpu_init = xen_hvm_cpu_init, - .cpu_resume = xen_hvm_cpu_resume + .cpu_resume = xen_hvm_cpu_init }; #endif static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support"); /** * If non-zero, the hypervisor has been configured to use a direct * IDT event callback for interrupt injection. 
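 *
 * The callback is requested from the hypervisor roughly as in this
 * condensed sketch of xen_hvm_set_callback() below:
 *
 *	xhp.domid = DOMID_SELF;
 *	xhp.index = HVM_PARAM_CALLBACK_IRQ;
 *	xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
 *	error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);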
*/ int xen_vector_callback_enabled; /*------------------------------- Per-CPU Data -------------------------------*/ DPCPU_DEFINE(struct vcpu_info, vcpu_local_info); DPCPU_DEFINE(struct vcpu_info *, vcpu_info); /*------------------ Hypervisor Access Shared Memory Regions -----------------*/ shared_info_t *HYPERVISOR_shared_info; start_info_t *HYPERVISOR_start_info; /*------------------------------ Sysctl tunables -----------------------------*/ int xen_disable_pv_disks = 0; int xen_disable_pv_nics = 0; TUNABLE_INT("hw.xen.disable_pv_disks", &xen_disable_pv_disks); TUNABLE_INT("hw.xen.disable_pv_nics", &xen_disable_pv_nics); -#ifdef SMP -/*---------------------- XEN diverged cpu operations -------------------------*/ -static void -xen_hvm_cpu_resume(void) -{ - u_int cpuid = PCPU_GET(cpuid); - - /* - * Reset pending bitmap IPIs, because Xen doesn't preserve pending - * event channels on migration. - */ - cpu_ipi_pending[cpuid] = 0; - - /* register vcpu_info area */ - xen_hvm_cpu_init(); -} -#endif /*---------------------- XEN Hypervisor Probe and Setup ----------------------*/ static uint32_t xen_hvm_cpuid_base(void) { uint32_t base, regs[4]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { do_cpuid(base, regs); if (!memcmp("XenVMMXenVMM", ®s[1], 12) && (regs[0] - base) >= 2) return (base); } return (0); } /* * Allocate and fill in the hypcall page. */ static int xen_hvm_init_hypercall_stubs(enum xen_hvm_init_type init_type) { uint32_t base, regs[4]; int i; if (xen_pv_domain()) { /* hypercall page is already set in the PV case */ return (0); } base = xen_hvm_cpuid_base(); if (base == 0) return (ENXIO); if (init_type == XEN_HVM_INIT_COLD) { do_cpuid(base + 1, regs); printf("XEN: Hypervisor version %d.%d detected.\n", regs[0] >> 16, regs[0] & 0xffff); } /* * Find the hypercall pages. */ do_cpuid(base + 2, regs); for (i = 0; i < regs[0]; i++) wrmsr(regs[1], vtophys(&hypercall_page + i * PAGE_SIZE) + i); return (0); } static void xen_hvm_init_shared_info_page(void) { struct xen_add_to_physmap xatp; if (xen_pv_domain()) { /* * Already setup in the PV case, shared_info is passed inside * of the start_info struct at start of day. */ return; } if (HYPERVISOR_shared_info == NULL) { HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT); if (HYPERVISOR_shared_info == NULL) panic("Unable to allocate Xen shared info page"); } xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) panic("HYPERVISOR_memory_op failed"); } /* * Tell the hypervisor how to contact us for event channel callbacks. */ void xen_hvm_set_callback(device_t dev) { struct xen_hvm_param xhp; int irq; if (xen_vector_callback_enabled) return; xhp.domid = DOMID_SELF; xhp.index = HVM_PARAM_CALLBACK_IRQ; if (xen_feature(XENFEAT_hvm_callback_vector) != 0) { int error; xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN); error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp); if (error == 0) { xen_vector_callback_enabled = 1; return; } printf("Xen HVM callback vector registration failed (%d). " "Falling back to emulated device interrupt\n", error); } xen_vector_callback_enabled = 0; if (dev == NULL) { /* * Called from early boot or resume. * xenpci will invoke us again later. 
*/ return; } irq = pci_get_irq(dev); if (irq < 16) { xhp.value = HVM_CALLBACK_GSI(irq); } else { u_int slot; u_int pin; slot = pci_get_slot(dev); pin = pci_get_intpin(dev) - 1; xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin); } if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0) panic("Can't set evtchn callback"); } #define XEN_MAGIC_IOPORT 0x10 enum { XMI_MAGIC = 0x49d2, XMI_UNPLUG_IDE_DISKS = 0x01, XMI_UNPLUG_NICS = 0x02, XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04 }; static void xen_hvm_disable_emulated_devices(void) { u_short disable_devs = 0; if (xen_pv_domain()) { /* * No emulated devices in the PV case, so no need to unplug * anything. */ if (xen_disable_pv_disks != 0 || xen_disable_pv_nics != 0) printf("PV devices cannot be disabled in PV guests\n"); return; } if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC) return; if (xen_disable_pv_disks == 0) { if (bootverbose) printf("XEN: disabling emulated disks\n"); disable_devs |= XMI_UNPLUG_IDE_DISKS; } if (xen_disable_pv_nics == 0) { if (bootverbose) printf("XEN: disabling emulated nics\n"); disable_devs |= XMI_UNPLUG_NICS; } if (disable_devs != 0) outw(XEN_MAGIC_IOPORT, disable_devs); } static void xen_hvm_init(enum xen_hvm_init_type init_type) { int error; int i; if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND) return; error = xen_hvm_init_hypercall_stubs(init_type); switch (init_type) { case XEN_HVM_INIT_COLD: if (error != 0) return; /* * If xen_domain_type is not set at this point * it means we are inside a (PV)HVM guest, because * for PVH the guest type is set much earlier * (see hammer_time_xen). */ if (!xen_domain()) { xen_domain_type = XEN_HVM_DOMAIN; vm_guest = VM_GUEST_XEN; } setup_xen_features(); #ifdef SMP cpu_ops = xen_hvm_cpu_ops; #endif break; case XEN_HVM_INIT_RESUME: if (error != 0) panic("Unable to init Xen hypercall stubs on resume"); /* Clear stale vcpu_info. */ CPU_FOREACH(i) DPCPU_ID_SET(i, vcpu_info, NULL); break; default: panic("Unsupported HVM initialization type"); } xen_vector_callback_enabled = 0; xen_hvm_set_callback(NULL); /* * On (PV)HVM domains we need to request the hypervisor to * fill the shared info page, for PVH guest the shared_info page * is passed inside the start_info struct and is already set, so this * functions are no-ops. */ xen_hvm_init_shared_info_page(); xen_hvm_disable_emulated_devices(); } void xen_hvm_suspend(void) { } void xen_hvm_resume(bool suspend_cancelled) { xen_hvm_init(suspend_cancelled ? XEN_HVM_INIT_CANCELLED_SUSPEND : XEN_HVM_INIT_RESUME); /* Register vcpu_info area for CPU#0. */ xen_hvm_cpu_init(); } static void xen_hvm_sysinit(void *arg __unused) { xen_hvm_init(XEN_HVM_INIT_COLD); } static void xen_set_vcpu_id(void) { struct pcpu *pc; int i; if (!xen_hvm_domain()) return; /* Set vcpu_id to acpi_id */ CPU_FOREACH(i) { pc = pcpu_find(i); pc->pc_vcpu_id = pc->pc_acpi_id; if (bootverbose) printf("XEN: CPU %u has VCPU ID %u\n", i, pc->pc_vcpu_id); } } static void xen_hvm_cpu_init(void) { struct vcpu_register_vcpu_info info; struct vcpu_info *vcpu_info; int cpu, rc; if (!xen_domain()) return; if (DPCPU_GET(vcpu_info) != NULL) { /* * vcpu_info is already set. We're resuming * from a failed migration and our pre-suspend * configuration is still valid. 
	 */
		return;
	}

	vcpu_info = DPCPU_PTR(vcpu_local_info);
	cpu = PCPU_GET(vcpu_id);
	info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT;
	info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info));

	rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	if (rc != 0)
		DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]);
	else
		DPCPU_SET(vcpu_info, vcpu_info);
}

SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_sysinit,
    NULL);
SYSINIT(xen_hvm_cpu_init, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_cpu_init,
    NULL);
SYSINIT(xen_set_vcpu_id, SI_SUB_CPU, SI_ORDER_ANY, xen_set_vcpu_id, NULL);

Index: projects/powernv/x86/xen/xen_intr.c
===================================================================
--- projects/powernv/x86/xen/xen_intr.c (revision 291050)
+++ projects/powernv/x86/xen/xen_intr.c (revision 291051)
@@ -1,1636 +1,1639 @@
/******************************************************************************
 * xen_intr.c
 *
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef DDB
#include
#endif

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int last_processed_l2i;

	/** Pointer to this CPU's interrupt statistic counter. */
	u_long *evtchn_intrcnt;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 */
	u_long evtchn_enabled[sizeof(u_long) * 8];
};

/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
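 * For example, with LONG_BIT == 64 the fields below are initialized
 * to 63, so the first scan begins at (63 + 1) % 64 == 0, i.e. port 0.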
*/ static DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = { .last_processed_l1i = LONG_BIT - 1, .last_processed_l2i = LONG_BIT - 1 }; DPCPU_DECLARE(struct vcpu_info *, vcpu_info); #define XEN_EEXIST 17 /* Xen "already exists" error */ #define XEN_ALLOCATE_VECTOR 0 /* Allocate a vector for this event channel */ #define XEN_INVALID_EVTCHN 0 /* Invalid event channel */ #define is_valid_evtchn(x) ((x) != XEN_INVALID_EVTCHN) struct xenisrc { struct intsrc xi_intsrc; enum evtchn_type xi_type; int xi_cpu; /* VCPU for delivery. */ int xi_vector; /* Global isrc vector number. */ evtchn_port_t xi_port; int xi_pirq; int xi_virq; void *xi_cookie; u_int xi_close:1; /* close on unbind? */ u_int xi_activehi:1; u_int xi_edgetrigger:1; u_int xi_masked:1; }; #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) static void xen_intr_suspend(struct pic *); static void xen_intr_resume(struct pic *, bool suspend_cancelled); static void xen_intr_enable_source(struct intsrc *isrc); static void xen_intr_disable_source(struct intsrc *isrc, int eoi); static void xen_intr_eoi_source(struct intsrc *isrc); static void xen_intr_enable_intr(struct intsrc *isrc); static void xen_intr_disable_intr(struct intsrc *isrc); static int xen_intr_vector(struct intsrc *isrc); static int xen_intr_source_pending(struct intsrc *isrc); static int xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig, enum intr_polarity pol); static int xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id); static void xen_intr_pirq_enable_source(struct intsrc *isrc); static void xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi); static void xen_intr_pirq_eoi_source(struct intsrc *isrc); static void xen_intr_pirq_enable_intr(struct intsrc *isrc); static void xen_intr_pirq_disable_intr(struct intsrc *isrc); static int xen_intr_pirq_config_intr(struct intsrc *isrc, enum intr_trigger trig, enum intr_polarity pol); /** * PIC interface for all event channel port types except physical IRQs. */ struct pic xen_intr_pic = { .pic_enable_source = xen_intr_enable_source, .pic_disable_source = xen_intr_disable_source, .pic_eoi_source = xen_intr_eoi_source, .pic_enable_intr = xen_intr_enable_intr, .pic_disable_intr = xen_intr_disable_intr, .pic_vector = xen_intr_vector, .pic_source_pending = xen_intr_source_pending, .pic_suspend = xen_intr_suspend, .pic_resume = xen_intr_resume, .pic_config_intr = xen_intr_config_intr, .pic_assign_cpu = xen_intr_assign_cpu }; /** * PIC interface for all event channel representing * physical interrupt sources. */ struct pic xen_intr_pirq_pic = { .pic_enable_source = xen_intr_pirq_enable_source, .pic_disable_source = xen_intr_pirq_disable_source, .pic_eoi_source = xen_intr_pirq_eoi_source, .pic_enable_intr = xen_intr_pirq_enable_intr, .pic_disable_intr = xen_intr_pirq_disable_intr, .pic_vector = xen_intr_vector, .pic_source_pending = xen_intr_source_pending, .pic_config_intr = xen_intr_pirq_config_intr, .pic_assign_cpu = xen_intr_assign_cpu }; static struct mtx xen_intr_isrc_lock; static int xen_intr_auto_vector_count; static struct xenisrc *xen_intr_port_to_isrc[NR_EVENT_CHANNELS]; static u_long *xen_intr_pirq_eoi_map; static boolean_t xen_intr_pirq_eoi_map_enabled; /*------------------------- Private Functions --------------------------------*/ /** * Disable signal delivery for an event channel port on the * specified CPU. * * \param port The event channel port to mask. * * This API is used to manage the port<=>CPU binding of event * channel handlers. 
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static inline void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which signal delivery should be enabled.
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static inline void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_set_bit(port, pcpu->evtchn_enabled);
}

/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
	char buf[MAXCOMLEN + 1];
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	if (pcpu->evtchn_intrcnt != NULL)
		return;

	snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
	intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}

/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
	int isrc_idx;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		struct xenisrc *isrc;
		u_int vector;

		vector = FIRST_EVTCHN_INT + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
			KASSERT(isrc->xi_intsrc.is_handlers == 0,
			    ("Free evtchn still has handlers"));
			isrc->xi_type = type;
			return (isrc);
		}
	}
	return (NULL);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \param type    The type of interrupt source to create.
 * \param vector  The global vector to use for EVTCHN_TYPE_PIRQ sources;
 *                ignored for other types, which allocate a fresh vector.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type, int vector)
{
	static int warned;
	struct xenisrc *isrc;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

	if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("xen_intr_alloc: Event channels exhausted.\n");
		}
		return (NULL);
	}

	if (type != EVTCHN_TYPE_PIRQ) {
		vector = FIRST_EVTCHN_INT + xen_intr_auto_vector_count;
		xen_intr_auto_vector_count++;
	}

	KASSERT((intr_lookup_source(vector) == NULL),
	    ("Trying to use an already allocated vector"));

	mtx_unlock(&xen_intr_isrc_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_intsrc.is_pic =
	    (type == EVTCHN_TYPE_PIRQ) ? &xen_intr_pirq_pic : &xen_intr_pic;
	isrc->xi_vector = vector;
	isrc->xi_type = type;
	intr_register_source(&isrc->xi_intsrc);
	mtx_lock(&xen_intr_isrc_lock);

	return (isrc);
}

/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
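 *
 * A hypothetical caller sketch (the isrc variable and the message are
 * illustrative only, not taken from this file):
 *
 * \code
 *	if (xen_intr_release_isrc(isrc) == EBUSY)
 *		printf("xen: handlers still attached, isrc not freed\n");
 * \endcode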
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

	mtx_lock(&xen_intr_isrc_lock);
	if (isrc->xi_intsrc.is_handlers != 0) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EBUSY);
	}
	evtchn_mask_port(isrc->xi_port);
	evtchn_clear_port(isrc->xi_port);

	/* Rebind port to CPU 0. */
	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
	evtchn_cpu_unmask_port(0, isrc->xi_port);

	if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
		struct evtchn_close close = { .port = isrc->xi_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
	}

	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	isrc->xi_cpu = 0;
	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
	isrc->xi_port = 0;
	isrc->xi_cookie = NULL;
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
}

/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp        The returned Xen interrupt object associated with
 *                     the specified local port.
 * \param local_port   The event channel to bind.
 * \param type         The event channel type of local_port.
 * \param intr_owner   The device making this bind request.
 * \param filter       An interrupt filter handler.  Specify NULL
 *                     to always dispatch to the ithread handler.
 * \param handler      An interrupt ithread handler.  Optional (can
 *                     specify NULL) if all necessary event actions
 *                     are performed by filter.
 * \param arg          Argument to present to both filter and handler.
 * \param flags        Interrupt handler flags.  See sys/bus.h.
 * \param port_handlep Pointer to an opaque handle used to manage this
 *                     registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, device_t intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	*isrcp = NULL;
	if (port_handlep == NULL) {
		device_printf(intr_owner,
		    "xen_intr_bind_isrc: Bad event handle\n");
		return (EINVAL);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_find_unused_isrc(type);
	if (isrc == NULL) {
		isrc = xen_intr_alloc_isrc(type, XEN_ALLOCATE_VECTOR);
		if (isrc == NULL) {
			mtx_unlock(&xen_intr_isrc_lock);
			return (ENOSPC);
		}
	}
	isrc->xi_port = local_port;
	xen_intr_port_to_isrc[local_port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	/* Assign the opaque handle (the event channel port). */
	*port_handlep = &isrc->xi_port;

#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
		/*
		 * By default all interrupts are assigned to vCPU#0
		 * unless specified otherwise, so shuffle them to balance
		 * the interrupt load.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu());
	}
#endif

	if (filter == NULL && handler == NULL) {
		/*
		 * No filter/handler provided, leave the event channel
		 * masked and without a valid handler, the caller is
		 * in charge of setting that up.
		 */
		*isrcp = isrc;
		return (0);
	}

	error = xen_intr_add_handler(intr_owner, filter, handler, arg, flags,
	    *port_handlep);
	if (error != 0) {
		xen_intr_release_isrc(isrc);
		return (error);
	}
	*isrcp = isrc;
	return (0);
}

/**
 * Look up a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           currently exists.
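 *
 * A minimal lookup sketch (the handle variable is illustrative, not
 * taken from this file):
 *
 * \code
 *	struct xenisrc *isrc = xen_intr_isrc(handle);
 *
 *	if (isrc == NULL)
 *		return (EINVAL);
 * \endcode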
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
{
	evtchn_port_t port;

	if (handle == NULL)
		return (NULL);

	port = *(evtchn_port_t *)handle;
	if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
		return (NULL);

	return (xen_intr_port_to_isrc[port]);
}

/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{

	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
	return (sh->evtchn_pending[idx]
	      & ~sh->evtchn_mask[idx]
	      & pcpu->evtchn_enabled[idx]);
}

/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
	u_int l1i, l2i, port, cpu;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	shared_info_t *s;
	vcpu_info_t *v;
	struct xen_intr_pcpu_data *pc;
	u_long l1, l2;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU.
	 */
	critical_enter();
	cpu = PCPU_GET(cpuid);
	pc = DPCPU_PTR(xen_intr_pcpu);
	s = HYPERVISOR_shared_info;
	v = DPCPU_GET(vcpu_info);

	if (xen_hvm_domain() && !xen_vector_callback_enabled) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
	}

	v->evtchn_upcall_pending = 0;

#if 0
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
#endif

	l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;
	(*pc->evtchn_intrcnt)++;

	while (l1 != 0) {

		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * if we masked out all events, wrap around
			 * to the beginning.
			 */
			l1i = LONG_BIT - 1;
			l2i = LONG_BIT - 1;
			continue;
		}
		l1i = ffsl(masked_l1) - 1;

		do {
			l2 = xen_intr_active_ports(pc, s, l1i);

			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* if we masked out all events, move on */
				l2i = LONG_BIT - 1;
				break;
			}
			l2i = ffsl(masked_l2) - 1;

			/* process port */
			port = (l1i * LONG_BIT) + l2i;
			synch_clear_bit(port, &s->evtchn_pending[0]);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))
				continue;

			/* Make sure we are firing on the right vCPU */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
				("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
				PCPU_GET(cpuid), isrc->xi_cpu));

			intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;

		} while (l2i != LONG_BIT - 1);

		l2 = xen_intr_active_ports(pc, s, l1i);
		if (l2 == 0) {
			/*
			 * We handled all ports, so we can clear the
			 * selector bit.
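			 *
			 * Each selector bit covers LONG_BIT ports.  As a
			 * worked example (figures illustrative): with
			 * LONG_BIT == 64, bit l1i == 2 covers ports
			 * 128..191, and port 130 corresponds to l1i == 2,
			 * l2i == 2, since 2 * 64 + 2 == 130.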
			 */
			l1 &= ~(1UL << l1i);
		}
	}
	critical_exit();
}

static int
xen_intr_init(void *dummy __unused)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xen_intr_pcpu_data *pcpu;
	struct physdev_pirq_eoi_gmfn eoi_gmfn;
	int i, rc;

	if (!xen_domain())
		return (0);

	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

	/*
	 * Register interrupt count manually as we aren't
	 * guaranteed to see a call to xen_intr_assign_cpu()
	 * before our first interrupt. Also set the per-cpu
	 * mask of CPU#0 to enable all, since by default
	 * all event channels are bound to CPU#0.
	 */
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
		xen_intr_intrcnt_add(i);
	}

	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/*
	 * Try to register the PIRQ EOI map.  Only mark the map as
	 * usable if the hypercall actually succeeded.
	 */
	xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO);
	eoi_gmfn.gmfn = atop(vtophys(xen_intr_pirq_eoi_map));
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
	if (rc != 0) {
		if (bootverbose)
			printf("Xen interrupts: unable to register PIRQ EOI map\n");
	} else
		xen_intr_pirq_eoi_map_enabled = true;

	intr_register_pic(&xen_intr_pic);
	intr_register_pic(&xen_intr_pirq_pic);

	if (bootverbose)
		printf("Xen interrupt system initialized\n");

	return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);

/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}

static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
	if (error != 0)
		panic("unable to rebind xen IPI: %d", error);

	isrc->xi_port = bind_ipi.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_ipi.port] = isrc;

	error = xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen IPI to CPU#%d: %d", cpu, error);

	evtchn_unmask_port(bind_ipi.port);
#else
	panic("Resume IPI event channel on UP");
#endif
}

static void
xen_rebind_virq(struct xenisrc *isrc)
{
	int cpu = isrc->xi_cpu;
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	int error;
	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
	                                      .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
	if (error != 0)
		panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

	isrc->xi_port = bind_virq.port;
	isrc->xi_cpu = 0;
	xen_intr_port_to_isrc[bind_virq.port] = isrc;

#ifdef SMP
	error = xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	if (error)
		panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
		    isrc->xi_virq, cpu, error);
#endif

	evtchn_unmask_port(bind_virq.port);
}

/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xenisrc *isrc;
	u_int isrc_idx;
	int i;

	if (suspend_cancelled)
		return;

	/* Reset the per-CPU masks */
	CPU_FOREACH(i) {
		struct xen_intr_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

	/* Mask all event channels.
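	 * Under the legacy 2-level event channel ABI used here this is
	 * one ~0 store per word of the shared mask; the exact word count
	 * is ABI- and word-size-dependent (for instance, 4096 ports at
	 * 64 bits per word would be 64 stores; figures illustrative).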
	 */
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Remove port -> isrc mappings */
	memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

	/* Free unused isrcs and rebind VIRQs and IPIs */
	for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
		u_int vector;

		vector = FIRST_EVTCHN_INT + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL) {
			isrc->xi_port = 0;
			switch (isrc->xi_type) {
			case EVTCHN_TYPE_IPI:
				xen_rebind_ipi(isrc);
				break;
			case EVTCHN_TYPE_VIRQ:
				xen_rebind_virq(isrc);
				break;
			default:
+				intr_remove_handler(isrc->xi_cookie);
				isrc->xi_cpu = 0;
+				isrc->xi_type = EVTCHN_TYPE_UNBOUND;
+				isrc->xi_cookie = NULL;
				break;
			}
		}
	}
}

/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	return (isrc->xi_vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
	/*
	 * EventChannels are edge triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn APIs. */
	return (ENODEV);
}

/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
#ifdef SMP
	struct evtchn_bind_vcpu bind_vcpu;
	struct xenisrc *isrc;
	u_int to_cpu, vcpu_id;
	int error, masked;

	if (xen_vector_callback_enabled == 0)
		return (EOPNOTSUPP);

	to_cpu = apic_cpuid(apic_id);
	vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;
	xen_intr_intrcnt_add(to_cpu);

	mtx_lock(&xen_intr_isrc_lock);
	isrc = (struct xenisrc *)base_isrc;
	if (!is_valid_evtchn(isrc->xi_port)) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}

	/*
	 * Mask the event channel while binding it to prevent interrupt
	 * delivery with an inconsistent state in isrc->xi_cpu.
	 */
	masked = evtchn_test_and_set_mask(isrc->xi_port);
	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
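		 *
		 * For example (vCPU numbers illustrative): moving a VIRQ
		 * bound to vCPU#0 over to vCPU#2 just clears the port's
		 * bit in vCPU#0's evtchn_enabled bitmap and sets it in
		 * vCPU#2's; no EVTCHNOP_bind_vcpu hypercall is issued.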
		 */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		goto out;
	}

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = vcpu_id;

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (isrc->xi_cpu != to_cpu) {
		if (error == 0) {
			/* Commit to new binding by removing the old one. */
			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
			isrc->xi_cpu = to_cpu;
			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		}
	}

out:
	if (masked == 0)
		evtchn_unmask_port(isrc->xi_port);
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	/*
	 * NB: checking if the event channel is already masked is
	 * needed because the event channel user-space device
	 * masks event channels on its filter as part of its
	 * normal operation, and those shouldn't be automatically
	 * unmasked by the generic interrupt code.  The event channel
	 * device will unmask them when needed.
	 */
	isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_masked == 0)
		evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *base_isrc)
{
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_unmask_port(isrc->xi_port);
}

/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_mask_port(isrc->xi_port);
	if (eoi == PIC_EOI)
		xen_intr_pirq_eoi_source(base_isrc);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;

	if (isrc->xi_edgetrigger == 0)
		evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
		struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

		error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		if (error != 0)
			panic("Unable to EOI PIRQ#%d: %d\n",
			    isrc->xi_pirq, error);
	}
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_pirq_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_bind_pirq bind_pirq;
	struct physdev_irq_status_query irq_status;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	if (!xen_intr_pirq_eoi_map_enabled) {
		irq_status.irq = isrc->xi_pirq;
		error = HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query,
		    &irq_status);
		if (error)
			panic("unable to get status of IRQ#%d", isrc->xi_pirq);

		if (irq_status.flags & XENIRQSTAT_needs_eoi) {
			/*
			 * Since the dynamic PIRQ EOI map is not available
			 * mark the PIRQ as needing EOI unconditionally.
			 */
			xen_set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
		}
	}

	bind_pirq.pirq = isrc->xi_pirq;
	bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (error)
		panic("unable to bind IRQ#%d", isrc->xi_pirq);

	isrc->xi_port = bind_pirq.port;

	mtx_lock(&xen_intr_isrc_lock);
	KASSERT((xen_intr_port_to_isrc[bind_pirq.port] == NULL),
	    ("trying to override an already setup event channel port"));
	xen_intr_port_to_isrc[bind_pirq.port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	evtchn_unmask_port(isrc->xi_port);
}

/*
 * Disable an interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_pirq_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;
	struct evtchn_close close;
	int error;

	isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);

	close.port = isrc->xi_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (error)
		panic("unable to close event channel %d IRQ#%d",
		    isrc->xi_port, isrc->xi_pirq);

	mtx_lock(&xen_intr_isrc_lock);
	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	mtx_unlock(&xen_intr_isrc_lock);

	isrc->xi_port = 0;
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_pirq_config_intr(struct intsrc *base_isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;
	struct physdev_setup_gsi setup_gsi;
	int error;

	KASSERT(!(trig == INTR_TRIGGER_CONFORM || pol == INTR_POLARITY_CONFORM),
	    ("%s: Conforming trigger or polarity\n", __func__));

	setup_gsi.gsi = isrc->xi_pirq;
	setup_gsi.triggering = trig == INTR_TRIGGER_EDGE ? 0 : 1;
	setup_gsi.polarity = pol == INTR_POLARITY_HIGH ? 0 : 1;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (error == -XEN_EEXIST) {
		if ((isrc->xi_edgetrigger && (trig != INTR_TRIGGER_EDGE)) ||
		    (isrc->xi_activehi && (pol != INTR_POLARITY_HIGH)))
			panic("unable to reconfigure interrupt IRQ#%d",
			    isrc->xi_pirq);
		error = 0;
	}
	if (error)
		panic("unable to configure IRQ#%d\n", isrc->xi_pirq);

	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ?
	    1 : 0;

	return (0);
}

/*--------------------------- Public Functions -------------------------------*/
/*------ API comments for these methods can be found in xen/xen_intr.h -------*/
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT, dev,
	    filter, handler, arg, flags, port_handlep);
	if (error != 0)
		return (error);

	/*
	 * The Event Channel API didn't open this port, so it is not
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 0;
	return (0);
}

int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_alloc_unbound alloc_unbound;
	int error;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc_unbound);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
	    dev, filter, handler, arg, flags, port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = alloc_unbound.port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_bind_interdomain bind_interdomain;
	int error;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind_interdomain);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
	    EVTCHN_TYPE_PORT, dev, filter, handler, arg, flags, port_handlep);
	if (error) {
		evtchn_close_t close = { .port = bind_interdomain.local_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
	int error;

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
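		 *
		 * For example (errno value illustrative): a failed bind
		 * comes back from the hypervisor as a negative errno such
		 * as -EINVAL, so the result is negated below to produce
		 * the positive errno FreeBSD callers expect.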
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
	    dev, filter, handler, arg, flags, port_handlep);

#ifdef SMP
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
#endif

	if (error != 0) {
		evtchn_close_t close = { .port = bind_virq.port };

		xen_intr_unbind(*port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

#ifdef SMP
	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}
#endif

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	isrc->xi_virq = virq;

	return (0);
}

int
xen_intr_alloc_and_bind_ipi(device_t dev, u_int cpu,
    driver_filter_t filter, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
#ifdef SMP
	int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
	struct xenisrc *isrc;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
	int error;

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
	    dev, filter, NULL, NULL, flags, port_handlep);
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);

	if (error != 0) {
		evtchn_close_t close = { .port = bind_ipi.port };

		xen_intr_unbind(*port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

int
xen_register_pirq(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
	struct physdev_map_pirq map_pirq;
	struct xenisrc *isrc;
	int error;

	if (vector == 0)
		return (EINVAL);

	if (bootverbose)
		printf("xen: register IRQ#%d\n", vector);

	map_pirq.domid = DOMID_SELF;
	map_pirq.type = MAP_PIRQ_TYPE_GSI;
	map_pirq.index = vector;
	map_pirq.pirq = vector;

	error = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_pirq);
	if (error) {
		printf("xen: unable to map IRQ#%d\n", vector);
		return (error);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector);
	mtx_unlock(&xen_intr_isrc_lock);
	KASSERT((isrc != NULL), ("xen: unable to allocate isrc for interrupt"));
	isrc->xi_pirq = vector;
	isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
	isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

	return (0);
}

int
xen_register_msi(device_t dev, int vector, int count)
{
	struct physdev_map_pirq msi_irq;
	struct xenisrc *isrc;
	int ret;

	memset(&msi_irq, 0, sizeof(msi_irq));
	msi_irq.domid = DOMID_SELF;
	msi_irq.type = count == 1 ?
	    MAP_PIRQ_TYPE_MSI_SEG : MAP_PIRQ_TYPE_MULTI_MSI;
	msi_irq.index = -1;
	msi_irq.pirq = -1;
	msi_irq.bus = pci_get_bus(dev) | (pci_get_domain(dev) << 16);
	msi_irq.devfn = (pci_get_slot(dev) << 3) | pci_get_function(dev);
	msi_irq.entry_nr = count;

	ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &msi_irq);
	if (ret != 0)
		return (ret);
	if (count != msi_irq.entry_nr) {
		panic("unable to setup all requested MSI vectors "
		    "(expected %d got %d)", count, msi_irq.entry_nr);
	}

	mtx_lock(&xen_intr_isrc_lock);
	for (int i = 0; i < count; i++) {
		isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector + i);
		KASSERT(isrc != NULL,
		    ("xen: unable to allocate isrc for interrupt"));
		isrc->xi_pirq = msi_irq.pirq + i;
		/* MSI interrupts are always edge triggered */
		isrc->xi_edgetrigger = 1;
	}
	mtx_unlock(&xen_intr_isrc_lock);

	return (0);
}

int
xen_release_msi(int vector)
{
	struct physdev_unmap_pirq unmap;
	struct xenisrc *isrc;
	int ret;

	isrc = (struct xenisrc *)intr_lookup_source(vector);
	if (isrc == NULL)
		return (ENXIO);

	unmap.pirq = isrc->xi_pirq;
	ret = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
	if (ret != 0)
		return (ret);

	xen_intr_release_isrc(isrc);

	return (0);
}

int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
	char descr[MAXCOMLEN + 1];
	struct xenisrc *isrc;
	va_list ap;

	isrc = xen_intr_isrc(port_handle);
	if (isrc == NULL)
		return (EINVAL);

	va_start(ap, fmt);
	vsnprintf(descr, sizeof(descr), fmt, ap);
	va_end(ap);
	return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
}

void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;

	KASSERT(port_handlep != NULL,
	    ("NULL xen_intr_handle_t passed to xen_intr_unbind"));

	isrc = xen_intr_isrc(*port_handlep);
	*port_handlep = NULL;
	if (isrc == NULL)
		return;

	if (isrc->xi_cookie != NULL)
		intr_remove_handler(isrc->xi_cookie);
	xen_intr_release_isrc(isrc);
}

void
xen_intr_signal(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc != NULL) {
		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
			isrc->xi_type == EVTCHN_TYPE_IPI,
			("evtchn_signal on something other than a local port"));
		struct evtchn_send send = { .port = isrc->xi_port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
	}
}

evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL)
		return (0);

	return (isrc->xi_port);
}

int
xen_intr_add_handler(device_t dev, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
	struct xenisrc *isrc;
	int error;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL || isrc->xi_cookie != NULL)
		return (EINVAL);

	error = intr_add_handler(device_get_nameunit(dev), isrc->xi_vector,
	    filter, handler, arg, flags|INTR_EXCL, &isrc->xi_cookie);
	if (error != 0) {
		device_printf(dev,
		    "xen_intr_add_handler: intr_add_handler failed: %d\n",
		    error);
	}

	return (error);
}

#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
	static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
		[EVTCHN_TYPE_UNBOUND]	= "UNBOUND",
		[EVTCHN_TYPE_PIRQ]	= "PIRQ",
		[EVTCHN_TYPE_VIRQ]	= "VIRQ",
		[EVTCHN_TYPE_IPI]	= "IPI",
		[EVTCHN_TYPE_PORT]	= "PORT",
	};

	if (type >= EVTCHN_TYPE_COUNT)
		return ("UNKNOWN");

	return (evtchn_type_to_string[type]);
}

static void
xen_intr_dump_port(struct xenisrc *isrc)
{
	struct xen_intr_pcpu_data *pcpu;
	shared_info_t *s = HYPERVISOR_shared_info;
	int i;

	db_printf("Port %d Type: %s\n",
	    isrc->xi_port, xen_intr_print_type(isrc->xi_type));
	if (isrc->xi_type == EVTCHN_TYPE_PIRQ) {
		db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
		    "NeedsEOI: %d\n",
		    isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
		    !!xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
	}
	if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
		db_printf("\tVirq: %d\n", isrc->xi_virq);

	db_printf("\tMasked: %d Pending: %d\n",
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

	db_printf("\tPer-CPU Masks: ");
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		db_printf("cpu#%d: %d ", i,
		    !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
	}
	db_printf("\n");
}

DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
	int i;

	if (!xen_domain()) {
		db_printf("Only available on Xen guests\n");
		return;
	}

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		struct xenisrc *isrc;

		isrc = xen_intr_port_to_isrc[i];
		if (isrc == NULL)
			continue;

		xen_intr_dump_port(isrc);
	}
}
#endif /* DDB */
Index: projects/powernv
===================================================================
--- projects/powernv	(revision 291050)
+++ projects/powernv	(revision 291051)

Property changes on: projects/powernv
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head/sys:r290991-291050