Index: head/sys/conf/files.riscv =================================================================== --- head/sys/conf/files.riscv (revision 347224) +++ head/sys/conf/files.riscv (revision 347225) @@ -1,68 +1,69 @@ # $FreeBSD$ cddl/compat/opensolaris/kern/opensolaris_atomic.c optional zfs | dtrace compile-with "${CDDL_C}" cddl/dev/dtrace/riscv/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/riscv/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/riscv/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" crypto/blowfish/bf_enc.c optional crypto | ipsec | ipsec_support crypto/des/des_enc.c optional crypto | ipsec | ipsec_support | netsmb dev/ofw/ofw_cpu.c optional fdt dev/uart/uart_cpu_fdt.c optional uart fdt dev/uart/uart_dev_lowrisc.c optional uart_lowrisc dev/xilinx/axi_quad_spi.c optional xilinx_spi kern/kern_clocksource.c standard kern/msi_if.m standard kern/pic_if.m standard kern/subr_devmap.c standard kern/subr_dummy_vdso_tc.c standard kern/subr_intr.c standard libkern/bcmp.c standard libkern/bcopy.c standard libkern/ffs.c standard libkern/ffsl.c standard libkern/ffsll.c standard libkern/fls.c standard libkern/flsl.c standard libkern/flsll.c standard libkern/memcmp.c standard libkern/memset.c standard riscv/riscv/autoconf.c standard riscv/riscv/bus_machdep.c standard riscv/riscv/bus_space_asm.S standard +riscv/riscv/busdma_bounce.c standard riscv/riscv/busdma_machdep.c standard riscv/riscv/clock.c standard riscv/riscv/copyinout.S standard riscv/riscv/copystr.c standard riscv/riscv/cpufunc_asm.S standard riscv/riscv/db_disasm.c optional ddb riscv/riscv/db_interface.c optional ddb riscv/riscv/db_trace.c optional ddb riscv/riscv/dump_machdep.c standard riscv/riscv/elf_machdep.c standard riscv/riscv/exception.S standard riscv/riscv/intr_machdep.c standard riscv/riscv/in_cksum.c optional inet | inet6 riscv/riscv/identcpu.c standard riscv/riscv/locore.S standard no-obj riscv/riscv/machdep.c standard riscv/riscv/minidump_machdep.c standard riscv/riscv/mp_machdep.c optional smp riscv/riscv/mem.c standard riscv/riscv/nexus.c standard riscv/riscv/ofw_machdep.c optional fdt riscv/riscv/plic.c standard riscv/riscv/pmap.c standard riscv/riscv/riscv_console.c optional rcons riscv/riscv/soc.c standard riscv/riscv/stack_machdep.c optional ddb | stack riscv/riscv/support.S standard riscv/riscv/swtch.S standard riscv/riscv/sys_machdep.c standard riscv/riscv/trap.c standard riscv/riscv/timer.c standard riscv/riscv/uio_machdep.c standard riscv/riscv/uma_machdep.c standard riscv/riscv/unwind.c optional ddb | kdtrace_hooks | stack riscv/riscv/vm_machdep.c standard # Zstd contrib/zstd/lib/freebsd/zstd_kfreebsd.c optional zstdio compile-with ${ZSTD_C} Index: head/sys/riscv/include/bus_dma.h =================================================================== --- head/sys/riscv/include/bus_dma.h (revision 347224) +++ head/sys/riscv/include/bus_dma.h (revision 347225) @@ -1,9 +1,141 @@ /* $FreeBSD$ */ #ifndef _MACHINE_BUS_DMA_H_ #define _MACHINE_BUS_DMA_H_ +#define WANT_INLINE_DMAMAP #include -#include + +#include + +/* + * Allocate a handle for mapping from kva/uva/physical + * address space into bus device space. + */ +static inline int +bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->map_create(dmat, flags, mapp)); +} + +/* + * Destroy a handle for mapping from kva/uva/physical + * address space into bus device space. 
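The inline wrappers above simply cast the opaque bus_dma_tag_t to struct bus_dma_tag_common and dispatch through its impl method table, so drivers keep using the standard busdma KPI unchanged. As a usage sketch only (struct mydev_softc and all mydev_* names are hypothetical, and the 64 KB single-segment buffer is an arbitrary choice), a typical consumer looks like:

struct mydev_softc {
	device_t	 dev;
	bus_dma_tag_t	 dma_tag;
	bus_dmamap_t	 dma_map;
	void		*ring;
	bus_addr_t	 ring_busaddr;
};

static void
mydev_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	if (error != 0)
		return;
	/* nsegments was 1 at tag creation, so a single segment comes back. */
	*busaddrp = segs[0].ds_addr;
}

static int
mydev_dma_setup(struct mydev_softc *sc)
{
	int error;

	/* 64 KB, one segment, 8-byte aligned, no boundary restriction. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    8, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    65536, 1, 65536,		/* maxsize, nsegments, maxsegsz */
	    0, NULL, NULL,		/* flags, lockfunc, lockfuncarg */
	    &sc->dma_tag);
	if (error != 0)
		return (error);

	/* bus_dmamem_alloc() also hands back the map for the buffer. */
	error = bus_dmamem_alloc(sc->dma_tag, &sc->ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->dma_map);
	if (error != 0)
		return (error);

	return (bus_dmamap_load(sc->dma_tag, sc->dma_map, sc->ring, 65536,
	    mydev_dma_cb, &sc->ring_busaddr, BUS_DMA_NOWAIT));
}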
+ */ +static inline int +bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->map_destroy(dmat, map)); +} + +/* + * Allocate a piece of memory that can be efficiently mapped into + * bus device space based on the constraints listed in the dma tag. + * A dmamap to for use with dmamap_load is also allocated. + */ +static inline int +bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, + bus_dmamap_t *mapp) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->mem_alloc(dmat, vaddr, flags, mapp)); +} + +/* + * Free a piece of memory and it's allociated dmamap, that was allocated + * via bus_dmamem_alloc. Make the same choice for free/contigfree. + */ +static inline void +bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + tc->impl->mem_free(dmat, vaddr, map); +} + +/* + * Release the mapping held by map. + */ +static inline void +bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + tc->impl->map_unload(dmat, map); +} + +static inline void +bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + tc->impl->map_sync(dmat, map, op); +} + +static inline int +_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->load_phys(dmat, map, buf, buflen, flags, segs, + segp)); +} + +static inline int +_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, + bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, + int *segp) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->load_ma(dmat, map, ma, tlen, ma_offs, flags, + segs, segp)); +} + +static inline int +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs, + int *segp) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->load_buffer(dmat, map, buf, buflen, pmap, flags, segs, + segp)); +} + +static inline void +_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + tc->impl->map_waitok(dmat, map, mem, callback, callback_arg); +} + +static inline bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) +{ + struct bus_dma_tag_common *tc; + + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->map_complete(dmat, map, segs, nsegs, error)); +} #endif /* !_MACHINE_BUS_DMA_H_ */ Index: head/sys/riscv/include/bus_dma_impl.h =================================================================== --- head/sys/riscv/include/bus_dma_impl.h (nonexistent) +++ head/sys/riscv/include/bus_dma_impl.h (revision 347225) @@ -0,0 +1,96 @@ +/*- + * Copyright (c) 2013 The FreeBSD Foundation + * All rights reserved. 
+ * + * This software was developed by Konstantin Belousov + * under sponsorship from the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_BUS_DMA_IMPL_H_ +#define _MACHINE_BUS_DMA_IMPL_H_ + +struct bus_dma_tag_common { + struct bus_dma_impl *impl; + struct bus_dma_tag_common *parent; + bus_size_t alignment; + bus_addr_t boundary; + bus_addr_t lowaddr; + bus_addr_t highaddr; + bus_dma_filter_t *filter; + void *filterarg; + bus_size_t maxsize; + u_int nsegments; + bus_size_t maxsegsz; + int flags; + bus_dma_lock_t *lockfunc; + void *lockfuncarg; + int ref_count; +}; + +struct bus_dma_impl { + int (*tag_create)(bus_dma_tag_t parent, + bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, + bus_addr_t highaddr, bus_dma_filter_t *filter, + void *filterarg, bus_size_t maxsize, int nsegments, + bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, + void *lockfuncarg, bus_dma_tag_t *dmat); + int (*tag_destroy)(bus_dma_tag_t dmat); + int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp); + int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map); + int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags, + bus_dmamap_t *mapp); + void (*mem_free)(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map); + int (*load_ma)(bus_dma_tag_t dmat, bus_dmamap_t map, + struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, + bus_dma_segment_t *segs, int *segp); + int (*load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, int flags, + bus_dma_segment_t *segs, int *segp); + int (*load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map, + void *buf, bus_size_t buflen, struct pmap *pmap, int flags, + bus_dma_segment_t *segs, int *segp); + void (*map_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map, + struct memdesc *mem, bus_dmamap_callback_t *callback, + void *callback_arg); + bus_dma_segment_t *(*map_complete)(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error); + void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map); + void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dmasync_op_t op); +}; + +void bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op); +int bus_dma_run_filter(struct bus_dma_tag_common *dmat, bus_addr_t 
paddr); +int common_bus_dma_tag_create(struct bus_dma_tag_common *parent, + bus_size_t alignment, + bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, + bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, + int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, + void *lockfuncarg, size_t sz, void **dmat); + +extern struct bus_dma_impl bus_dma_bounce_impl; + +#endif Property changes on: head/sys/riscv/include/bus_dma_impl.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/riscv/include/cpufunc.h =================================================================== --- head/sys/riscv/include/cpufunc.h (revision 347224) +++ head/sys/riscv/include/cpufunc.h (revision 347225) @@ -1,124 +1,135 @@ /*- * Copyright (c) 2015-2016 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_CPUFUNC_H_ #define _MACHINE_CPUFUNC_H_ static __inline void breakpoint(void) { __asm("ebreak"); } #ifdef _KERNEL #include static __inline register_t intr_disable(void) { uint64_t ret; __asm __volatile( "csrrci %0, sstatus, %1" : "=&r" (ret) : "i" (SSTATUS_SIE) ); return (ret & (SSTATUS_SIE)); } static __inline void intr_restore(register_t s) { __asm __volatile( "csrs sstatus, %0" :: "r" (s) ); } static __inline void intr_enable(void) { __asm __volatile( "csrsi sstatus, %0" :: "i" (SSTATUS_SIE) ); } /* NB: fence() is defined as a macro in . 
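The intr_disable()/intr_restore() pair above works by clearing SSTATUS_SIE with csrrci and returning only the previous state of that bit, so restoring re-enables interrupts only if they were enabled on entry and short critical sections nest safely. A minimal sketch (the counter is purely illustrative):

static u_long mydev_events;	/* hypothetical shared counter */

static void
mydev_count_event(void)
{
	register_t s;

	s = intr_disable();	/* csrrci: clear SSTATUS_SIE, return old bit */
	mydev_events++;
	intr_restore(s);	/* csrs: re-set SIE only if it was set before */
}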
*/ static __inline void fence_i(void) { __asm __volatile("fence.i" ::: "memory"); } static __inline void sfence_vma(void) { __asm __volatile("sfence.vma" ::: "memory"); } static __inline void sfence_vma_page(uintptr_t addr) { __asm __volatile("sfence.vma %0" :: "r" (addr) : "memory"); } #define rdcycle() csr_read64(cycle) #define rdtime() csr_read64(time) #define rdinstret() csr_read64(instret) #define rdhpmcounter(n) csr_read64(hpmcounter##n) +extern int64_t dcache_line_size; +extern int64_t icache_line_size; + +#define cpu_dcache_wbinv_range(a, s) +#define cpu_dcache_inv_range(a, s) +#define cpu_dcache_wb_range(a, s) + +#define cpu_idcache_wbinv_range(a, s) +#define cpu_icache_sync_range(a, s) +#define cpu_icache_sync_range_checked(a, s) + static __inline void load_satp(uint64_t val) { __asm __volatile("csrw satp, %0" :: "r"(val)); } #define cpufunc_nullop() riscv_nullop() void riscv_nullop(void); #endif /* _KERNEL */ #endif /* _MACHINE_CPUFUNC_H_ */ Index: head/sys/riscv/riscv/busdma_bounce.c =================================================================== --- head/sys/riscv/riscv/busdma_bounce.c (nonexistent) +++ head/sys/riscv/riscv/busdma_bounce.c (revision 347225) @@ -0,0 +1,1330 @@ +/*- + * Copyright (c) 1997, 1998 Justin T. Gibbs. + * Copyright (c) 2015-2016 The FreeBSD Foundation + * All rights reserved. + * + * Portions of this software were developed by Andrew Turner + * under sponsorship of the FreeBSD Foundation. + * + * Portions of this software were developed by Semihalf + * under sponsorship of the FreeBSD Foundation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification, immediately at the beginning of the file. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define MAX_BPAGES 4096 + +enum { + BF_COULD_BOUNCE = 0x01, + BF_MIN_ALLOC_COMP = 0x02, + BF_KMEM_ALLOC = 0x04, + BF_COHERENT = 0x10, +}; + +struct bounce_zone; + +struct bus_dma_tag { + struct bus_dma_tag_common common; + int map_count; + int bounce_flags; + bus_dma_segment_t *segments; + struct bounce_zone *bounce_zone; +}; + +struct bounce_page { + vm_offset_t vaddr; /* kva of bounce buffer */ + bus_addr_t busaddr; /* Physical address */ + vm_offset_t datavaddr; /* kva of client data */ + vm_page_t datapage; /* physical page of client data */ + vm_offset_t dataoffs; /* page offset of client data */ + bus_size_t datacount; /* client data count */ + STAILQ_ENTRY(bounce_page) links; +}; + +int busdma_swi_pending; + +struct bounce_zone { + STAILQ_ENTRY(bounce_zone) links; + STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; + int total_bpages; + int free_bpages; + int reserved_bpages; + int active_bpages; + int total_bounced; + int total_deferred; + int map_count; + bus_size_t alignment; + bus_addr_t lowaddr; + char zoneid[8]; + char lowaddrid[20]; + struct sysctl_ctx_list sysctl_tree; + struct sysctl_oid *sysctl_tree_top; +}; + +static struct mtx bounce_lock; +static int total_bpages; +static int busdma_zonecount; +static STAILQ_HEAD(, bounce_zone) bounce_zone_list; + +static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); +SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, + "Total bounce pages"); + +struct sync_list { + vm_offset_t vaddr; /* kva of client data */ + bus_addr_t paddr; /* physical address */ + vm_page_t pages; /* starting page of client data */ + bus_size_t datacount; /* client data count */ +}; + +struct bus_dmamap { + struct bp_list bpages; + int pagesneeded; + int pagesreserved; + bus_dma_tag_t dmat; + struct memdesc mem; + bus_dmamap_callback_t *callback; + void *callback_arg; + STAILQ_ENTRY(bus_dmamap) links; + u_int flags; +#define DMAMAP_COULD_BOUNCE (1 << 0) +#define DMAMAP_FROM_DMAMEM (1 << 1) + int sync_count; + struct sync_list slist[]; +}; + +static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; +static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; + +static void init_bounce_pages(void *dummy); +static int alloc_bounce_zone(bus_dma_tag_t dmat); +static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); +static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, + int commit); +static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); +static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); +int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); +static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, + pmap_t pmap, void *buf, bus_size_t buflen, int flags); +static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, int flags); +static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, + int flags); + +/* + * Allocate a device specific dma_tag. 
+ */ +static int +bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, + bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, + bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, + int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, + void *lockfuncarg, bus_dma_tag_t *dmat) +{ + bus_dma_tag_t newtag; + int error; + + *dmat = NULL; + error = common_bus_dma_tag_create(parent != NULL ? &parent->common : + NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg, + maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, + sizeof (struct bus_dma_tag), (void **)&newtag); + if (error != 0) + return (error); + + newtag->common.impl = &bus_dma_bounce_impl; + newtag->map_count = 0; + newtag->segments = NULL; + + if ((flags & BUS_DMA_COHERENT) != 0) + newtag->bounce_flags |= BF_COHERENT; + + if (parent != NULL) { + if ((newtag->common.filter != NULL || + (parent->bounce_flags & BF_COULD_BOUNCE) != 0)) + newtag->bounce_flags |= BF_COULD_BOUNCE; + + /* Copy some flags from the parent */ + newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT; + } + + if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) || + newtag->common.alignment > 1) + newtag->bounce_flags |= BF_COULD_BOUNCE; + + if (((newtag->bounce_flags & BF_COULD_BOUNCE) != 0) && + (flags & BUS_DMA_ALLOCNOW) != 0) { + struct bounce_zone *bz; + + /* Must bounce */ + if ((error = alloc_bounce_zone(newtag)) != 0) { + free(newtag, M_DEVBUF); + return (error); + } + bz = newtag->bounce_zone; + + if (ptoa(bz->total_bpages) < maxsize) { + int pages; + + pages = atop(maxsize) - bz->total_bpages; + + /* Add pages to our bounce pool */ + if (alloc_bounce_pages(newtag, pages) < pages) + error = ENOMEM; + } + /* Performed initial allocation */ + newtag->bounce_flags |= BF_MIN_ALLOC_COMP; + } else + error = 0; + + if (error != 0) + free(newtag, M_DEVBUF); + else + *dmat = newtag; + CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", + __func__, newtag, (newtag != NULL ? newtag->common.flags : 0), + error); + return (error); +} + +static int +bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat) +{ + bus_dma_tag_t dmat_copy, parent; + int error; + + error = 0; + dmat_copy = dmat; + + if (dmat != NULL) { + if (dmat->map_count != 0) { + error = EBUSY; + goto out; + } + while (dmat != NULL) { + parent = (bus_dma_tag_t)dmat->common.parent; + atomic_subtract_int(&dmat->common.ref_count, 1); + if (dmat->common.ref_count == 0) { + if (dmat->segments != NULL) + free(dmat->segments, M_DEVBUF); + free(dmat, M_DEVBUF); + /* + * Last reference count, so + * release our reference + * count on our parent. + */ + dmat = parent; + } else + dmat = NULL; + } + } +out: + CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); + return (error); +} + +static bus_dmamap_t +alloc_dmamap(bus_dma_tag_t dmat, int flags) +{ + u_long mapsize; + bus_dmamap_t map; + + mapsize = sizeof(*map); + mapsize += sizeof(struct sync_list) * dmat->common.nsegments; + map = malloc(mapsize, M_DEVBUF, flags | M_ZERO); + if (map == NULL) + return (NULL); + + /* Initialize the new map */ + STAILQ_INIT(&map->bpages); + + return (map); +} + +/* + * Allocate a handle for mapping from kva/uva/physical + * address space into bus device space. 
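The BF_COULD_BOUNCE test in bounce_bus_dma_tag_create() above boils down to two conditions: the tag cannot reach all of physical memory, or it requires stricter-than-byte alignment. Pulled out as an illustration only (not part of the commit), the predicate is equivalent to:

/*
 * Illustration: mirrors the check in bounce_bus_dma_tag_create().
 * For example, a 32-bit-only DMA engine (lowaddr 0xffffffff) on a
 * board with RAM above 4 GiB is marked as possibly bouncing, while a
 * full 64-bit tag with alignment 1 is not.
 */
static bool
tag_may_bounce(bus_addr_t lowaddr, bus_size_t alignment)
{

	return (lowaddr < ptoa((vm_paddr_t)Maxmem) || alignment > 1);
}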
+ */ +static int +bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) +{ + struct bounce_zone *bz; + int error, maxpages, pages; + + error = 0; + + if (dmat->segments == NULL) { + dmat->segments = (bus_dma_segment_t *)malloc( + sizeof(bus_dma_segment_t) * dmat->common.nsegments, + M_DEVBUF, M_NOWAIT); + if (dmat->segments == NULL) { + CTR3(KTR_BUSDMA, "%s: tag %p error %d", + __func__, dmat, ENOMEM); + return (ENOMEM); + } + } + + *mapp = alloc_dmamap(dmat, M_NOWAIT); + if (*mapp == NULL) { + CTR3(KTR_BUSDMA, "%s: tag %p error %d", + __func__, dmat, ENOMEM); + return (ENOMEM); + } + + /* + * Bouncing might be required if the driver asks for an active + * exclusion region, a data alignment that is stricter than 1, and/or + * an active address boundary. + */ + if (dmat->bounce_flags & BF_COULD_BOUNCE) { + /* Must bounce */ + if (dmat->bounce_zone == NULL) { + if ((error = alloc_bounce_zone(dmat)) != 0) { + free(*mapp, M_DEVBUF); + return (error); + } + } + bz = dmat->bounce_zone; + + (*mapp)->flags = DMAMAP_COULD_BOUNCE; + + /* + * Attempt to add pages to our pool on a per-instance + * basis up to a sane limit. + */ + if (dmat->common.alignment > 1) + maxpages = MAX_BPAGES; + else + maxpages = MIN(MAX_BPAGES, Maxmem - + atop(dmat->common.lowaddr)); + if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) == 0 || + (bz->map_count > 0 && bz->total_bpages < maxpages)) { + pages = MAX(atop(dmat->common.maxsize), 1); + pages = MIN(maxpages - bz->total_bpages, pages); + pages = MAX(pages, 1); + if (alloc_bounce_pages(dmat, pages) < pages) + error = ENOMEM; + if ((dmat->bounce_flags & BF_MIN_ALLOC_COMP) + == 0) { + if (error == 0) { + dmat->bounce_flags |= + BF_MIN_ALLOC_COMP; + } + } else + error = 0; + } + bz->map_count++; + } + if (error == 0) + dmat->map_count++; + else + free(*mapp, M_DEVBUF); + CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", + __func__, dmat, dmat->common.flags, error); + return (error); +} + +/* + * Destroy a handle for mapping from kva/uva/physical + * address space into bus device space. + */ +static int +bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) +{ + + /* Check we are destroying the correct map type */ + if ((map->flags & DMAMAP_FROM_DMAMEM) != 0) + panic("bounce_bus_dmamap_destroy: Invalid map freed\n"); + + if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { + CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); + return (EBUSY); + } + if (dmat->bounce_zone) { + KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0, + ("%s: Bounce zone when cannot bounce", __func__)); + dmat->bounce_zone->map_count--; + } + free(map, M_DEVBUF); + dmat->map_count--; + CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); + return (0); +} + + +/* + * Allocate a piece of memory that can be efficiently mapped into + * bus device space based on the constraints lited in the dma tag. + * A dmamap to for use with dmamap_load is also allocated. + */ +static int +bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, + bus_dmamap_t *mapp) +{ + /* + * XXX ARM64TODO: + * This bus_dma implementation requires IO-Coherent architecutre. + * If IO-Coherency is not guaranteed, the BUS_DMA_COHERENT flag has + * to be implented using non-cacheable memory. 
+ */ + + vm_memattr_t attr; + int mflags; + + if (flags & BUS_DMA_NOWAIT) + mflags = M_NOWAIT; + else + mflags = M_WAITOK; + + if (dmat->segments == NULL) { + dmat->segments = (bus_dma_segment_t *)malloc( + sizeof(bus_dma_segment_t) * dmat->common.nsegments, + M_DEVBUF, mflags); + if (dmat->segments == NULL) { + CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", + __func__, dmat, dmat->common.flags, ENOMEM); + return (ENOMEM); + } + } + if (flags & BUS_DMA_ZERO) + mflags |= M_ZERO; + if (flags & BUS_DMA_NOCACHE) + attr = VM_MEMATTR_UNCACHEABLE; + else if ((flags & BUS_DMA_COHERENT) != 0 && + (dmat->bounce_flags & BF_COHERENT) == 0) + /* + * If we have a non-coherent tag, and are trying to allocate + * a coherent block of memory it needs to be uncached. + */ + attr = VM_MEMATTR_UNCACHEABLE; + else + attr = VM_MEMATTR_DEFAULT; + + /* + * Create the map, but don't set the could bounce flag as + * this allocation should never bounce; + */ + *mapp = alloc_dmamap(dmat, mflags); + if (*mapp == NULL) { + CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", + __func__, dmat, dmat->common.flags, ENOMEM); + return (ENOMEM); + } + (*mapp)->flags = DMAMAP_FROM_DMAMEM; + + /* + * Allocate the buffer from the malloc(9) allocator if... + * - It's small enough to fit into a single power of two sized bucket. + * - The alignment is less than or equal to the maximum size + * - The low address requirement is fulfilled. + * else allocate non-contiguous pages if... + * - The page count that could get allocated doesn't exceed + * nsegments also when the maximum segment size is less + * than PAGE_SIZE. + * - The alignment constraint isn't larger than a page boundary. + * - There are no boundary-crossing constraints. + * else allocate a block of contiguous pages because one or more of the + * constraints is something that only the contig allocator can fulfill. + * + * NOTE: The (dmat->common.alignment <= dmat->maxsize) check + * below is just a quick hack. The exact alignment guarantees + * of malloc(9) need to be nailed down, and the code below + * should be rewritten to take that into account. + * + * In the meantime warn the user if malloc gets it wrong. + */ + if ((dmat->common.maxsize <= PAGE_SIZE) && + (dmat->common.alignment <= dmat->common.maxsize) && + dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) && + attr == VM_MEMATTR_DEFAULT) { + *vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags); + } else if (dmat->common.nsegments >= + howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) && + dmat->common.alignment <= PAGE_SIZE && + (dmat->common.boundary % PAGE_SIZE) == 0) { + /* Page-based multi-segment allocations allowed */ + *vaddr = (void *)kmem_alloc_attr(dmat->common.maxsize, mflags, + 0ul, dmat->common.lowaddr, attr); + dmat->bounce_flags |= BF_KMEM_ALLOC; + } else { + *vaddr = (void *)kmem_alloc_contig(dmat->common.maxsize, mflags, + 0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ? 
+ dmat->common.alignment : 1ul, dmat->common.boundary, attr); + dmat->bounce_flags |= BF_KMEM_ALLOC; + } + if (*vaddr == NULL) { + CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", + __func__, dmat, dmat->common.flags, ENOMEM); + free(*mapp, M_DEVBUF); + return (ENOMEM); + } else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) { + printf("bus_dmamem_alloc failed to align memory properly.\n"); + } + dmat->map_count++; + CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", + __func__, dmat, dmat->common.flags, 0); + return (0); +} + +/* + * Free a piece of memory and it's allociated dmamap, that was allocated + * via bus_dmamem_alloc. Make the same choice for free/contigfree. + */ +static void +bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) +{ + + /* + * Check the map came from bounce_bus_dmamem_alloc, so the map + * should be NULL and the BF_KMEM_ALLOC flag cleared if malloc() + * was used and set if kmem_alloc_contig() was used. + */ + if ((map->flags & DMAMAP_FROM_DMAMEM) == 0) + panic("bus_dmamem_free: Invalid map freed\n"); + if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0) + free(vaddr, M_DEVBUF); + else + kmem_free((vm_offset_t)vaddr, dmat->common.maxsize); + free(map, M_DEVBUF); + dmat->map_count--; + CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, + dmat->bounce_flags); +} + +static void +_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags) +{ + bus_addr_t curaddr; + bus_size_t sgsize; + + if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) { + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + curaddr = buf; + while (buflen != 0) { + sgsize = MIN(buflen, dmat->common.maxsegsz); + if (bus_dma_run_filter(&dmat->common, curaddr)) { + sgsize = MIN(sgsize, + PAGE_SIZE - (curaddr & PAGE_MASK)); + map->pagesneeded++; + } + curaddr += sgsize; + buflen -= sgsize; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); + } +} + +static void +_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, + void *buf, bus_size_t buflen, int flags) +{ + vm_offset_t vaddr; + vm_offset_t vendaddr; + bus_addr_t paddr; + bus_size_t sg_len; + + if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) { + CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " + "alignment= %d", dmat->common.lowaddr, + ptoa((vm_paddr_t)Maxmem), + dmat->common.boundary, dmat->common.alignment); + CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, + map->pagesneeded); + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + vaddr = (vm_offset_t)buf; + vendaddr = (vm_offset_t)buf + buflen; + + while (vaddr < vendaddr) { + sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); + if (pmap == kernel_pmap) + paddr = pmap_kextract(vaddr); + else + paddr = pmap_extract(pmap, vaddr); + if (bus_dma_run_filter(&dmat->common, paddr) != 0) { + sg_len = roundup2(sg_len, + dmat->common.alignment); + map->pagesneeded++; + } + vaddr += sg_len; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); + } +} + +static int +_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) +{ + + /* Reserve Necessary Bounce Pages */ + mtx_lock(&bounce_lock); + if (flags & BUS_DMA_NOWAIT) { + if (reserve_bounce_pages(dmat, map, 0) != 0) { + mtx_unlock(&bounce_lock); + return (ENOMEM); + } + } else { + if (reserve_bounce_pages(dmat, map, 1) != 0) { + /* Queue us for resources */ + 
STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); + mtx_unlock(&bounce_lock); + return (EINPROGRESS); + } + } + mtx_unlock(&bounce_lock); + + return (0); +} + +/* + * Add a single contiguous physical range to the segment list. + */ +static int +_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t baddr, bmask; + int seg; + + /* + * Make sure we don't cross any boundaries. + */ + bmask = ~(dmat->common.boundary - 1); + if (dmat->common.boundary > 0) { + baddr = (curaddr + dmat->common.boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + + /* + * Insert chunk into a segment, coalescing with + * previous segment if possible. + */ + seg = *segp; + if (seg == -1) { + seg = 0; + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } else { + if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz && + (dmat->common.boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) + segs[seg].ds_len += sgsize; + else { + if (++seg >= dmat->common.nsegments) + return (0); + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + } + *segp = seg; + return (sgsize); +} + +/* + * Utility function to load a physical buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. + */ +static int +bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, + vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, + int *segp) +{ + struct sync_list *sl; + bus_size_t sgsize; + bus_addr_t curaddr, sl_end; + int error; + + if (segs == NULL) + segs = dmat->segments; + + if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) { + _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } + } + + sl = map->slist + map->sync_count - 1; + sl_end = 0; + + while (buflen > 0) { + curaddr = buf; + sgsize = MIN(buflen, dmat->common.maxsegsz); + if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) && + map->pagesneeded != 0 && + bus_dma_run_filter(&dmat->common, curaddr)) { + sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); + curaddr = add_bounce_page(dmat, map, 0, curaddr, + sgsize); + } else if ((dmat->bounce_flags & BF_COHERENT) == 0) { + if (map->sync_count > 0) + sl_end = sl->paddr + sl->datacount; + + if (map->sync_count == 0 || curaddr != sl_end) { + if (++map->sync_count > dmat->common.nsegments) + break; + sl++; + sl->vaddr = 0; + sl->paddr = curaddr; + sl->datacount = sgsize; + sl->pages = PHYS_TO_VM_PAGE(curaddr); + KASSERT(sl->pages != NULL, + ("%s: page at PA:0x%08lx is not in " + "vm_page_array", __func__, curaddr)); + } else + sl->datacount += sgsize; + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + buf += sgsize; + buflen -= sgsize; + } + + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ +} + +/* + * Utility function to load a linear buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. 
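_bus_dmamap_addseg() above first clamps sgsize so no segment crosses the tag's boundary, then either coalesces the chunk with the previous segment or starts a new one. The boundary arithmetic, shown as a standalone illustration (worked example: boundary 64 KB, curaddr 0x1f000, sgsize 0x3000 gives baddr 0x20000, so the segment is clamped to 0x1000 and the remaining 0x2000 bytes start a new segment on the next loop iteration):

static bus_size_t
clamp_to_boundary(bus_addr_t curaddr, bus_size_t sgsize, bus_addr_t boundary)
{
	bus_addr_t baddr;

	/* Same arithmetic as _bus_dmamap_addseg(); illustration only. */
	if (boundary > 0) {
		baddr = (curaddr + boundary) & ~(boundary - 1);
		if (sgsize > baddr - curaddr)
			sgsize = baddr - curaddr;
	}
	return (sgsize);
}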
+ */ +static int +bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, + int *segp) +{ + struct sync_list *sl; + bus_size_t sgsize, max_sgsize; + bus_addr_t curaddr, sl_pend; + vm_offset_t kvaddr, vaddr, sl_vend; + int error; + + if (segs == NULL) + segs = dmat->segments; + + if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) { + _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); + if (map->pagesneeded != 0) { + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } + } + + sl = map->slist + map->sync_count - 1; + vaddr = (vm_offset_t)buf; + sl_pend = 0; + sl_vend = 0; + + while (buflen > 0) { + /* + * Get the physical address for this segment. + */ + if (pmap == kernel_pmap) { + curaddr = pmap_kextract(vaddr); + kvaddr = vaddr; + } else { + curaddr = pmap_extract(pmap, vaddr); + kvaddr = 0; + } + + /* + * Compute the segment size, and adjust counts. + */ + max_sgsize = MIN(buflen, dmat->common.maxsegsz); + sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); + if (((dmat->bounce_flags & BF_COULD_BOUNCE) != 0) && + map->pagesneeded != 0 && + bus_dma_run_filter(&dmat->common, curaddr)) { + sgsize = roundup2(sgsize, dmat->common.alignment); + sgsize = MIN(sgsize, max_sgsize); + curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, + sgsize); + } else if ((dmat->bounce_flags & BF_COHERENT) == 0) { + sgsize = MIN(sgsize, max_sgsize); + if (map->sync_count > 0) { + sl_pend = sl->paddr + sl->datacount; + sl_vend = sl->vaddr + sl->datacount; + } + + if (map->sync_count == 0 || + (kvaddr != 0 && kvaddr != sl_vend) || + (curaddr != sl_pend)) { + + if (++map->sync_count > dmat->common.nsegments) + goto cleanup; + sl++; + sl->vaddr = kvaddr; + sl->paddr = curaddr; + if (kvaddr != 0) { + sl->pages = NULL; + } else { + sl->pages = PHYS_TO_VM_PAGE(curaddr); + KASSERT(sl->pages != NULL, + ("%s: page at PA:0x%08lx is not " + "in vm_page_array", __func__, + curaddr)); + } + sl->datacount = sgsize; + } else + sl->datacount += sgsize; + } else { + sgsize = MIN(sgsize, max_sgsize); + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + vaddr += sgsize; + buflen -= sgsize; + } + +cleanup: + /* + * Did we fit? + */ + return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ +} + +static void +bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) +{ + + if ((map->flags & DMAMAP_COULD_BOUNCE) == 0) + return; + map->mem = *mem; + map->dmat = dmat; + map->callback = callback; + map->callback_arg = callback_arg; +} + +static bus_dma_segment_t * +bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) +{ + + if (segs == NULL) + segs = dmat->segments; + return (segs); +} + +/* + * Release the mapping held by map. + */ +static void +bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) +{ + struct bounce_page *bpage; + + while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { + STAILQ_REMOVE_HEAD(&map->bpages, links); + free_bounce_page(dmat, bpage); + } + + map->sync_count = 0; +} + +static void +dma_preread_safe(vm_offset_t va, vm_size_t size) +{ + /* + * Write back any partial cachelines immediately before and + * after the DMA region. 
+ */ + if (va & (dcache_line_size - 1)) + cpu_dcache_wb_range(va, 1); + if ((va + size) & (dcache_line_size - 1)) + cpu_dcache_wb_range(va + size, 1); + + cpu_dcache_inv_range(va, size); +} + +static void +dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op) +{ + uint32_t len, offset; + vm_page_t m; + vm_paddr_t pa; + vm_offset_t va, tempva; + bus_size_t size; + + offset = sl->paddr & PAGE_MASK; + m = sl->pages; + size = sl->datacount; + pa = sl->paddr; + + for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) { + tempva = 0; + if (sl->vaddr == 0) { + len = min(PAGE_SIZE - offset, size); + tempva = pmap_quick_enter_page(m); + va = tempva | offset; + KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset), + ("unexpected vm_page_t phys: 0x%16lx != 0x%16lx", + VM_PAGE_TO_PHYS(m) | offset, pa)); + } else { + len = sl->datacount; + va = sl->vaddr; + } + + switch (op) { + case BUS_DMASYNC_PREWRITE: + case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD: + cpu_dcache_wb_range(va, len); + break; + case BUS_DMASYNC_PREREAD: + /* + * An mbuf may start in the middle of a cacheline. There + * will be no cpu writes to the beginning of that line + * (which contains the mbuf header) while dma is in + * progress. Handle that case by doing a writeback of + * just the first cacheline before invalidating the + * overall buffer. Any mbuf in a chain may have this + * misalignment. Buffers which are not mbufs bounce if + * they are not aligned to a cacheline. + */ + dma_preread_safe(va, len); + break; + case BUS_DMASYNC_POSTREAD: + case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE: + cpu_dcache_inv_range(va, len); + break; + default: + panic("unsupported combination of sync operations: " + "0x%08x\n", op); + } + + if (tempva != 0) + pmap_quick_remove_page(tempva); + } +} + +static void +bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dmasync_op_t op) +{ + struct bounce_page *bpage; + struct sync_list *sl, *end; + vm_offset_t datavaddr, tempvaddr; + + if (op == BUS_DMASYNC_POSTWRITE) + return; + + if ((op & BUS_DMASYNC_POSTREAD) != 0) { + /* + * Wait for any DMA operations to complete before the bcopy. 
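To make dma_preread_safe() above concrete, assume a 64-byte cache line and a PREREAD buffer at va 0x1030 of length 0xa0: the start is not line-aligned, so the line containing 0x1030 is written back first; the end address 0x10d0 is not line-aligned either, so that line is written back too; only then is the whole range invalidated, which keeps unrelated data sharing those edge lines (such as an mbuf header) from being lost. Note that on RISC-V as of this revision the cpu_dcache_*_range() macros added to cpufunc.h are empty, so these cache-maintenance paths currently compile away and only matter for the bounce copies and any future non-coherent implementation.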
+ */ + fence(); + } + + if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { + CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " + "performing bounce", __func__, dmat, dmat->common.flags, + op); + + if ((op & BUS_DMASYNC_PREWRITE) != 0) { + while (bpage != NULL) { + tempvaddr = 0; + datavaddr = bpage->datavaddr; + if (datavaddr == 0) { + tempvaddr = pmap_quick_enter_page( + bpage->datapage); + datavaddr = tempvaddr | bpage->dataoffs; + } + + bcopy((void *)datavaddr, + (void *)bpage->vaddr, bpage->datacount); + if (tempvaddr != 0) + pmap_quick_remove_page(tempvaddr); + if ((dmat->bounce_flags & BF_COHERENT) == 0) + cpu_dcache_wb_range(bpage->vaddr, + bpage->datacount); + bpage = STAILQ_NEXT(bpage, links); + } + dmat->bounce_zone->total_bounced++; + } else if ((op & BUS_DMASYNC_PREREAD) != 0) { + while (bpage != NULL) { + if ((dmat->bounce_flags & BF_COHERENT) == 0) + cpu_dcache_wbinv_range(bpage->vaddr, + bpage->datacount); + bpage = STAILQ_NEXT(bpage, links); + } + } + + if ((op & BUS_DMASYNC_POSTREAD) != 0) { + while (bpage != NULL) { + if ((dmat->bounce_flags & BF_COHERENT) == 0) + cpu_dcache_inv_range(bpage->vaddr, + bpage->datacount); + tempvaddr = 0; + datavaddr = bpage->datavaddr; + if (datavaddr == 0) { + tempvaddr = pmap_quick_enter_page( + bpage->datapage); + datavaddr = tempvaddr | bpage->dataoffs; + } + + bcopy((void *)bpage->vaddr, + (void *)datavaddr, bpage->datacount); + + if (tempvaddr != 0) + pmap_quick_remove_page(tempvaddr); + bpage = STAILQ_NEXT(bpage, links); + } + dmat->bounce_zone->total_bounced++; + } + } + + /* + * Cache maintenance for normal (non-COHERENT non-bounce) buffers. + */ + if (map->sync_count != 0) { + sl = &map->slist[0]; + end = &map->slist[map->sync_count]; + CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x " + "performing sync", __func__, dmat, op); + + for ( ; sl != end; ++sl) + dma_dcache_sync(sl, op); + } + + if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) { + /* + * Wait for the bcopy to complete before any DMA operations. 
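Seen from the driver side, bounce_bus_dmamap_sync() above implies the usual discipline: PRE operations after the CPU has prepared a buffer and before the device is started, POST operations after the device has finished and before the CPU looks at the data. A sketch, with hypothetical mydev_* names:

struct mydev_softc {
	bus_dma_tag_t	dma_tag;
	bus_dmamap_t	tx_map;
	bus_dmamap_t	rx_map;
};

static void
mydev_start_tx(struct mydev_softc *sc)
{

	/* CPU done writing: write back and/or copy into bounce pages. */
	bus_dmamap_sync(sc->dma_tag, sc->tx_map, BUS_DMASYNC_PREWRITE);
	/* ... kick the device to start the transfer ... */
}

static void
mydev_rx_intr(struct mydev_softc *sc)
{

	/* Device done writing: invalidate and/or copy out of bounce pages. */
	bus_dmamap_sync(sc->dma_tag, sc->rx_map, BUS_DMASYNC_POSTREAD);
	/* ... pass the received data up ... */
}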
+ */ + fence(); + } +} + +static void +init_bounce_pages(void *dummy __unused) +{ + + total_bpages = 0; + STAILQ_INIT(&bounce_zone_list); + STAILQ_INIT(&bounce_map_waitinglist); + STAILQ_INIT(&bounce_map_callbacklist); + mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); +} +SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); + +static struct sysctl_ctx_list * +busdma_sysctl_tree(struct bounce_zone *bz) +{ + + return (&bz->sysctl_tree); +} + +static struct sysctl_oid * +busdma_sysctl_tree_top(struct bounce_zone *bz) +{ + + return (bz->sysctl_tree_top); +} + +static int +alloc_bounce_zone(bus_dma_tag_t dmat) +{ + struct bounce_zone *bz; + + /* Check to see if we already have a suitable zone */ + STAILQ_FOREACH(bz, &bounce_zone_list, links) { + if ((dmat->common.alignment <= bz->alignment) && + (dmat->common.lowaddr >= bz->lowaddr)) { + dmat->bounce_zone = bz; + return (0); + } + } + + if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, + M_NOWAIT | M_ZERO)) == NULL) + return (ENOMEM); + + STAILQ_INIT(&bz->bounce_page_list); + bz->free_bpages = 0; + bz->reserved_bpages = 0; + bz->active_bpages = 0; + bz->lowaddr = dmat->common.lowaddr; + bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE); + bz->map_count = 0; + snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); + busdma_zonecount++; + snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); + STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); + dmat->bounce_zone = bz; + + sysctl_ctx_init(&bz->sysctl_tree); + bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, + SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, + CTLFLAG_RD, 0, ""); + if (bz->sysctl_tree_top == NULL) { + sysctl_ctx_free(&bz->sysctl_tree); + return (0); /* XXX error code? */ + } + + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, + "Total bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, + "Free bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, + "Reserved bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, + "Active bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, + "Total bounce requests"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, + "Total bounce requests that were deferred"); + SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); + SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "alignment", CTLFLAG_RD, &bz->alignment, ""); + + return (0); +} + +static int +alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) +{ + struct bounce_zone *bz; + int count; + + bz = dmat->bounce_zone; + count = 0; + while (numpages > 0) { + struct bounce_page *bpage; + + bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, + M_NOWAIT | M_ZERO); + + if (bpage == NULL) + break; + bpage->vaddr = 
(vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, + M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); + if (bpage->vaddr == 0) { + free(bpage, M_DEVBUF); + break; + } + bpage->busaddr = pmap_kextract(bpage->vaddr); + mtx_lock(&bounce_lock); + STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); + total_bpages++; + bz->total_bpages++; + bz->free_bpages++; + mtx_unlock(&bounce_lock); + count++; + numpages--; + } + return (count); +} + +static int +reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) +{ + struct bounce_zone *bz; + int pages; + + mtx_assert(&bounce_lock, MA_OWNED); + bz = dmat->bounce_zone; + pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); + if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) + return (map->pagesneeded - (map->pagesreserved + pages)); + bz->free_bpages -= pages; + bz->reserved_bpages += pages; + map->pagesreserved += pages; + pages = map->pagesneeded - map->pagesreserved; + + return (pages); +} + +static bus_addr_t +add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, + bus_addr_t addr, bus_size_t size) +{ + struct bounce_zone *bz; + struct bounce_page *bpage; + + KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); + KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0, + ("add_bounce_page: bad map %p", map)); + + bz = dmat->bounce_zone; + if (map->pagesneeded == 0) + panic("add_bounce_page: map doesn't need any pages"); + map->pagesneeded--; + + if (map->pagesreserved == 0) + panic("add_bounce_page: map doesn't need any pages"); + map->pagesreserved--; + + mtx_lock(&bounce_lock); + bpage = STAILQ_FIRST(&bz->bounce_page_list); + if (bpage == NULL) + panic("add_bounce_page: free page list is empty"); + + STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); + bz->reserved_bpages--; + bz->active_bpages++; + mtx_unlock(&bounce_lock); + + if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { + /* Page offset needs to be preserved. */ + bpage->vaddr |= addr & PAGE_MASK; + bpage->busaddr |= addr & PAGE_MASK; + } + bpage->datavaddr = vaddr; + bpage->datapage = PHYS_TO_VM_PAGE(addr); + bpage->dataoffs = addr & PAGE_MASK; + bpage->datacount = size; + STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); + return (bpage->busaddr); +} + +static void +free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) +{ + struct bus_dmamap *map; + struct bounce_zone *bz; + + bz = dmat->bounce_zone; + bpage->datavaddr = 0; + bpage->datacount = 0; + if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { + /* + * Reset the bounce page to start at offset 0. Other uses + * of this bounce page may need to store a full page of + * data and/or assume it starts on a page boundary. 
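As a worked example of the BUS_DMA_KEEP_PG_OFFSET handling in add_bounce_page() above: for a client physical address of, say, 0x80241230, addr & PAGE_MASK is 0x230, so both bpage->vaddr and bpage->busaddr get that offset OR'd in and the data occupies the same offset within the bounce page as it did within the original page; the masking just below in free_bounce_page() undoes this so the page goes back on the free list starting at offset 0 again.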
+ */ + bpage->vaddr &= ~PAGE_MASK; + bpage->busaddr &= ~PAGE_MASK; + } + + mtx_lock(&bounce_lock); + STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); + bz->free_bpages++; + bz->active_bpages--; + if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { + if (reserve_bounce_pages(map->dmat, map, 1) == 0) { + STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); + STAILQ_INSERT_TAIL(&bounce_map_callbacklist, + map, links); + busdma_swi_pending = 1; + bz->total_deferred++; + swi_sched(vm_ih, 0); + } + } + mtx_unlock(&bounce_lock); +} + +void +busdma_swi(void) +{ + bus_dma_tag_t dmat; + struct bus_dmamap *map; + + mtx_lock(&bounce_lock); + while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { + STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); + mtx_unlock(&bounce_lock); + dmat = map->dmat; + (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK); + bus_dmamap_load_mem(map->dmat, map, &map->mem, + map->callback, map->callback_arg, BUS_DMA_WAITOK); + (dmat->common.lockfunc)(dmat->common.lockfuncarg, + BUS_DMA_UNLOCK); + mtx_lock(&bounce_lock); + } + mtx_unlock(&bounce_lock); +} + +struct bus_dma_impl bus_dma_bounce_impl = { + .tag_create = bounce_bus_dma_tag_create, + .tag_destroy = bounce_bus_dma_tag_destroy, + .map_create = bounce_bus_dmamap_create, + .map_destroy = bounce_bus_dmamap_destroy, + .mem_alloc = bounce_bus_dmamem_alloc, + .mem_free = bounce_bus_dmamem_free, + .load_phys = bounce_bus_dmamap_load_phys, + .load_buffer = bounce_bus_dmamap_load_buffer, + .load_ma = bus_dmamap_load_ma_triv, + .map_waitok = bounce_bus_dmamap_waitok, + .map_complete = bounce_bus_dmamap_complete, + .map_unload = bounce_bus_dmamap_unload, + .map_sync = bounce_bus_dmamap_sync +}; Property changes on: head/sys/riscv/riscv/busdma_bounce.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/riscv/riscv/busdma_machdep.c =================================================================== --- head/sys/riscv/riscv/busdma_machdep.c (revision 347224) +++ head/sys/riscv/riscv/busdma_machdep.c (revision 347225) @@ -1,102 +1,231 @@ /*- * Copyright (c) 1997, 1998 Justin T. Gibbs. + * Copyright (c) 2013, 2015 The FreeBSD Foundation + * All rights reserved. * + * This software was developed by Konstantin Belousov + * under sponsorship from the FreeBSD Foundation. + * + * Portions of this software were developed by Semihalf + * under sponsorship of the FreeBSD Foundation. + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
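The waiting-list plus busdma_swi() machinery above is what makes deferred loads work: when bounce pages cannot be reserved and the caller used BUS_DMA_WAITOK, the load returns EINPROGRESS, the map sits on the waiting list, and the callback later runs from busdma_swi() under the tag's lockfunc once pages are freed. A driver callback therefore has to work in both the immediate and the deferred case; a hedged sketch (mydev_* names hypothetical):

struct mydev_desc {
	bus_dmamap_t	 map;
	void		*buf;
	bus_size_t	 len;
	bus_addr_t	 busaddr;
};

static void
mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mydev_desc *d = arg;

	if (error != 0)
		return;
	d->busaddr = segs[0].ds_addr;
}

static int
mydev_load_desc(bus_dma_tag_t tag, struct mydev_desc *d)
{
	int error;

	error = bus_dmamap_load(tag, d->map, d->buf, d->len,
	    mydev_load_cb, d, BUS_DMA_WAITOK);
	if (error == EINPROGRESS) {
		/*
		 * Bounce pages are exhausted; busdma_swi() will invoke
		 * mydev_load_cb() once another unload frees some.
		 */
		return (0);
	}
	return (error);
}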
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include -int -_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, - bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) +/* + * Convenience function for manipulating driver locks from busdma (during + * busdma_swi, for example). Drivers that don't provide their own locks + * should specify &Giant to dmat->lockfuncarg. Drivers that use their own + * non-mutex locking scheme don't have to use this at all. + */ +void +busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) { + struct mtx *dmtx; - panic("busdma"); + dmtx = (struct mtx *)arg; + switch (op) { + case BUS_DMA_LOCK: + mtx_lock(dmtx); + break; + case BUS_DMA_UNLOCK: + mtx_unlock(dmtx); + break; + default: + panic("Unknown operation 0x%x for busdma_lock_mutex!", op); + } } -int -_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, - bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, - int *segp) +/* + * dflt_lock should never get called. It gets put into the dma tag when + * lockfunc == NULL, which is only valid if the maps that are associated + * with the tag are meant to never be defered. + * XXX Should have a way to identify which driver is responsible here. + */ +void +bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op) { - panic("busdma"); + panic("driver error: busdma dflt_lock called"); } +/* + * Return true if a match is made. + * + * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. + * + * If paddr is within the bounds of the dma tag then call the filter callback + * to check for a match, if there is no filter callback then assume a match. 
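busdma_lock_mutex() above is the hook that lets a deferred load callback run with the driver's own mutex held: the driver passes it together with its mutex as the lockfunc/lockfuncarg pair when creating the tag. A minimal sketch (struct mydev_softc is hypothetical; MCLBYTES is just a convenient size):

struct mydev_softc {
	struct mtx	mtx;
	bus_dma_tag_t	dma_tag;
};

static int
mydev_create_tag(device_t dev, struct mydev_softc *sc)
{

	/*
	 * Deferred callbacks scheduled via busdma_swi() will then be
	 * invoked with sc->mtx held, matching the driver's own locking.
	 */
	return (bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES, 1, MCLBYTES,		/* maxsize, nsegs, maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &sc->mtx,	/* lockfunc, lockfuncarg */
	    &sc->dma_tag));
}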
+ */ int -_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, - int *segp) +bus_dma_run_filter(struct bus_dma_tag_common *tc, bus_addr_t paddr) { + int retval; - panic("busdma"); + retval = 0; + do { + if (((paddr > tc->lowaddr && paddr <= tc->highaddr) || + ((paddr & (tc->alignment - 1)) != 0)) && + (tc->filter == NULL || + (*tc->filter)(tc->filterarg, paddr) != 0)) + retval = 1; + + tc = tc->parent; + } while (retval == 0 && tc != NULL); + return (retval); } -void -_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, - struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) +int +common_bus_dma_tag_create(struct bus_dma_tag_common *parent, + bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, + bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, + bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, + bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat) { + void *newtag; + struct bus_dma_tag_common *common; - panic("busdma"); -} + KASSERT(sz >= sizeof(struct bus_dma_tag_common), ("sz")); + /* Return a NULL tag on failure */ + *dmat = NULL; + /* Basic sanity checking */ + if (boundary != 0 && boundary < maxsegsz) + maxsegsz = boundary; + if (maxsegsz == 0) + return (EINVAL); -bus_dma_segment_t * -_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, - bus_dma_segment_t *segs, int nsegs, int error) -{ + newtag = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT); + if (newtag == NULL) { + CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", + __func__, newtag, 0, ENOMEM); + return (ENOMEM); + } - panic("busdma"); + common = newtag; + common->impl = &bus_dma_bounce_impl; + common->parent = parent; + common->alignment = alignment; + common->boundary = boundary; + common->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); + common->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); + common->filter = filter; + common->filterarg = filterarg; + common->maxsize = maxsize; + common->nsegments = nsegments; + common->maxsegsz = maxsegsz; + common->flags = flags; + common->ref_count = 1; /* Count ourself */ + if (lockfunc != NULL) { + common->lockfunc = lockfunc; + common->lockfuncarg = lockfuncarg; + } else { + common->lockfunc = bus_dma_dflt_lock; + common->lockfuncarg = NULL; + } + + /* Take into account any restrictions imposed by our parent tag */ + if (parent != NULL) { + common->impl = parent->impl; + common->lowaddr = MIN(parent->lowaddr, common->lowaddr); + common->highaddr = MAX(parent->highaddr, common->highaddr); + if (common->boundary == 0) + common->boundary = parent->boundary; + else if (parent->boundary != 0) { + common->boundary = MIN(parent->boundary, + common->boundary); + } + if (common->filter == NULL) { + /* + * Short circuit looking at our parent directly + * since we have encapsulated all of its information + */ + common->filter = parent->filter; + common->filterarg = parent->filterarg; + common->parent = parent->parent; + } + atomic_add_int(&parent->ref_count, 1); + } + *dmat = common; + return (0); } /* - * Release the mapping held by map. + * Allocate a device specific dma_tag. 
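A worked example of the bus_dma_run_filter() walk above: for a tag with lowaddr 0xffffffff, highaddr BUS_SPACE_MAXADDR, alignment 1 and no filter callback, a physical address of 0x120000000 (above 4 GiB) satisfies paddr > lowaddr && paddr <= highaddr and there is no filter to veto it, so the function returns 1 and the page is bounced; 0x80000000 fails both the window test and the alignment test, so it is used directly. The same test is then repeated for every parent tag, which is also why common_bus_dma_tag_create() folds the parent's restrictions in with MIN(lowaddr), MAX(highaddr) and the boundary merge: a child tag can only ever be more restrictive than its parent.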
*/ -void -bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) +int +bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, + bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, + bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, + int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, + void *lockfuncarg, bus_dma_tag_t *dmat) { + struct bus_dma_tag_common *tc; + int error; - panic("busdma"); + if (parent == NULL) { + error = bus_dma_bounce_impl.tag_create(parent, alignment, + boundary, lowaddr, highaddr, filter, filterarg, maxsize, + nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); + } else { + tc = (struct bus_dma_tag_common *)parent; + error = tc->impl->tag_create(parent, alignment, + boundary, lowaddr, highaddr, filter, filterarg, maxsize, + nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat); + } + return (error); } -void -bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) +int +bus_dma_tag_destroy(bus_dma_tag_t dmat) { + struct bus_dma_tag_common *tc; - panic("busdma"); + tc = (struct bus_dma_tag_common *)dmat; + return (tc->impl->tag_destroy(dmat)); +} + +int +bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain) +{ + + return (0); } Index: head/sys/riscv/riscv/machdep.c =================================================================== --- head/sys/riscv/riscv/machdep.c (revision 347224) +++ head/sys/riscv/riscv/machdep.c (revision 347225) @@ -1,891 +1,895 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2015-2017 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
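With tag creation and destruction in place, the usual driver lifecycle pairs these tag routines with the map and memory entry points and ends back at bus_dma_tag_destroy(). A hedged sketch (the struct foo_ring type, its field names, and the PAGE_SIZE-sized ring are hypothetical; error handling is minimal):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>

struct foo_ring {
        bus_dma_tag_t   tag;
        bus_dmamap_t    map;
        void            *vaddr;
        bus_addr_t      paddr;
};

static void
foo_ring_getaddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{

        if (error == 0)
                *(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
foo_ring_alloc(struct foo_ring *fr)
{
        int error;

        /* With a NULL parent, this dispatches to the bounce implementation. */
        error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            PAGE_SIZE, 1, PAGE_SIZE, 0, NULL, NULL, &fr->tag);
        if (error != 0)
                return (error);
        error = bus_dmamem_alloc(fr->tag, &fr->vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO, &fr->map);
        if (error != 0)
                goto fail_tag;
        error = bus_dmamap_load(fr->tag, fr->map, fr->vaddr, PAGE_SIZE,
            foo_ring_getaddr, &fr->paddr, BUS_DMA_NOWAIT);
        if (error != 0)
                goto fail_mem;
        return (0);

fail_mem:
        bus_dmamem_free(fr->tag, fr->vaddr, fr->map);
fail_tag:
        bus_dma_tag_destroy(fr->tag);
        return (error);
}

static void
foo_ring_free(struct foo_ring *fr)
{

        bus_dmamap_sync(fr->tag, fr->map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(fr->tag, fr->map);
        bus_dmamem_free(fr->tag, fr->vaddr, fr->map);
        bus_dma_tag_destroy(fr->tag);
}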
*/ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FPE #include #endif #ifdef FDT #include #include #endif struct pcpu __pcpu[MAXCPU]; static struct trapframe proc0_tf; vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2]; vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2]; int early_boot = 1; int cold = 1; long realmem = 0; long Maxmem = 0; #define DTB_SIZE_MAX (1024 * 1024) #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) vm_paddr_t physmap[PHYSMAP_SIZE]; u_int physmap_idx; struct kva_md_info kmi; int64_t dcache_line_size; /* The minimum D cache line size */ int64_t icache_line_size; /* The minimum I cache line size */ int64_t idcache_line_size; /* The minimum cache line size */ extern int *end; extern int *initstack_end; uintptr_t mcall_trap(uintptr_t mcause, uintptr_t* regs); uintptr_t mcall_trap(uintptr_t mcause, uintptr_t* regs) { return (0); } static void cpu_startup(void *dummy) { identify_cpu(); vm_ksubmap_init(&kmi); bufinit(); vm_pager_bufferinit(); } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); int cpu_idle_wakeup(int cpu) { return (0); } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; regs->sepc = frame->tf_sepc; regs->sstatus = frame->tf_sstatus; regs->ra = frame->tf_ra; regs->sp = frame->tf_sp; regs->gp = frame->tf_gp; regs->tp = frame->tf_tp; memcpy(regs->t, frame->tf_t, sizeof(regs->t)); memcpy(regs->s, frame->tf_s, sizeof(regs->s)); memcpy(regs->a, frame->tf_a, sizeof(regs->a)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; frame->tf_sepc = regs->sepc; frame->tf_ra = regs->ra; frame->tf_sp = regs->sp; frame->tf_gp = regs->gp; frame->tf_tp = regs->tp; memcpy(frame->tf_t, regs->t, sizeof(frame->tf_t)); memcpy(frame->tf_s, regs->s, sizeof(frame->tf_s)); memcpy(frame->tf_a, regs->a, sizeof(frame->tf_a)); return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { #ifdef FPE struct pcb *pcb; pcb = td->td_pcb; if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running FPE instructions we will * need to save the state to memcpy it below. 
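fill_regs() and set_regs() are what service the PT_GETREGS/PT_SETREGS ptrace(2) requests, copying between the trapframe and the machine-dependent struct reg (sepc, sstatus, ra, sp, gp, tp and the t/s/a arrays). A small userland sketch of a tracer reading a stopped child's registers through that path (minimal error handling; /bin/true is just a convenient target):

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#include <machine/reg.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct reg regs;
        pid_t pid;
        int status;

        pid = fork();
        if (pid == -1)
                err(1, "fork");
        if (pid == 0) {
                /* Child: request tracing, then exec; the exec stops it. */
                ptrace(PT_TRACE_ME, 0, NULL, 0);
                execl("/bin/true", "true", (char *)NULL);
                _exit(1);
        }
        if (waitpid(pid, &status, 0) == -1)
                err(1, "waitpid");
        /* Kernel side: fill_regs() copies the trapframe into regs. */
        if (ptrace(PT_GETREGS, pid, (caddr_t)&regs, 0) == -1)
                err(1, "PT_GETREGS");
        printf("child sepc %#lx sp %#lx\n",
            (unsigned long)regs.sepc, (unsigned long)regs.sp);
        ptrace(PT_CONTINUE, pid, (caddr_t)1, 0);
        waitpid(pid, &status, 0);
        return (0);
}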
*/ if (td == curthread) fpe_state_save(td); memcpy(regs->fp_x, pcb->pcb_x, sizeof(regs->fp_x)); regs->fp_fcsr = pcb->pcb_fcsr; } else #endif memset(regs, 0, sizeof(*regs)); return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { #ifdef FPE struct trapframe *frame; struct pcb *pcb; frame = td->td_frame; pcb = td->td_pcb; memcpy(pcb->pcb_x, regs->fp_x, sizeof(regs->fp_x)); pcb->pcb_fcsr = regs->fp_fcsr; pcb->pcb_fpflags |= PCB_FP_STARTED; frame->tf_sstatus &= ~SSTATUS_FS_MASK; frame->tf_sstatus |= SSTATUS_FS_CLEAN; #endif return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { panic("fill_dbregs"); } int set_dbregs(struct thread *td, struct dbreg *regs) { panic("set_dbregs"); } int ptrace_set_pc(struct thread *td, u_long addr) { td->td_frame->tf_sepc = addr; return (0); } int ptrace_single_step(struct thread *td) { /* TODO; */ return (EOPNOTSUPP); } int ptrace_clear_single_step(struct thread *td) { /* TODO; */ return (EOPNOTSUPP); } void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf; struct pcb *pcb; tf = td->td_frame; pcb = td->td_pcb; memset(tf, 0, sizeof(struct trapframe)); tf->tf_a[0] = stack; tf->tf_sp = STACKALIGN(stack); tf->tf_ra = imgp->entry_addr; tf->tf_sepc = imgp->entry_addr; pcb->pcb_fpflags &= ~PCB_FP_STARTED; } /* Sanity check these are the same size, they will be memcpy'd to and fro */ CTASSERT(sizeof(((struct trapframe *)0)->tf_a) == sizeof((struct gpregs *)0)->gp_a); CTASSERT(sizeof(((struct trapframe *)0)->tf_s) == sizeof((struct gpregs *)0)->gp_s); CTASSERT(sizeof(((struct trapframe *)0)->tf_t) == sizeof((struct gpregs *)0)->gp_t); CTASSERT(sizeof(((struct trapframe *)0)->tf_a) == sizeof((struct reg *)0)->a); CTASSERT(sizeof(((struct trapframe *)0)->tf_s) == sizeof((struct reg *)0)->s); CTASSERT(sizeof(((struct trapframe *)0)->tf_t) == sizeof((struct reg *)0)->t); /* Support for FDT configurations only. */ CTASSERT(FDT); int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; memcpy(mcp->mc_gpregs.gp_t, tf->tf_t, sizeof(mcp->mc_gpregs.gp_t)); memcpy(mcp->mc_gpregs.gp_s, tf->tf_s, sizeof(mcp->mc_gpregs.gp_s)); memcpy(mcp->mc_gpregs.gp_a, tf->tf_a, sizeof(mcp->mc_gpregs.gp_a)); if (clear_ret & GET_MC_CLEAR_RET) { mcp->mc_gpregs.gp_a[0] = 0; mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */ } mcp->mc_gpregs.gp_ra = tf->tf_ra; mcp->mc_gpregs.gp_sp = tf->tf_sp; mcp->mc_gpregs.gp_gp = tf->tf_gp; mcp->mc_gpregs.gp_tp = tf->tf_tp; mcp->mc_gpregs.gp_sepc = tf->tf_sepc; mcp->mc_gpregs.gp_sstatus = tf->tf_sstatus; return (0); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf; tf = td->td_frame; memcpy(tf->tf_t, mcp->mc_gpregs.gp_t, sizeof(tf->tf_t)); memcpy(tf->tf_s, mcp->mc_gpregs.gp_s, sizeof(tf->tf_s)); memcpy(tf->tf_a, mcp->mc_gpregs.gp_a, sizeof(tf->tf_a)); tf->tf_ra = mcp->mc_gpregs.gp_ra; tf->tf_sp = mcp->mc_gpregs.gp_sp; tf->tf_gp = mcp->mc_gpregs.gp_gp; tf->tf_sepc = mcp->mc_gpregs.gp_sepc; tf->tf_sstatus = mcp->mc_gpregs.gp_sstatus; return (0); } static void get_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef FPE struct pcb *curpcb; critical_enter(); curpcb = curthread->td_pcb; KASSERT(td->td_pcb == curpcb, ("Invalid fpe pcb")); if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running FPE instructions we will * need to save the state to memcpy it below. 
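get_mcontext() and set_mcontext() are the kernel half of the ucontext machinery: getcontext(2) has the kernel fill uc_mcontext through get_mcontext(), and setcontext(2)/swapcontext(2) hand it back to set_mcontext(). A tiny portable illustration of that round trip (nothing in it is RISC-V specific):

#include <stdio.h>
#include <ucontext.h>

int
main(void)
{
        ucontext_t uc;
        volatile int pass = 0;

        /* On return from setcontext() below, execution resumes here. */
        getcontext(&uc);
        pass++;
        if (pass < 2) {
                printf("first pass, re-entering the saved context\n");
                setcontext(&uc);
        }
        printf("second pass, done\n");
        return (0);
}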
*/ fpe_state_save(td); KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0, ("Non-userspace FPE flags set in get_fpcontext")); memcpy(mcp->mc_fpregs.fp_x, curpcb->pcb_x, sizeof(mcp->mc_fpregs)); mcp->mc_fpregs.fp_fcsr = curpcb->pcb_fcsr; mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags; mcp->mc_flags |= _MC_FP_VALID; } critical_exit(); #endif } static void set_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef FPE struct pcb *curpcb; critical_enter(); if ((mcp->mc_flags & _MC_FP_VALID) != 0) { curpcb = curthread->td_pcb; /* FPE usage is enabled, override registers. */ memcpy(curpcb->pcb_x, mcp->mc_fpregs.fp_x, sizeof(mcp->mc_fpregs)); curpcb->pcb_fcsr = mcp->mc_fpregs.fp_fcsr; curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK; } critical_exit(); #endif } void cpu_idle(int busy) { spinlock_enter(); if (!busy) cpu_idleclock(); if (!sched_runnable()) __asm __volatile( "fence \n" "wfi \n"); if (!busy) cpu_activeclock(); spinlock_exit(); } void cpu_halt(void) { intr_disable(); for (;;) __asm __volatile("wfi"); } /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { /* TBD */ } /* Get current clock frequency for the given CPU ID. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { panic("cpu_est_clockrate"); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { } void spinlock_enter(void) { struct thread *td; td = curthread; if (td->td_md.md_spinlock_count == 0) { td->td_md.md_spinlock_count = 1; td->td_md.md_saved_sstatus_ie = intr_disable(); } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t sstatus_ie; td = curthread; critical_exit(); sstatus_ie = td->td_md.md_saved_sstatus_ie; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) intr_restore(sstatus_ie); } #ifndef _SYS_SYSPROTO_H_ struct sigreturn_args { ucontext_t *ucp; }; #endif int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { uint64_t sstatus; ucontext_t uc; int error; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); /* * Make sure the processor mode has not been tampered with and * interrupts have not been disabled. * Supervisor interrupts in user mode are always enabled. */ sstatus = uc.uc_mcontext.mc_gpregs.gp_sstatus; if ((sstatus & SSTATUS_SPP) != 0) return (EINVAL); error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); set_fpcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
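sys_sigreturn() is the other end of signal delivery: the context that sendsig() (below) pushes onto the user stack comes back through sigreturn(2) when the handler returns, after the SSTATUS_SPP check rejects any attempt to "return" to supervisor mode. A small RISC-V-specific sketch of a handler inspecting the interrupted PC in that saved context (the mc_gpregs.gp_sepc field name is taken from the mcontext layout above; printf in the handler is tolerated only because the signal is raised synchronously):

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

static volatile sig_atomic_t got;
static volatile unsigned long pc_at_signal;

static void
handler(int sig, siginfo_t *si, void *ctx)
{
        ucontext_t *uc = ctx;

        /* gp_sepc is the PC sendsig() recorded for the interrupted code. */
        pc_at_signal = uc->uc_mcontext.mc_gpregs.gp_sepc;
        got = 1;
}

int
main(void)
{
        struct sigaction sa;

        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGUSR1, &sa, NULL) == -1)
                return (1);

        /*
         * Returning from handler() lands in the signal trampoline, which
         * calls sigreturn(2) and therefore sys_sigreturn() to reinstall
         * the saved mcontext.
         */
        raise(SIGUSR1);
        if (got)
                printf("interrupted pc was %#lx\n", pc_at_signal);
        return (0);
}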
*/ void makectx(struct trapframe *tf, struct pcb *pcb) { memcpy(pcb->pcb_t, tf->tf_t, sizeof(tf->tf_t)); memcpy(pcb->pcb_s, tf->tf_s, sizeof(tf->tf_s)); memcpy(pcb->pcb_a, tf->tf_a, sizeof(tf->tf_a)); pcb->pcb_ra = tf->tf_ra; pcb->pcb_sp = tf->tf_sp; pcb->pcb_gp = tf->tf_gp; pcb->pcb_tp = tf->tf_tp; pcb->pcb_sepc = tf->tf_sepc; } void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct sigframe *fp, frame; struct sysentvec *sysent; struct trapframe *tf; struct sigacts *psp; struct thread *td; struct proc *p; int onstack; int sig; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); } else { fp = (struct sigframe *)td->td_frame->tf_sp; } /* Make room, keeping the stack aligned */ fp--; fp = (struct sigframe *)STACKALIGN(fp); /* Fill in the frame to copy out */ bzero(&frame, sizeof(frame)); get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); get_fpcontext(td, &frame.sf_uc.uc_mcontext); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack = td->td_sigstk; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ? (onstack ? SS_ONSTACK : 0) : SS_DISABLE; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } tf->tf_a[0] = sig; tf->tf_a[1] = (register_t)&fp->sf_si; tf->tf_a[2] = (register_t)&fp->sf_uc; tf->tf_sepc = (register_t)catcher; tf->tf_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_ra = (register_t)sysent->sv_sigcode_base; else tf->tf_ra = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc, tf->tf_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } static void init_proc0(vm_offset_t kstack) { struct pcpu *pcpup; pcpup = &__pcpu[0]; proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1; thread0.td_pcb->pcb_fpflags = 0; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; } static int add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, u_int *physmap_idxp) { u_int i, insert_idx, _physmap_idx; _physmap_idx = *physmap_idxp; if (length == 0) return (1); /* * Find insertion point while checking for overlap. Start off by * assuming the new entry will be added to the end. */ insert_idx = _physmap_idx; for (i = 0; i <= _physmap_idx; i += 2) { if (base < physmap[i + 1]) { if (base + length <= physmap[i]) { insert_idx = i; break; } if (boothowto & RB_VERBOSE) printf( "Overlapping memory regions, ignoring second region\n"); return (1); } } /* See if we can prepend to the next entry. */ if (insert_idx <= _physmap_idx && base + length == physmap[insert_idx]) { physmap[insert_idx] = base; return (1); } /* See if we can append to the previous entry. 
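The alternate-stack branch in sendsig() corresponds to the usual sigaltstack(2) + SA_ONSTACK pairing in userland: the handler only runs on the alternate stack when one is armed, the thread is not already on it, and the signal was registered with SA_ONSTACK. A short portable sketch (again, printing from the handler is only acceptable because the signal is raised synchronously):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
on_sigusr1(int sig)
{
        stack_t cur;

        /* SS_ONSTACK mirrors the uc_stack.ss_flags value sendsig() records. */
        if (sigaltstack(NULL, &cur) == 0)
                printf("running on the alternate stack: %s\n",
                    (cur.ss_flags & SS_ONSTACK) ? "yes" : "no");
}

int
main(void)
{
        struct sigaction sa;
        stack_t ss;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1)
                return (1);

        sa.sa_handler = on_sigusr1;
        sa.sa_flags = SA_ONSTACK;       /* ask sendsig() for the altstack */
        sigemptyset(&sa.sa_mask);
        if (sigaction(SIGUSR1, &sa, NULL) == -1)
                return (1);

        raise(SIGUSR1);
        return (0);
}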
*/ if (insert_idx > 0 && base == physmap[insert_idx - 1]) { physmap[insert_idx - 1] += length; return (1); } _physmap_idx += 2; *physmap_idxp = _physmap_idx; if (_physmap_idx == PHYSMAP_SIZE) { printf( "Too many segments in the physical address map, giving up\n"); return (0); } /* * Move the last 'N' entries down to make room for the new * entry if needed. */ for (i = _physmap_idx; i > insert_idx; i -= 2) { physmap[i] = physmap[i - 2]; physmap[i + 1] = physmap[i - 1]; } /* Insert the new entry. */ physmap[insert_idx] = base; physmap[insert_idx + 1] = base + length; printf("physmap[%d] = 0x%016lx\n", insert_idx, base); printf("physmap[%d] = 0x%016lx\n", insert_idx + 1, base + length); return (1); } #ifdef FDT static void try_load_dtb(caddr_t kmdp, vm_offset_t dtbp) { #if defined(FDT_DTB_STATIC) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (dtbp == (vm_offset_t)NULL) { printf("ERROR loading DTB\n"); return; } if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); } #endif static void cache_setup(void) { /* TODO */ + + dcache_line_size = 0; + icache_line_size = 0; + idcache_line_size = 0; } /* * Fake up a boot descriptor table. * RISCVTODO: This needs to be done via loader (when it's available). */ vm_offset_t fake_preload_metadata(struct riscv_bootparams *rvbp __unused) { static uint32_t fake_preload[35]; #ifdef DDB vm_offset_t zstart = 0, zend = 0; #endif vm_offset_t lastaddr; int i; i = 0; fake_preload[i++] = MODINFO_NAME; fake_preload[i++] = strlen("kernel") + 1; strcpy((char*)&fake_preload[i++], "kernel"); i += 1; fake_preload[i++] = MODINFO_TYPE; fake_preload[i++] = strlen("elf64 kernel") + 1; strcpy((char*)&fake_preload[i++], "elf64 kernel"); i += 3; fake_preload[i++] = MODINFO_ADDR; fake_preload[i++] = sizeof(vm_offset_t); *(vm_offset_t *)&fake_preload[i++] = (vm_offset_t)(KERNBASE + KERNENTRY); i += 1; fake_preload[i++] = MODINFO_SIZE; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = (vm_offset_t)&end - (vm_offset_t)(KERNBASE + KERNENTRY); i += 1; #ifdef DDB #if 0 /* RISCVTODO */ if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) { fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4); fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8); lastaddr = *(uint32_t *)(KERNVIRTADDR + 8); zend = lastaddr; zstart = *(uint32_t *)(KERNVIRTADDR + 4); db_fetch_ksymtab(zstart, zend); } else #endif #endif lastaddr = (vm_offset_t)&end; fake_preload[i++] = 0; fake_preload[i] = 0; preload_metadata = (void *)fake_preload; return (lastaddr); } void initriscv(struct riscv_bootparams *rvbp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; struct pcpu *pcpup; vm_offset_t rstart, rend; vm_offset_t s, e; int mem_regions_sz; vm_offset_t lastaddr; vm_size_t kernlen; caddr_t kmdp; int i; /* Set the pcpu data, this is needed by pmap_bootstrap */ pcpup = &__pcpu[0]; pcpu_init(pcpup, 0, sizeof(struct pcpu)); /* Set the pcpu pointer */ __asm __volatile("mv gp, %0" :: "r"(pcpup)); PCPU_SET(curthread, &thread0); /* Set the module data location */ lastaddr = fake_preload_metadata(rvbp); /* Find the kernel address */ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); boothowto = RB_VERBOSE | RB_SINGLE; boothowto = RB_VERBOSE; kern_envp = NULL; #ifdef FDT 
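add_physmap_entry() keeps physmap[] as a sorted, flat array of [base, end) pairs, coalescing with a neighbouring pair when the new range abuts it and shifting later pairs up otherwise. A stand-alone, simplified model of that bookkeeping (overlap detection and the verbose-boot messages are omitted; physmap_add(), MAX_PAIRS and the sample addresses are hypothetical) that can be compiled and exercised outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define MAX_PAIRS       8

static uint64_t map[MAX_PAIRS * 2];
static unsigned map_nent;               /* used slots, always even */

static int
physmap_add(uint64_t base, uint64_t len)
{
        unsigned i, insert;

        if (len == 0 || map_nent == MAX_PAIRS * 2)
                return (0);
        /* Find the first pair that starts at or after the new range. */
        insert = map_nent;
        for (i = 0; i < map_nent; i += 2) {
                if (base + len <= map[i]) {
                        insert = i;
                        break;
                }
        }
        if (insert < map_nent && base + len == map[insert]) {
                map[insert] = base;             /* prepend to the next pair */
                return (1);
        }
        if (insert > 0 && base == map[insert - 1]) {
                map[insert - 1] += len;         /* append to the previous pair */
                return (1);
        }
        /* Shift the later pairs up and drop the new one in. */
        for (i = map_nent; i > insert; i -= 2) {
                map[i] = map[i - 2];
                map[i + 1] = map[i - 1];
        }
        map[insert] = base;
        map[insert + 1] = base + len;
        map_nent += 2;
        return (1);
}

int
main(void)
{
        unsigned i;

        physmap_add(0x80200000, 0x1e00000);     /* later chunk first */
        physmap_add(0x80000000, 0x200000);      /* coalesces in front of it */
        for (i = 0; i < map_nent; i += 2)
                printf("[%#lx, %#lx)\n", (unsigned long)map[i],
                    (unsigned long)map[i + 1]);
        return (0);
}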
try_load_dtb(kmdp, rvbp->dtbp_virt); #endif /* Load the physical memory ranges */ physmap_idx = 0; #ifdef FDT /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0) panic("Cannot get physical memory regions"); s = rvbp->dtbp_phys; e = s + DTB_SIZE_MAX; for (i = 0; i < mem_regions_sz; i++) { rstart = mem_regions[i].mr_start; rend = (mem_regions[i].mr_start + mem_regions[i].mr_size); if ((rstart < s) && (rend > e)) { /* Exclude DTB region. */ add_physmap_entry(rstart, (s - rstart), physmap, &physmap_idx); add_physmap_entry(e, (rend - e), physmap, &physmap_idx); } else { add_physmap_entry(mem_regions[i].mr_start, mem_regions[i].mr_size, physmap, &physmap_idx); } } #endif /* Do basic tuning, hz etc */ init_param1(); cache_setup(); /* Bootstrap enough of pmap to enter the kernel proper */ kernlen = (lastaddr - KERNBASE); pmap_bootstrap(rvbp->kern_l1pt, mem_regions[0].mr_start, kernlen); cninit(); init_proc0(rvbp->kern_stack); msgbufinit(msgbufp, msgbufsize); mutex_init(); init_param2(physmem); kdb_init(); early_boot = 0; } #undef bzero void bzero(void *buf, size_t len) { uint8_t *p; p = buf; while(len-- > 0) *p++ = 0; }
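One detail worth calling out in initriscv() above: the device tree blob is carved out of a physical memory region only when the window [dtbp_phys, dtbp_phys + DTB_SIZE_MAX) lies strictly inside that region; otherwise the region is handed to add_physmap_entry() untouched. A compact stand-alone illustration of that split rule (the addresses and the add_region_excluding() helper are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define DTB_SIZE_MAX    (1024 * 1024)

/* Split [rstart, rend) around the reserved window [s, e), as initriscv() does. */
static void
add_region_excluding(uint64_t rstart, uint64_t rend, uint64_t s, uint64_t e)
{

        if (rstart < s && rend > e) {
                printf("add [%#lx, %#lx)\n", (unsigned long)rstart,
                    (unsigned long)s);
                printf("add [%#lx, %#lx)\n", (unsigned long)e,
                    (unsigned long)rend);
        } else {
                printf("add [%#lx, %#lx)\n", (unsigned long)rstart,
                    (unsigned long)rend);
        }
}

int
main(void)
{
        uint64_t dtb_phys = 0x81000000;         /* hypothetical DTB location */

        add_region_excluding(0x80000000, 0x90000000,
            dtb_phys, dtb_phys + DTB_SIZE_MAX);
        return (0);
}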