diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64 index 61f1cbf75982..2036bbe918ba 100644 --- a/sys/conf/files.arm64 +++ b/sys/conf/files.arm64 @@ -1,696 +1,699 @@ ## ## Kernel ## kern/msi_if.m optional intrng kern/pic_if.m optional intrng kern/subr_devmap.c standard kern/subr_intr.c optional intrng kern/subr_physmem.c standard libkern/strlen.c standard libkern/arm64/crc32c_armv8.S standard arm/arm/generic_timer.c standard arm/arm/gic.c standard arm/arm/gic_acpi.c optional acpi arm/arm/gic_fdt.c optional fdt arm/arm/gic_if.m standard arm/arm/pmu.c standard arm/arm/pmu_acpi.c optional acpi arm/arm/pmu_fdt.c optional fdt arm64/acpica/acpi_iort.c optional acpi arm64/acpica/acpi_machdep.c optional acpi arm64/acpica/OsdEnvironment.c optional acpi arm64/acpica/acpi_wakeup.c optional acpi arm64/acpica/pci_cfgreg.c optional acpi pci arm64/arm64/autoconf.c standard arm64/arm64/bus_machdep.c standard arm64/arm64/bus_space_asm.S standard arm64/arm64/busdma_bounce.c standard arm64/arm64/busdma_machdep.c standard arm64/arm64/clock.c standard arm64/arm64/copyinout.S standard arm64/arm64/cpu_errata.c standard arm64/arm64/cpufunc_asm.S standard arm64/arm64/db_disasm.c optional ddb arm64/arm64/db_interface.c optional ddb arm64/arm64/db_trace.c optional ddb arm64/arm64/debug_monitor.c standard arm64/arm64/disassem.c optional ddb arm64/arm64/dump_machdep.c standard arm64/arm64/efirt_machdep.c optional efirt arm64/arm64/elf32_machdep.c optional compat_freebsd32 arm64/arm64/elf_machdep.c standard arm64/arm64/exception.S standard arm64/arm64/exec_machdep.c standard arm64/arm64/freebsd32_machdep.c optional compat_freebsd32 arm64/arm64/gdb_machdep.c optional gdb arm64/arm64/gicv3_its.c optional intrng fdt arm64/arm64/gic_v3.c standard arm64/arm64/gic_v3_acpi.c optional acpi arm64/arm64/gic_v3_fdt.c optional fdt arm64/arm64/hyp_stub.S standard arm64/arm64/identcpu.c standard arm64/arm64/locore.S standard no-obj arm64/arm64/machdep.c standard arm64/arm64/machdep_boot.c standard arm64/arm64/mem.c standard arm64/arm64/memcmp.S standard arm64/arm64/memcpy.S standard arm64/arm64/memset.S standard arm64/arm64/minidump_machdep.c standard arm64/arm64/mp_machdep.c optional smp arm64/arm64/nexus.c standard arm64/arm64/ofw_machdep.c optional fdt arm64/arm64/pl031_rtc.c optional fdt pl031 arm64/arm64/ptrauth.c standard \ compile-with "${NORMAL_C:N-mbranch-protection*}" arm64/arm64/pmap.c standard arm64/arm64/ptrace_machdep.c standard arm64/arm64/sigtramp.S standard arm64/arm64/stack_machdep.c optional ddb | stack arm64/arm64/strcmp.S standard arm64/arm64/strncmp.S standard arm64/arm64/support_ifunc.c standard arm64/arm64/support.S standard arm64/arm64/swtch.S standard arm64/arm64/sys_machdep.c standard arm64/arm64/trap.c standard arm64/arm64/uio_machdep.c standard arm64/arm64/uma_machdep.c standard arm64/arm64/undefined.c standard arm64/arm64/unwind.c optional ddb | kdtrace_hooks | stack \ compile-with "${NORMAL_C:N-fsanitize*}" arm64/arm64/vfp.c standard arm64/arm64/vm_machdep.c standard arm64/coresight/coresight.c standard arm64/coresight/coresight_acpi.c optional acpi arm64/coresight/coresight_fdt.c optional fdt arm64/coresight/coresight_if.m standard arm64/coresight/coresight_cmd.c standard arm64/coresight/coresight_cpu_debug.c optional fdt arm64/coresight/coresight_etm4x.c standard arm64/coresight/coresight_etm4x_acpi.c optional acpi arm64/coresight/coresight_etm4x_fdt.c optional fdt arm64/coresight/coresight_funnel.c standard arm64/coresight/coresight_funnel_acpi.c optional acpi 
arm64/coresight/coresight_funnel_fdt.c optional fdt arm64/coresight/coresight_replicator.c standard arm64/coresight/coresight_replicator_acpi.c optional acpi arm64/coresight/coresight_replicator_fdt.c optional fdt arm64/coresight/coresight_tmc.c standard arm64/coresight/coresight_tmc_acpi.c optional acpi arm64/coresight/coresight_tmc_fdt.c optional fdt dev/smbios/smbios_subr.c standard arm64/iommu/iommu.c optional iommu arm64/iommu/iommu_if.m optional iommu arm64/iommu/iommu_pmap.c optional iommu arm64/iommu/smmu.c optional iommu arm64/iommu/smmu_acpi.c optional iommu acpi arm64/iommu/smmu_fdt.c optional iommu fdt arm64/iommu/smmu_quirks.c optional iommu dev/iommu/busdma_iommu.c optional iommu dev/iommu/iommu_gas.c optional iommu crypto/armv8/armv8_crypto.c optional armv8crypto armv8_crypto_wrap.o optional armv8crypto \ dependency "$S/crypto/armv8/armv8_crypto_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ ${WERROR} ${NO_WCAST_QUAL} ${CFLAGS:M-march=*:S/^$/-march=armv8-a/}+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "armv8_crypto_wrap.o" aesv8-armx.o optional armv8crypto | ossl \ dependency "$S/crypto/openssl/aarch64/aesv8-armx.S" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ ${WERROR} ${NO_WCAST_QUAL} ${CFLAGS:M-march=*:S/^$/-march=armv8-a/}+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "aesv8-armx.o" ghashv8-armx.o optional armv8crypto \ dependency "$S/crypto/openssl/aarch64/ghashv8-armx.S" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ ${WERROR} ${NO_WCAST_QUAL} ${CFLAGS:M-march=*:S/^$/-march=armv8-a/}+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "ghashv8-armx.o" crypto/des/des_enc.c optional netsmb crypto/openssl/ossl_aarch64.c optional ossl crypto/openssl/aarch64/chacha-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/poly1305-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/sha1-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/sha256-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/sha512-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/vpaes-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC}" dev/acpica/acpi_bus_if.m optional acpi dev/acpica/acpi_if.m optional acpi dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pxm.c optional acpi dev/ahci/ahci_generic.c optional ahci cddl/dev/dtrace/aarch64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/aarch64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/aarch64/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" # zfs blake3 hash support contrib/openzfs/module/icp/asm-aarch64/blake3/b3_aarch64_sse2.S optional zfs compile-with "${ZFS_S:N-mgeneral-regs-only}" contrib/openzfs/module/icp/asm-aarch64/blake3/b3_aarch64_sse41.S optional zfs compile-with "${ZFS_S:N-mgeneral-regs-only}" # zfs sha2 hash support zfs-sha256-armv8.o optional zfs \ dependency "$S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha256-armv8.S" \ compile-with "${CC} -c 
${ZFS_ASM_CFLAGS:N-mgeneral-regs-only} -o ${.TARGET} ${WERROR} $S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha256-armv8.S" \ no-implicit-rule \ clean "zfs-sha256-armv8.o" zfs-sha512-armv8.o optional zfs \ dependency "$S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha512-armv8.S" \ compile-with "${CC} -c ${ZFS_ASM_CFLAGS:N-mgeneral-regs-only} -o ${.TARGET} ${WERROR} $S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha512-armv8.S" \ no-implicit-rule \ clean "zfs-sha512-armv8.o" ## ## ASoC support ## dev/sound/fdt/audio_dai_if.m optional sound fdt dev/sound/fdt/audio_soc.c optional sound fdt dev/sound/fdt/dummy_codec.c optional sound fdt dev/sound/fdt/simple_amplifier.c optional sound fdt ## ## Device drivers ## dev/axgbe/if_axgbe.c optional axa fdt dev/axgbe/xgbe-desc.c optional axa fdt dev/axgbe/xgbe-dev.c optional axa fdt dev/axgbe/xgbe-drv.c optional axa fdt dev/axgbe/xgbe-mdio.c optional axa fdt dev/axgbe/xgbe-sysctl.c optional axa fdt dev/axgbe/xgbe-txrx.c optional axa fdt dev/axgbe/xgbe_osdep.c optional axa fdt dev/axgbe/xgbe-phy-v1.c optional axa fdt dev/cpufreq/cpufreq_dt.c optional cpufreq fdt dev/dpaa2/dpaa2_bp.c optional soc_nxp_ls dpaa2 +dev/dpaa2/dpaa2_buf.c optional soc_nxp_ls dpaa2 +dev/dpaa2/dpaa2_channel.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_cmd_if.m optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_con.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_console.c optional soc_nxp_ls dpaa2 fdt dev/dpaa2/dpaa2_io.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mac.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mc.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mc_acpi.c optional soc_nxp_ls dpaa2 acpi dev/dpaa2/dpaa2_mc_fdt.c optional soc_nxp_ls dpaa2 fdt dev/dpaa2/dpaa2_mc_if.m optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mcp.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_ni.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_rc.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_swp.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_swp_if.m optional soc_nxp_ls dpaa2 +dev/dpaa2/dpaa2_types.c optional soc_nxp_ls dpaa2 dev/dpaa2/memac_mdio_acpi.c optional soc_nxp_ls dpaa2 acpi dev/dpaa2/memac_mdio_common.c optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt dev/dpaa2/memac_mdio_fdt.c optional soc_nxp_ls dpaa2 fdt dev/dpaa2/memac_mdio_if.m optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt dev/dwc/if_dwc.c optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 | fdt dwc_socfpga soc_intel_stratix10 dev/dwc/if_dwc_if.m optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 | fdt dwc_socfpga soc_intel_stratix10 dev/enetc/enetc_mdio.c optional enetc soc_nxp_ls dev/enetc/if_enetc.c optional enetc iflib pci fdt soc_nxp_ls dev/eqos/if_eqos.c optional eqos dev/eqos/if_eqos_if.m optional eqos dev/eqos/if_eqos_fdt.c optional eqos fdt dev/etherswitch/felix/felix.c optional enetc etherswitch fdt felix pci soc_nxp_ls dev/firmware/arm/scmi.c optional fdt scmi dev/firmware/arm/scmi_clk.c optional fdt scmi dev/firmware/arm/scmi_shmem.c optional fdt scmi dev/gpio/pl061.c optional pl061 gpio dev/gpio/pl061_acpi.c optional pl061 gpio acpi dev/gpio/pl061_fdt.c optional pl061 gpio fdt dev/gpio/qoriq_gpio.c optional soc_nxp_ls gpio fdt dev/hwpmc/hwpmc_arm64.c optional hwpmc dev/hwpmc/hwpmc_arm64_md.c optional hwpmc dev/hwpmc/hwpmc_cmn600.c optional hwpmc acpi arm64/arm64/cmn600.c optional hwpmc acpi dev/hwpmc/hwpmc_dmc620.c optional hwpmc acpi dev/hwpmc/pmu_dmc620.c optional hwpmc acpi # Microsoft Hyper-V dev/hyperv/vmbus/hyperv.c optional hyperv acpi dev/hyperv/vmbus/aarch64/hyperv_aarch64.c optional 
hyperv acpi dev/hyperv/vmbus/vmbus.c optional hyperv acpi pci dev/hyperv/vmbus/aarch64/vmbus_aarch64.c optional hyperv acpi dev/hyperv/vmbus/vmbus_if.m optional hyperv acpi dev/hyperv/vmbus/vmbus_res.c optional hyperv acpi dev/hyperv/vmbus/vmbus_xact.c optional hyperv acpi dev/hyperv/vmbus/aarch64/hyperv_machdep.c optional hyperv acpi dev/hyperv/vmbus/vmbus_chan.c optional hyperv acpi dev/hyperv/vmbus/hyperv_busdma.c optional hyperv acpi dev/hyperv/vmbus/vmbus_br.c optional hyperv acpi dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c optional hyperv acpi dev/hyperv/utilities/vmbus_timesync.c optional hyperv acpi dev/hyperv/utilities/vmbus_heartbeat.c optional hyperv acpi dev/hyperv/utilities/vmbus_ic.c optional hyperv acpi dev/hyperv/utilities/vmbus_shutdown.c optional hyperv acpi dev/hyperv/utilities/hv_kvp.c optional hyperv acpi dev/hyperv/input/hv_kbd.c optional hyperv acpi dev/hyperv/input/hv_kbdc.c optional hyperv acpi dev/hyperv/netvsc/hn_nvs.c optional hyperv acpi dev/hyperv/netvsc/hn_rndis.c optional hyperv acpi dev/hyperv/netvsc/if_hn.c optional hyperv acpi dev/hyperv/pcib/vmbus_pcib.c optional hyperv pci acpi dev/ice/if_ice_iflib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_lib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_osdep.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_resmgr.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_strings.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_recovery_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_controlq.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_dcb.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flex_pipe.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flow.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_nvm.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_sched.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_switch.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_vlan_mode.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_fw_logging.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_fwlog.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_rdma.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/irdma_if.m optional ice pci \ compile-with "${NORMAL_M} -I$S/dev/ice" dev/ice/irdma_di_if.m optional ice pci \ compile-with "${NORMAL_M} -I$S/dev/ice" dev/ice/ice_ddp_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" ice_ddp.c optional ice_ddp \ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "ice_ddp.c" ice_ddp.fwo optional ice_ddp \ dependency "ice_ddp.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ice_ddp.fwo" ice_ddp.fw optional ice_ddp \ dependency "$S/contrib/dev/ice/ice-1.3.30.0.pkg" \ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.30.0.pkg ice_ddp.fw" \ no-obj no-implicit-rule \ clean "ice_ddp.fw" dev/iicbus/sy8106a.c optional sy8106a fdt dev/iicbus/twsi/mv_twsi.c optional twsi fdt dev/iicbus/twsi/a10_twsi.c 
optional twsi fdt dev/iicbus/twsi/twsi.c optional twsi fdt dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_bt.c optional ipmi dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_smic.c optional ipmi dev/mailbox/arm/arm_doorbell.c optional fdt arm_doorbell dev/mbox/mbox_if.m optional soc_brcm_bcm2837 dev/mmc/host/dwmmc.c optional dwmmc fdt dev/mmc/host/dwmmc_altera.c optional dwmmc dwmmc_altera fdt dev/mmc/host/dwmmc_hisi.c optional dwmmc dwmmc_hisi fdt dev/mmc/host/dwmmc_rockchip.c optional dwmmc rk_dwmmc fdt dev/neta/if_mvneta_fdt.c optional neta fdt dev/neta/if_mvneta.c optional neta mdio mii fdt dev/ofw/ofw_cpu.c optional fdt dev/ofw/ofw_pci.c optional fdt pci dev/ofw/ofw_pcib.c optional fdt pci dev/pci/controller/pci_n1sdp.c optional pci_n1sdp acpi dev/pci/pci_host_generic.c optional pci dev/pci/pci_host_generic_acpi.c optional pci acpi dev/pci/pci_host_generic_den0115.c optional pci acpi dev/pci/pci_host_generic_fdt.c optional pci fdt dev/pci/pci_dw_mv.c optional pci fdt dev/pci/pci_dw.c optional pci fdt dev/pci/pci_dw_if.m optional pci fdt dev/psci/psci.c standard dev/psci/smccc_arm64.S standard dev/psci/smccc.c standard dev/safexcel/safexcel.c optional safexcel fdt dev/sdhci/sdhci_xenon.c optional sdhci_xenon sdhci dev/sdhci/sdhci_xenon_acpi.c optional sdhci_xenon sdhci acpi dev/sdhci/sdhci_xenon_fdt.c optional sdhci_xenon sdhci fdt dev/sram/mmio_sram.c optional fdt mmio_sram dev/sram/mmio_sram_if.m optional fdt mmio_sram dev/uart/uart_cpu_arm64.c optional uart dev/uart/uart_dev_mu.c optional uart uart_mu fdt dev/uart/uart_dev_pl011.c optional uart pl011 dev/usb/controller/dwc_otg_hisi.c optional dwcotg fdt soc_hisi_hi6220 dev/usb/controller/dwc3.c optional xhci acpi dwc3 | xhci fdt dwc3 dev/usb/controller/ehci_mv.c optional ehci_mv fdt dev/usb/controller/generic_ehci.c optional ehci dev/usb/controller/generic_ehci_acpi.c optional ehci acpi dev/usb/controller/generic_ehci_fdt.c optional ehci fdt dev/usb/controller/generic_ohci.c optional ohci fdt dev/usb/controller/generic_usb_if.m optional ohci fdt dev/usb/controller/musb_otg_allwinner.c optional musb fdt soc_allwinner_a64 dev/usb/controller/usb_nop_xceiv.c optional fdt dev/usb/controller/generic_xhci.c optional xhci dev/usb/controller/generic_xhci_acpi.c optional xhci acpi dev/usb/controller/generic_xhci_fdt.c optional xhci fdt dev/vnic/mrml_bridge.c optional vnic fdt dev/vnic/nic_main.c optional vnic pci dev/vnic/nicvf_main.c optional vnic pci pci_iov dev/vnic/nicvf_queues.c optional vnic pci pci_iov dev/vnic/thunder_bgx_fdt.c optional soc_cavm_thunderx pci vnic fdt dev/vnic/thunder_bgx.c optional soc_cavm_thunderx pci vnic pci dev/vnic/thunder_mdio_fdt.c optional soc_cavm_thunderx pci vnic fdt dev/vnic/thunder_mdio.c optional soc_cavm_thunderx pci vnic dev/vnic/lmac_if.m optional inet | inet6 | vnic ## ## SoC Support ## # Allwinner common files arm/allwinner/a10_timer.c optional a10_timer fdt arm/allwinner/a10_codec.c optional sound a10_codec fdt arm/allwinner/a31_dmac.c optional a31_dmac fdt arm/allwinner/a33_codec.c optional fdt sound a33_codec arm/allwinner/a64/sun50i_a64_acodec.c optional fdt sound a64_codec arm/allwinner/sunxi_dma_if.m optional a31_dmac arm/allwinner/aw_cir.c optional evdev aw_cir fdt arm/allwinner/aw_dwc3.c optional aw_dwc3 fdt arm/allwinner/aw_gpio.c optional gpio aw_gpio fdt arm/allwinner/aw_i2s.c optional fdt sound aw_i2s arm/allwinner/aw_mmc.c optional mmc aw_mmc fdt | mmccam aw_mmc fdt arm/allwinner/aw_nmi.c optional aw_nmi fdt \ compile-with "${NORMAL_C} 
-I$S/contrib/device-tree/include" arm/allwinner/aw_pwm.c optional aw_pwm fdt arm/allwinner/aw_r_intc.c optional aw_r_intc fdt arm/allwinner/aw_rsb.c optional aw_rsb fdt arm/allwinner/aw_rtc.c optional aw_rtc fdt arm/allwinner/aw_sid.c optional aw_sid nvmem fdt arm/allwinner/aw_spi.c optional aw_spi fdt arm/allwinner/aw_syscon.c optional aw_syscon syscon fdt arm/allwinner/aw_thermal.c optional aw_thermal nvmem fdt arm/allwinner/aw_usbphy.c optional ehci aw_usbphy fdt arm/allwinner/aw_usb3phy.c optional xhci aw_usbphy fdt arm/allwinner/aw_wdog.c optional aw_wdog fdt arm/allwinner/axp81x.c optional axp81x fdt arm/allwinner/if_awg.c optional awg syscon aw_sid nvmem fdt # Allwinner clock driver arm/allwinner/clkng/aw_ccung.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_frac.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_m.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_mipi.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_nkmp.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_nm.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_nmm.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_np.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_prediv_mux.c optional aw_ccu fdt arm/allwinner/clkng/ccu_a64.c optional soc_allwinner_a64 aw_ccu fdt arm/allwinner/clkng/ccu_h3.c optional soc_allwinner_h5 aw_ccu fdt arm/allwinner/clkng/ccu_h6.c optional soc_allwinner_h6 aw_ccu fdt arm/allwinner/clkng/ccu_h6_r.c optional soc_allwinner_h6 aw_ccu fdt arm/allwinner/clkng/ccu_sun8i_r.c optional aw_ccu fdt arm/allwinner/clkng/ccu_de2.c optional aw_ccu fdt # Allwinner padconf files arm/allwinner/a64/a64_padconf.c optional soc_allwinner_a64 fdt arm/allwinner/a64/a64_r_padconf.c optional soc_allwinner_a64 fdt arm/allwinner/h3/h3_padconf.c optional soc_allwinner_h5 fdt arm/allwinner/h3/h3_r_padconf.c optional soc_allwinner_h5 fdt arm/allwinner/h6/h6_padconf.c optional soc_allwinner_h6 fdt arm/allwinner/h6/h6_r_padconf.c optional soc_allwinner_h6 fdt # Altera/Intel dev/altera/dwc/if_dwc_socfpga.c optional fdt dwc_socfpga arm64/intel/stratix10-soc-fpga-mgr.c optional soc_intel_stratix10 fdt arm64/intel/stratix10-svc.c optional soc_intel_stratix10 fdt # Annapurna arm/annapurna/alpine/alpine_ccu.c optional al_ccu fdt arm/annapurna/alpine/alpine_nb_service.c optional al_nb_service fdt arm/annapurna/alpine/alpine_pci.c optional al_pci fdt arm/annapurna/alpine/alpine_pci_msix.c optional al_pci fdt arm/annapurna/alpine/alpine_serdes.c optional al_serdes fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" # Broadcom arm64/broadcom/brcmmdio/mdio_mux_iproc.c optional soc_brcm_ns2 fdt arm64/broadcom/brcmmdio/mdio_nexus_iproc.c optional soc_brcm_ns2 fdt arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c optional soc_brcm_ns2 fdt pci arm64/broadcom/genet/if_genet.c optional soc_brcm_bcm2838 fdt genet arm/broadcom/bcm2835/bcm2835_audio.c optional sound vchiq fdt \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" arm/broadcom/bcm2835/bcm2835_bsc.c optional bcm2835_bsc fdt arm/broadcom/bcm2835/bcm2835_clkman.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_cpufreq.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_dma.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_fbd.c optional vt soc_brcm_bcm2837 fdt | vt soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_firmware.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt 
arm/broadcom/bcm2835/bcm2835_ft5406.c optional evdev bcm2835_ft5406 fdt arm/broadcom/bcm2835/bcm2835_gpio.c optional gpio soc_brcm_bcm2837 fdt | gpio soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_intr.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_mbox.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_rng.c optional !random_loadable soc_brcm_bcm2837 fdt | !random_loadable soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_sdhci.c optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_sdhost.c optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_spi.c optional bcm2835_spi fdt arm/broadcom/bcm2835/bcm2835_vcbus.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_vcio.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_wdog.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2836.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm283x_dwc_fdt.c optional dwcotg fdt soc_brcm_bcm2837 | dwcotg fdt soc_brcm_bcm2838 arm/broadcom/bcm2835/bcm2838_pci.c optional soc_brcm_bcm2838 fdt pci arm/broadcom/bcm2835/bcm2838_xhci.c optional soc_brcm_bcm2838 fdt pci xhci arm/broadcom/bcm2835/raspberrypi_gpio.c optional soc_brcm_bcm2837 gpio fdt | soc_brcm_bcm2838 gpio fdt contrib/vchiq/interface/compat/vchi_bsd.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_arm.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_connected.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_core.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_shim.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_util.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" # Cavium arm64/cavium/thunder_pcie_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_pem.c optional soc_cavm_thunderx pci arm64/cavium/thunder_pcie_pem_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_common.c optional soc_cavm_thunderx pci # i.MX8 Clock support arm64/freescale/imx/imx8mq_ccm.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_gate.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_mux.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_composite.c 
optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_sscg_pll.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_frac_pll.c optional fdt soc_freescale_imx8 # iMX drivers arm/freescale/imx/imx_gpio.c optional gpio soc_freescale_imx8 fdt arm/freescale/imx/imx_i2c.c optional fsliic arm/freescale/imx/imx_machdep.c optional fdt soc_freescale_imx8 arm64/freescale/imx/imx7gpc.c optional fdt soc_freescale_imx8 dev/ffec/if_ffec.c optional ffec # Marvell arm/mv/a37x0_gpio.c optional a37x0_gpio gpio fdt arm/mv/a37x0_iic.c optional a37x0_iic iicbus fdt arm/mv/a37x0_spi.c optional a37x0_spi spibus fdt arm/mv/clk/a37x0_tbg.c optional a37x0_tbg clk fdt syscon arm/mv/clk/a37x0_xtal.c optional a37x0_xtal clk fdt syscon arm/mv/armada38x/armada38x_rtc.c optional mv_rtc fdt arm/mv/gpio.c optional mv_gpio fdt arm/mv/mvebu_gpio.c optional mv_gpio fdt arm/mv/mvebu_pinctrl.c optional mvebu_pinctrl fdt arm/mv/mv_ap806_clock.c optional soc_marvell_8k fdt arm/mv/mv_ap806_gicp.c optional mv_ap806_gicp fdt arm/mv/mv_ap806_sei.c optional mv_ap806_sei fdt arm/mv/mv_cp110_clock.c optional soc_marvell_8k fdt arm/mv/mv_cp110_icu.c optional mv_cp110_icu fdt arm/mv/mv_cp110_icu_bus.c optional mv_cp110_icu fdt arm/mv/mv_thermal.c optional soc_marvell_8k mv_thermal fdt arm/mv/clk/a37x0_tbg_pll.c optional a37x0_tbg clk fdt syscon arm/mv/clk/a37x0_periph_clk_driver.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/a37x0_nb_periph_clk_driver.c optional a37x0_nb_periph clk fdt syscon arm/mv/clk/a37x0_sb_periph_clk_driver.c optional a37x0_sb_periph clk fdt syscon arm/mv/clk/periph.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_d.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_fixed.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_gate.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_mux_gate.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon # NVidia arm/nvidia/tegra_abpmisc.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_ahci.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_efuse.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_ehci.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_gpio.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_i2c.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_lic.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_mc.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_pcie.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_sdhci.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_soctherm_if.m optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_soctherm.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_uart.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_usbphy.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_xhci.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620_gpio.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620_regulators.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620_rtc.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_car.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_clk_per.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_clk_pll.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_clk_super.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_coretemp.c optional fdt soc_nvidia_tegra210 
arm64/nvidia/tegra210/tegra210_cpufreq.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_pinmux.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_pmc.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_xusbpadctl.c optional fdt soc_nvidia_tegra210 # Nvidia firmware for Tegra tegra210_xusb_fw.c optional tegra210_xusb_fw \ dependency "$S/conf/files.arm64" \ compile-with "${AWK} -f $S/tools/fw_stub.awk tegra210_xusb.fw:tegra210_xusb_fw -mtegra210_xusb_fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "tegra210_xusb_fw.c" tegra210_xusb.fwo optional tegra210_xusb_fw \ dependency "tegra210_xusb.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "tegra210_xusb.fwo" tegra210_xusb.fw optional tegra210_xusb_fw \ dependency "$S/contrib/dev/nvidia/tegra210_xusb.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "tegra210_xusb.fw" # NXP arm/freescale/vybrid/vf_i2c.c optional vf_i2c iicbus soc_nxp_ls arm64/qoriq/qoriq_dw_pci.c optional pci fdt soc_nxp_ls arm64/qoriq/qoriq_gpio_pic.c optional gpio fdt soc_nxp_ls arm64/qoriq/qoriq_therm.c optional pci fdt soc_nxp_ls arm64/qoriq/qoriq_therm_if.m optional pci fdt soc_nxp_ls arm64/qoriq/clk/ls1028a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/ls1028a_flexspi_clk.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/ls1046a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/ls1088a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/lx2160a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/qoriq_clk_pll.c optional clk soc_nxp_ls arm64/qoriq/clk/qoriq_clkgen.c optional clk soc_nxp_ls fdt dev/ahci/ahci_fsl_fdt.c optional soc_nxp_ls ahci fdt dev/flash/flexspi/flex_spi.c optional clk flex_spi soc_nxp_ls fdt # Qualcomm arm64/qualcomm/qcom_gcc.c optional qcom_gcc fdt dev/qcom_mdio/qcom_mdio_ipq4018.c optional qcom_mdio fdt mdio mii # RockChip Drivers arm64/rockchip/rk3328_codec.c optional fdt rk3328codec soc_rockchip_rk3328 arm64/rockchip/rk3399_emmcphy.c optional fdt rk_emmcphy soc_rockchip_rk3399 arm64/rockchip/rk3568_combphy.c optional fdt rk_combphy soc_rockchip_rk3568 arm64/rockchip/rk3568_pcie.c optional fdt pci soc_rockchip_rk3568 arm64/rockchip/rk3568_pciephy.c optional fdt pci soc_rockchip_rk3568 arm64/rockchip/rk_dwc3.c optional fdt rk_dwc3 soc_rockchip_rk3399 | fdt rk_dwc3 soc_rockchip_rk3568 arm64/rockchip/rk_i2c.c optional fdt rk_i2c soc_rockchip_rk3328 | fdt rk_i2c soc_rockchip_rk3399 | fdt rk_i2c soc_rockchip_rk3568 arm64/rockchip/rk_i2s.c optional fdt sound soc_rockchip_rk3328 | fdt sound soc_rockchip_rk3399 arm64/rockchip/rk_otp.c optional fdt soc_rockchip_rk3568 arm64/rockchip/rk_otp_if.m optional fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk8xx.c optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk8xx_clocks.c optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk8xx_regulators.c optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk8xx_rtc.c optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk805.c optional fdt rk805 soc_rockchip_rk3328 dev/iicbus/pmic/rockchip/rk808.c optional fdt rk805 soc_rockchip_rk3399 dev/iicbus/pmic/rockchip/rk817.c optional fdt rk817 soc_rockchip_rk3568 arm64/rockchip/rk_grf.c optional fdt soc_rockchip_rk3328 | fdt 
soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/rk_pinctrl.c	optional fdt rk_pinctrl soc_rockchip_rk3328 | fdt rk_pinctrl soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/rk_gpio.c	optional fdt rk_gpio soc_rockchip_rk3328 | fdt rk_gpio soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/rk_iodomain.c	optional fdt rk_iodomain
arm64/rockchip/rk_spi.c	optional fdt rk_spi
arm64/rockchip/rk_usb2phy.c	optional fdt rk_usb2phy soc_rockchip_rk3328 | fdt rk_usb2phy soc_rockchip_rk3399 | fdt rk_usb2phy soc_rockchip_rk3568
arm64/rockchip/rk_typec_phy.c	optional fdt rk_typec_phy soc_rockchip_rk3399
arm64/rockchip/if_dwc_rk.c	optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399
arm64/rockchip/rk_tsadc_if.m	optional fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/rk_tsadc.c	optional fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/rk_pwm.c	optional fdt rk_pwm
arm64/rockchip/rk_pcie.c	optional fdt pci soc_rockchip_rk3399
arm64/rockchip/rk_pcie_phy.c	optional fdt pci soc_rockchip_rk3399
# RockChip Clock support
arm64/rockchip/clk/rk_cru.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/clk/rk_clk_armclk.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/clk/rk_clk_composite.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/clk/rk_clk_fract.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/clk/rk_clk_gate.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/clk/rk_clk_mux.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/clk/rk_clk_pll.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568
arm64/rockchip/clk/rk3328_cru.c	optional fdt soc_rockchip_rk3328
arm64/rockchip/clk/rk3399_cru.c	optional fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk3399_pmucru.c	optional fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk3568_cru.c	optional fdt soc_rockchip_rk3568
arm64/rockchip/clk/rk3568_pmucru.c	optional fdt soc_rockchip_rk3568
# Xilinx
arm/xilinx/uart_dev_cdnc.c	optional uart soc_xilinx_zynq fdt
arm/xilinx/zy7_gpio.c	optional gpio soc_xilinx_zynq fdt
diff --git a/sys/dev/dpaa2/dpaa2_buf.c b/sys/dev/dpaa2/dpaa2_buf.c
new file mode 100644
index 000000000000..7739eda5d8de
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_buf.c
@@ -0,0 +1,246 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "dpaa2_types.h"
+#include "dpaa2_buf.h"
+#include "dpaa2_bp.h"
+#include "dpaa2_channel.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_ni.h"
+
+MALLOC_DEFINE(M_DPAA2_RXB, "dpaa2_rxb", "DPAA2 DMA-mapped buffer (Rx)");
+
+/**
+ * @brief Allocate Rx buffers visible to QBMan and release them to the
+ * buffer pool.
+ */
+int
+dpaa2_buf_seed_pool(device_t dev, device_t bpdev, void *arg, uint32_t count,
+    int size, struct mtx *dma_mtx)
+{
+	struct dpaa2_ni_softc *sc = device_get_softc(dev);
+	struct dpaa2_bp_softc *bpsc = device_get_softc(bpdev);
+	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
+	struct dpaa2_buf *buf;
+	const int alloc = DPAA2_ATOMIC_READ(&sc->buf_num);
+	const uint16_t bpid = bpsc->attr.bpid;
+	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
+	int error, bufn = 0;
+
+#if defined(INVARIANTS)
+	KASSERT(ch->rx_dmat != NULL, ("%s: no DMA tag?", __func__));
+	if (dma_mtx != NULL) {
+		mtx_assert(dma_mtx, MA_OWNED);
+	}
+#endif /* INVARIANTS */
+
+#ifdef _notyet_
+	/* Limit the number of buffers released to the pool */
+	count = (alloc + count > DPAA2_NI_BUFS_MAX)
+	    ? DPAA2_NI_BUFS_MAX - alloc : count;
+#endif
+
+	/* Release "count" buffers to the pool */
+	for (int i = alloc; i < alloc + count; i++) {
+		/* Enough buffers were allocated for a single command */
+		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
+			error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr,
+			    bufn);
+			if (error) {
+				device_printf(sc->dev, "%s: failed to release "
+				    "buffers to the pool (1)\n", __func__);
+				return (error);
+			}
+			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
+			bufn = 0;
+		}
+
+		buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_RXB, M_NOWAIT);
+		if (buf == NULL) {
+			device_printf(dev, "%s: malloc() failed\n", __func__);
+			return (ENOMEM);
+		}
+		DPAA2_BUF_INIT_TAGOPT(buf, ch->rx_dmat, ch);
+
+		error = dpaa2_buf_seed_rxb(dev, buf, size, dma_mtx);
+		if (error != 0) {
+			device_printf(dev, "%s: dpaa2_buf_seed_rxb() failed: "
+			    "error=%d\n", __func__, error);
+			break;
+		}
+		paddr[bufn] = buf->paddr;
+		bufn++;
+	}
+
+	/* Release the remainder of the buffers to the pool */
+	if (bufn > 0) {
+		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpid, paddr, bufn);
+		if (error) {
+			device_printf(sc->dev, "%s: failed to release "
+			    "buffers to the pool (2)\n", __func__);
+			return (error);
+		}
+		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
+	}
+
+	return (0);
+}
+
+/**
+ * @brief Prepare an Rx buffer to be released to the buffer pool.
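+ *
+ * On first use a DMA map is created for the buffer; an mbuf cluster of the
+ * requested size is then allocated and loaded as a single DMA segment, and
+ * a frame annotation (a magic value and a backpointer to the buffer) is
+ * written at the start of the buffer data for future use on the Rx path.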
+ */
+int
+dpaa2_buf_seed_rxb(device_t dev, struct dpaa2_buf *buf, int size,
+    struct mtx *dma_mtx)
+{
+	struct dpaa2_ni_softc *sc = device_get_softc(dev);
+	struct dpaa2_fa *fa;
+	bool map_created = false;
+	bool mbuf_alloc = false;
+	int error;
+
+#if defined(INVARIANTS)
+	DPAA2_BUF_ASSERT_RXPREP(buf);
+	if (dma_mtx != NULL) {
+		mtx_assert(dma_mtx, MA_OWNED);
+	}
+#endif /* INVARIANTS */
+
+	if (__predict_false(buf->dmap == NULL)) {
+		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
+		if (error != 0) {
+			device_printf(dev, "%s: failed to create DMA map: "
+			    "error=%d\n", __func__, error);
+			goto fail_map_create;
+		}
+		map_created = true;
+	}
+
+	if (__predict_true(buf->m == NULL)) {
+		buf->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
+		if (__predict_false(buf->m == NULL)) {
+			device_printf(dev, "%s: m_getjcl() failed\n", __func__);
+			error = ENOMEM;
+			goto fail_mbuf_alloc;
+		}
+		buf->m->m_len = buf->m->m_ext.ext_size;
+		buf->m->m_pkthdr.len = buf->m->m_ext.ext_size;
+		mbuf_alloc = true;
+	}
+
+	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, buf->m, &buf->seg,
+	    &buf->nseg, BUS_DMA_NOWAIT);
+	KASSERT(buf->nseg == 1, ("%s: one segment expected: nseg=%d", __func__,
+	    buf->nseg));
+	KASSERT(error == 0, ("%s: bus_dmamap_load_mbuf_sg() failed: error=%d",
+	    __func__, error));
+	if (__predict_false(error != 0 || buf->nseg != 1)) {
+		device_printf(sc->dev, "%s: bus_dmamap_load_mbuf_sg() failed: "
+		    "error=%d, nsegs=%d\n", __func__, error, buf->nseg);
+		goto fail_mbuf_map;
+	}
+	buf->paddr = buf->seg.ds_addr;
+	buf->vaddr = buf->m->m_data;
+
+	/* Populate frame annotation for future use */
+	fa = (struct dpaa2_fa *)buf->vaddr;
+	fa->magic = DPAA2_MAGIC;
+	fa->buf = buf;
+
+	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREREAD);
+
+	DPAA2_BUF_ASSERT_RXREADY(buf);
+
+	return (0);
+
+fail_mbuf_map:
+	if (mbuf_alloc) {
+		m_freem(buf->m);
+		buf->m = NULL;
+	}
+fail_mbuf_alloc:
+	if (map_created) {
+		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
+	}
+fail_map_create:
+	return (error);
+}
+
+/**
+ * @brief Prepare a Tx buffer to be added to the Tx ring.
+ */
+int
+dpaa2_buf_seed_txb(device_t dev, struct dpaa2_buf *buf)
+{
+	struct dpaa2_buf *sgt = buf->sgt;
+	bool map_created = false;
+	int error;
+
+	DPAA2_BUF_ASSERT_TXPREP(buf);
+
+	if (buf->dmap == NULL) {
+		error = bus_dmamap_create(buf->dmat, 0, &buf->dmap);
+		if (error != 0) {
+			device_printf(dev, "%s: bus_dmamap_create() failed: "
+			    "error=%d\n", __func__, error);
+			goto fail_map_create;
+		}
+		map_created = true;
+	}
+
+	if (sgt->vaddr == NULL) {
+		error = bus_dmamem_alloc(sgt->dmat, (void **)&sgt->vaddr,
+		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &sgt->dmap);
+		if (error != 0) {
+			device_printf(dev, "%s: bus_dmamem_alloc() failed: "
+			    "error=%d\n", __func__, error);
+			goto fail_mem_alloc;
+		}
+	}
+
+	DPAA2_BUF_ASSERT_TXREADY(buf);
+
+	return (0);
+
+fail_mem_alloc:
+	if (map_created) {
+		(void)bus_dmamap_destroy(buf->dmat, buf->dmap);
+	}
+fail_map_create:
+	return (error);
+}
diff --git a/sys/dev/dpaa2/dpaa2_buf.h b/sys/dev/dpaa2/dpaa2_buf.h
new file mode 100644
index 000000000000..853a4fa78d3a
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_buf.h
@@ -0,0 +1,173 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_BUF_H
+#define _DPAA2_BUF_H
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#define DPAA2_RX_BUF_SIZE	(MJUM9BYTES)
+
+struct dpaa2_buf {
+	bus_addr_t		 paddr;
+	caddr_t			 vaddr;
+	bus_dma_tag_t		 dmat;
+	bus_dmamap_t		 dmap;
+	bus_dma_segment_t	 seg;
+	int			 nseg;
+	struct mbuf		*m;
+	struct dpaa2_buf	*sgt;
+	void			*opt;
+};
+
+#define DPAA2_BUF_INIT_TAGOPT(__buf, __tag, __opt) do { \
+	KASSERT((__buf) != NULL, ("%s: buf is NULL", __func__)); \
+ \
+	(__buf)->paddr = 0; \
+	(__buf)->vaddr = NULL; \
+	(__buf)->dmat = (__tag); \
+	(__buf)->dmap = NULL; \
+	(__buf)->seg.ds_addr = 0; \
+	(__buf)->seg.ds_len = 0; \
+	(__buf)->nseg = 0; \
+	(__buf)->m = NULL; \
+	(__buf)->sgt = NULL; \
+	(__buf)->opt = (__opt); \
+} while(0)
+#define DPAA2_BUF_INIT(__buf)	DPAA2_BUF_INIT_TAGOPT((__buf), NULL, NULL)
+
+#if defined(INVARIANTS)
+/*
+ * TXPREP/TXREADY macros make it possible to verify whether a Tx buffer is
+ * prepared to be seeded and/or is ready to be used for transmission.
+ *
+ * NOTE: Any modification should be carefully analyzed and justified.
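+ *
+ * TXPREP expects the DMA tags and backpointers to be set, but no DMA maps,
+ * mbuf or mapped segments on either the buffer or its S/G table; TXREADY
+ * additionally expects the DMA maps to exist and the S/G table memory to
+ * be allocated.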
+ */
+#define DPAA2_BUF_ASSERT_TXPREP(__buf) do { \
+	struct dpaa2_buf *__sgt = (__buf)->sgt; \
+	KASSERT((__sgt) != NULL, ("%s: no S/G table?", __func__)); \
+ \
+	KASSERT((__buf)->paddr == 0, ("%s: paddr set?", __func__)); \
+	KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__)); \
+	KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
+	KASSERT((__buf)->dmap == NULL, ("%s: DMA map set?", __func__)); \
+	KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->seg.ds_len == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->nseg == 0, ("%s: nseg > 0?", __func__)); \
+	KASSERT((__buf)->m == NULL, ("%s: mbuf set?", __func__)); \
+	KASSERT((__buf)->opt != NULL, ("%s: no Tx ring?", __func__)); \
+ \
+	KASSERT((__sgt)->paddr == 0, ("%s: S/G paddr set?", __func__)); \
+	KASSERT((__sgt)->vaddr == NULL, ("%s: S/G vaddr set?", __func__)); \
+	KASSERT((__sgt)->dmat != NULL, ("%s: no S/G DMA tag?", __func__)); \
+	KASSERT((__sgt)->dmap == NULL, ("%s: S/G DMA map set?", __func__)); \
+	KASSERT((__sgt)->seg.ds_addr == 0, ("%s: S/G mapped?", __func__)); \
+	KASSERT((__sgt)->seg.ds_len == 0, ("%s: S/G mapped?", __func__)); \
+	KASSERT((__sgt)->nseg == 0, ("%s: S/G nseg > 0?", __func__)); \
+	KASSERT((__sgt)->m == NULL, ("%s: S/G mbuf set?", __func__)); \
+	KASSERT((__sgt)->opt == (__buf),("%s: buf not linked?", __func__)); \
+} while(0)
+#define DPAA2_BUF_ASSERT_TXREADY(__buf) do { \
+	struct dpaa2_buf *__sgt = (__buf)->sgt; \
+	KASSERT((__sgt) != NULL, ("%s: no S/G table?", __func__)); \
+ \
+	KASSERT((__buf)->paddr == 0, ("%s: paddr set?", __func__)); \
+	KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__)); \
+	KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
+	KASSERT((__buf)->dmap != NULL, ("%s: no DMA map?", __func__)); \
+	KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->seg.ds_len == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->nseg == 0, ("%s: nseg > 0?", __func__)); \
+	KASSERT((__buf)->m == NULL, ("%s: mbuf set?", __func__)); \
+	KASSERT((__buf)->opt != NULL, ("%s: no Tx ring?", __func__)); \
+ \
+	KASSERT((__sgt)->paddr == 0, ("%s: S/G paddr set?", __func__)); \
+	KASSERT((__sgt)->vaddr != NULL, ("%s: no S/G vaddr?", __func__)); \
+	KASSERT((__sgt)->dmat != NULL, ("%s: no S/G DMA tag?", __func__)); \
+	KASSERT((__sgt)->dmap != NULL, ("%s: no S/G DMA map?", __func__)); \
+	KASSERT((__sgt)->seg.ds_addr == 0, ("%s: S/G mapped?", __func__)); \
+	KASSERT((__sgt)->seg.ds_len == 0, ("%s: S/G mapped?", __func__)); \
+	KASSERT((__sgt)->nseg == 0, ("%s: S/G nseg > 0?", __func__)); \
+	KASSERT((__sgt)->m == NULL, ("%s: S/G mbuf set?", __func__)); \
+	KASSERT((__sgt)->opt == (__buf),("%s: buf not linked?", __func__)); \
+} while(0)
+#else /* !INVARIANTS */
+#define DPAA2_BUF_ASSERT_TXPREP(__buf) do { \
+} while(0)
+#define DPAA2_BUF_ASSERT_TXREADY(__buf) do { \
+} while(0)
+#endif /* INVARIANTS */
+
+#if defined(INVARIANTS)
+/*
+ * RXPREP/RXREADY macros make it possible to verify whether an Rx buffer is
+ * prepared to be seeded and/or is ready to be used for reception.
+ *
+ * NOTE: Any modification should be carefully analyzed and justified.
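+ *
+ * RXPREP describes a buffer that carries only its DMA tag and a channel
+ * backpointer, with nothing mapped yet; RXREADY describes a buffer with an
+ * mbuf attached and loaded as a single DMA segment.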
+ */
+#define DPAA2_BUF_ASSERT_RXPREP(__buf) do { \
+	KASSERT((__buf)->paddr == 0, ("%s: paddr set?", __func__)); \
+	KASSERT((__buf)->vaddr == NULL, ("%s: vaddr set?", __func__)); \
+	KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
+	/* KASSERT((__buf)->dmap == NULL, ("%s: DMA map set?", __func__)); */ \
+	KASSERT((__buf)->seg.ds_addr == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->seg.ds_len == 0, ("%s: already mapped?", __func__)); \
+	KASSERT((__buf)->nseg == 0, ("%s: nseg > 0?", __func__)); \
+	KASSERT((__buf)->m == NULL, ("%s: mbuf set?", __func__)); \
+	KASSERT((__buf)->sgt == NULL, ("%s: S/G table set?", __func__)); \
+	KASSERT((__buf)->opt != NULL, ("%s: no channel?", __func__)); \
+} while(0)
+#define DPAA2_BUF_ASSERT_RXREADY(__buf) do { \
+	KASSERT((__buf)->paddr != 0, ("%s: paddr not set?", __func__)); \
+	KASSERT((__buf)->vaddr != NULL, ("%s: vaddr not set?", __func__)); \
+	KASSERT((__buf)->dmat != NULL, ("%s: no DMA tag?", __func__)); \
+	KASSERT((__buf)->dmap != NULL, ("%s: no DMA map?", __func__)); \
+	KASSERT((__buf)->seg.ds_addr != 0, ("%s: not mapped?", __func__)); \
+	KASSERT((__buf)->seg.ds_len != 0, ("%s: not mapped?", __func__)); \
+	KASSERT((__buf)->nseg == 1, ("%s: nseg != 1?", __func__)); \
+	KASSERT((__buf)->m != NULL, ("%s: no mbuf?", __func__)); \
+	KASSERT((__buf)->sgt == NULL, ("%s: S/G table set?", __func__)); \
+	KASSERT((__buf)->opt != NULL, ("%s: no channel?", __func__)); \
+} while(0)
+#else /* !INVARIANTS */
+#define DPAA2_BUF_ASSERT_RXPREP(__buf) do { \
+} while(0)
+#define DPAA2_BUF_ASSERT_RXREADY(__buf) do { \
+} while(0)
+#endif /* INVARIANTS */
+
+int dpaa2_buf_seed_pool(device_t, device_t, void *, uint32_t, int, struct mtx *);
+int dpaa2_buf_seed_rxb(device_t, struct dpaa2_buf *, int, struct mtx *);
+int dpaa2_buf_seed_txb(device_t, struct dpaa2_buf *);
+
+#endif /* _DPAA2_BUF_H */
diff --git a/sys/dev/dpaa2/dpaa2_channel.c b/sys/dev/dpaa2/dpaa2_channel.c
new file mode 100644
index 000000000000..87b76923a16d
--- /dev/null
+++ b/sys/dev/dpaa2/dpaa2_channel.c
@@ -0,0 +1,557 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright © 2023 Dmitry Salychev
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * QBMan channel to process ingress traffic (Rx, Tx confirmation, Rx error).
+ *
+ * NOTE: Several WQs are organized into a single channel.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "dpaa2_types.h"
+#include "dpaa2_channel.h"
+#include "dpaa2_ni.h"
+#include "dpaa2_mc.h"
+#include "dpaa2_mc_if.h"
+#include "dpaa2_mcp.h"
+#include "dpaa2_io.h"
+#include "dpaa2_con.h"
+#include "dpaa2_buf.h"
+#include "dpaa2_swp.h"
+#include "dpaa2_swp_if.h"
+#include "dpaa2_bp.h"
+#include "dpaa2_cmd_if.h"
+
+MALLOC_DEFINE(M_DPAA2_CH, "dpaa2_ch", "DPAA2 QBMan Channel");
+
+#define RX_SEG_N	 (1u)
+#define RX_SEG_SZ	 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
+#define RX_SEG_MAXSZ	 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
+CTASSERT(RX_SEG_SZ % PAGE_SIZE == 0);
+CTASSERT(RX_SEG_MAXSZ % PAGE_SIZE == 0);
+
+#define TX_SEG_N	 (16u) /* XXX-DSL: does DPAA2 limit exist? */
+#define TX_SEG_SZ	 (PAGE_SIZE)
+#define TX_SEG_MAXSZ	 (TX_SEG_N * TX_SEG_SZ)
+CTASSERT(TX_SEG_SZ % PAGE_SIZE == 0);
+CTASSERT(TX_SEG_MAXSZ % PAGE_SIZE == 0);
+
+#define SGT_SEG_N	 (1u)
+#define SGT_SEG_SZ	 (PAGE_SIZE)
+#define SGT_SEG_MAXSZ	 (PAGE_SIZE)
+CTASSERT(SGT_SEG_SZ % PAGE_SIZE == 0);
+CTASSERT(SGT_SEG_MAXSZ % PAGE_SIZE == 0);
+
+static int dpaa2_chan_setup_dma(device_t, struct dpaa2_channel *, bus_size_t);
+static int dpaa2_chan_alloc_storage(device_t, struct dpaa2_channel *, bus_size_t,
+    int, bus_size_t);
+static void dpaa2_chan_bp_task(void *, int);
+
+/**
+ * @brief Configures a QBMan channel and registers data availability notifications.
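+ *
+ * Opens the parent DPRC and the DPCON, enables the DPCON object, allocates
+ * and initializes the channel, creates its cleanup taskqueue, DMA resources
+ * and Tx buf ring, registers a CDAN notification context with QBMan and the
+ * DPCON notification within MC, seeds the buffer pool, allocates the channel
+ * storage and prepares the Tx confirmation and Rx frame queues.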
+ */
+int
+dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
+    struct dpaa2_channel **channel, uint32_t flowid, task_fn_t cleanup_task_fn)
+{
+	device_t pdev = device_get_parent(dev);
+	device_t child = dev;
+	struct dpaa2_ni_softc *sc = device_get_softc(dev);
+	struct dpaa2_io_softc *iosc = device_get_softc(iodev);
+	struct dpaa2_con_softc *consc = device_get_softc(condev);
+	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
+	struct dpaa2_devinfo *ioinfo = device_get_ivars(iodev);
+	struct dpaa2_devinfo *coninfo = device_get_ivars(condev);
+	struct dpaa2_con_notif_cfg notif_cfg;
+	struct dpaa2_io_notif_ctx *ctx;
+	struct dpaa2_channel *ch = NULL;
+	struct dpaa2_cmd cmd;
+	uint16_t rctk, contk;
+	int error;
+
+	DPAA2_CMD_INIT(&cmd);
+
+	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rctk);
+	if (error) {
+		device_printf(dev, "%s: failed to open DPRC: id=%d, error=%d\n",
+		    __func__, rcinfo->id, error);
+		goto fail_rc_open;
+	}
+	error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, coninfo->id, &contk);
+	if (error) {
+		device_printf(dev, "%s: failed to open DPCON: id=%d, error=%d\n",
+		    __func__, coninfo->id, error);
+		goto fail_con_open;
+	}
+
+	error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
+	if (error) {
+		device_printf(dev, "%s: failed to enable channel: dpcon_id=%d, "
+		    "chan_id=%d\n", __func__, coninfo->id, consc->attr.chan_id);
+		goto fail_con_enable;
+	}
+
+	ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
+	if (ch == NULL) {
+		device_printf(dev, "%s: malloc() failed\n", __func__);
+		error = ENOMEM;
+		goto fail_malloc;
+	}
+
+	ch->ni_dev = dev;
+	ch->io_dev = iodev;
+	ch->con_dev = condev;
+	ch->id = consc->attr.chan_id;
+	ch->flowid = flowid;
+	ch->tx_frames = 0; /* for debug purposes */
+	ch->tx_dropped = 0; /* for debug purposes */
+	ch->store_sz = 0;
+	ch->store_idx = 0;
+	ch->recycled_n = 0;
+	ch->rxq_n = 0;
+
+	NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
+	NET_TASK_INIT(&ch->bp_task, 0, dpaa2_chan_bp_task, ch);
+
+	ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
+	    taskqueue_thread_enqueue, &ch->cleanup_tq);
+	taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
+	    &iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);
+
+	error = dpaa2_chan_setup_dma(dev, ch, sc->buf_align);
+	if (error != 0) {
+		device_printf(dev, "%s: failed to setup DMA\n", __func__);
+		goto fail_dma_setup;
+	}
+
+	mtx_init(&ch->xmit_mtx, "dpaa2_ch_xmit", NULL, MTX_DEF);
+
+	ch->xmit_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
+	    &ch->xmit_mtx);
+	if (ch->xmit_br == NULL) {
+		device_printf(dev, "%s: buf_ring_alloc() failed\n", __func__);
+		error = ENOMEM;
+		goto fail_buf_ring;
+	}
+
+	DPAA2_BUF_INIT(&ch->store);
+
+	/* Register the new notification context */
+	ctx = &ch->ctx;
+	ctx->qman_ctx = (uint64_t)ctx;
+	ctx->cdan_en = true;
+	ctx->fq_chan_id = ch->id;
+	ctx->io_dev = ch->io_dev;
+	ctx->channel = ch;
+	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, ctx);
+	if (error) {
+		device_printf(dev, "%s: failed to register CDAN context\n",
+		    __func__);
+		goto fail_dpcon_notif;
+	}
+
+	/* Register DPCON notification within Management Complex */
+	notif_cfg.dpio_id = ioinfo->id;
+	notif_cfg.prior = 0;
+	notif_cfg.qman_ctx = ctx->qman_ctx;
+	error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
+	if (error) {
+		device_printf(dev, "%s: failed to register DPCON "
+		    "notifications: dpcon_id=%d, chan_id=%d\n", __func__,
+		    coninfo->id, consc->attr.chan_id);
+		goto fail_dpcon_notif;
+	}
+
+	/* Allocate initial # of Rx buffers and the channel storage */
+	error = dpaa2_buf_seed_pool(dev, bpdev, ch, DPAA2_NI_BUFS_INIT,
+	    DPAA2_RX_BUF_SIZE, NULL);
+	if (error) {
+		device_printf(dev, "%s: failed to seed buffer pool\n",
+		    __func__);
+		goto fail_dpcon_notif;
+	}
+	error = dpaa2_chan_alloc_storage(dev, ch, DPAA2_ETH_STORE_SIZE,
+	    BUS_DMA_NOWAIT, sc->buf_align);
+	if (error != 0) {
+		device_printf(dev, "%s: failed to allocate channel storage\n",
+		    __func__);
+		goto fail_dpcon_notif;
+	} else {
+		ch->store_sz = DPAA2_ETH_STORE_FRAMES;
+	}
+
+	/* Prepare queues for the channel */
+	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_TX_CONF);
+	if (error) {
+		device_printf(dev, "%s: failed to prepare TxConf queue: "
+		    "error=%d\n", __func__, error);
+		goto fail_fq_setup;
+	}
+	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_RX);
+	if (error) {
+		device_printf(dev, "%s: failed to prepare Rx queue: error=%d\n",
+		    __func__, error);
+		goto fail_fq_setup;
+	}
+
+	if (bootverbose) {
+		device_printf(dev, "channel: dpio_id=%d dpcon_id=%d chan_id=%d, "
+		    "priorities=%d\n", ioinfo->id, coninfo->id, ch->id,
+		    consc->attr.prior_num);
+	}
+
+	*channel = ch;
+
+	(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
+	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
+
+	return (0);
+
+fail_fq_setup:
+	if (ch->store.vaddr != NULL) {
+		bus_dmamem_free(ch->store.dmat, ch->store.vaddr, ch->store.dmap);
+	}
+	if (ch->store.dmat != NULL) {
+		bus_dma_tag_destroy(ch->store.dmat);
+	}
+	ch->store.dmat = NULL;
+	ch->store.vaddr = NULL;
+	ch->store.paddr = 0;
+	ch->store.nseg = 0;
+fail_dpcon_notif:
+	buf_ring_free(ch->xmit_br, M_DEVBUF);
+fail_buf_ring:
+	mtx_destroy(&ch->xmit_mtx);
+fail_dma_setup:
+	/* while (taskqueue_cancel(ch->cleanup_tq, &ch->cleanup_task, NULL)) { */
+	/* 	taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
+	/* } */
+	/* taskqueue_free(ch->cleanup_tq); */
fail_malloc:
+	(void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
+fail_con_enable:
+	(void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
+fail_con_open:
+	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
+fail_rc_open:
+	return (error);
+}
+
+/**
+ * @brief Performs an initial configuration of the frame queue.
+ */
+int
+dpaa2_chan_setup_fq(device_t dev, struct dpaa2_channel *ch,
+    enum dpaa2_ni_queue_type queue_type)
+{
+	struct dpaa2_ni_softc *sc = device_get_softc(dev);
+	struct dpaa2_ni_fq *fq;
+
+	switch (queue_type) {
+	case DPAA2_NI_QUEUE_TX_CONF:
+		/* One queue per channel */
+		fq = &ch->txc_queue;
+		fq->chan = ch;
+		fq->flowid = ch->flowid;
+		fq->tc = 0; /* ignored */
+		fq->type = queue_type;
+		break;
+	case DPAA2_NI_QUEUE_RX:
+		KASSERT(sc->attr.num.rx_tcs <= DPAA2_MAX_TCS,
+		    ("too many Rx traffic classes: rx_tcs=%d\n",
+		    sc->attr.num.rx_tcs));
+
+		/* One queue per Rx traffic class within a channel */
+		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
+			fq = &ch->rx_queues[i];
+			fq->chan = ch;
+			fq->flowid = ch->flowid;
+			fq->tc = (uint8_t) i;
+			fq->type = queue_type;
+
+			ch->rxq_n++;
+		}
+		break;
+	case DPAA2_NI_QUEUE_RX_ERR:
+		/* One queue per network interface */
+		fq = &sc->rxe_queue;
+		fq->chan = ch;
+		fq->flowid = 0; /* ignored */
+		fq->tc = 0; /* ignored */
+		fq->type = queue_type;
+		break;
+	default:
+		device_printf(dev, "%s: unexpected frame queue type: %d\n",
+		    __func__, queue_type);
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+/**
+ * @brief Obtain the next dequeue response from the channel storage.
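+ *
+ * Returns EINPROGRESS while further responses are expected, EALREADY once
+ * the VDQ command has expired (with the response set to NULL if its frame
+ * descriptor is invalid) and ENOENT once the frame queue is empty; the
+ * storage index is reset in the latter two cases.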
+ */ +int +dpaa2_chan_next_frame(struct dpaa2_channel *ch, struct dpaa2_dq **dq) +{ + struct dpaa2_buf *buf = &ch->store; + struct dpaa2_dq *msgs = (struct dpaa2_dq *)buf->vaddr; + struct dpaa2_dq *msg = &msgs[ch->store_idx]; + int rc = EINPROGRESS; + + ch->store_idx++; + + if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) { + rc = EALREADY; /* VDQ command is expired */ + ch->store_idx = 0; + if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) { + msg = NULL; /* Null response, FD is invalid */ + } + } + if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) { + rc = ENOENT; /* FQ is empty */ + ch->store_idx = 0; + } + + if (dq != NULL) { + *dq = msg; + } + + return (rc); +} + +static int +dpaa2_chan_setup_dma(device_t dev, struct dpaa2_channel *ch, + bus_size_t alignment) +{ + int error; + + mtx_init(&ch->dma_mtx, "dpaa2_ch_dma_mtx", NULL, MTX_DEF); + + error = bus_dma_tag_create( + bus_get_dma_tag(dev), /* parent */ + alignment, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR, /* low restricted addr */ + BUS_SPACE_MAXADDR, /* high restricted addr */ + NULL, NULL, /* filter, filterarg */ + RX_SEG_MAXSZ, /* maxsize */ + RX_SEG_N, /* nsegments */ + RX_SEG_SZ, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockarg */ + &ch->rx_dmat); + if (error) { + device_printf(dev, "%s: failed to create rx_dmat\n", __func__); + goto fail_rx_tag; + } + + error = bus_dma_tag_create( + bus_get_dma_tag(dev), /* parent */ + alignment, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR, /* low restricted addr */ + BUS_SPACE_MAXADDR, /* high restricted addr */ + NULL, NULL, /* filter, filterarg */ + TX_SEG_MAXSZ, /* maxsize */ + TX_SEG_N, /* nsegments */ + TX_SEG_SZ, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockarg */ + &ch->tx_dmat); + if (error) { + device_printf(dev, "%s: failed to create tx_dmat\n", __func__); + goto fail_tx_tag; + } + + error = bus_dma_tag_create( + bus_get_dma_tag(dev), /* parent */ + alignment, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR, /* low restricted addr */ + BUS_SPACE_MAXADDR, /* high restricted addr */ + NULL, NULL, /* filter, filterarg */ + SGT_SEG_MAXSZ, /* maxsize */ + SGT_SEG_N, /* nsegments */ + SGT_SEG_SZ, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockarg */ + &ch->sgt_dmat); + if (error) { + device_printf(dev, "%s: failed to create sgt_dmat\n", __func__); + goto fail_sgt_tag; + } + + return (0); + +fail_sgt_tag: + bus_dma_tag_destroy(ch->tx_dmat); +fail_tx_tag: + bus_dma_tag_destroy(ch->rx_dmat); +fail_rx_tag: + mtx_destroy(&ch->dma_mtx); + ch->rx_dmat = NULL; + ch->tx_dmat = NULL; + ch->sgt_dmat = NULL; + + return (error); +} + +/** + * @brief Allocate a DMA-mapped storage to keep responses from VDQ command. 
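+ *
+ * The backing memory is rounded up to whole pages and loaded as a single
+ * physically contiguous segment, so QBMan can deposit dequeue responses
+ * using one base address. With the sizes passed by dpaa2_chan_setup() the
+ * storage holds DPAA2_ETH_STORE_FRAMES responses, which is what store_sz
+ * is set to on success.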
+ */ +static int +dpaa2_chan_alloc_storage(device_t dev, struct dpaa2_channel *ch, bus_size_t size, + int mapflags, bus_size_t alignment) +{ + struct dpaa2_buf *buf = &ch->store; + uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; + int error; + + error = bus_dma_tag_create( + bus_get_dma_tag(dev), /* parent */ + alignment, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR, /* low restricted addr */ + BUS_SPACE_MAXADDR, /* high restricted addr */ + NULL, NULL, /* filter, filterarg */ + maxsize, /* maxsize */ + 1, /* nsegments */ + maxsize, /* maxsegsize */ + BUS_DMA_ALLOCNOW, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockarg */ + &buf->dmat); + if (error != 0) { + device_printf(dev, "%s: failed to create DMA tag\n", __func__); + goto fail_tag; + } + + error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr, + BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap); + if (error != 0) { + device_printf(dev, "%s: failed to allocate storage memory\n", + __func__); + goto fail_map_create; + } + + buf->paddr = 0; + error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, size, + dpaa2_dmamap_oneseg_cb, &buf->paddr, mapflags); + if (error != 0) { + device_printf(dev, "%s: failed to map storage memory\n", + __func__); + goto fail_map_load; + } + + bus_dmamap_sync(buf->dmat, buf->dmap, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + buf->nseg = 1; + + return (0); + +fail_map_load: + bus_dmamem_free(buf->dmat, buf->vaddr, buf->dmap); +fail_map_create: + bus_dma_tag_destroy(buf->dmat); +fail_tag: + buf->dmat = NULL; + buf->vaddr = NULL; + buf->paddr = 0; + buf->nseg = 0; + + return (error); +} + +/** + * @brief Release new buffers to the buffer pool if necessary. + */ +static void +dpaa2_chan_bp_task(void *arg, int count) +{ + struct dpaa2_channel *ch = (struct dpaa2_channel *)arg; + struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev); + struct dpaa2_bp_softc *bpsc; + struct dpaa2_bp_conf bpconf; + const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num); + device_t bpdev; + int error; + + /* There's only one buffer pool for now */ + bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]); + bpsc = device_get_softc(bpdev); + + /* Get state of the buffer pool */ + error = DPAA2_SWP_QUERY_BP(ch->io_dev, bpsc->attr.bpid, &bpconf); + if (error) { + device_printf(sc->dev, "%s: DPAA2_SWP_QUERY_BP() failed: " + "error=%d\n", __func__, error); + return; + } + + /* Double allocated Rx buffers if amount of free buffers is < 25% */ + if (bpconf.free_bufn < (buf_num >> 2)) { + mtx_assert(&ch->dma_mtx, MA_NOTOWNED); + mtx_lock(&ch->dma_mtx); + (void)dpaa2_buf_seed_pool(ch->ni_dev, bpdev, ch, buf_num, + DPAA2_RX_BUF_SIZE, &ch->dma_mtx); + mtx_unlock(&ch->dma_mtx); + + DPAA2_ATOMIC_XCHG(&sc->buf_free, bpconf.free_bufn); + } +} diff --git a/sys/dev/dpaa2/dpaa2_channel.h b/sys/dev/dpaa2/dpaa2_channel.h new file mode 100644 index 000000000000..67b0285b7db6 --- /dev/null +++ b/sys/dev/dpaa2/dpaa2_channel.h @@ -0,0 +1,95 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright © 2023 Dmitry Salychev + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _DPAA2_CHANNEL_H +#define _DPAA2_CHANNEL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dpaa2_types.h" +#include "dpaa2_io.h" +#include "dpaa2_ni.h" + +#define DPAA2_TX_BUFRING_SZ (4096u) + +/** + * @brief QBMan channel to process ingress traffic. + * + * NOTE: Several WQs are organized into a single channel. + */ +struct dpaa2_channel { + device_t ni_dev; + device_t io_dev; + device_t con_dev; + uint16_t id; + uint16_t flowid; + + uint64_t tx_frames; + uint64_t tx_dropped; + + struct mtx dma_mtx; + bus_dma_tag_t rx_dmat; + bus_dma_tag_t tx_dmat; + bus_dma_tag_t sgt_dmat; + + struct dpaa2_io_notif_ctx ctx; /* to configure CDANs */ + + struct dpaa2_buf store; /* to keep VDQ responses */ + uint32_t store_sz; /* in frames */ + uint32_t store_idx; /* frame index */ + + uint32_t recycled_n; + struct dpaa2_buf *recycled[DPAA2_SWP_BUFS_PER_CMD]; + + uint32_t rxq_n; + struct dpaa2_ni_fq rx_queues[DPAA2_MAX_TCS]; + struct dpaa2_ni_fq txc_queue; + + struct taskqueue *cleanup_tq; + struct task cleanup_task; + struct task bp_task; + + struct mtx xmit_mtx; + struct buf_ring *xmit_br; +} __aligned(CACHE_LINE_SIZE); + +int dpaa2_chan_setup(device_t, device_t, device_t, device_t, + struct dpaa2_channel **, uint32_t, task_fn_t); +int dpaa2_chan_setup_fq(device_t, struct dpaa2_channel *, + enum dpaa2_ni_queue_type); +int dpaa2_chan_next_frame(struct dpaa2_channel *, struct dpaa2_dq **); + +#endif /* _DPAA2_CHANNEL_H */ diff --git a/sys/dev/dpaa2/dpaa2_io.c b/sys/dev/dpaa2/dpaa2_io.c index 069dd00e849e..ef82a726ad82 100644 --- a/sys/dev/dpaa2/dpaa2_io.c +++ b/sys/dev/dpaa2/dpaa2_io.c @@ -1,586 +1,603 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * QBMan command interface and the DPAA2 I/O (DPIO) driver. * * The DPIO object allows configuration of the QBMan software portal with * optional notification capabilities. * * Software portals are used by the driver to communicate with the QBMan. The * DPIO object’s main purpose is to enable the driver to perform I/O – enqueue * and dequeue operations, as well as buffer release and acquire operations – * using QBMan. */ +#include "opt_rss.h" + #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#ifdef RSS +#include +#endif + #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_swp_if.h" #include "dpaa2_cmd_if.h" #include "dpaa2_io.h" #include "dpaa2_ni.h" +#include "dpaa2_channel.h" #define DPIO_IRQ_INDEX 0 /* index of the only DPIO IRQ */ #define DPIO_POLL_MAX 32 /* * Memory: * 0: cache-enabled part of the QBMan software portal. * 1: cache-inhibited part of the QBMan software portal. * 2: control registers of the QBMan software portal? * * Note that MSI should be allocated separately using pseudo-PCI interface. */ struct resource_spec dpaa2_io_spec[] = { /* * System Memory resources. */ #define MEM_RES_NUM (3u) #define MEM_RID_OFF (0u) #define MEM_RID(rid) ((rid) + MEM_RID_OFF) { SYS_RES_MEMORY, MEM_RID(0), RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, MEM_RID(1), RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, MEM_RID(2), RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, /* * DPMCP resources. * * NOTE: MC command portals (MCPs) are used to send commands to, and * receive responses from, the MC firmware. One portal per DPIO. */ #define MCP_RES_NUM (1u) #define MCP_RID_OFF (MEM_RID_OFF + MEM_RES_NUM) #define MCP_RID(rid) ((rid) + MCP_RID_OFF) /* --- */ { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, /* --- */ RESOURCE_SPEC_END }; /* Configuration routines. */ static int dpaa2_io_setup_irqs(device_t dev); static int dpaa2_io_release_irqs(device_t dev); static int dpaa2_io_setup_msi(struct dpaa2_io_softc *sc); static int dpaa2_io_release_msi(struct dpaa2_io_softc *sc); /* Interrupt handlers */ static void dpaa2_io_intr(void *arg); static int dpaa2_io_probe(device_t dev) { /* DPIO device will be added by a parent resource container itself. */ device_set_desc(dev, "DPAA2 I/O"); return (BUS_PROBE_DEFAULT); } static int dpaa2_io_detach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_io_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint16_t rc_token, io_token; int error; DPAA2_CMD_INIT(&cmd); /* Tear down interrupt handler and release IRQ resources. */ dpaa2_io_release_irqs(dev); /* Free software portal helper object. 
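 * Note the teardown order: interrupts are released and the portal helper is
 * freed before the DPIO object itself is disabled and closed in the MC, so
 * no new QBMan commands are issued through a portal that is going away.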
*/ dpaa2_swp_free_portal(sc->swp); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open DPRC: error=%d\n", __func__, error); goto err_exit; } error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token); if (error) { device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } error = DPAA2_CMD_IO_DISABLE(dev, child, &cmd); if (error && bootverbose) { device_printf(dev, "%s: failed to disable DPIO: id=%d, " "error=%d\n", __func__, dinfo->id, error); } (void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); /* Unmap memory resources of the portal. */ for (int i = 0; i < MEM_RES_NUM; i++) { if (sc->res[MEM_RID(i)] == NULL) { continue; } error = bus_unmap_resource(sc->dev, SYS_RES_MEMORY, sc->res[MEM_RID(i)], &sc->map[MEM_RID(i)]); if (error && bootverbose) { device_printf(dev, "%s: failed to unmap memory " "resource: rid=%d, error=%d\n", __func__, MEM_RID(i), error); } } /* Release allocated resources. */ bus_release_resources(dev, dpaa2_io_spec, sc->res); return (0); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } static int dpaa2_io_attach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; device_t mcp_dev; struct dpaa2_io_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *mcp_dinfo; struct dpaa2_cmd cmd; struct resource_map_request req; struct { vm_memattr_t memattr; char *label; } map_args[MEM_RES_NUM] = { { VM_MEMATTR_WRITE_BACK, "cache-enabled part" }, { VM_MEMATTR_DEVICE, "cache-inhibited part" }, { VM_MEMATTR_DEVICE, "control registers" } }; uint16_t rc_token, io_token; int error; sc->dev = dev; sc->swp = NULL; sc->intr = NULL; sc->irq_resource = NULL; /* Allocate resources. */ error = bus_alloc_resources(sc->dev, dpaa2_io_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources: " "error=%d\n", __func__, error); return (ENXIO); } /* Set allocated MC portal up. */ mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]); mcp_dinfo = device_get_ivars(mcp_dev); dinfo->portal = mcp_dinfo->portal; /* Map memory resources of the portal. 
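 * The cache-enabled part of the portal is mapped write-back so that its
 * rings can be accessed as ordinary memory, while the cache-inhibited part
 * and the optional control registers use the device memory attribute; see
 * map_args[] above for the exact pairing.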
*/ for (int i = 0; i < MEM_RES_NUM; i++) { if (sc->res[MEM_RID(i)] == NULL) { continue; } resource_init_map_request(&req); req.memattr = map_args[i].memattr; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[MEM_RID(i)], &req, &sc->map[MEM_RID(i)]); if (error) { device_printf(dev, "%s: failed to map %s: error=%d\n", __func__, map_args[i].label, error); goto err_exit; } } DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open DPRC: error=%d\n", __func__, error); goto err_exit; } error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token); if (error) { device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } error = DPAA2_CMD_IO_RESET(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to reset DPIO: id=%d, error=%d\n", __func__, dinfo->id, error); goto close_io; } error = DPAA2_CMD_IO_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr); if (error) { device_printf(dev, "%s: failed to get DPIO attributes: id=%d, " "error=%d\n", __func__, dinfo->id, error); goto close_io; } error = DPAA2_CMD_IO_ENABLE(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to enable DPIO: id=%d, " "error=%d\n", __func__, dinfo->id, error); goto close_io; } /* Prepare descriptor of the QBMan software portal. */ sc->swp_desc.dpio_dev = dev; sc->swp_desc.swp_version = sc->attr.swp_version; sc->swp_desc.swp_clk = sc->attr.swp_clk; sc->swp_desc.swp_id = sc->attr.swp_id; sc->swp_desc.has_notif = sc->attr.priors_num ? true : false; sc->swp_desc.has_8prio = sc->attr.priors_num == 8u ? true : false; sc->swp_desc.cena_res = sc->res[0]; sc->swp_desc.cena_map = &sc->map[0]; sc->swp_desc.cinh_res = sc->res[1]; sc->swp_desc.cinh_map = &sc->map[1]; /* * Compute how many 256 QBMAN cycles fit into one ns. This is because * the interrupt timeout period register needs to be specified in QBMAN * clock cycles in increments of 256. */ sc->swp_desc.swp_cycles_ratio = 256000 / (sc->swp_desc.swp_clk / 1000000); /* Initialize QBMan software portal. */ error = dpaa2_swp_init_portal(&sc->swp, &sc->swp_desc, DPAA2_SWP_DEF); if (error) { device_printf(dev, "%s: failed to initialize dpaa2_swp: " "error=%d\n", __func__, error); goto err_exit; } error = dpaa2_io_setup_irqs(dev); if (error) { device_printf(dev, "%s: failed to setup IRQs: error=%d\n", __func__, error); goto err_exit; } if (bootverbose) { device_printf(dev, "dpio_id=%d, swp_id=%d, chan_mode=%s, " "notif_priors=%d, swp_version=0x%x\n", sc->attr.id, sc->attr.swp_id, sc->attr.chan_mode == DPAA2_IO_LOCAL_CHANNEL ? "local_channel" : "no_channel", sc->attr.priors_num, sc->attr.swp_version); } (void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); close_io: (void)DPAA2_CMD_IO_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, io_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: dpaa2_io_detach(dev); return (ENXIO); } /** * @brief Enqueue multiple frames to a frame queue using one FQID. */ static int dpaa2_io_enq_multiple_fq(device_t iodev, uint32_t fqid, struct dpaa2_fd *fd, int frames_n) { struct dpaa2_io_softc *sc = device_get_softc(iodev); struct dpaa2_swp *swp = sc->swp; struct dpaa2_eq_desc ed; uint32_t flags = 0; memset(&ed, 0, sizeof(ed)); /* Setup enqueue descriptor. 
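 * The descriptor addresses the target frame queue directly by its FQID with
 * order restoration disabled ("norp"); the false argument is taken to mean
 * that no enqueue response is requested on success, so the caller only
 * learns how many frames the portal accepted.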
*/ dpaa2_swp_set_ed_norp(&ed, false); dpaa2_swp_set_ed_fq(&ed, fqid); return (dpaa2_swp_enq_mult(swp, &ed, fd, &flags, frames_n)); } /** * @brief Configure the channel data availability notification (CDAN) * in a particular WQ channel paired with DPIO. */ static int dpaa2_io_conf_wq_channel(device_t iodev, struct dpaa2_io_notif_ctx *ctx) { struct dpaa2_io_softc *sc = device_get_softc(iodev); /* Enable generation of the CDAN notifications. */ if (ctx->cdan_en) { return (dpaa2_swp_conf_wq_channel(sc->swp, ctx->fq_chan_id, DPAA2_WQCHAN_WE_EN | DPAA2_WQCHAN_WE_CTX, ctx->cdan_en, ctx->qman_ctx)); } return (0); } /** * @brief Query current configuration/state of the buffer pool. */ static int dpaa2_io_query_bp(device_t iodev, uint16_t bpid, struct dpaa2_bp_conf *conf) { struct dpaa2_io_softc *sc = device_get_softc(iodev); return (dpaa2_swp_query_bp(sc->swp, bpid, conf)); } /** * @brief Release one or more buffer pointers to the QBMan buffer pool. */ static int dpaa2_io_release_bufs(device_t iodev, uint16_t bpid, bus_addr_t *buf, uint32_t buf_num) { struct dpaa2_io_softc *sc = device_get_softc(iodev); return (dpaa2_swp_release_bufs(sc->swp, bpid, buf, buf_num)); } /** * @brief Configure DPNI object to generate interrupts. */ static int dpaa2_io_setup_irqs(device_t dev) { struct dpaa2_io_softc *sc = device_get_softc(dev); int error; /* * Setup interrupts generated by the software portal. */ dpaa2_swp_set_intr_trigger(sc->swp, DPAA2_SWP_INTR_DQRI); dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu); /* Configure IRQs. */ error = dpaa2_io_setup_msi(sc); if (error) { device_printf(dev, "%s: failed to allocate MSI: error=%d\n", __func__, error); return (error); } if ((sc->irq_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { device_printf(dev, "%s: failed to allocate IRQ resource\n", __func__); return (ENXIO); } if (bus_setup_intr(dev, sc->irq_resource, INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, NULL, dpaa2_io_intr, sc, &sc->intr)) { device_printf(dev, "%s: failed to setup IRQ resource\n", __func__); return (ENXIO); } - /* Wrap DPIO ID around number of CPUs. */ - bus_bind_intr(dev, sc->irq_resource, sc->attr.id % mp_ncpus); + /* Wrap DPIO ID around number of CPUs/RSS buckets */ +#ifdef RSS + sc->cpu = rss_getcpu(sc->attr.id % rss_getnumbuckets()); +#else + sc->cpu = sc->attr.id % mp_ncpus; +#endif + CPU_SETOF(sc->cpu, &sc->cpu_mask); + bus_bind_intr(dev, sc->irq_resource, sc->cpu); /* * Setup and enable Static Dequeue Command to receive CDANs from * channel 0. */ if (sc->swp_desc.has_notif) dpaa2_swp_set_push_dequeue(sc->swp, 0, true); return (0); } static int dpaa2_io_release_irqs(device_t dev) { struct dpaa2_io_softc *sc = device_get_softc(dev); /* Disable receiving CDANs from channel 0. */ if (sc->swp_desc.has_notif) dpaa2_swp_set_push_dequeue(sc->swp, 0, false); /* Release IRQ resources. */ if (sc->intr != NULL) bus_teardown_intr(dev, sc->irq_resource, &sc->intr); if (sc->irq_resource != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[0], sc->irq_resource); (void)dpaa2_io_release_msi(device_get_softc(dev)); /* Configure software portal to stop generating interrupts. */ dpaa2_swp_set_intr_trigger(sc->swp, 0); dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu); return (0); } /** * @brief Allocate MSI interrupts for this DPAA2 I/O object. 
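 *
 * Each DPIO uses a single vector (DPAA2_IO_MSI_COUNT is 1): it is obtained
 * through the pseudo-PCI interface exposed by the MC bus, and the resulting
 * interrupt rids are 1-based, following the PCI convention that MSI rids
 * start at 1.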
*/ static int dpaa2_io_setup_msi(struct dpaa2_io_softc *sc) { int val; val = pci_msi_count(sc->dev); if (val < DPAA2_IO_MSI_COUNT) device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val, DPAA2_IO_MSI_COUNT); val = MIN(val, DPAA2_IO_MSI_COUNT); if (pci_alloc_msi(sc->dev, &val) != 0) return (EINVAL); for (int i = 0; i < val; i++) sc->irq_rid[i] = i + 1; return (0); } static int dpaa2_io_release_msi(struct dpaa2_io_softc *sc) { int error; error = pci_release_msi(sc->dev); if (error) { device_printf(sc->dev, "%s: failed to release MSI: error=%d/n", __func__, error); return (error); } return (0); } /** * @brief DPAA2 I/O interrupt handler. */ static void dpaa2_io_intr(void *arg) { struct dpaa2_io_softc *sc = (struct dpaa2_io_softc *) arg; + /* struct dpaa2_ni_softc *nisc = NULL; */ struct dpaa2_io_notif_ctx *ctx[DPIO_POLL_MAX]; + struct dpaa2_channel *chan; struct dpaa2_dq dq; uint32_t idx, status; uint16_t flags; int rc, cdan_n = 0; status = dpaa2_swp_read_intr_status(sc->swp); if (status == 0) { return; } DPAA2_SWP_LOCK(sc->swp, &flags); if (flags & DPAA2_SWP_DESTROYED) { /* Terminate operation if portal is destroyed. */ DPAA2_SWP_UNLOCK(sc->swp); return; } for (int i = 0; i < DPIO_POLL_MAX; i++) { rc = dpaa2_swp_dqrr_next_locked(sc->swp, &dq, &idx); if (rc) { break; } if ((dq.common.verb & DPAA2_DQRR_RESULT_MASK) == DPAA2_DQRR_RESULT_CDAN) { ctx[cdan_n++] = (struct dpaa2_io_notif_ctx *) dq.scn.ctx; } else { /* TODO: Report unknown DQRR entry. */ } dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_DCAP, idx); } DPAA2_SWP_UNLOCK(sc->swp); for (int i = 0; i < cdan_n; i++) { - ctx[i]->poll(ctx[i]->channel); + chan = (struct dpaa2_channel *)ctx[i]->channel; + /* nisc = device_get_softc(chan->ni_dev); */ + taskqueue_enqueue(chan->cleanup_tq, &chan->cleanup_task); } /* Enable software portal interrupts back */ dpaa2_swp_clear_intr_status(sc->swp, status); dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_IIR, 0); } static device_method_t dpaa2_io_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_io_probe), DEVMETHOD(device_attach, dpaa2_io_attach), DEVMETHOD(device_detach, dpaa2_io_detach), /* QBMan software portal interface */ DEVMETHOD(dpaa2_swp_enq_multiple_fq, dpaa2_io_enq_multiple_fq), DEVMETHOD(dpaa2_swp_conf_wq_channel, dpaa2_io_conf_wq_channel), DEVMETHOD(dpaa2_swp_query_bp, dpaa2_io_query_bp), DEVMETHOD(dpaa2_swp_release_bufs, dpaa2_io_release_bufs), DEVMETHOD_END }; static driver_t dpaa2_io_driver = { "dpaa2_io", dpaa2_io_methods, sizeof(struct dpaa2_io_softc), }; DRIVER_MODULE(dpaa2_io, dpaa2_rc, dpaa2_io_driver, 0, 0); diff --git a/sys/dev/dpaa2/dpaa2_io.h b/sys/dev/dpaa2/dpaa2_io.h index 13def050fffb..cc3156fcc595 100644 --- a/sys/dev/dpaa2/dpaa2_io.h +++ b/sys/dev/dpaa2/dpaa2_io.h @@ -1,105 +1,107 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * - * Copyright © 2021-2022 Dmitry Salychev + * Copyright © 2021-2023 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_IO_H #define _DPAA2_IO_H #include #include #include #include "dpaa2_types.h" #include "dpaa2_mcp.h" +#include "dpaa2_swp.h" /* Maximum resources per DPIO: 3 SYS_MEM + 1 DPMCP. */ #define DPAA2_IO_MAX_RESOURCES 4 /* Maximum number of MSIs supported by the DPIO objects. */ #define DPAA2_IO_MSI_COUNT 1 enum dpaa2_io_chan_mode { DPAA2_IO_NO_CHANNEL, DPAA2_IO_LOCAL_CHANNEL }; /** * @brief Attributes of the DPIO object. * * swp_ce_paddr: Physical address of the cache-enabled area. * swp_ci_paddr: Physical address of the cache-inhibited area. * swp_version: Hardware IP version of the software portal. * swp_clk: QBMAN clock frequency value in Hz. * id: DPIO object ID. * swp_id: Software portal ID. * priors_num: Number of priorities for the notification channel (1-8); * relevant only if channel mode is "local channel". * chan_mode: Notification channel mode. */ struct dpaa2_io_attr { uint64_t swp_ce_paddr; uint64_t swp_ci_paddr; uint32_t swp_version; uint32_t swp_clk; uint32_t id; uint16_t swp_id; uint8_t priors_num; enum dpaa2_io_chan_mode chan_mode; }; /** * @brief Context used by DPIO to configure data availability notifications * (CDAN) on a particular WQ channel. */ struct dpaa2_io_notif_ctx { - void (*poll)(void *); - device_t io_dev; void *channel; uint64_t qman_ctx; uint16_t fq_chan_id; bool cdan_en; }; /** * @brief Software context for the DPAA2 I/O driver. */ struct dpaa2_io_softc { device_t dev; struct dpaa2_swp_desc swp_desc; struct dpaa2_swp *swp; struct dpaa2_io_attr attr; struct resource *res[DPAA2_IO_MAX_RESOURCES]; struct resource_map map[DPAA2_IO_MAX_RESOURCES]; int irq_rid[DPAA2_IO_MSI_COUNT]; struct resource *irq_resource; void *intr; /* interrupt handle */ + + int cpu; + cpuset_t cpu_mask; }; extern struct resource_spec dpaa2_io_spec[]; #endif /* _DPAA2_IO_H */ diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c index 10f142b0b97d..0deebf7e8b24 100644 --- a/sys/dev/dpaa2/dpaa2_mc.c +++ b/sys/dev/dpaa2/dpaa2_mc.c @@ -1,972 +1,908 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Management Complex (MC) bus driver. * * MC is a hardware resource manager which can be found in several NXP * SoCs (LX2160A, for example) and provides an access to the specialized * hardware objects used in network-oriented packet processing applications. */ #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #include #include #endif #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mc.h" /* Macros to read/write MC registers */ #define mcreg_read_4(_sc, _r) bus_read_4(&(_sc)->map[1], (_r)) #define mcreg_write_4(_sc, _r, _v) bus_write_4(&(_sc)->map[1], (_r), (_v)) -#define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0) - #define IORT_DEVICE_NAME "MCE" /* MC Registers */ #define MC_REG_GCR1 0x0000u #define MC_REG_GCR2 0x0004u /* TODO: Does it exist? */ #define MC_REG_GSR 0x0008u #define MC_REG_FAPR 0x0028u /* General Control Register 1 (GCR1) */ #define GCR1_P1_STOP 0x80000000u #define GCR1_P2_STOP 0x40000000u /* General Status Register (GSR) */ #define GSR_HW_ERR(v) (((v) & 0x80000000u) >> 31) #define GSR_CAT_ERR(v) (((v) & 0x40000000u) >> 30) #define GSR_DPL_OFFSET(v) (((v) & 0x3FFFFF00u) >> 8) #define GSR_MCS(v) (((v) & 0xFFu) >> 0) /* Timeouts to wait for the MC status. */ #define MC_STAT_TIMEOUT 1000u /* us */ #define MC_STAT_ATTEMPTS 100u /** * @brief Structure to describe a DPAA2 device as a managed resource. */ struct dpaa2_mc_devinfo { STAILQ_ENTRY(dpaa2_mc_devinfo) link; device_t dpaa2_dev; uint32_t flags; uint32_t owners; }; MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex"); static struct resource_spec dpaa2_mc_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL }, RESOURCE_SPEC_END }; static u_int dpaa2_mc_get_xref(device_t, device_t); static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *); static struct rman *dpaa2_mc_rman(device_t, int); static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *); static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *); static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *, uint32_t *); /* * For device interface. 
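 * These routines back both the ACPI- and FDT-based front ends of the MC bus
 * driver (note the acpi_based flag consulted in dpaa2_mc_get_xref() below);
 * the front ends differ only in how the controller is discovered.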
*/ int dpaa2_mc_attach(device_t dev) { struct dpaa2_mc_softc *sc; struct resource_map_request req; uint32_t val; int error; sc = device_get_softc(dev); sc->dev = dev; sc->msi_allocated = false; sc->msi_owner = NULL; error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources\n", __func__); return (ENXIO); } if (sc->res[1]) { resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1], &req, &sc->map[1]); if (error) { device_printf(dev, "%s: failed to map control " "registers\n", __func__); dpaa2_mc_detach(dev); return (ENXIO); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); /* Reset P1_STOP and P2_STOP bits to resume MC processor. */ val = mcreg_read_4(sc, MC_REG_GCR1) & ~(GCR1_P1_STOP | GCR1_P2_STOP); mcreg_write_4(sc, MC_REG_GCR1, val); /* Poll MC status. */ if (bootverbose) device_printf(dev, "polling MC status...\n"); for (int i = 0; i < MC_STAT_ATTEMPTS; i++) { val = mcreg_read_4(sc, MC_REG_GSR); if (GSR_MCS(val) != 0u) break; DELAY(MC_STAT_TIMEOUT); } if (bootverbose) device_printf(dev, "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n", mcreg_read_4(sc, MC_REG_GCR1), mcreg_read_4(sc, MC_REG_GCR2), mcreg_read_4(sc, MC_REG_GSR), mcreg_read_4(sc, MC_REG_FAPR)); } /* At least 64 bytes of the command portal should be available. */ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { device_printf(dev, "%s: MC portal memory region too small: " "%jd\n", __func__, rman_get_size(sc->res[0])); dpaa2_mc_detach(dev); return (ENXIO); } /* Map MC portal memory resource. */ resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req, &sc->map[0]); if (error) { device_printf(dev, "Failed to map MC portal memory\n"); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 I/O objects. */ sc->dpio_rman.rm_type = RMAN_ARRAY; sc->dpio_rman.rm_descr = "DPAA2 DPIO objects"; error = rman_init(&sc->dpio_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 I/O objects: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 buffer pools. */ sc->dpbp_rman.rm_type = RMAN_ARRAY; sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects"; error = rman_init(&sc->dpbp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 buffer pools: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 concentrators. */ sc->dpcon_rman.rm_type = RMAN_ARRAY; sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects"; error = rman_init(&sc->dpcon_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 concentrators: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a resource manager for the DPAA2 MC portals. */ sc->dpmcp_rman.rm_type = RMAN_ARRAY; sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects"; error = rman_init(&sc->dpmcp_rman); if (error) { device_printf(dev, "Failed to initialize a resource manager for " "the DPAA2 MC portals: error=%d\n", error); dpaa2_mc_detach(dev); return (ENXIO); } /* Initialize a list of non-allocatable DPAA2 devices. 
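 * Entries are appended by dpaa2_mc_manage_dev() as the resource container
 * enumerates its children; shareable entries are later handed out by
 * dpaa2_mc_get_shared_dev(), which picks the device with the fewest owners.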
*/ mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF); STAILQ_INIT(&sc->mdev_list); mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF); /* * Add a root resource container as the only child of the bus. All of * the direct descendant containers will be attached to the root one * instead of the MC device. */ sc->rcdev = device_add_child(dev, "dpaa2_rc", 0); if (sc->rcdev == NULL) { dpaa2_mc_detach(dev); return (ENXIO); } bus_generic_probe(dev); bus_generic_attach(dev); return (0); } int dpaa2_mc_detach(device_t dev) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo = NULL; int error; bus_generic_detach(dev); sc = device_get_softc(dev); if (sc->rcdev) device_delete_child(dev, sc->rcdev); bus_release_resources(dev, dpaa2_mc_spec, sc->res); dinfo = device_get_ivars(dev); if (dinfo) free(dinfo, M_DPAA2_MC); error = bus_generic_detach(dev); if (error != 0) return (error); return (device_delete_children(dev)); } /* * For bus interface. */ struct resource * dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; struct rman *rm; int error; rm = dpaa2_mc_rman(mcdev, type); if (!rm) return (BUS_ALLOC_RESOURCE(device_get_parent(mcdev), child, type, rid, start, end, count, flags)); /* * Skip managing DPAA2-specific resource. It must be provided to MC by * calling DPAA2_MC_MANAGE_DEV() beforehand. */ if (type <= DPAA2_DEV_MC) { error = rman_manage_region(rm, start, end); if (error) { device_printf(mcdev, "rman_manage_region() failed: " "start=%#jx, end=%#jx, error=%d\n", start, end, error); goto fail; } } res = rman_reserve_resource(rm, start, end, count, flags, child); if (!res) { device_printf(mcdev, "rman_reserve_resource() failed: " "start=%#jx, end=%#jx, count=%#jx\n", start, end, count); goto fail; } rman_set_rid(res, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res)) { device_printf(mcdev, "bus_activate_resource() failed: " "rid=%d, res=%#jx\n", *rid, (uintmax_t) res); rman_release_resource(res); goto fail; } } return (res); fail: device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, " "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start, end, count, flags); return (NULL); } int dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, type); if (rm) return (rman_adjust_resource(r, start, end)); return (bus_generic_adjust_resource(mcdev, child, type, r, start, end)); } int dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r) { struct rman *rm; rm = dpaa2_mc_rman(mcdev, type); if (rm) { KASSERT(rman_is_region_manager(r, rm), ("rman mismatch")); rman_release_resource(r); } return (bus_generic_release_resource(mcdev, child, type, rid, r)); } int dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r) { int rc; if ((rc = rman_activate_resource(r)) != 0) return (rc); return (BUS_ACTIVATE_RESOURCE(device_get_parent(mcdev), child, type, rid, r)); } int dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid, struct resource *r) { int rc; if ((rc = rman_deactivate_resource(r)) != 0) return (rc); return (BUS_DEACTIVATE_RESOURCE(device_get_parent(mcdev), child, type, rid, r)); } /* * For pseudo-pcib interface. 
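 * The MC bus poses as a PCI bridge here so that its children can use the
 * stock pci_alloc_msi() path; on INTRNG kernels (without an IOMMU) the
 * calls are forwarded to the pool-based fallback implementations below,
 * otherwise they fail with ENXIO.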
*/ int dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs) { #if defined(INTRNG) return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs)); #else return (ENXIO); #endif } int dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { #if defined(INTRNG) return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data)); #else return (ENXIO); #endif } int dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type, uintptr_t *id) { struct dpaa2_devinfo *dinfo; dinfo = device_get_ivars(child); if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (ENXIO); if (type == PCI_ID_MSI) return (dpaa2_mc_map_id(mcdev, child, id)); *id = dinfo->icid; return (0); } /* * For DPAA2 Management Complex bus driver interface. */ int dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; struct rman *rm; int error; sc = device_get_softc(mcdev); dinfo = device_get_ivars(dpaa2_dev); if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO); if (!di) return (ENOMEM); di->dpaa2_dev = dpaa2_dev; di->flags = flags; di->owners = 0; /* Append a new managed DPAA2 device to the queue. */ mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_INSERT_TAIL(&sc->mdev_list, di, link); mtx_unlock(&sc->mdev_lock); if (flags & DPAA2_MC_DEV_ALLOCATABLE) { /* Select rman based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, dinfo->dtype); if (!rm) return (ENOENT); /* Manage DPAA2 device as an allocatable resource. */ error = rman_manage_region(rm, (rman_res_t) dpaa2_dev, (rman_res_t) dpaa2_dev); if (error) return (error); } return (0); } int dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct rman *rm; rman_res_t start, end; int error; if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); /* Select resource manager based on a type of the DPAA2 device. */ rm = dpaa2_mc_rman(mcdev, devtype); if (!rm) return (ENOENT); /* Find first free DPAA2 device of the given type. 
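 * Allocatable devices are recorded in the rman as degenerate regions whose
 * start and end are both the device_t pointer itself, so the first free
 * region is the device; the KASSERT below depends on that encoding.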
*/ error = rman_first_free_region(rm, &start, &end); if (error) return (error); KASSERT(start == end, ("start != end, but should be the same pointer " "to the DPAA2 device: start=%jx, end=%jx", start, end)); *dpaa2_dev = (device_t) start; return (0); } int dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype, uint32_t obj_id) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if (dinfo->dtype == devtype && dinfo->id == obj_id) { *dpaa2_dev = di->dpaa2_dev; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_devinfo *dinfo; struct dpaa2_mc_devinfo *di; device_t dev = NULL; uint32_t owners = UINT32_MAX; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { dinfo = device_get_ivars(di->dpaa2_dev); if ((dinfo->dtype == devtype) && (di->flags & DPAA2_MC_DEV_SHAREABLE) && (di->owners < owners)) { dev = di->dpaa2_dev; owners = di->owners; } } if (dev) { *dpaa2_dev = dev; error = 0; } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners++; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } int dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev, enum dpaa2_dev_type devtype) { struct dpaa2_mc_softc *sc; struct dpaa2_mc_devinfo *di; int error = ENOENT; sc = device_get_softc(mcdev); if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0) return (EINVAL); mtx_assert(&sc->mdev_lock, MA_NOTOWNED); mtx_lock(&sc->mdev_lock); STAILQ_FOREACH(di, &sc->mdev_list, link) { if (di->dpaa2_dev == dpaa2_dev && (di->flags & DPAA2_MC_DEV_SHAREABLE)) { di->owners -= di->owners > 0 ? 1 : 0; error = 0; break; } } mtx_unlock(&sc->mdev_lock); return (error); } -/** - * @brief Convert DPAA2 device type to string. - */ -const char * -dpaa2_ttos(enum dpaa2_dev_type type) -{ - switch (type) { - case DPAA2_DEV_MC: - return ("mc"); /* NOTE: to print as information only. */ - case DPAA2_DEV_RC: - return ("dprc"); - case DPAA2_DEV_IO: - return ("dpio"); - case DPAA2_DEV_NI: - return ("dpni"); - case DPAA2_DEV_MCP: - return ("dpmcp"); - case DPAA2_DEV_BP: - return ("dpbp"); - case DPAA2_DEV_CON: - return ("dpcon"); - case DPAA2_DEV_MAC: - return ("dpmac"); - case DPAA2_DEV_MUX: - return ("dpdmux"); - case DPAA2_DEV_SW: - return ("dpsw"); - default: - break; - } - return ("notype"); -} - -/** - * @brief Convert string to DPAA2 device type. 
- */ -enum dpaa2_dev_type -dpaa2_stot(const char *str) -{ - if (COMPARE_TYPE(str, "dprc")) { - return (DPAA2_DEV_RC); - } else if (COMPARE_TYPE(str, "dpio")) { - return (DPAA2_DEV_IO); - } else if (COMPARE_TYPE(str, "dpni")) { - return (DPAA2_DEV_NI); - } else if (COMPARE_TYPE(str, "dpmcp")) { - return (DPAA2_DEV_MCP); - } else if (COMPARE_TYPE(str, "dpbp")) { - return (DPAA2_DEV_BP); - } else if (COMPARE_TYPE(str, "dpcon")) { - return (DPAA2_DEV_CON); - } else if (COMPARE_TYPE(str, "dpmac")) { - return (DPAA2_DEV_MAC); - } else if (COMPARE_TYPE(str, "dpdmux")) { - return (DPAA2_DEV_MUX); - } else if (COMPARE_TYPE(str, "dpsw")) { - return (DPAA2_DEV_SW); - } - - return (DPAA2_DEV_NOTYPE); -} - /** * @internal */ static u_int dpaa2_mc_get_xref(device_t mcdev, device_t child) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); struct dpaa2_devinfo *dinfo = device_get_ivars(child); #ifdef DEV_ACPI u_int xref, devid; #endif #ifdef FDT phandle_t msi_parent; #endif int error; if (sc && dinfo) { #ifdef DEV_ACPI if (sc->acpi_based) { /* * NOTE: The first named component from the IORT table * with the given name (as a substring) will be used. */ error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error) return (0); return (xref); } #endif #ifdef FDT if (!sc->acpi_based) { /* FDT-based driver. */ error = ofw_bus_msimap(sc->ofw_node, dinfo->icid, &msi_parent, NULL); if (error) return (0); return ((u_int) msi_parent); } #endif } return (0); } /** * @internal */ static u_int dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id) { struct dpaa2_devinfo *dinfo; #ifdef DEV_ACPI u_int xref, devid; int error; #endif dinfo = device_get_ivars(child); if (dinfo) { /* * The first named components from IORT table with the given * name (as a substring) will be used. */ #ifdef DEV_ACPI error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid, &xref, &devid); if (error == 0) *id = devid; else #endif *id = dinfo->icid; /* RID not in IORT, likely FW bug */ return (0); } return (ENXIO); } /** * @internal * @brief Obtain a resource manager based on the given type of the resource. */ static struct rman * dpaa2_mc_rman(device_t mcdev, int type) { struct dpaa2_mc_softc *sc; sc = device_get_softc(mcdev); switch (type) { case DPAA2_DEV_IO: return (&sc->dpio_rman); case DPAA2_DEV_BP: return (&sc->dpbp_rman); case DPAA2_DEV_CON: return (&sc->dpcon_rman); case DPAA2_DEV_MCP: return (&sc->dpmcp_rman); default: break; } return (NULL); } #if defined(INTRNG) && !defined(IOMMU) /** * @internal * @brief Allocates requested number of MSIs. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int msi_irqs[DPAA2_MC_MSI_COUNT]; int error; /* Pre-allocate a bunch of MSIs for MC to be used by its children. 
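 * All DPAA2_MC_MSI_COUNT vectors are allocated in one go on behalf of the
 * first child that asks and are then leased out in contiguous runs; freed
 * entries only return to this pool, never to the interrupt controller.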
*/ if (!sc->msi_allocated) { error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev, child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs); if (error) { device_printf(mcdev, "failed to pre-allocate %d MSIs: " "error=%d\n", DPAA2_MC_MSI_COUNT, error); return (error); } mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { sc->msi[i].child = NULL; sc->msi[i].irq = msi_irqs[i]; } sc->msi_owner = child; sc->msi_allocated = true; mtx_unlock(&sc->msi_lock); } error = ENOENT; /* Find the first free MSIs from the pre-allocated pool. */ mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != NULL) continue; error = 0; for (int j = 0; j < count; j++) { if (i + j >= DPAA2_MC_MSI_COUNT) { device_printf(mcdev, "requested %d MSIs exceed " "limit of %d available\n", count, DPAA2_MC_MSI_COUNT); error = E2BIG; break; } sc->msi[i + j].child = child; irqs[j] = sc->msi[i + j].irq; } break; } mtx_unlock(&sc->msi_lock); return (error); } /** * @internal * @brief Marks IRQs as free in the pre-allocated pool of MSIs. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. * NOTE: MSIs are kept allocated in the kernel as a part of the pool. */ static int dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child != child) continue; for (int j = 0; j < count; j++) { if (sc->msi[i].irq == irqs[j]) { sc->msi[i].child = NULL; break; } } } mtx_unlock(&sc->msi_lock); return (0); } /** * @internal * @brief Provides address to write to and data according to the given MSI from * the pre-allocated pool. * * NOTE: This function is a part of fallback solution when IOMMU isn't available. * Total number of IRQs is limited to 32. */ static int dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct dpaa2_mc_softc *sc = device_get_softc(mcdev); int error = EINVAL; mtx_assert(&sc->msi_lock, MA_NOTOWNED); mtx_lock(&sc->msi_lock); for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) { if (sc->msi[i].child == child && sc->msi[i].irq == irq) { error = 0; break; } } mtx_unlock(&sc->msi_lock); if (error) return (error); return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev, sc->msi_owner), irq, addr, data)); } #endif /* defined(INTRNG) && !defined(IOMMU) */ static device_method_t dpaa2_mc_methods[] = { DEVMETHOD_END }; DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods, sizeof(struct dpaa2_mc_softc)); diff --git a/sys/dev/dpaa2/dpaa2_mcp.c b/sys/dev/dpaa2/dpaa2_mcp.c index bebd7deced7e..1ddf0e33d8c9 100644 --- a/sys/dev/dpaa2/dpaa2_mcp.c +++ b/sys/dev/dpaa2/dpaa2_mcp.c @@ -1,256 +1,255 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * DPAA2 MC command portal and helper routines. */ #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_mcp.h" #include "dpaa2_mc.h" #include "dpaa2_cmd_if.h" MALLOC_DEFINE(M_DPAA2_MCP, "dpaa2_mcp", "DPAA2 Management Complex Portal"); static struct resource_spec dpaa2_mcp_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED }, RESOURCE_SPEC_END }; int dpaa2_mcp_init_portal(struct dpaa2_mcp **mcp, struct resource *res, struct resource_map *map, uint16_t flags) { const int mflags = flags & DPAA2_PORTAL_NOWAIT_ALLOC ? (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO); struct dpaa2_mcp *p; if (!mcp || !res || !map) return (DPAA2_CMD_STAT_EINVAL); p = malloc(sizeof(struct dpaa2_mcp), M_DPAA2_MCP, mflags); if (p == NULL) return (DPAA2_CMD_STAT_NO_MEMORY); mtx_init(&p->lock, "mcp_sleep_lock", NULL, MTX_DEF); p->res = res; p->map = map; p->flags = flags; p->rc_api_major = 0; /* DPRC API version to be cached later. */ p->rc_api_minor = 0; *mcp = p; return (0); } void dpaa2_mcp_free_portal(struct dpaa2_mcp *mcp) { uint16_t flags; KASSERT(mcp != NULL, ("%s: mcp is NULL", __func__)); DPAA2_MCP_LOCK(mcp, &flags); mcp->flags |= DPAA2_PORTAL_DESTROYED; DPAA2_MCP_UNLOCK(mcp); /* Let threads stop using this portal. */ DELAY(DPAA2_PORTAL_TIMEOUT); mtx_destroy(&mcp->lock); free(mcp, M_DPAA2_MCP); } struct dpaa2_cmd * dpaa2_mcp_tk(struct dpaa2_cmd *cmd, uint16_t token) { struct dpaa2_cmd_header *hdr; KASSERT(cmd != NULL, ("%s: cmd is NULL", __func__)); hdr = (struct dpaa2_cmd_header *) &cmd->header; hdr->token = token; return (cmd); } struct dpaa2_cmd * dpaa2_mcp_f(struct dpaa2_cmd *cmd, uint16_t flags) { struct dpaa2_cmd_header *hdr; KASSERT(cmd != NULL, ("%s: cmd is NULL", __func__)); hdr = (struct dpaa2_cmd_header *) &cmd->header; hdr->flags_hw = DPAA2_CMD_DEF; hdr->flags_sw = DPAA2_CMD_DEF; if (flags & DPAA2_CMD_HIGH_PRIO) { hdr->flags_hw |= DPAA2_HW_FLAG_HIGH_PRIO; } if (flags & DPAA2_CMD_INTR_DIS) { hdr->flags_sw |= DPAA2_SW_FLAG_INTR_DIS; } return (cmd); } static int dpaa2_mcp_probe(device_t dev) { /* DPMCP device will be added by the parent resource container. 
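 * The probe therefore performs no matching of its own: it just attaches a
 * human-readable description and defers the real work to attach.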
*/ device_set_desc(dev, "DPAA2 MC portal"); return (BUS_PROBE_DEFAULT); } static int dpaa2_mcp_detach(device_t dev) { return (0); } static int dpaa2_mcp_attach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_mcp_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; struct dpaa2_mcp *portal; struct resource_map_request req; uint16_t rc_token, mcp_token; int error; sc->dev = dev; error = bus_alloc_resources(sc->dev, dpaa2_mcp_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources\n", __func__); goto err_exit; } /* At least 64 bytes of the command portal should be available. */ if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) { device_printf(dev, "%s: MC portal memory region too small: " "%jd\n", __func__, rman_get_size(sc->res[0])); goto err_exit; } /* Map MC portal memory resource. */ resource_init_map_request(&req); req.memattr = VM_MEMATTR_DEVICE; error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0], &req, &sc->map[0]); if (error) { device_printf(dev, "%s: failed to map MC portal memory\n", __func__); goto err_exit; } /* Initialize portal to send commands to MC. */ error = dpaa2_mcp_init_portal(&portal, sc->res[0], &sc->map[0], DPAA2_PORTAL_DEF); if (error) { device_printf(dev, "%s: failed to initialize dpaa2_mcp: " "error=%d\n", __func__, error); goto err_exit; } DPAA2_CMD_INIT(&cmd); /* Open resource container and DPMCP object. */ error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open DPRC: error=%d\n", __func__, error); goto err_exit; } error = DPAA2_CMD_MCP_OPEN(dev, child, &cmd, dinfo->id, &mcp_token); if (error) { device_printf(dev, "%s: failed to open DPMCP: id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* Prepare DPMCP object. */ error = DPAA2_CMD_MCP_RESET(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to reset DPMCP: id=%d, " "error=%d\n", __func__, dinfo->id, error); goto close_mcp; } (void)DPAA2_CMD_MCP_CLOSE(dev, child, &cmd); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); dinfo->portal = portal; return (0); close_mcp: (void)DPAA2_CMD_MCP_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, mcp_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: dpaa2_mcp_detach(dev); return (ENXIO); } static device_method_t dpaa2_mcp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_mcp_probe), DEVMETHOD(device_attach, dpaa2_mcp_attach), DEVMETHOD(device_detach, dpaa2_mcp_detach), DEVMETHOD_END }; static driver_t dpaa2_mcp_driver = { "dpaa2_mcp", dpaa2_mcp_methods, sizeof(struct dpaa2_mcp_softc), }; DRIVER_MODULE(dpaa2_mcp, dpaa2_rc, dpaa2_mcp_driver, 0, 0); diff --git a/sys/dev/dpaa2/dpaa2_mcp.h b/sys/dev/dpaa2/dpaa2_mcp.h index 5e1926308b53..8a9f942c6f71 100644 --- a/sys/dev/dpaa2/dpaa2_mcp.h +++ b/sys/dev/dpaa2/dpaa2_mcp.h @@ -1,473 +1,472 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_MCP_H #define _DPAA2_MCP_H #include -#include #include #include "dpaa2_types.h" /* * DPAA2 MC command interface helper routines. */ #define DPAA2_PORTAL_TIMEOUT 100000 /* us */ #define DPAA2_MCP_MEM_WIDTH 0x40 /* Minimal size of the MC portal. */ #define DPAA2_MCP_MAX_RESOURCES 1 /* resources per DPMCP: 1 SYS_MEM */ /* * Portal flags. * * TODO: Use the same flags for both MC and software portals. */ #define DPAA2_PORTAL_DEF 0x0u #define DPAA2_PORTAL_NOWAIT_ALLOC 0x2u /* Do not sleep during init */ #define DPAA2_PORTAL_LOCKED 0x4000u /* Wait till portal's unlocked */ #define DPAA2_PORTAL_DESTROYED 0x8000u /* Terminate any operations */ /* Command flags. */ #define DPAA2_CMD_DEF 0x0u #define DPAA2_CMD_HIGH_PRIO 0x80u /* High priority command */ #define DPAA2_CMD_INTR_DIS 0x100u /* Disable cmd finished intr */ #define DPAA2_CMD_NOWAIT_ALLOC 0x8000u /* Do not sleep during init */ /* DPAA2 command return codes. */ #define DPAA2_CMD_STAT_OK 0x0 /* Set by MC on success */ #define DPAA2_CMD_STAT_READY 0x1 /* Ready to be processed */ #define DPAA2_CMD_STAT_AUTH_ERR 0x3 /* Illegal object-portal-icid */ #define DPAA2_CMD_STAT_NO_PRIVILEGE 0x4 /* No privilege */ #define DPAA2_CMD_STAT_DMA_ERR 0x5 /* DMA or I/O error */ #define DPAA2_CMD_STAT_CONFIG_ERR 0x6 /* Invalid/conflicting params */ #define DPAA2_CMD_STAT_TIMEOUT 0x7 /* Command timed out */ #define DPAA2_CMD_STAT_NO_RESOURCE 0x8 /* No DPAA2 resources */ #define DPAA2_CMD_STAT_NO_MEMORY 0x9 /* No memory available */ #define DPAA2_CMD_STAT_BUSY 0xA /* Device is busy */ #define DPAA2_CMD_STAT_UNSUPPORTED_OP 0xB /* Unsupported operation */ #define DPAA2_CMD_STAT_INVALID_STATE 0xC /* Invalid state */ /* Driver-specific return codes. */ #define DPAA2_CMD_STAT_UNKNOWN_OBJ 0xFD /* Unknown DPAA2 object. */ #define DPAA2_CMD_STAT_EINVAL 0xFE /* Invalid argument */ #define DPAA2_CMD_STAT_ERR 0xFF /* General error */ /* Object's memory region flags. 
*/ #define DPAA2_RC_REG_CACHEABLE 0x1 /* Cacheable memory mapping */ #define DPAA2_HW_FLAG_HIGH_PRIO 0x80u #define DPAA2_SW_FLAG_INTR_DIS 0x01u #define DPAA2_CMD_PARAMS_N 7u #define DPAA2_LABEL_SZ 16 /* ------------------------- MNG command IDs -------------------------------- */ #define CMD_MNG_BASE_VERSION 1 #define CMD_MNG_ID_OFFSET 4 #define CMD_MNG(id) (((id) << CMD_MNG_ID_OFFSET) | CMD_MNG_BASE_VERSION) #define CMDID_MNG_GET_VER CMD_MNG(0x831) #define CMDID_MNG_GET_SOC_VER CMD_MNG(0x832) #define CMDID_MNG_GET_CONT_ID CMD_MNG(0x830) /* ------------------------- DPRC command IDs ------------------------------- */ #define CMD_RC_BASE_VERSION 1 #define CMD_RC_2ND_VERSION 2 #define CMD_RC_3RD_VERSION 3 #define CMD_RC_ID_OFFSET 4 #define CMD_RC(id) (((id) << CMD_RC_ID_OFFSET) | CMD_RC_BASE_VERSION) #define CMD_RC_V2(id) (((id) << CMD_RC_ID_OFFSET) | CMD_RC_2ND_VERSION) #define CMD_RC_V3(id) (((id) << CMD_RC_ID_OFFSET) | CMD_RC_3RD_VERSION) #define CMDID_RC_OPEN CMD_RC(0x805) #define CMDID_RC_CLOSE CMD_RC(0x800) #define CMDID_RC_GET_API_VERSION CMD_RC(0xA05) #define CMDID_RC_GET_ATTR CMD_RC(0x004) #define CMDID_RC_RESET_CONT CMD_RC(0x005) #define CMDID_RC_RESET_CONT_V2 CMD_RC_V2(0x005) #define CMDID_RC_SET_IRQ CMD_RC(0x010) #define CMDID_RC_SET_IRQ_ENABLE CMD_RC(0x012) #define CMDID_RC_SET_IRQ_MASK CMD_RC(0x014) #define CMDID_RC_GET_IRQ_STATUS CMD_RC(0x016) #define CMDID_RC_CLEAR_IRQ_STATUS CMD_RC(0x017) #define CMDID_RC_GET_CONT_ID CMD_RC(0x830) #define CMDID_RC_GET_OBJ_COUNT CMD_RC(0x159) #define CMDID_RC_GET_OBJ CMD_RC(0x15A) #define CMDID_RC_GET_OBJ_DESC CMD_RC(0x162) #define CMDID_RC_GET_OBJ_REG CMD_RC(0x15E) #define CMDID_RC_GET_OBJ_REG_V2 CMD_RC_V2(0x15E) #define CMDID_RC_GET_OBJ_REG_V3 CMD_RC_V3(0x15E) #define CMDID_RC_SET_OBJ_IRQ CMD_RC(0x15F) #define CMDID_RC_GET_CONN CMD_RC(0x16C) /* ------------------------- DPIO command IDs ------------------------------- */ #define CMD_IO_BASE_VERSION 1 #define CMD_IO_ID_OFFSET 4 #define CMD_IO(id) (((id) << CMD_IO_ID_OFFSET) | CMD_IO_BASE_VERSION) #define CMDID_IO_OPEN CMD_IO(0x803) #define CMDID_IO_CLOSE CMD_IO(0x800) #define CMDID_IO_ENABLE CMD_IO(0x002) #define CMDID_IO_DISABLE CMD_IO(0x003) #define CMDID_IO_GET_ATTR CMD_IO(0x004) #define CMDID_IO_RESET CMD_IO(0x005) #define CMDID_IO_SET_IRQ_ENABLE CMD_IO(0x012) #define CMDID_IO_SET_IRQ_MASK CMD_IO(0x014) #define CMDID_IO_GET_IRQ_STATUS CMD_IO(0x016) #define CMDID_IO_ADD_STATIC_DQ_CHAN CMD_IO(0x122) /* ------------------------- DPNI command IDs ------------------------------- */ #define CMD_NI_BASE_VERSION 1 #define CMD_NI_2ND_VERSION 2 #define CMD_NI_4TH_VERSION 4 #define CMD_NI_ID_OFFSET 4 #define CMD_NI(id) (((id) << CMD_NI_ID_OFFSET) | CMD_NI_BASE_VERSION) #define CMD_NI_V2(id) (((id) << CMD_NI_ID_OFFSET) | CMD_NI_2ND_VERSION) #define CMD_NI_V4(id) (((id) << CMD_NI_ID_OFFSET) | CMD_NI_4TH_VERSION) #define CMDID_NI_OPEN CMD_NI(0x801) #define CMDID_NI_CLOSE CMD_NI(0x800) #define CMDID_NI_ENABLE CMD_NI(0x002) #define CMDID_NI_DISABLE CMD_NI(0x003) #define CMDID_NI_GET_API_VER CMD_NI(0xA01) #define CMDID_NI_RESET CMD_NI(0x005) #define CMDID_NI_GET_ATTR CMD_NI(0x004) #define CMDID_NI_SET_BUF_LAYOUT CMD_NI(0x265) #define CMDID_NI_GET_TX_DATA_OFF CMD_NI(0x212) #define CMDID_NI_GET_PORT_MAC_ADDR CMD_NI(0x263) #define CMDID_NI_SET_PRIM_MAC_ADDR CMD_NI(0x224) #define CMDID_NI_GET_PRIM_MAC_ADDR CMD_NI(0x225) #define CMDID_NI_SET_LINK_CFG CMD_NI(0x21A) #define CMDID_NI_GET_LINK_CFG CMD_NI(0x278) #define CMDID_NI_GET_LINK_STATE CMD_NI(0x215) #define CMDID_NI_SET_QOS_TABLE CMD_NI(0x240) #define 
CMDID_NI_CLEAR_QOS_TABLE CMD_NI(0x243) #define CMDID_NI_SET_POOLS CMD_NI(0x200) #define CMDID_NI_SET_ERR_BEHAVIOR CMD_NI(0x20B) #define CMDID_NI_GET_QUEUE CMD_NI(0x25F) #define CMDID_NI_SET_QUEUE CMD_NI(0x260) #define CMDID_NI_GET_QDID CMD_NI(0x210) #define CMDID_NI_ADD_MAC_ADDR CMD_NI(0x226) #define CMDID_NI_REMOVE_MAC_ADDR CMD_NI(0x227) #define CMDID_NI_CLEAR_MAC_FILTERS CMD_NI(0x228) #define CMDID_NI_SET_MFL CMD_NI(0x216) #define CMDID_NI_SET_OFFLOAD CMD_NI(0x26C) #define CMDID_NI_SET_IRQ_MASK CMD_NI(0x014) #define CMDID_NI_SET_IRQ_ENABLE CMD_NI(0x012) #define CMDID_NI_GET_IRQ_STATUS CMD_NI(0x016) #define CMDID_NI_SET_UNI_PROMISC CMD_NI(0x222) #define CMDID_NI_SET_MULTI_PROMISC CMD_NI(0x220) #define CMDID_NI_GET_STATISTICS CMD_NI(0x25D) #define CMDID_NI_SET_RX_TC_DIST CMD_NI(0x235) /* ------------------------- DPBP command IDs ------------------------------- */ #define CMD_BP_BASE_VERSION 1 #define CMD_BP_ID_OFFSET 4 #define CMD_BP(id) (((id) << CMD_BP_ID_OFFSET) | CMD_BP_BASE_VERSION) #define CMDID_BP_OPEN CMD_BP(0x804) #define CMDID_BP_CLOSE CMD_BP(0x800) #define CMDID_BP_ENABLE CMD_BP(0x002) #define CMDID_BP_DISABLE CMD_BP(0x003) #define CMDID_BP_GET_ATTR CMD_BP(0x004) #define CMDID_BP_RESET CMD_BP(0x005) /* ------------------------- DPMAC command IDs ------------------------------ */ #define CMD_MAC_BASE_VERSION 1 #define CMD_MAC_2ND_VERSION 2 #define CMD_MAC_ID_OFFSET 4 #define CMD_MAC(id) (((id) << CMD_MAC_ID_OFFSET) | CMD_MAC_BASE_VERSION) #define CMD_MAC_V2(id) (((id) << CMD_MAC_ID_OFFSET) | CMD_MAC_2ND_VERSION) #define CMDID_MAC_OPEN CMD_MAC(0x80C) #define CMDID_MAC_CLOSE CMD_MAC(0x800) #define CMDID_MAC_RESET CMD_MAC(0x005) #define CMDID_MAC_MDIO_READ CMD_MAC(0x0C0) #define CMDID_MAC_MDIO_WRITE CMD_MAC(0x0C1) #define CMDID_MAC_GET_ADDR CMD_MAC(0x0C5) #define CMDID_MAC_GET_ATTR CMD_MAC(0x004) #define CMDID_MAC_SET_LINK_STATE CMD_MAC_V2(0x0C3) #define CMDID_MAC_SET_IRQ_MASK CMD_MAC(0x014) #define CMDID_MAC_SET_IRQ_ENABLE CMD_MAC(0x012) #define CMDID_MAC_GET_IRQ_STATUS CMD_MAC(0x016) /* ------------------------- DPCON command IDs ------------------------------ */ #define CMD_CON_BASE_VERSION 1 #define CMD_CON_ID_OFFSET 4 #define CMD_CON(id) (((id) << CMD_CON_ID_OFFSET) | CMD_CON_BASE_VERSION) #define CMDID_CON_OPEN CMD_CON(0x808) #define CMDID_CON_CLOSE CMD_CON(0x800) #define CMDID_CON_ENABLE CMD_CON(0x002) #define CMDID_CON_DISABLE CMD_CON(0x003) #define CMDID_CON_GET_ATTR CMD_CON(0x004) #define CMDID_CON_RESET CMD_CON(0x005) #define CMDID_CON_SET_NOTIF CMD_CON(0x100) /* ------------------------- DPMCP command IDs ------------------------------ */ #define CMD_MCP_BASE_VERSION 1 #define CMD_MCP_2ND_VERSION 2 #define CMD_MCP_ID_OFFSET 4 #define CMD_MCP(id) (((id) << CMD_MCP_ID_OFFSET) | CMD_MCP_BASE_VERSION) #define CMD_MCP_V2(id) (((id) << CMD_MCP_ID_OFFSET) | CMD_MCP_2ND_VERSION) #define CMDID_MCP_CREATE CMD_MCP_V2(0x90B) #define CMDID_MCP_DESTROY CMD_MCP(0x98B) #define CMDID_MCP_OPEN CMD_MCP(0x80B) #define CMDID_MCP_CLOSE CMD_MCP(0x800) #define CMDID_MCP_RESET CMD_MCP(0x005) #define DPAA2_MCP_LOCK(__mcp, __flags) do { \ mtx_assert(&(__mcp)->lock, MA_NOTOWNED); \ mtx_lock(&(__mcp)->lock); \ *(__flags) = (__mcp)->flags; \ (__mcp)->flags |= DPAA2_PORTAL_LOCKED; \ } while (0) #define DPAA2_MCP_UNLOCK(__mcp) do { \ mtx_assert(&(__mcp)->lock, MA_OWNED); \ (__mcp)->flags &= ~DPAA2_PORTAL_LOCKED; \ mtx_unlock(&(__mcp)->lock); \ } while (0) enum dpaa2_rc_region_type { DPAA2_RC_REG_MC_PORTAL, DPAA2_RC_REG_QBMAN_PORTAL }; /** * @brief Helper object to interact with the MC portal. 
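 *
 * A minimal usage sketch (hypothetical caller; "res" and "map" stand for an
 * already allocated and mapped MC portal memory resource, as in
 * dpaa2_mcp_attach()). Note that dpaa2_mcp_init_portal() reports errors as
 * DPAA2_CMD_STAT_* codes rather than errno values:
 *
 *	struct dpaa2_mcp *mcp;
 *	int error;
 *
 *	error = dpaa2_mcp_init_portal(&mcp, res, &map, DPAA2_PORTAL_DEF);
 *	if (error != 0)
 *		return (error);
 *	...
 *	dpaa2_mcp_free_portal(mcp);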
* * res: Unmapped portal's I/O memory. * map: Mapped portal's I/O memory. * lock: Lock to send a command to the portal and wait for the * result. * flags: Current state of the object. * rc_api_major: Major version of the DPRC API. * rc_api_minor: Minor version of the DPRC API. */ struct dpaa2_mcp { struct resource *res; struct resource_map *map; struct mtx lock; uint16_t flags; uint16_t rc_api_major; uint16_t rc_api_minor; }; /** * @brief Command object holds data to be written to the MC portal. * * header: 8 least significant bytes of the MC portal. * params: Parameters to pass together with the command to MC. Might keep * command execution results. * * NOTE: 64 bytes. */ struct dpaa2_cmd { uint64_t header; uint64_t params[DPAA2_CMD_PARAMS_N]; }; /** * @brief Helper object to access fields of the MC command header. * * srcid: The SoC architected source ID of the submitter. This field is * reserved and cannot be written by the driver. * flags_hw: Bits from 8 to 15 of the command header. Most of them are * reserved at the moment. * status: Command ready/status. This field is used as the handshake field * between MC and the driver. MC reports command completion with * success/error codes in this field. * flags_sw: ... * token: ... * cmdid: ... * * NOTE: 8 bytes. */ struct dpaa2_cmd_header { uint8_t srcid; uint8_t flags_hw; uint8_t status; uint8_t flags_sw; uint16_t token; uint16_t cmdid; } __packed; /** * @brief Information about DPAA2 object. * * id: ID of a logical object resource. * vendor: Object vendor identifier. * irq_count: Number of interrupts supported by the object. * reg_count: Number of mappable regions supported by the object. * state: Object state (combination of states). * ver_major: Major version of the object. * ver_minor: Minor version of the object. * flags: Object attributes flags. * type: ... * label: ... */ struct dpaa2_obj { uint32_t id; uint16_t vendor; uint8_t irq_count; uint8_t reg_count; uint32_t state; uint16_t ver_major; uint16_t ver_minor; uint16_t flags; uint8_t label[DPAA2_LABEL_SZ]; enum dpaa2_dev_type type; }; /** * @brief Attributes of the DPRC object. * * cont_id: Container ID. * portal_id: Container's portal ID. * options: Container's options as set at container's creation. * icid: Container's isolation context ID. */ struct dpaa2_rc_attr { uint32_t cont_id; uint32_t portal_id; uint32_t options; uint32_t icid; }; /** * @brief Description of the object's memory region. * * base_paddr: Region base physical address. * base_offset: Region base offset. * size: Region size (in bytes). * flags: Region flags (cacheable, etc.) * type: Type of a software portal this region belongs to. */ struct dpaa2_rc_obj_region { uint64_t base_paddr; uint64_t base_offset; uint32_t size; uint32_t flags; enum dpaa2_rc_region_type type; }; /** * @brief DPAA2 endpoint descriptor. * * obj_id: Endpoint object ID. * if_id: Interface ID; for endpoints with multiple interfaces * (DPSW, DPDMUX), 0 - otherwise. * type: Endpoint object type, null-terminated string. */ struct dpaa2_ep_desc { uint32_t obj_id; uint32_t if_id; enum dpaa2_dev_type type; }; /** * @brief Configuration of the channel data availability notification (CDAN). * * qman_ctx: Context value provided with each CDAN message. * dpio_id: DPIO object ID configured with a notification channel. * prior: Priority selection within the DPIO channel; valid values * are 0-7, depending on the number of priorities in that channel. 
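 *
 * For instance, the DPNI driver registers a CDAN context roughly as follows
 * ("ctx" being the channel's dpaa2_io_notif_ctx and "io_info" the DPIO's
 * devinfo; a sketch, not a verbatim excerpt):
 *
 *	struct dpaa2_con_notif_cfg notif_cfg;
 *
 *	notif_cfg.dpio_id = io_info->id;
 *	notif_cfg.prior = 0;
 *	notif_cfg.qman_ctx = ctx->qman_ctx;
 *	error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);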
*/ struct dpaa2_con_notif_cfg { uint64_t qman_ctx; uint32_t dpio_id; uint8_t prior; }; /** * @brief Attributes of the DPMCP object. * * id: DPMCP object ID. * options: Options of the MC portal (disabled high-prio commands, etc.). */ struct dpaa2_mcp_attr { uint32_t id; uint32_t options; }; /** * @brief Software context for the DPAA2 MC portal. */ struct dpaa2_mcp_softc { device_t dev; struct dpaa2_mcp_attr attr; struct resource *res[DPAA2_MCP_MAX_RESOURCES]; struct resource_map map[DPAA2_MCP_MAX_RESOURCES]; }; int dpaa2_mcp_init_portal(struct dpaa2_mcp **mcp, struct resource *res, struct resource_map *map, uint16_t flags); void dpaa2_mcp_free_portal(struct dpaa2_mcp *mcp); /* to quickly update command token */ struct dpaa2_cmd *dpaa2_mcp_tk(struct dpaa2_cmd *cmd, const uint16_t token); /* to quickly update command flags */ struct dpaa2_cmd *dpaa2_mcp_f(struct dpaa2_cmd *cmd, const uint16_t flags); #define DPAA2_CMD_INIT_FLAGS(__cmd, __flags) do { \ KASSERT((__cmd) != NULL, ("%s:%d: failed", __func__, __LINE__)); \ struct dpaa2_cmd_header *__hdr; \ uint32_t __dcpi; \ \ __hdr = (struct dpaa2_cmd_header *)&((__cmd)->header); \ __hdr->srcid = 0; \ __hdr->status = DPAA2_CMD_STAT_OK; \ __hdr->token = 0; \ __hdr->cmdid = 0; \ __hdr->flags_hw = DPAA2_CMD_DEF; \ __hdr->flags_sw = DPAA2_CMD_DEF; \ if ((__flags) & DPAA2_CMD_HIGH_PRIO) { \ __hdr->flags_hw |= DPAA2_HW_FLAG_HIGH_PRIO; \ } \ if ((__flags) & DPAA2_CMD_INTR_DIS) { \ __hdr->flags_sw |= DPAA2_SW_FLAG_INTR_DIS; \ } \ for (__dcpi = 0; __dcpi < DPAA2_CMD_PARAMS_N; __dcpi++) { \ (__cmd)->params[__dcpi] = 0; \ } \ } while (0) #define DPAA2_CMD_INIT(c) DPAA2_CMD_INIT_FLAGS((c), DPAA2_CMD_DEF) #define DPAA2_CMD_TK(c, t) dpaa2_mcp_tk((c), (t)) #define DPAA2_CMD_F(c, f) dpaa2_mcp_f((c), (f)) #endif /* _DPAA2_MCP_H */ diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c index b0d7441a55e0..7cb472f45ee4 100644 --- a/sys/dev/dpaa2/dpaa2_ni.c +++ b/sys/dev/dpaa2/dpaa2_ni.c @@ -1,4182 +1,3697 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2023 Dmitry Salychev * Copyright © 2022 Mathew McBride * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * The DPAA2 Network Interface (DPNI) driver. 
* * The DPNI object is a network interface that is configurable to support a wide * range of features from a very basic Ethernet interface up to a * high-functioning network interface. The DPNI supports features that are * expected by standard network stacks, from basic features to offloads. * * DPNIs work with Ethernet traffic, starting with the L2 header. Additional * functions are provided for standard network protocols (L2, L3, L4, etc.). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_acpi.h" #include "opt_platform.h" #include "pcib_if.h" #include "pci_if.h" #include "miibus_if.h" #include "memac_mdio_if.h" #include "dpaa2_types.h" #include "dpaa2_mc.h" #include "dpaa2_mc_if.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_swp_if.h" #include "dpaa2_cmd_if.h" #include "dpaa2_ni.h" +#include "dpaa2_channel.h" +#include "dpaa2_buf.h" #define BIT(x) (1ul << (x)) #define WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0) #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) /* Frame Dequeue Response status bits. */ #define IS_NULL_RESPONSE(stat) ((((stat) >> 4) & 1) == 0) #define ALIGN_UP(x, y) roundup2((x), (y)) #define ALIGN_DOWN(x, y) rounddown2((x), (y)) #define CACHE_LINE_ALIGN(x) ALIGN_UP((x), CACHE_LINE_SIZE) #define DPNI_LOCK(__sc) do { \ mtx_assert(&(__sc)->lock, MA_NOTOWNED); \ mtx_lock(&(__sc)->lock); \ } while (0) #define DPNI_UNLOCK(__sc) do { \ mtx_assert(&(__sc)->lock, MA_OWNED); \ mtx_unlock(&(__sc)->lock); \ } while (0) -#define TX_LOCK(__tx) do { \ - mtx_assert(&(__tx)->lock, MA_NOTOWNED); \ - mtx_lock(&(__tx)->lock); \ -} while (0) -#define TX_UNLOCK(__tx) do { \ - mtx_assert(&(__tx)->lock, MA_OWNED); \ - mtx_unlock(&(__tx)->lock); \ -} while (0) - -#define DPAA2_TX_RING(sc, chan, tc) \ +#define DPAA2_TX_RING(sc, chan, tc) \ (&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)]) +MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)"); + +/* + * How many times channel cleanup routine will be repeated if the RX or TX + * budget was depleted. + */ +#define DPAA2_CLEAN_BUDGET 64 /* sysctl(9)? */ +/* TX/RX budget for the channel cleanup task */ +#define DPAA2_TX_BUDGET 128 /* sysctl(9)? */ +#define DPAA2_RX_BUDGET 256 /* sysctl(9)? */ + #define DPNI_IRQ_INDEX 0 /* Index of the only DPNI IRQ. */ #define DPNI_IRQ_LINK_CHANGED 1 /* Link state changed */ #define DPNI_IRQ_EP_CHANGED 2 /* DPAA2 endpoint dis/connected */ /* Default maximum frame length. */ #define DPAA2_ETH_MFL (ETHER_MAX_LEN - ETHER_CRC_LEN) /* Minimally supported version of the DPNI API. */ #define DPNI_VER_MAJOR 7 #define DPNI_VER_MINOR 0 /* Rx/Tx buffers configuration. 
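 *
 * NOTE: Judging by the constants below, a buffer carries BUF_SWA_SIZE bytes
 * of software annotation plus a hardware annotation area in front of the
 * frame data, i.e. 64 + 64 = 128 bytes of headroom on Rx (BUF_RX_HWA_SIZE)
 * and 64 + 128 = 192 bytes on Tx (BUF_TX_HWA_SIZE).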
*/ #define BUF_ALIGN_V1 256 /* WRIOP v1.0.0 limitation */ #define BUF_ALIGN 64 #define BUF_SWA_SIZE 64 /* SW annotation size */ #define BUF_RX_HWA_SIZE 64 /* HW annotation size */ #define BUF_TX_HWA_SIZE 128 /* HW annotation size */ -#define BUF_SIZE (MJUM9BYTES) -#define DPAA2_TX_BUFRING_SZ (4096u) +#define DPAA2_RX_BUFRING_SZ (4096u) +#define DPAA2_RXE_BUFRING_SZ (1024u) +#define DPAA2_TXC_BUFRING_SZ (4096u) #define DPAA2_TX_SEGLIMIT (16u) /* arbitrary number */ -#define DPAA2_TX_SEG_SZ (4096u) +#define DPAA2_TX_SEG_SZ (PAGE_SIZE) #define DPAA2_TX_SEGS_MAXSZ (DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ) #define DPAA2_TX_SGT_SZ (PAGE_SIZE) /* bytes */ /* Size of a buffer to keep a QoS table key configuration. */ -#define ETH_QOS_KCFG_BUF_SIZE 256 +#define ETH_QOS_KCFG_BUF_SIZE (PAGE_SIZE) /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */ -#define DPAA2_CLASSIFIER_DMA_SIZE 256 - -/* Channel storage buffer configuration. */ -#define ETH_STORE_FRAMES 16u -#define ETH_STORE_SIZE ((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq)) -#define ETH_STORE_ALIGN 64u +#define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE) /* Buffers layout options. */ #define BUF_LOPT_TIMESTAMP 0x1 #define BUF_LOPT_PARSER_RESULT 0x2 #define BUF_LOPT_FRAME_STATUS 0x4 #define BUF_LOPT_PRIV_DATA_SZ 0x8 #define BUF_LOPT_DATA_ALIGN 0x10 #define BUF_LOPT_DATA_HEAD_ROOM 0x20 #define BUF_LOPT_DATA_TAIL_ROOM 0x40 #define DPAA2_NI_BUF_ADDR_MASK (0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */ #define DPAA2_NI_BUF_CHAN_MASK (0xFu) #define DPAA2_NI_BUF_CHAN_SHIFT (60) #define DPAA2_NI_BUF_IDX_MASK (0x7FFFu) #define DPAA2_NI_BUF_IDX_SHIFT (49) #define DPAA2_NI_TX_IDX_MASK (0x7u) #define DPAA2_NI_TX_IDX_SHIFT (57) #define DPAA2_NI_TXBUF_IDX_MASK (0xFFu) #define DPAA2_NI_TXBUF_IDX_SHIFT (49) #define DPAA2_NI_FD_FMT_MASK (0x3u) #define DPAA2_NI_FD_FMT_SHIFT (12) #define DPAA2_NI_FD_ERR_MASK (0xFFu) #define DPAA2_NI_FD_ERR_SHIFT (0) #define DPAA2_NI_FD_SL_MASK (0x1u) #define DPAA2_NI_FD_SL_SHIFT (14) #define DPAA2_NI_FD_LEN_MASK (0x3FFFFu) #define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu) /* Enables TCAM for Flow Steering and QoS look-ups. */ #define DPNI_OPT_HAS_KEY_MASKING 0x10 /* Unique IDs for the supported Rx classification header fields. */ #define DPAA2_ETH_DIST_ETHDST BIT(0) #define DPAA2_ETH_DIST_ETHSRC BIT(1) #define DPAA2_ETH_DIST_ETHTYPE BIT(2) #define DPAA2_ETH_DIST_VLAN BIT(3) #define DPAA2_ETH_DIST_IPSRC BIT(4) #define DPAA2_ETH_DIST_IPDST BIT(5) #define DPAA2_ETH_DIST_IPPROTO BIT(6) #define DPAA2_ETH_DIST_L4SRC BIT(7) #define DPAA2_ETH_DIST_L4DST BIT(8) #define DPAA2_ETH_DIST_ALL (~0ULL) /* L3-L4 network traffic flow hash options. */ #define RXH_L2DA (1 << 1) #define RXH_VLAN (1 << 2) #define RXH_L3_PROTO (1 << 3) #define RXH_IP_SRC (1 << 4) #define RXH_IP_DST (1 << 5) #define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */ #define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ #define RXH_DISCARD (1 << 31) /* Default Rx hash options, set during attaching. */ #define DPAA2_RXH_DEFAULT (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface"); -/* DPAA2 Network Interface resource specification. */ +/* + * DPAA2 Network Interface resource specification. + * + * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in + * the specification! + */ struct resource_spec dpaa2_ni_spec[] = { /* * DPMCP resources. * * NOTE: MC command portals (MCPs) are used to send commands to, and * receive responses from, the MC firmware. 
One portal per DPNI. */ -#define MCP_RES_NUM (1u) -#define MCP_RID_OFF (0u) -#define MCP_RID(rid) ((rid) + MCP_RID_OFF) - /* --- */ - { DPAA2_DEV_MCP, MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, /* * DPIO resources (software portals). * * NOTE: One per running core. While DPIOs are the source of data * availability interrupts, the DPCONs are used to identify the * network interface that has produced ingress data to that core. */ -#define IO_RES_NUM (16u) -#define IO_RID_OFF (MCP_RID_OFF + MCP_RES_NUM) -#define IO_RID(rid) ((rid) + IO_RID_OFF) - /* --- */ - { DPAA2_DEV_IO, IO_RID(0), RF_ACTIVE | RF_SHAREABLE }, - { DPAA2_DEV_IO, IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, - { DPAA2_DEV_IO, IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(0), RF_ACTIVE | RF_SHAREABLE }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, + { DPAA2_DEV_IO, DPAA2_NI_IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL }, /* * DPBP resources (buffer pools). * * NOTE: One per network interface. */ -#define BP_RES_NUM (1u) -#define BP_RID_OFF (IO_RID_OFF + IO_RES_NUM) -#define BP_RID(rid) ((rid) + BP_RID_OFF) - /* --- */ - { DPAA2_DEV_BP, BP_RID(0), RF_ACTIVE }, + { DPAA2_DEV_BP, DPAA2_NI_BP_RID(0), RF_ACTIVE }, /* * DPCON resources (channels). * * NOTE: One DPCON per core where Rx or Tx confirmation traffic to be * distributed to. 
* NOTE: Since it is necessary to distinguish between traffic from * different network interfaces arriving on the same core, the * DPCONs must be private to the DPNIs. */ -#define CON_RES_NUM (16u) -#define CON_RID_OFF (BP_RID_OFF + BP_RES_NUM) -#define CON_RID(rid) ((rid) + CON_RID_OFF) - /* --- */ - { DPAA2_DEV_CON, CON_RID(0), RF_ACTIVE }, - { DPAA2_DEV_CON, CON_RID(1), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(2), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(3), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(4), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(5), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(6), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(7), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(8), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(9), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(10), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(11), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(12), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(13), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(14), RF_ACTIVE | RF_OPTIONAL }, - { DPAA2_DEV_CON, CON_RID(15), RF_ACTIVE | RF_OPTIONAL }, - /* --- */ + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(0), RF_ACTIVE }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(1), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(2), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(3), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(4), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(5), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(6), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(7), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(8), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(9), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(10), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(11), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(12), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(13), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(14), RF_ACTIVE | RF_OPTIONAL }, + { DPAA2_DEV_CON, DPAA2_NI_CON_RID(15), RF_ACTIVE | RF_OPTIONAL }, + RESOURCE_SPEC_END }; /* Supported header fields for Rx hash distribution key */ static const struct dpaa2_eth_dist_fields dist_fields[] = { { /* L2 header */ .rxnfc_field = RXH_L2DA, .cls_prot = NET_PROT_ETH, .cls_field = NH_FLD_ETH_DA, .id = DPAA2_ETH_DIST_ETHDST, .size = 6, }, { .cls_prot = NET_PROT_ETH, .cls_field = NH_FLD_ETH_SA, .id = DPAA2_ETH_DIST_ETHSRC, .size = 6, }, { /* This is the last ethertype field parsed: * depending on frame format, it can be the MAC ethertype * or the VLAN etype. */ .cls_prot = NET_PROT_ETH, .cls_field = NH_FLD_ETH_TYPE, .id = DPAA2_ETH_DIST_ETHTYPE, .size = 2, }, { /* VLAN header */ .rxnfc_field = RXH_VLAN, .cls_prot = NET_PROT_VLAN, .cls_field = NH_FLD_VLAN_TCI, .id = DPAA2_ETH_DIST_VLAN, .size = 2, }, { /* IP header */ .rxnfc_field = RXH_IP_SRC, .cls_prot = NET_PROT_IP, .cls_field = NH_FLD_IP_SRC, .id = DPAA2_ETH_DIST_IPSRC, .size = 4, }, { .rxnfc_field = RXH_IP_DST, .cls_prot = NET_PROT_IP, .cls_field = NH_FLD_IP_DST, .id = DPAA2_ETH_DIST_IPDST, .size = 4, }, { .rxnfc_field = RXH_L3_PROTO, .cls_prot = NET_PROT_IP, .cls_field = NH_FLD_IP_PROTO, .id = DPAA2_ETH_DIST_IPPROTO, .size = 1, }, { /* Using UDP ports, this is functionally equivalent to raw * byte pairs from L4 header. 
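 * The same two-byte offsets hold the source and destination ports for TCP
 * and SCTP as well, hence the RXH_L4_B_0_1/RXH_L4_B_2_3 mapping above.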
*/ .rxnfc_field = RXH_L4_B_0_1, .cls_prot = NET_PROT_UDP, .cls_field = NH_FLD_UDP_PORT_SRC, .id = DPAA2_ETH_DIST_L4SRC, .size = 2, }, { .rxnfc_field = RXH_L4_B_2_3, .cls_prot = NET_PROT_UDP, .cls_field = NH_FLD_UDP_PORT_DST, .id = DPAA2_ETH_DIST_L4DST, .size = 2, }, }; static struct dpni_stat { int page; int cnt; char *name; char *desc; } dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = { /* PAGE, COUNTER, NAME, DESCRIPTION */ { 0, 0, "in_all_frames", "All accepted ingress frames" }, { 0, 1, "in_all_bytes", "Bytes in all accepted ingress frames" }, { 0, 2, "in_multi_frames", "Multicast accepted ingress frames" }, { 1, 0, "eg_all_frames", "All egress frames transmitted" }, { 1, 1, "eg_all_bytes", "Bytes in all frames transmitted" }, { 1, 2, "eg_multi_frames", "Multicast egress frames transmitted" }, { 2, 0, "in_filtered_frames", "All ingress frames discarded due to " "filtering" }, { 2, 1, "in_discarded_frames", "All frames discarded due to errors" }, { 2, 2, "in_nobuf_discards", "Discards on ingress side due to buffer " "depletion in DPNI buffer pools" }, }; +struct dpaa2_ni_rx_ctx { + struct mbuf *head; + struct mbuf *tail; + int cnt; + bool last; +}; + /* Device interface */ static int dpaa2_ni_probe(device_t); static int dpaa2_ni_attach(device_t); static int dpaa2_ni_detach(device_t); /* DPAA2 network interface setup and configuration */ static int dpaa2_ni_setup(device_t); static int dpaa2_ni_setup_channels(device_t); -static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *, - enum dpaa2_ni_queue_type); static int dpaa2_ni_bind(device_t); static int dpaa2_ni_setup_rx_dist(device_t); static int dpaa2_ni_setup_irqs(device_t); static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *); static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *); static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *); static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *); static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *); /* Tx/Rx flow configuration */ static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *); static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *); static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *); /* Configuration subroutines */ static int dpaa2_ni_set_buf_layout(device_t); static int dpaa2_ni_set_pause_frame(device_t); static int dpaa2_ni_set_qos_table(device_t); static int dpaa2_ni_set_mac_addr(device_t); static int dpaa2_ni_set_hash(device_t, uint64_t); static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t); -/* Buffers and buffer pools */ -static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t); -static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *); -static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *); -static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *, - struct dpaa2_ni_channel *); - /* Frame descriptor routines */ static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *, struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *); static int dpaa2_ni_fd_err(struct dpaa2_fd *); static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *); static int dpaa2_ni_fd_format(struct dpaa2_fd *); static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *); static int dpaa2_ni_fd_offset(struct dpaa2_fd *); /* Various subroutines */ static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t); static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *); -static int dpaa2_ni_chan_storage_next(struct 
dpaa2_ni_channel *, - struct dpaa2_dq **); /* Network interface routines */ static void dpaa2_ni_init(void *); static int dpaa2_ni_transmit(if_t , struct mbuf *); static void dpaa2_ni_qflush(if_t ); static int dpaa2_ni_ioctl(if_t , u_long, caddr_t); static int dpaa2_ni_update_mac_filters(if_t ); static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int); /* Interrupt handlers */ static void dpaa2_ni_intr(void *); /* MII handlers */ static void dpaa2_ni_miibus_statchg(device_t); static int dpaa2_ni_media_change(if_t ); static void dpaa2_ni_media_status(if_t , struct ifmediareq *); static void dpaa2_ni_media_tick(void *); -/* DMA mapping callback */ -static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int); - /* Tx/Rx routines. */ -static void dpaa2_ni_poll(void *); -static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *, +static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *); +static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *); +static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *, struct dpaa2_ni_tx_ring *, struct mbuf *); -static void dpaa2_ni_bp_task(void *, int); +static void dpaa2_ni_cleanup_task(void *, int); /* Tx/Rx subroutines */ -static int dpaa2_ni_consume_frames(struct dpaa2_ni_channel *, - struct dpaa2_ni_fq **, uint32_t *); -static int dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *, - struct dpaa2_fd *); -static int dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *, +static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **, + uint32_t *); +static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *, + struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *); +static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *, struct dpaa2_fd *); -static int dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *, +static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *, struct dpaa2_fd *); /* sysctl(9) */ static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS); static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS); static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS); static int dpaa2_ni_probe(device_t dev) { /* DPNI device will be added by a parent resource container itself. */ device_set_desc(dev, "DPAA2 Network Interface"); return (BUS_PROBE_DEFAULT); } static int dpaa2_ni_attach(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; device_t mcp_dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *mcp_dinfo; struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; if_t ifp; char tq_name[32]; int error; sc->dev = dev; sc->ifp = NULL; sc->miibus = NULL; sc->mii = NULL; sc->media_status = 0; sc->if_flags = 0; sc->link_state = LINK_STATE_UNKNOWN; sc->buf_align = 0; /* For debug purposes only! 
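 * These counters track anomalies observed on the Rx/Tx datapaths
 * (single-buffer vs scatter/gather frames, enqueue rejections, etc.).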
*/ sc->rx_anomaly_frames = 0; sc->rx_single_buf_frames = 0; sc->rx_sg_buf_frames = 0; sc->rx_enq_rej_frames = 0; sc->rx_ieoi_err_frames = 0; sc->tx_single_buf_frames = 0; sc->tx_sg_frames = 0; DPAA2_ATOMIC_XCHG(&sc->buf_num, 0); DPAA2_ATOMIC_XCHG(&sc->buf_free, 0); - sc->bp_dmat = NULL; - sc->st_dmat = NULL; sc->rxd_dmat = NULL; sc->qos_dmat = NULL; - sc->qos_kcfg.type = DPAA2_BUF_STORE; - sc->qos_kcfg.store.dmap = NULL; - sc->qos_kcfg.store.paddr = 0; - sc->qos_kcfg.store.vaddr = NULL; + sc->qos_kcfg.dmap = NULL; + sc->qos_kcfg.paddr = 0; + sc->qos_kcfg.vaddr = NULL; - sc->rxd_kcfg.type = DPAA2_BUF_STORE; - sc->rxd_kcfg.store.dmap = NULL; - sc->rxd_kcfg.store.paddr = 0; - sc->rxd_kcfg.store.vaddr = NULL; + sc->rxd_kcfg.dmap = NULL; + sc->rxd_kcfg.paddr = 0; + sc->rxd_kcfg.vaddr = NULL; sc->mac.dpmac_id = 0; sc->mac.phy_dev = NULL; memset(sc->mac.addr, 0, ETHER_ADDR_LEN); error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res); if (error) { device_printf(dev, "%s: failed to allocate resources: " "error=%d\n", __func__, error); goto err_exit; } /* Obtain MC portal. */ - mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]); + mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]); mcp_dinfo = device_get_ivars(mcp_dev); dinfo->portal = mcp_dinfo->portal; mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF); /* Allocate network interface */ ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "%s: failed to allocate network interface\n", __func__); goto err_exit; } sc->ifp = ifp; if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev)); if_setsoftc(ifp, sc); if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST); if_setinitfn(ifp, dpaa2_ni_init); if_setioctlfn(ifp, dpaa2_ni_ioctl); if_settransmitfn(ifp, dpaa2_ni_transmit); if_setqflushfn(ifp, dpaa2_ni_qflush); if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU); if_setcapenable(ifp, if_getcapabilities(ifp)); DPAA2_CMD_INIT(&cmd); /* Open resource container and network interface object. */ error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } + bzero(tq_name, sizeof(tq_name)); + snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev)); + /* * XXX-DSL: Release new buffers on Buffer Pool State Change Notification * (BPSCN) returned as a result to the VDQ command instead. * It is similar to CDAN processed in dpaa2_io_intr(). */ /* Create a taskqueue thread to release new buffers to the pool. 
*/ - TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc); - bzero(tq_name, sizeof (tq_name)); - snprintf(tq_name, sizeof (tq_name), "%s_tqbp", - device_get_nameunit(dev)); sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK, taskqueue_thread_enqueue, &sc->bp_taskq); if (sc->bp_taskq == NULL) { device_printf(dev, "%s: failed to allocate task queue: %s\n", __func__, tq_name); goto close_ni; } taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name); + /* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */ + /* taskqueue_thread_enqueue, &sc->cleanup_taskq); */ + /* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */ + /* "dpaa2_ch cleanup"); */ + error = dpaa2_ni_setup(dev); if (error) { device_printf(dev, "%s: failed to setup DPNI: error=%d\n", __func__, error); goto close_ni; } error = dpaa2_ni_setup_channels(dev); if (error) { device_printf(dev, "%s: failed to setup QBMan channels: " "error=%d\n", __func__, error); goto close_ni; } error = dpaa2_ni_bind(dev); if (error) { device_printf(dev, "%s: failed to bind DPNI: error=%d\n", __func__, error); goto close_ni; } error = dpaa2_ni_setup_irqs(dev); if (error) { device_printf(dev, "%s: failed to setup IRQs: error=%d\n", __func__, error); goto close_ni; } error = dpaa2_ni_setup_sysctls(sc); if (error) { device_printf(dev, "%s: failed to setup sysctls: error=%d\n", __func__, error); goto close_ni; } ether_ifattach(sc->ifp, sc->mac.addr); callout_init(&sc->mii_callout, 0); return (0); close_ni: DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (ENXIO); } static void dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq* ifmr) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); DPNI_LOCK(sc); ifmr->ifm_count = 0; ifmr->ifm_mask = 0; ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_current = ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media; /* * In non-PHY usecases, we need to signal link state up, otherwise * certain things requiring a link event (e.g async DHCP client) from * devd do not happen. */ if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) { if_link_state_change(ifp, LINK_STATE_UP); } /* * TODO: Check the status of the link partner (DPMAC, DPNI or other) and * reset if down. This is different to the DPAA2_MAC_LINK_TYPE_PHY as * the MC firmware sets the status, instead of us telling the MC what * it is. */ DPNI_UNLOCK(sc); return; } static void dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc) { /* * FIXME: When the DPNI is connected to a DPMAC, we can get the * 'apparent' speed from it. */ sc->fixed_link = true; ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change, dpaa2_ni_fixed_media_status); ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T); } static int dpaa2_ni_detach(device_t dev) { /* TBD */ return (0); } /** * @brief Configure DPAA2 network interface object. 
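 *
 * Opens the resource container and the DPNI, checks that the DPNI API
 * version is at least DPNI_VER_MAJOR.DPNI_VER_MINOR, resets the object and
 * caches its attributes, then configures buffer layout, DMA resources, the
 * link to the connected endpoint (DPMAC, DPNI, DPMUX or DPSW), Rx/Tx pause
 * frames, the QoS table, the broadcast MAC filter entry and the maximum
 * frame length (DPAA2_ETH_MFL).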
*/ static int dpaa2_ni_setup(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */ struct dpaa2_cmd cmd; uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */ uint16_t rc_token, ni_token, mac_token; struct dpaa2_mac_attr attr; enum dpaa2_mac_link_type link_type; uint32_t link; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* Check if we can work with this DPNI object. */ error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major, &sc->api_minor); if (error) { device_printf(dev, "%s: failed to get DPNI API version\n", __func__); goto close_ni; } if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { device_printf(dev, "%s: DPNI API version %u.%u not supported, " "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor, DPNI_VER_MAJOR, DPNI_VER_MINOR); error = ENODEV; goto close_ni; } /* Reset the DPNI object. */ error = DPAA2_CMD_NI_RESET(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to reset DPNI: id=%d\n", __func__, dinfo->id); goto close_ni; } /* Obtain attributes of the DPNI object. */ error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr); if (error) { device_printf(dev, "%s: failed to obtain DPNI attributes: " "id=%d\n", __func__, dinfo->id); goto close_ni; } if (bootverbose) { device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d " "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues, sc->attr.num.channels, sc->attr.wriop_ver); device_printf(dev, "\ttraffic classes: rx=%d tx=%d " "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs, sc->attr.num.cgs); device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d " "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan, sc->attr.entries.qos, sc->attr.entries.fs); device_printf(dev, "\tkey sizes: qos=%d fs=%d\n", sc->attr.key_size.qos, sc->attr.key_size.fs); } /* Configure buffer layouts of the DPNI queues. */ error = dpaa2_ni_set_buf_layout(dev); if (error) { device_printf(dev, "%s: failed to configure buffer layout\n", __func__); goto close_ni; } /* Configure DMA resources. */ error = dpaa2_ni_setup_dma(sc); if (error) { device_printf(dev, "%s: failed to setup DMA\n", __func__); goto close_ni; } /* Setup link between DPNI and an object it's connected to.
*/ ep1_desc.obj_id = dinfo->id; ep1_desc.if_id = 0; /* DPNI has the only endpoint */ ep1_desc.type = dinfo->dtype; error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token), &ep1_desc, &ep2_desc, &link); if (error) { device_printf(dev, "%s: failed to obtain an object DPNI is " "connected to: error=%d\n", __func__, error); } else { device_printf(dev, "connected to %s (id=%d)\n", dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id); error = dpaa2_ni_set_mac_addr(dev); if (error) { device_printf(dev, "%s: failed to set MAC address: " "error=%d\n", __func__, error); } if (ep2_desc.type == DPAA2_DEV_MAC) { /* * This is the simplest case when DPNI is connected to * DPMAC directly. */ sc->mac.dpmac_id = ep2_desc.obj_id; link_type = DPAA2_MAC_LINK_TYPE_NONE; /* * Need to determine if DPMAC type is PHY (attached to * conventional MII PHY) or FIXED (usually SFP/SerDes, * link state managed by MC firmware). */ error = DPAA2_CMD_MAC_OPEN(sc->dev, child, DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id, &mac_token); /* * Under VFIO, the DPMAC might be sitting in another * container (DPRC) we don't have access to. * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is * the case. */ if (error) { device_printf(dev, "%s: failed to open " "connected DPMAC: %d (assuming in other DPRC)\n", __func__, sc->mac.dpmac_id); link_type = DPAA2_MAC_LINK_TYPE_FIXED; } else { error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child, &cmd, &attr); if (error) { device_printf(dev, "%s: failed to get " "DPMAC attributes: id=%d, " "error=%d\n", __func__, dinfo->id, error); } else { link_type = attr.link_type; } } DPAA2_CMD_MAC_CLOSE(dev, child, &cmd); if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) { device_printf(dev, "connected DPMAC is in FIXED " "mode\n"); dpaa2_ni_setup_fixed_link(sc); } else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) { device_printf(dev, "connected DPMAC is in PHY " "mode\n"); error = DPAA2_MC_GET_PHY_DEV(dev, &sc->mac.phy_dev, sc->mac.dpmac_id); if (error == 0) { error = MEMAC_MDIO_SET_NI_DEV( sc->mac.phy_dev, dev); if (error != 0) { device_printf(dev, "%s: failed " "to set dpni dev on memac " "mdio dev %s: error=%d\n", __func__, device_get_nameunit( sc->mac.phy_dev), error); } } if (error == 0) { error = MEMAC_MDIO_GET_PHY_LOC( sc->mac.phy_dev, &sc->mac.phy_loc); if (error == ENODEV) { error = 0; } if (error != 0) { device_printf(dev, "%s: failed " "to get phy location from " "memac mdio dev %s: error=%d\n", __func__, device_get_nameunit( sc->mac.phy_dev), error); } } if (error == 0) { error = mii_attach(sc->mac.phy_dev, &sc->miibus, sc->ifp, dpaa2_ni_media_change, dpaa2_ni_media_status, BMSR_DEFCAPMASK, sc->mac.phy_loc, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "%s: failed " "to attach to miibus: " "error=%d\n", __func__, error); } } if (error == 0) { sc->mii = device_get_softc(sc->miibus); } } else { device_printf(dev, "%s: DPMAC link type is not " "supported\n", __func__); } } else if (ep2_desc.type == DPAA2_DEV_NI || ep2_desc.type == DPAA2_DEV_MUX || ep2_desc.type == DPAA2_DEV_SW) { dpaa2_ni_setup_fixed_link(sc); } } /* Select mode to enqueue frames. */ /* ... TBD ... */ /* * Update link configuration to enable Rx/Tx pause frames support. * * NOTE: MC may generate an interrupt to the DPMAC and request changes * in link configuration. It might be necessary to attach miibus * and PHY before this point. */ error = dpaa2_ni_set_pause_frame(dev); if (error) { device_printf(dev, "%s: failed to configure Rx/Tx pause " "frames\n", __func__); goto close_ni; } /* Configure ingress traffic classification. 
*/ error = dpaa2_ni_set_qos_table(dev); if (error) { device_printf(dev, "%s: failed to configure QoS table: " "error=%d\n", __func__, error); goto close_ni; } /* Add broadcast physical address to the MAC filtering table. */ memset(eth_bca, 0xff, ETHER_ADDR_LEN); error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd, ni_token), eth_bca); if (error) { device_printf(dev, "%s: failed to add broadcast physical " "address to the MAC filtering table\n", __func__); goto close_ni; } /* Set the maximum allowed length for received frames. */ error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL); if (error) { device_printf(dev, "%s: failed to set maximum length for " "received frames\n", __func__); goto close_ni; } (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } /** * @brief Configure QBMan channels and register data availability notifications. */ static int dpaa2_ni_setup_channels(device_t dev) { - device_t pdev = device_get_parent(dev); - device_t child = dev; - device_t io_dev, con_dev; + device_t iodev, condev, bpdev; struct dpaa2_ni_softc *sc = device_get_softc(dev); - struct dpaa2_ni_channel *channel; - struct dpaa2_con_softc *consc; - struct dpaa2_con_notif_cfg notif_cfg; - struct dpaa2_devinfo *rc_info = device_get_ivars(pdev); - struct dpaa2_devinfo *io_info; - struct dpaa2_devinfo *con_info; - struct dpaa2_io_notif_ctx *ctx; - struct dpaa2_buf *buf; - struct dpaa2_cmd cmd; - struct sysctl_ctx_list *sysctl_ctx; - struct sysctl_oid *node; - struct sysctl_oid_list *parent; uint32_t i, num_chan; - uint16_t rc_token, con_token; int error; - /* Calculate number of the channels based on the allocated resources. */ - for (i = 0; i < IO_RES_NUM; i++) { - if (!sc->res[IO_RID(i)]) { + /* Calculate number of the channels based on the allocated resources */ + for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) { + if (!sc->res[DPAA2_NI_IO_RID(i)]) { break; } } num_chan = i; - for (i = 0; i < CON_RES_NUM; i++) { - if (!sc->res[CON_RID(i)]) { + for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) { + if (!sc->res[DPAA2_NI_CON_RID(i)]) { break; } } num_chan = i < num_chan ? i : num_chan; - sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS - ? DPAA2_NI_MAX_CHANNELS : num_chan; + sc->chan_n = num_chan > DPAA2_MAX_CHANNELS + ? DPAA2_MAX_CHANNELS : num_chan; sc->chan_n = sc->chan_n > sc->attr.num.queues ? sc->attr.num.queues : sc->chan_n; - device_printf(dev, "channels=%d\n", sc->chan_n); - - sysctl_ctx = device_get_sysctl_ctx(sc->dev); - parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); - node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels", - CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels"); - parent = SYSCTL_CHILDREN(node); - - /* Setup channels for the portal.
*/ - for (uint32_t i = 0; i < sc->chan_n; i++) { - io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]); - io_info = device_get_ivars(io_dev); - - con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]); - consc = device_get_softc(con_dev); - con_info = device_get_ivars(con_dev); - - DPAA2_CMD_INIT(&cmd); + KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: " + "chan_n=%d", __func__, sc->chan_n)); - error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rc_info->id, - &rc_token); - if (error) { - device_printf(dev, "%s: failed to open resource " - "container: id=%d, error=%d\n", __func__, - rc_info->id, error); - return (error); - } - error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, con_info->id, - &con_token); - if (error) { - device_printf(dev, "%s: failed to open DPCON: id=%d, " - "error=%d\n", __func__, con_info->id, error); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); - return (error); - } - - error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd); - if (error) { - device_printf(dev, "%s: failed to enable channel: " - "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id, - consc->attr.chan_id); - (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); - return (error); - } - - channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI, - M_WAITOK | M_ZERO); - if (!channel) { - device_printf(dev, "%s: failed to allocate a channel\n", - __func__); - (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); - return (ENOMEM); - } - - sc->channels[i] = channel; - - channel->id = consc->attr.chan_id; - channel->flowid = i; - channel->ni_dev = dev; - channel->io_dev = io_dev; - channel->con_dev = con_dev; - channel->recycled_n = 0; - channel->tx_frames = 0; /* for debug purposes */ - channel->tx_dropped = 0; /* for debug purposes */ - channel->rxq_n = 0; - - buf = &channel->store; - buf->type = DPAA2_BUF_STORE; - buf->store.dmat = NULL; - buf->store.dmap = NULL; - buf->store.paddr = 0; - buf->store.vaddr = NULL; - - /* Setup WQ channel notification context. */ - ctx = &channel->ctx; - ctx->qman_ctx = (uint64_t) ctx; - ctx->cdan_en = true; - ctx->fq_chan_id = channel->id; - ctx->io_dev = channel->io_dev; - ctx->channel = channel; - ctx->poll = dpaa2_ni_poll; - - /* Register the new notification context. */ - error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx); - if (error) { - device_printf(dev, "%s: failed to register notification " - "context\n", __func__); - (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); - return (error); - } - - /* Register DPCON notification with Management Complex. */ - notif_cfg.dpio_id = io_info->id; - notif_cfg.prior = 0; - notif_cfg.qman_ctx = ctx->qman_ctx; - error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, ¬if_cfg); - if (error) { - device_printf(dev, "%s: failed to set DPCON " - "notification: dpcon_id=%d, chan_id=%d\n", __func__, - con_info->id, consc->attr.chan_id); - (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); - return (error); - } + device_printf(dev, "channels=%d\n", sc->chan_n); - /* Allocate initial # of Rx buffers and a channel storage. 
*/ - error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT); - if (error) { - device_printf(dev, "%s: failed to seed buffer pool\n", - __func__); - (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); - return (error); - } - error = dpaa2_ni_seed_chan_storage(sc, channel); - if (error) { - device_printf(dev, "%s: failed to seed channel " - "storage\n", __func__); - (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); - return (error); - } + for (i = 0; i < sc->chan_n; i++) { + iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]); + condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]); + /* Only one buffer pool available at the moment */ + bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]); - /* Prepare queues for this channel. */ - error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF); - if (error) { - device_printf(dev, "%s: failed to prepare TxConf " - "queue: error=%d\n", __func__, error); - (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); - return (error); - } - error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX); - if (error) { - device_printf(dev, "%s: failed to prepare Rx queue: " - "error=%d\n", __func__, error); - (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); + error = dpaa2_chan_setup(dev, iodev, condev, bpdev, + &sc->channels[i], i, dpaa2_ni_cleanup_task); + if (error != 0) { + device_printf(dev, "%s: dpaa2_chan_setup() failed: " + "error=%d, chan_id=%d\n", __func__, error, i); return (error); } - - if (bootverbose) { - device_printf(dev, "channel: dpio_id=%d " - "dpcon_id=%d chan_id=%d, priorities=%d\n", - io_info->id, con_info->id, channel->id, - consc->attr.prior_num); - } - - (void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd); - (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, - rc_token)); } - /* There is exactly one Rx error queue per DPNI. */ - error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR); - if (error) { + /* There is exactly one Rx error queue per network interface */ + error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR); + if (error != 0) { device_printf(dev, "%s: failed to prepare RxError queue: " "error=%d\n", __func__, error); return (error); } return (0); } -/** - * @brief Performs an initial configuration of the frame queues. - */ -static int -dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan, - enum dpaa2_ni_queue_type queue_type) -{ - struct dpaa2_ni_softc *sc = device_get_softc(dev); - struct dpaa2_ni_fq *fq; - - switch (queue_type) { - case DPAA2_NI_QUEUE_TX_CONF: - /* One queue per channel. */ - fq = &chan->txc_queue; - - fq->consume = dpaa2_ni_tx_conf; - fq->chan = chan; - fq->flowid = chan->flowid; - fq->tc = 0; /* ignored */ - fq->type = queue_type; - - break; - case DPAA2_NI_QUEUE_RX: - KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS, - ("too many Rx traffic classes: rx_tcs=%d\n", - sc->attr.num.rx_tcs)); - - /* One queue per Rx traffic class within a channel. */ - for (int i = 0; i < sc->attr.num.rx_tcs; i++) { - fq = &chan->rx_queues[i]; - - fq->consume = dpaa2_ni_rx; - fq->chan = chan; - fq->flowid = chan->flowid; - fq->tc = (uint8_t) i; - fq->type = queue_type; - - chan->rxq_n++; - } - break; - case DPAA2_NI_QUEUE_RX_ERR: - /* One queue per network interface. 
*/ - fq = &sc->rxe_queue; - - fq->consume = dpaa2_ni_rx_err; - fq->chan = chan; - fq->flowid = 0; /* ignored */ - fq->tc = 0; /* ignored */ - fq->type = queue_type; - break; - default: - device_printf(dev, "%s: unexpected frame queue type: %d\n", - __func__, queue_type); - return (EINVAL); - } - - return (0); -} - /** * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels. */ static int dpaa2_ni_bind(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; device_t bp_dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *bp_info; struct dpaa2_cmd cmd; struct dpaa2_ni_pools_cfg pools_cfg; struct dpaa2_ni_err_cfg err_cfg; - struct dpaa2_ni_channel *chan; + struct dpaa2_channel *chan; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* Select buffer pool (only one available at the moment). */ - bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); + bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]); bp_info = device_get_ivars(bp_dev); /* Configure buffers pool. */ pools_cfg.pools_num = 1; pools_cfg.pools[0].bp_obj_id = bp_info->id; pools_cfg.pools[0].backup_flag = 0; pools_cfg.pools[0].buf_sz = sc->buf_sz; error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg); if (error) { device_printf(dev, "%s: failed to set buffer pools\n", __func__); goto close_ni; } /* Setup ingress traffic distribution. */ error = dpaa2_ni_setup_rx_dist(dev); if (error && error != EOPNOTSUPP) { device_printf(dev, "%s: failed to setup ingress traffic " "distribution\n", __func__); goto close_ni; } if (bootverbose && error == EOPNOTSUPP) { device_printf(dev, "Ingress traffic distribution not " "supported\n"); } /* Configure handling of error frames. */ err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK; err_cfg.set_err_fas = false; err_cfg.action = DPAA2_NI_ERR_DISCARD; error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg); if (error) { device_printf(dev, "%s: failed to set errors behavior\n", __func__); goto close_ni; } /* Configure channel queues to generate CDANs. */ for (uint32_t i = 0; i < sc->chan_n; i++) { chan = sc->channels[i]; /* Setup Rx flows. */ for (uint32_t j = 0; j < chan->rxq_n; j++) { error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]); if (error) { device_printf(dev, "%s: failed to setup Rx " "flow: error=%d\n", __func__, error); goto close_ni; } } /* Setup Tx flow. */ error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue); if (error) { device_printf(dev, "%s: failed to setup Tx " "flow: error=%d\n", __func__, error); goto close_ni; } } /* Configure RxError queue to generate CDAN. */ error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue); if (error) { device_printf(dev, "%s: failed to setup RxError flow: " "error=%d\n", __func__, error); goto close_ni; } /* * Get the Queuing Destination ID (QDID) that should be used for frame * enqueue operations. 
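Every helper in this file repeats the same Management Complex bracket that dpaa2_ni_bind() opens with above: open the resource container, open the DPNI under it, issue commands, and unwind with the matching tokens in reverse order. The skeleton, using only names that already appear here (the wrapper function itself is illustrative):

    static int
    example_mc_bracket(device_t dev, device_t child, int rc_id, int ni_id)
    {
    	struct dpaa2_cmd cmd;
    	uint16_t rc_token, ni_token;
    	int error;

    	DPAA2_CMD_INIT(&cmd);
    	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rc_id, &rc_token);
    	if (error) {
    		goto err_exit;		/* nothing to unwind yet */
    	}
    	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, ni_id, &ni_token);
    	if (error) {
    		goto close_rc;		/* unwind the container only */
    	}

    	/* ... DPNI commands go here, with "goto close_ni" on failure ... */

    	error = 0;
    close_ni:
    	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
    close_rc:
    	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
    err_exit:
    	return (error);
    }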
*/ error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX, &sc->tx_qdid); if (error) { device_printf(dev, "%s: failed to get Tx queuing destination " "ID\n", __func__); goto close_ni; } (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } /** * @brief Setup ingress traffic distribution. * * NOTE: Ingress traffic distribution is valid only when DPNI_OPT_NO_FS option * hasn't been set for DPNI and a number of DPNI queues > 1. */ static int dpaa2_ni_setup_rx_dist(device_t dev) { /* * Have the interface implicitly distribute traffic based on the default * hash key. */ return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT)); } static int dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *con_info; struct dpaa2_cmd cmd; struct dpaa2_ni_queue_cfg queue_cfg = {0}; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* Obtain DPCON associated with the FQ's channel. 
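dpaa2_ni_setup_rx_flow(), which continues below, performs the read-modify-write cycle used for every queue binding in this driver: GET_QUEUE to learn the FQID the Management Complex assigned, then SET_QUEUE to point the queue at a DPCON and stash a software context that comes back with every dequeue response. Reduced to its essentials (the helper name is illustrative):

    static int
    example_bind_queue(device_t dev, device_t child, struct dpaa2_cmd *cmd,
        struct dpaa2_ni_fq *fq, int dpcon_id)
    {
    	struct dpaa2_ni_queue_cfg queue_cfg = {0};
    	int error;

    	queue_cfg.type = DPAA2_NI_QUEUE_RX;
    	queue_cfg.tc = fq->tc;
    	queue_cfg.idx = fq->flowid;
    	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
    	if (error) {
    		return (error);
    	}
    	fq->fqid = queue_cfg.fqid;	/* learned from the MC */

    	queue_cfg.dest_id = dpcon_id;	/* deliver notifications via this DPCON */
    	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
    	queue_cfg.priority = 1;
    	queue_cfg.user_ctx = (uint64_t)(uintmax_t)fq;	/* echoed in dequeue responses */
    	queue_cfg.options = DPAA2_NI_QUEUE_OPT_USER_CTX | DPAA2_NI_QUEUE_OPT_DEST;
    	return (DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg));
    }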
*/ con_info = device_get_ivars(fq->chan->con_dev); queue_cfg.type = DPAA2_NI_QUEUE_RX; queue_cfg.tc = fq->tc; queue_cfg.idx = fq->flowid; error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to obtain Rx queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); goto close_ni; } fq->fqid = queue_cfg.fqid; queue_cfg.dest_id = con_info->id; queue_cfg.dest_type = DPAA2_NI_DEST_DPCON; queue_cfg.priority = 1; queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq; queue_cfg.options = DPAA2_NI_QUEUE_OPT_USER_CTX | DPAA2_NI_QUEUE_OPT_DEST; error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to update Rx queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); goto close_ni; } if (bootverbose) { device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, " "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id, fq->fqid, (uint64_t) fq); } (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } static int dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); + struct dpaa2_channel *ch = fq->chan; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *con_info; struct dpaa2_ni_queue_cfg queue_cfg = {0}; struct dpaa2_ni_tx_ring *tx; struct dpaa2_buf *buf; struct dpaa2_cmd cmd; uint32_t tx_rings_n = 0; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* Obtain DPCON associated with the FQ's channel. */ con_info = device_get_ivars(fq->chan->con_dev); - KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS, + KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS, ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__, sc->attr.num.tx_tcs)); KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX, ("%s: too many Tx buffers (%d): max=%d\n", __func__, DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX)); /* Setup Tx rings. */ for (int i = 0; i < sc->attr.num.tx_tcs; i++) { queue_cfg.type = DPAA2_NI_QUEUE_TX; queue_cfg.tc = i; queue_cfg.idx = fq->flowid; queue_cfg.chan_id = fq->chan->id; error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to obtain Tx queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); goto close_ni; } tx = &fq->tx_rings[i]; tx->fq = fq; tx->fqid = queue_cfg.fqid; tx->txid = tx_rings_n; if (bootverbose) { device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, " "fqid=%d\n", fq->flowid, i, fq->chan->id, queue_cfg.fqid); } mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF); /* Allocate Tx ring buffer. 
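Each Tx ring gains a buf_ring of preallocated buffers in the block below: every buffer is linked to the channel's DMA tags once, enqueued up front, borrowed by the transmit path, and handed back on completion. The two sides of that exchange, as sketches (helper names illustrative):

    static struct dpaa2_buf *
    example_take_txbuf(struct dpaa2_ni_tx_ring *tx)
    {
    	struct dpaa2_buf *buf;

    	mtx_lock(&tx->lock);
    	buf = buf_ring_dequeue_sc(tx->br);	/* NULL: all buffers in flight */
    	mtx_unlock(&tx->lock);
    	return (buf);
    }

    static void
    example_return_txbuf(struct dpaa2_ni_tx_ring *tx, struct dpaa2_buf *buf)
    {
    	/* Called from the Tx confirmation path once the frame is done. */
    	buf_ring_enqueue(tx->br, buf);
    }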
*/ - tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, - M_NOWAIT, &tx->lock); - if (tx->idx_br == NULL) { + tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT, + &tx->lock); + if (tx->br == NULL) { device_printf(dev, "%s: failed to setup Tx ring buffer" " (2) fqid=%d\n", __func__, tx->fqid); goto close_ni; } - /* Configure Tx buffers. */ + /* Configure Tx buffers */ for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) { - buf = &tx->buf[j]; - buf->type = DPAA2_BUF_TX; - buf->tx.dmat = buf->tx.sgt_dmat = NULL; - buf->tx.dmap = buf->tx.sgt_dmap = NULL; - buf->tx.paddr = buf->tx.sgt_paddr = 0; - buf->tx.vaddr = buf->tx.sgt_vaddr = NULL; - buf->tx.m = NULL; - buf->tx.idx = j; - - error = dpaa2_ni_seed_txbuf(sc, buf); - - /* Add index of the Tx buffer to the ring. */ - buf_ring_enqueue(tx->idx_br, (void *) j); + /* M_WAITOK malloc() cannot fail, so no NULL check is needed */ + buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB, + M_WAITOK); + /* Keep DMA tag and Tx ring linked to the buffer */ + DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx); + + buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB, + M_WAITOK); + /* Link SGT to DMA tag and back to its Tx buffer */ + DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf); + + error = dpaa2_buf_seed_txb(dev, buf); + + /* Add Tx buffer to the ring */ + buf_ring_enqueue(tx->br, buf); } tx_rings_n++; } /* All Tx queues which belong to the same flowid have the same qdbin. */ fq->tx_qdbin = queue_cfg.qdbin; queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF; queue_cfg.tc = 0; /* ignored for TxConf queue */ queue_cfg.idx = fq->flowid; error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to obtain TxConf queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); goto close_ni; } fq->fqid = queue_cfg.fqid; queue_cfg.dest_id = con_info->id; queue_cfg.dest_type = DPAA2_NI_DEST_DPCON; queue_cfg.priority = 0; queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq; queue_cfg.options = DPAA2_NI_QUEUE_OPT_USER_CTX | DPAA2_NI_QUEUE_OPT_DEST; error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to update TxConf queue " "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc, queue_cfg.idx); goto close_ni; } (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } static int dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_devinfo *con_info; struct dpaa2_ni_queue_cfg queue_cfg = {0}; struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: "
"id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* Obtain DPCON associated with the FQ's channel. */ con_info = device_get_ivars(fq->chan->con_dev); queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR; queue_cfg.tc = fq->tc; /* ignored */ queue_cfg.idx = fq->flowid; /* ignored */ error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to obtain RxErr queue " "configuration\n", __func__); goto close_ni; } fq->fqid = queue_cfg.fqid; queue_cfg.dest_id = con_info->id; queue_cfg.dest_type = DPAA2_NI_DEST_DPCON; queue_cfg.priority = 1; queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq; queue_cfg.options = DPAA2_NI_QUEUE_OPT_USER_CTX | DPAA2_NI_QUEUE_OPT_DEST; error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg); if (error) { device_printf(dev, "%s: failed to update RxErr queue " "configuration\n", __func__); goto close_ni; } (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } /** * @brief Configure DPNI object to generate interrupts. */ static int dpaa2_ni_setup_irqs(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* Configure IRQs. */ error = dpaa2_ni_setup_msi(sc); if (error) { device_printf(dev, "%s: failed to allocate MSI\n", __func__); goto close_ni; } if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { device_printf(dev, "%s: failed to allocate IRQ resource\n", __func__); goto close_ni; } if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, dpaa2_ni_intr, sc, &sc->intr)) { device_printf(dev, "%s: failed to setup IRQ resource\n", __func__); goto close_ni; } error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX, DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED); if (error) { device_printf(dev, "%s: failed to set DPNI IRQ mask\n", __func__); goto close_ni; } error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX, true); if (error) { device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__); goto close_ni; } (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } /** * @brief Allocate MSI interrupts for DPNI. 
*/ static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc) { int val; val = pci_msi_count(sc->dev); if (val < DPAA2_NI_MSI_COUNT) device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val, DPAA2_NI_MSI_COUNT); val = MIN(val, DPAA2_NI_MSI_COUNT); if (pci_alloc_msi(sc->dev, &val) != 0) return (EINVAL); for (int i = 0; i < val; i++) sc->irq_rid[i] = i + 1; return (0); } /** * @brief Update DPNI according to the updated interface capabilities. */ static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc) { const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM; const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM; device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* Setup checksums validation. */ error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum); if (error) { device_printf(dev, "%s: failed to %s L3 checksum validation\n", __func__, en_rxcsum ? "enable" : "disable"); goto close_ni; } error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum); if (error) { device_printf(dev, "%s: failed to %s L4 checksum validation\n", __func__, en_rxcsum ? "enable" : "disable"); goto close_ni; } /* Setup checksums generation. */ error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum); if (error) { device_printf(dev, "%s: failed to %s L3 checksum generation\n", __func__, en_txcsum ? "enable" : "disable"); goto close_ni; } error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum); if (error) { device_printf(dev, "%s: failed to %s L4 checksum generation\n", __func__, en_txcsum ? "enable" : "disable"); goto close_ni; } (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } /** * @brief Update DPNI according to the updated interface flags.
*/ static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc) { const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC; const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI; device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd, en_promisc ? true : en_allmulti); if (error) { device_printf(dev, "%s: failed to %s multicast promiscuous " "mode\n", __func__, en_allmulti ? "enable" : "disable"); goto close_ni; } error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc); if (error) { device_printf(dev, "%s: failed to %s unicast promiscuous mode\n", __func__, en_promisc ? "enable" : "disable"); goto close_ni; } (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *node, *node2; struct sysctl_oid_list *parent, *parent2; char cbuf[128]; int i; ctx = device_get_sysctl_ctx(sc->dev); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); /* Add DPNI statistics. 
*/ node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics"); parent = SYSCTL_CHILDREN(node); for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) { SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name, CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats, "IU", dpni_stat_sysctls[i].desc); } SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames", CTLFLAG_RD, &sc->rx_anomaly_frames, "Rx frames in the buffers outside of the buffer pools"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames", CTLFLAG_RD, &sc->rx_single_buf_frames, "Rx frames in single buffers"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames", CTLFLAG_RD, &sc->rx_sg_buf_frames, "Rx frames in scatter/gather list"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames", CTLFLAG_RD, &sc->rx_enq_rej_frames, "Enqueue rejected by QMan"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames", CTLFLAG_RD, &sc->rx_ieoi_err_frames, "QMan IEOI error"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames", CTLFLAG_RD, &sc->tx_single_buf_frames, "Tx single buffer frames"); SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames", CTLFLAG_RD, &sc->tx_sg_frames, "Tx S/G frames"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num", CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num, "IU", "number of Rx buffers in the buffer pool"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free", CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free, "IU", "number of free Rx buffers in the buffer pool"); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); /* Add channels statistics. */ node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels"); parent = SYSCTL_CHILDREN(node); for (int i = 0; i < sc->chan_n; i++) { snprintf(cbuf, sizeof(cbuf), "%d", i); node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel"); parent2 = SYSCTL_CHILDREN(node2); SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames", CTLFLAG_RD, &sc->channels[i]->tx_frames, "Tx frames counter"); SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped", CTLFLAG_RD, &sc->channels[i]->tx_dropped, "Tx dropped counter"); } return (0); } static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc) { device_t dev = sc->dev; int error; KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1), ("unexpected buffer alignment: %d\n", sc->buf_align)); - /* DMA tag to allocate buffers for Rx buffer pool. */ - error = bus_dma_tag_create( - bus_get_dma_tag(dev), - sc->buf_align, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* low restricted addr */ - BUS_SPACE_MAXADDR, /* high restricted addr */ - NULL, NULL, /* filter, filterarg */ - BUF_SIZE, 1, /* maxsize, nsegments */ - BUF_SIZE, 0, /* maxsegsize, flags */ - NULL, NULL, /* lockfunc, lockarg */ - &sc->bp_dmat); - if (error) { - device_printf(dev, "%s: failed to create DMA tag for buffer " - "pool\n", __func__); - return (error); - } - - /* DMA tag to map Tx mbufs. 
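The counters registered above surface under the device's sysctl tree, so they can be polled from userland. A small standalone example; the OID prefix "dev.dpaa2_ni.0" is an assumption here, since the real prefix depends on the driver name and unit number:

    #include <sys/types.h>
    #include <sys/sysctl.h>

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
    	uint64_t v;
    	size_t len = sizeof(v);

    	/* Hypothetical OID: first channel of the first DPNI instance. */
    	if (sysctlbyname("dev.dpaa2_ni.0.channels.0.tx_frames", &v, &len,
    	    NULL, 0) == -1) {
    		perror("sysctlbyname");
    		return (1);
    	}
    	printf("tx_frames=%ju\n", (uintmax_t)v);
    	return (0);
    }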
*/ - error = bus_dma_tag_create( - bus_get_dma_tag(dev), - sc->buf_align, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* low restricted addr */ - BUS_SPACE_MAXADDR, /* high restricted addr */ - NULL, NULL, /* filter, filterarg */ - DPAA2_TX_SEGS_MAXSZ, /* maxsize */ - DPAA2_TX_SEGLIMIT, /* nsegments */ - DPAA2_TX_SEG_SZ, 0, /* maxsegsize, flags */ - NULL, NULL, /* lockfunc, lockarg */ - &sc->tx_dmat); - if (error) { - device_printf(dev, "%s: failed to create DMA tag for Tx " - "buffers\n", __func__); - return (error); - } - - /* DMA tag to allocate channel storage. */ - error = bus_dma_tag_create( - bus_get_dma_tag(dev), - ETH_STORE_ALIGN, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* low restricted addr */ - BUS_SPACE_MAXADDR, /* high restricted addr */ - NULL, NULL, /* filter, filterarg */ - ETH_STORE_SIZE, 1, /* maxsize, nsegments */ - ETH_STORE_SIZE, 0, /* maxsegsize, flags */ - NULL, NULL, /* lockfunc, lockarg */ - &sc->st_dmat); - if (error) { - device_printf(dev, "%s: failed to create DMA tag for channel " - "storage\n", __func__); - return (error); - } - /* DMA tag for Rx distribution key. */ error = bus_dma_tag_create( bus_get_dma_tag(dev), PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* low restricted addr */ BUS_SPACE_MAXADDR, /* high restricted addr */ NULL, NULL, /* filter, filterarg */ DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */ DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rxd_dmat); if (error) { device_printf(dev, "%s: failed to create DMA tag for Rx " "distribution key\n", __func__); return (error); } error = bus_dma_tag_create( bus_get_dma_tag(dev), PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* low restricted addr */ BUS_SPACE_MAXADDR, /* high restricted addr */ NULL, NULL, /* filter, filterarg */ ETH_QOS_KCFG_BUF_SIZE, 1, /* maxsize, nsegments */ ETH_QOS_KCFG_BUF_SIZE, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->qos_dmat); if (error) { device_printf(dev, "%s: failed to create DMA tag for QoS key\n", __func__); return (error); } - error = bus_dma_tag_create( - bus_get_dma_tag(dev), - PAGE_SIZE, 0, /* alignment, boundary */ - BUS_SPACE_MAXADDR, /* low restricted addr */ - BUS_SPACE_MAXADDR, /* high restricted addr */ - NULL, NULL, /* filter, filterarg */ - DPAA2_TX_SGT_SZ, 1, /* maxsize, nsegments */ - DPAA2_TX_SGT_SZ, 0, /* maxsegsize, flags */ - NULL, NULL, /* lockfunc, lockarg */ - &sc->sgt_dmat); - if (error) { - device_printf(dev, "%s: failed to create DMA tag for S/G " - "tables\n", __func__); - return (error); - } - return (0); } /** * @brief Configure buffer layouts of the different DPNI queues. */ static int dpaa2_ni_set_buf_layout(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_ni_buf_layout buf_layout = {0}; struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(sc->dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* * Select Rx/Tx buffer alignment.
It's necessary to ensure that the * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending * on the WRIOP version. */ sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) || sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0)) ? BUF_ALIGN_V1 : BUF_ALIGN; /* * We need to ensure that the buffer size seen by WRIOP is a multiple * of 64 or 256 bytes depending on the WRIOP version. */ - sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align); + sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align); if (bootverbose) { device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n", sc->buf_sz, sc->buf_align); } /* * Frame Descriptor Tx buffer layout * * ADDR -> |---------------------| * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes * |---------------------| * | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes * |---------------------| * | DATA HEADROOM | * ADDR + OFFSET -> |---------------------| * | | * | | * | FRAME DATA | * | | * | | * |---------------------| * | DATA TAILROOM | * |---------------------| * * NOTE: It's for a single buffer frame only. */ buf_layout.queue_type = DPAA2_NI_QUEUE_TX; buf_layout.pd_size = BUF_SWA_SIZE; buf_layout.pass_timestamp = true; buf_layout.pass_frame_status = true; buf_layout.options = BUF_LOPT_PRIV_DATA_SZ | BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */ BUF_LOPT_FRAME_STATUS; error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout); if (error) { device_printf(dev, "%s: failed to set Tx buffer layout\n", __func__); goto close_ni; } /* Tx-confirmation buffer layout */ buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF; buf_layout.options = BUF_LOPT_TIMESTAMP | BUF_LOPT_FRAME_STATUS; error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout); if (error) { device_printf(dev, "%s: failed to set TxConf buffer layout\n", __func__); goto close_ni; } /* * Driver should reserve the amount of space indicated by this command * as headroom in all Tx frames. */ error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off); if (error) { device_printf(dev, "%s: failed to obtain Tx data offset\n", __func__); goto close_ni; } if (bootverbose) { device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off); } if ((sc->tx_data_off % 64) != 0) { device_printf(dev, "Tx data offset (%d) is not a multiple " "of 64 bytes\n", sc->tx_data_off); } /* * Frame Descriptor Rx buffer layout * * ADDR -> |---------------------| * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes * |---------------------| * | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes * |---------------------| * | DATA HEADROOM | OFFSET-BUF_RX_HWA_SIZE * ADDR + OFFSET -> |---------------------| * | | * | | * | FRAME DATA | * | | * | | * |---------------------| * | DATA TAILROOM | 0 bytes * |---------------------| * * NOTE: It's for a single buffer frame only.
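The ALIGN_DOWN() trim above is plain power-of-two masking, so at most (alignment - 1) bytes of the buffer are sacrificed. A standalone check with made-up sizes (the driver's real DPAA2_RX_BUF_SIZE is defined elsewhere; the macro is assumed to match the kernel's):

    #include <assert.h>
    #include <stdint.h>

    /* Typical power-of-two align-down, as ALIGN_DOWN() behaves. */
    #define EX_ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

    int
    main(void)
    {
    	uint32_t sz = 9000;	/* illustrative raw buffer size */

    	assert(EX_ALIGN_DOWN(sz, 64u) == 8960);		/* 140 * 64 */
    	assert(EX_ALIGN_DOWN(sz, 256u) == 8960);	/* 35 * 256 */
    	return (0);
    }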
*/ buf_layout.queue_type = DPAA2_NI_QUEUE_RX; buf_layout.pd_size = BUF_SWA_SIZE; buf_layout.fd_align = sc->buf_align; buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE; buf_layout.tail_size = 0; buf_layout.pass_frame_status = true; buf_layout.pass_parser_result = true; buf_layout.pass_timestamp = true; buf_layout.options = BUF_LOPT_PRIV_DATA_SZ | BUF_LOPT_DATA_ALIGN | BUF_LOPT_DATA_HEAD_ROOM | BUF_LOPT_DATA_TAIL_ROOM | BUF_LOPT_FRAME_STATUS | BUF_LOPT_PARSER_RESULT | BUF_LOPT_TIMESTAMP; error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout); if (error) { device_printf(dev, "%s: failed to set Rx buffer layout\n", __func__); goto close_ni; } error = 0; close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } /** * @brief Enable Rx/Tx pause frames. * * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI * itself generates pause frames (Tx frame). */ static int dpaa2_ni_set_pause_frame(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_ni_link_cfg link_cfg = {0}; struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(sc->dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg); if (error) { device_printf(dev, "%s: failed to obtain link configuration: " "error=%d\n", __func__, error); goto close_ni; } /* Enable both Rx and Tx pause frames by default. */ link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE; link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE; error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg); if (error) { device_printf(dev, "%s: failed to set link configuration: " "error=%d\n", __func__, error); goto close_ni; } sc->link_options = link_cfg.options; error = 0; close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } /** * @brief Configure QoS table to determine the traffic class for the received * frame. */ static int dpaa2_ni_set_qos_table(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_ni_qos_table tbl; struct dpaa2_buf *buf = &sc->qos_kcfg; struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; if (sc->attr.num.rx_tcs == 1 || !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) { if (bootverbose) { device_printf(dev, "Ingress traffic classification is " "not supported\n"); } return (0); } /* * Allocate a buffer visible to the device to hold the QoS table key * configuration.
*/ - KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer", - __func__)); - if (__predict_true(buf->store.dmat == NULL)) { - buf->store.dmat = sc->qos_dmat; + + if (__predict_true(buf->dmat == NULL)) { + buf->dmat = sc->qos_dmat; } - error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr, - BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap); + error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr, + BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap); if (error) { device_printf(dev, "%s: failed to allocate a buffer for QoS key " "configuration\n", __func__); goto err_exit; } - error = bus_dmamap_load(buf->store.dmat, buf->store.dmap, - buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb, - &buf->store.paddr, BUS_DMA_NOWAIT); + error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, + ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr, + BUS_DMA_NOWAIT); if (error) { device_printf(dev, "%s: failed to map QoS key configuration " "buffer into bus space\n", __func__); goto err_exit; } DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(sc->dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } tbl.default_tc = 0; tbl.discard_on_miss = false; tbl.keep_entries = false; - tbl.kcfg_busaddr = buf->store.paddr; + tbl.kcfg_busaddr = buf->paddr; error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl); if (error) { device_printf(dev, "%s: failed to set QoS table\n", __func__); goto close_ni; } error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to clear QoS table\n", __func__); goto close_ni; } error = 0; close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } static int dpaa2_ni_set_mac_addr(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); if_t ifp = sc->ifp; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; struct ether_addr rnd_mac_addr; uint16_t rc_token, ni_token; uint8_t mac_addr[ETHER_ADDR_LEN]; uint8_t dpni_mac_addr[ETHER_ADDR_LEN]; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(sc->dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* * Get the MAC address associated with the physical port, if the DPNI is * connected to a DPMAC directly associated with one of the physical * ports. */ error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr); if (error) { device_printf(dev, "%s: failed to obtain the MAC address " "associated with the physical port\n", __func__); goto close_ni; } /* Get primary MAC address from the DPNI attributes.
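The QoS key buffer above walks the usual busdma lifecycle: pick a tag, allocate coherent DMA memory, then load the map to learn the bus address through the one-segment callback. Condensed into a sketch (helper name illustrative; freeing the allocation on the load-failure path is omitted):

    static int
    example_dma_buffer(bus_dma_tag_t tag, struct dpaa2_buf *buf, size_t size)
    {
    	int error;

    	buf->dmat = tag;
    	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
    	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
    	if (error != 0) {
    		return (error);
    	}
    	/*
    	 * The tag was created with nsegments=1, so the callback simply
    	 * stores segs[0].ds_addr into buf->paddr.
    	 */
    	return (bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, size,
    	    dpaa2_dmamap_oneseg_cb, &buf->paddr, BUS_DMA_NOWAIT));
    }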
*/ error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr); if (error) { device_printf(dev, "%s: failed to obtain primary MAC address\n", __func__); goto close_ni; } if (!ETHER_IS_ZERO(mac_addr)) { /* Set MAC address of the physical port as DPNI's primary one. */ error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd, mac_addr); if (error) { device_printf(dev, "%s: failed to set primary MAC " "address\n", __func__); goto close_ni; } for (int i = 0; i < ETHER_ADDR_LEN; i++) { sc->mac.addr[i] = mac_addr[i]; } } else if (ETHER_IS_ZERO(dpni_mac_addr)) { /* Generate random MAC address as DPNI's primary one. */ ether_gen_addr(ifp, &rnd_mac_addr); for (int i = 0; i < ETHER_ADDR_LEN; i++) { mac_addr[i] = rnd_mac_addr.octet[i]; } error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd, mac_addr); if (error) { device_printf(dev, "%s: failed to set random primary " "MAC address\n", __func__); goto close_ni; } for (int i = 0; i < ETHER_ADDR_LEN; i++) { sc->mac.addr[i] = mac_addr[i]; } } else { for (int i = 0; i < ETHER_ADDR_LEN; i++) { sc->mac.addr[i] = dpni_mac_addr[i]; } } error = 0; close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } static void dpaa2_ni_miibus_statchg(device_t dev) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_mac_link_state mac_link = { 0 }; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_cmd cmd; uint16_t rc_token, mac_token; int error, link_state; if (sc->fixed_link || sc->mii == NULL) { return; } /* * Note: ifp link state will only be changed AFTER we are called so we * cannot rely on ifp->if_linkstate here. */ if (sc->mii->mii_media_status & IFM_AVALID) { if (sc->mii->mii_media_status & IFM_ACTIVE) { link_state = LINK_STATE_UP; } else { link_state = LINK_STATE_DOWN; } } else { link_state = LINK_STATE_UNKNOWN; } if (link_state != sc->link_state) { sc->link_state = link_state; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource " "container: id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id, &mac_token); if (error) { device_printf(sc->dev, "%s: failed to open DPMAC: " "id=%d, error=%d\n", __func__, sc->mac.dpmac_id, error); goto close_rc; } if (link_state == LINK_STATE_UP || link_state == LINK_STATE_DOWN) { /* Update DPMAC link state. */ mac_link.supported = sc->mii->mii_media.ifm_media; mac_link.advert = sc->mii->mii_media.ifm_media; mac_link.rate = 1000; /* TODO: Where to get from? */ /* ifmedia_baudrate? */ mac_link.options = DPAA2_MAC_LINK_OPT_AUTONEG | DPAA2_MAC_LINK_OPT_PAUSE; mac_link.up = (link_state == LINK_STATE_UP) ? true : false; mac_link.state_valid = true; /* Inform DPMAC about link state. */ error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd, &mac_link); if (error) { device_printf(sc->dev, "%s: failed to set DPMAC " "link state: id=%d, error=%d\n", __func__, sc->mac.dpmac_id, error); } } (void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); } return; close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return; } /** * @brief Callback function to process media change request. 
*/ static int dpaa2_ni_media_change(if_t ifp) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); DPNI_LOCK(sc); if (sc->mii) { mii_mediachg(sc->mii); sc->media_status = sc->mii->mii_media.ifm_media; } else if (sc->fixed_link) { if_printf(ifp, "%s: can't change media in fixed mode\n", __func__); } DPNI_UNLOCK(sc); return (0); } /** * @brief Callback function to process media status request. */ static void dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); DPNI_LOCK(sc); if (sc->mii) { mii_pollstat(sc->mii); ifmr->ifm_active = sc->mii->mii_media_active; ifmr->ifm_status = sc->mii->mii_media_status; } DPNI_UNLOCK(sc); } /** * @brief Callout function to check and update media status. */ static void dpaa2_ni_media_tick(void *arg) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg; /* Check for media type change */ if (sc->mii) { mii_tick(sc->mii); if (sc->media_status != sc->mii->mii_media.ifm_media) { printf("%s: media type changed (ifm_media=%x)\n", __func__, sc->mii->mii_media.ifm_media); dpaa2_ni_media_change(sc->ifp); } } /* Schedule another timeout one second from now */ callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc); } static void dpaa2_ni_init(void *arg) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg; if_t ifp = sc->ifp; device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; DPNI_LOCK(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { DPNI_UNLOCK(sc); return; } DPNI_UNLOCK(sc); DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd); if (error) { device_printf(dev, "%s: failed to enable DPNI: error=%d\n", __func__, error); } DPNI_LOCK(sc); if (sc->mii) { mii_mediachg(sc->mii); } callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); DPNI_UNLOCK(sc); /* Force link-state update to initialize things.
*/ dpaa2_ni_miibus_statchg(dev); (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return; close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return; } static int dpaa2_ni_transmit(if_t ifp, struct mbuf *m) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); - struct dpaa2_ni_channel *chan; - struct dpaa2_ni_tx_ring *tx; + struct dpaa2_channel *ch; uint32_t fqid; bool found = false; - int chan_n = 0; + int chidx = 0, error; - if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) + if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) { return (0); + } if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { fqid = m->m_pkthdr.flowid; for (int i = 0; i < sc->chan_n; i++) { - chan = sc->channels[i]; - for (int j = 0; j < chan->rxq_n; j++) { - if (fqid == chan->rx_queues[j].fqid) { - chan_n = chan->flowid; + ch = sc->channels[i]; + for (int j = 0; j < ch->rxq_n; j++) { + if (fqid == ch->rx_queues[j].fqid) { + chidx = ch->flowid; found = true; break; } } if (found) { break; } } } - tx = DPAA2_TX_RING(sc, chan_n, 0); - TX_LOCK(tx); - dpaa2_ni_tx_locked(sc, tx, m); - TX_UNLOCK(tx); + ch = sc->channels[chidx]; + error = buf_ring_enqueue(ch->xmit_br, m); + if (__predict_false(error != 0)) { + m_freem(m); + } else { + taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task); + } - return (0); + return (error); } static void dpaa2_ni_qflush(if_t ifp) { /* TODO: Find a way to drain Tx queues in QBMan. */ if_qflush(ifp); } static int dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *) data; device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint32_t changed = 0; uint16_t rc_token, ni_token; int mtu, error, rc = 0; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } switch (c) { case SIOCSIFMTU: DPNI_LOCK(sc); mtu = ifr->ifr_mtu; if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) { DPNI_UNLOCK(sc); error = EINVAL; goto close_ni; } if_setmtu(ifp, mtu); DPNI_UNLOCK(sc); /* Update maximum frame length. 
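dpaa2_ni_transmit() above is now only a classifier and producer: it maps the mbuf's RSS flowid to the channel owning the matching Rx queue, parks the mbuf on that channel's software ring, and wakes the per-channel worker, which performs the actual QBMan enqueue later in dpaa2_ni_tx_cleanup(). The essence of the fast path (wrapper name illustrative):

    static int
    example_transmit(struct dpaa2_ni_softc *sc, struct mbuf *m, int chidx)
    {
    	struct dpaa2_channel *ch = sc->channels[chidx];
    	int error;

    	error = buf_ring_enqueue(ch->xmit_br, m);
    	if (__predict_false(error != 0)) {
    		m_freem(m);		/* ring full: drop the frame */
    	} else {
    		/* The single-threaded taskqueue serializes all channel work. */
    		taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
    	}
    	return (error);
    }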
*/ error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, mtu + ETHER_HDR_LEN); if (error) { device_printf(dev, "%s: failed to update maximum frame " "length: error=%d\n", __func__, error); goto close_ni; } break; case SIOCSIFCAP: changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap; if (changed & IFCAP_HWCSUM) { if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) { if_setcapenablebit(ifp, IFCAP_HWCSUM, 0); } else { if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); } } rc = dpaa2_ni_setup_if_caps(sc); if (rc) { printf("%s: failed to update iface capabilities: " "error=%d\n", __func__, rc); rc = ENXIO; } break; case SIOCSIFFLAGS: DPNI_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { changed = if_getflags(ifp) ^ sc->if_flags; if (changed & IFF_PROMISC || changed & IFF_ALLMULTI) { rc = dpaa2_ni_setup_if_flags(sc); } } else { DPNI_UNLOCK(sc); dpaa2_ni_init(sc); DPNI_LOCK(sc); } } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* FIXME: Disable DPNI. See dpaa2_ni_init(). */ } sc->if_flags = if_getflags(ifp); DPNI_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: DPNI_LOCK(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { DPNI_UNLOCK(sc); rc = dpaa2_ni_update_mac_filters(ifp); if (rc) { device_printf(dev, "%s: failed to update MAC " "filters: error=%d\n", __func__, rc); } DPNI_LOCK(sc); } DPNI_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->mii) rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c); else if(sc->fixed_link) { rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c); } break; default: rc = ether_ioctl(ifp, c, data); break; } (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (rc); close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } static int dpaa2_ni_update_mac_filters(if_t ifp) { struct dpaa2_ni_softc *sc = if_getsoftc(ifp); struct dpaa2_ni_mcaddr_ctx ctx; device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } /* Remove all multicast MAC filters. 
*/ error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true); if (error) { device_printf(dev, "%s: failed to clear multicast MAC filters: " "error=%d\n", __func__, error); goto close_ni; } ctx.ifp = ifp; ctx.error = 0; ctx.nent = 0; if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx); error = ctx.error; close_ni: (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return (error); } static u_int dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct dpaa2_ni_mcaddr_ctx *ctx = arg; struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp); device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int error; if (ctx->error != 0) { return (0); } if (ETHER_IS_MULTICAST(LLADDR(sdl))) { DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource " "container: id=%d, error=%d\n", __func__, rcinfo->id, error); return (0); } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); return (0); } ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd, LLADDR(sdl)); (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); if (ctx->error != 0) { device_printf(dev, "%s: can't add more than %d MAC " "addresses, switching to the multicast promiscuous " "mode\n", __func__, ctx->nent); /* Enable multicast promiscuous mode. */ DPNI_LOCK(sc); if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0); sc->if_flags |= IFF_ALLMULTI; ctx->error = dpaa2_ni_setup_if_flags(sc); DPNI_UNLOCK(sc); return (0); } ctx->nent++; } return (1); } static void dpaa2_ni_intr(void *arg) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg; device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint32_t status = ~0u; /* clear all IRQ status bits */ uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX, &status); if (error) { device_printf(sc->dev, "%s: failed to obtain IRQ status: " "error=%d\n", __func__, error); } (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); err_exit: return; } /** - * @brief Callback to obtain a physical address of the only DMA segment mapped. + * @brief Execute channel's Rx/Tx routines. + * + * NOTE: Should not be re-entrant for the same channel.
It is achieved by + * enqueuing the cleanup routine on a single-threaded taskqueue. */ static void -dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) +dpaa2_ni_cleanup_task(void *arg, int count) { - if (error == 0) { - KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg)); - *(bus_addr_t *) arg = segs[0].ds_addr; - } -} + struct dpaa2_channel *ch = (struct dpaa2_channel *)arg; + struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev); + int error, rxc, txc; -/** - * @brief Release new buffers to the buffer pool if necessary. - */ -static void -dpaa2_ni_bp_task(void *arg, int count) -{ - device_t bp_dev; - struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg; - struct dpaa2_bp_softc *bpsc; - struct dpaa2_bp_conf bp_conf; - const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num); - int error; + for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) { + rxc = dpaa2_ni_rx_cleanup(ch); + txc = dpaa2_ni_tx_cleanup(ch); - /* There's only one buffer pool for now. */ - bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); - bpsc = device_get_softc(bp_dev); + if (__predict_false((if_getdrvflags(sc->ifp) & + IFF_DRV_RUNNING) == 0)) { + return; + } - /* Get state of the buffer pool. */ - error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid, - &bp_conf); - if (error) { - device_printf(sc->dev, "%s: failed to query buffer pool " - "configuration: error=%d\n", __func__, error); - return; + if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) { + break; + } } - /* Double allocated buffers number if free buffers < 25%. */ - if (bp_conf.free_bufn < (buf_num >> 2)) { - (void)dpaa2_ni_seed_buf_pool(sc, buf_num); - DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn); + /* Re-arm channel to generate CDAN */ + error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx); + if (error != 0) { + panic("%s: failed to rearm channel: chan_id=%d, error=%d\n", + __func__, ch->id, error); } } /** * @brief Poll frames from a specific channel when CDAN is received. - * - * NOTE: To be called from the DPIO interrupt handler. */ -static void -dpaa2_ni_poll(void *arg) +static int +dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch) { - struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg; - struct dpaa2_io_softc *iosc; - struct dpaa2_swp *swp; + struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev); + struct dpaa2_swp *swp = iosc->swp; struct dpaa2_ni_fq *fq; + struct dpaa2_buf *buf = &ch->store; + int budget = DPAA2_RX_BUDGET; int error, consumed = 0; - KASSERT(chan != NULL, ("%s: channel is NULL", __func__)); - - iosc = device_get_softc(chan->io_dev); - swp = iosc->swp; - do { - error = dpaa2_swp_pull(swp, chan->id, &chan->store, - ETH_STORE_FRAMES); + error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES); if (error) { - device_printf(chan->ni_dev, "%s: failed to pull frames: " - "chan_id=%d, error=%d\n", __func__, chan->id, error); + device_printf(ch->ni_dev, "%s: failed to pull frames: " + "chan_id=%d, error=%d\n", __func__, ch->id, error); break; } - - /* - * TODO: Combine frames from the same Rx queue returned as - * a result to the current VDQ command into a chain (linked - * with m_nextpkt) to ammortize the FQ lock. 
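The loop above bounds every pass with budgets so one busy channel cannot monopolize its taskqueue thread, and the CDAN is re-armed only once the channel runs dry or the overall budget is spent; re-arming earlier could queue the task again while it is still draining. Its control flow, restated with comments (constants and helpers as used above):

    for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) {
    	rxc = dpaa2_ni_rx_cleanup(ch);	/* consumes up to DPAA2_RX_BUDGET frames */
    	txc = dpaa2_ni_tx_cleanup(ch);	/* sends up to DPAA2_TX_BUDGET mbufs */
    	if (txc != DPAA2_TX_BUDGET && rxc != DPAA2_RX_BUDGET) {
    		break;	/* both directions ran dry before their budgets */
    	}
    }
    /* Only now may a new CDAN fire and re-queue this task. */
    error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);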
- */ - error = dpaa2_ni_consume_frames(chan, &fq, &consumed); - if (error == ENOENT) { + error = dpaa2_ni_consume_frames(ch, &fq, &consumed); + if (error == ENOENT || error == EALREADY) { break; } if (error == ETIMEDOUT) { - device_printf(chan->ni_dev, "%s: timeout to consume " - "frames: chan_id=%d\n", __func__, chan->id); + device_printf(ch->ni_dev, "%s: timeout to consume " + "frames: chan_id=%d\n", __func__, ch->id); } - } while (true); + } while (--budget); - /* Re-arm channel to generate CDAN. */ - error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx); - if (error) { - device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, " - "error=%d\n", __func__, chan->id, error); - } + return (DPAA2_RX_BUDGET - budget); +} + +static int +dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch) +{ + struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev); + struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0]; + struct mbuf *m = NULL; + int budget = DPAA2_TX_BUDGET; + + do { + mtx_assert(&ch->xmit_mtx, MA_NOTOWNED); + mtx_lock(&ch->xmit_mtx); + m = buf_ring_dequeue_sc(ch->xmit_br); + mtx_unlock(&ch->xmit_mtx); + + if (__predict_false(m == NULL)) { + /* TODO: Do not give up easily */ + break; + } else { + dpaa2_ni_tx(sc, ch, tx, m); + } + } while (--budget); + + return (DPAA2_TX_BUDGET - budget); } -/** - * @brief Transmit mbufs. - */ static void -dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx, - struct mbuf *m) +dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch, + struct dpaa2_ni_tx_ring *tx, struct mbuf *m) { + device_t dev = sc->dev; struct dpaa2_ni_fq *fq = tx->fq; - struct dpaa2_buf *buf; + struct dpaa2_buf *buf, *sgt; struct dpaa2_fd fd; - struct mbuf *m_d; - bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT]; - uint64_t idx; - void *pidx; - int error, rc, txnsegs; - - /* Obtain an index of a Tx buffer. */ - pidx = buf_ring_dequeue_sc(tx->idx_br); - if (__predict_false(pidx == NULL)) { - /* TODO: Do not give up easily. */ + struct mbuf *md; + bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT]; + int rc, nsegs; + int error; + + mtx_assert(&tx->lock, MA_NOTOWNED); + mtx_lock(&tx->lock); + buf = buf_ring_dequeue_sc(tx->br); + mtx_unlock(&tx->lock); + if (__predict_false(buf == NULL)) { + /* TODO: Do not give up easily */ m_freem(m); return; } else { - idx = (uint64_t) pidx; - buf = &tx->buf[idx]; - buf->tx.m = m; - buf->tx.sgt_paddr = 0; + DPAA2_BUF_ASSERT_TXREADY(buf); + buf->m = m; + sgt = buf->sgt; } - /* Load mbuf to transmit. */ - error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m, - txsegs, &txnsegs, BUS_DMA_NOWAIT); +#if defined(INVARIANTS) + struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt; + KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__)); + KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__)); +#endif /* INVARIANTS */ + + error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs, + BUS_DMA_NOWAIT); if (__predict_false(error != 0)) { /* Too many fragments, trying to defragment... 
*/ - m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT); - if (m_d == NULL) { - device_printf(sc->dev, "%s: mbuf " - "defragmentation failed\n", __func__); + md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT); + if (md == NULL) { + device_printf(dev, "%s: m_collapse() failed\n", __func__); fq->chan->tx_dropped++; goto err; } - buf->tx.m = m = m_d; - error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, - buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT); + buf->m = m = md; + error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, + &nsegs, BUS_DMA_NOWAIT); if (__predict_false(error != 0)) { - device_printf(sc->dev, "%s: failed to load " - "mbuf: error=%d\n", __func__, error); + device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() " + "failed: error=%d\n", __func__, error); fq->chan->tx_dropped++; goto err; } } - /* Build frame descriptor. */ - error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd); + error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd); if (__predict_false(error != 0)) { - device_printf(sc->dev, "%s: failed to build frame " - "descriptor: error=%d\n", __func__, error); + device_printf(dev, "%s: failed to build frame descriptor: " + "error=%d\n", __func__, error); fq->chan->tx_dropped++; goto err_unload; } - /* TODO: Enqueue several frames in a single command. */ + /* TODO: Enqueue several frames in a single command */ for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) { - /* TODO: Return error codes instead of # of frames. */ - rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, - &fd, 1); + /* TODO: Return error codes instead of # of frames */ + rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1); if (rc == 1) { break; } } - bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_PREWRITE); - bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE); if (rc != 1) { fq->chan->tx_dropped++; goto err_unload; } else { fq->chan->tx_frames++; } return; err_unload: - bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap); - if (buf->tx.sgt_paddr != 0) { - bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap); + bus_dmamap_unload(buf->dmat, buf->dmap); + if (sgt->paddr != 0) { + bus_dmamap_unload(sgt->dmat, sgt->dmap); } err: - m_freem(buf->tx.m); - buf_ring_enqueue(tx->idx_br, pidx); + m_freem(buf->m); + buf_ring_enqueue(tx->br, buf); } static int -dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src, +dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src, uint32_t *consumed) { struct dpaa2_ni_fq *fq = NULL; struct dpaa2_dq *dq; struct dpaa2_fd *fd; + struct dpaa2_ni_rx_ctx ctx = { + .head = NULL, + .tail = NULL, + .cnt = 0, + .last = false + }; int rc, frames = 0; do { - rc = dpaa2_ni_chan_storage_next(chan, &dq); + rc = dpaa2_chan_next_frame(chan, &dq); if (rc == EINPROGRESS) { if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) { fd = &dq->fdr.fd; fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx; - fq->consume(chan, fq, fd); + + switch (fq->type) { + case DPAA2_NI_QUEUE_RX: + (void)dpaa2_ni_rx(chan, fq, fd, &ctx); + break; + case DPAA2_NI_QUEUE_RX_ERR: + (void)dpaa2_ni_rx_err(chan, fq, fd); + break; + case DPAA2_NI_QUEUE_TX_CONF: + (void)dpaa2_ni_tx_conf(chan, fq, fd); + break; + default: + panic("%s: unknown queue type (1)", + __func__); + } frames++; } } else if (rc == EALREADY || rc == ENOENT) { if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) { fd = 
&dq->fdr.fd; fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx; - fq->consume(chan, fq, fd); + + switch (fq->type) { + case DPAA2_NI_QUEUE_RX: + /* + * Last VDQ response (mbuf) in a chain + * obtained from the Rx queue. + */ + ctx.last = true; + (void)dpaa2_ni_rx(chan, fq, fd, &ctx); + break; + case DPAA2_NI_QUEUE_RX_ERR: + (void)dpaa2_ni_rx_err(chan, fq, fd); + break; + case DPAA2_NI_QUEUE_TX_CONF: + (void)dpaa2_ni_tx_conf(chan, fq, fd); + break; + default: + panic("%s: unknown queue type (2)", + __func__); + } frames++; } break; } else { - KASSERT(1 == 0, ("%s: should not reach here", __func__)); + panic("%s: should not reach here: rc=%d", __func__, rc); } } while (true); - KASSERT(chan->store_idx < chan->store_sz, - ("channel store idx >= size: store_idx=%d, store_sz=%d", - chan->store_idx, chan->store_sz)); + KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= " + "store_sz(%d)", __func__, chan->store_idx, chan->store_sz)); /* - * A dequeue operation pulls frames from a single queue into the store. + * VDQ operation pulls frames from a single queue into the store. * Return the frame queue and a number of consumed frames as an output. */ - if (src != NULL) + if (src != NULL) { *src = fq; - if (consumed != NULL) + } + if (consumed != NULL) { *consumed = frames; + } return (rc); } /** * @brief Receive frames. */ static int -dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq, - struct dpaa2_fd *fd) +dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd, + struct dpaa2_ni_rx_ctx *ctx) { - struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev); + bus_addr_t paddr = (bus_addr_t)fd->addr; + struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr); + struct dpaa2_buf *buf = fa->buf; + struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt; + struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev); struct dpaa2_bp_softc *bpsc; - struct dpaa2_buf *buf; - struct dpaa2_fa *fa; - if_t ifp = sc->ifp; struct mbuf *m; - device_t bp_dev; - bus_addr_t paddr = (bus_addr_t) fd->addr; + device_t bpdev; bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD]; void *buf_data; int buf_len, error, released_n = 0; - fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr); - buf = fa->buf; - KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__)); - KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__)); - if (__predict_false(paddr != buf->rx.paddr)) { + /* + * NOTE: Current channel might not be the same as the "buffer" channel + * and it's fine. It must not be NULL though. + */ + KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__)); + + if (__predict_false(paddr != buf->paddr)) { panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)", - __func__, paddr, buf->rx.paddr); + __func__, paddr, buf->paddr); } - /* Update statistics. 
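Note: on the completion path above the driver no longer searches for its buffer. The frame descriptor's address is converted back to a kernel virtual pointer and interpreted as a struct dpaa2_fa, the frame annotation written into the buffer headroom when the buffer was seeded, which carries a magic word and a back-pointer to the owning dpaa2_buf. A stand-alone model of that round trip (a plain pointer stands in for the physical address; the magic value is made up):

#include <assert.h>
#include <stdint.h>

#define MAGIC 0x46444141u /* illustrative, cf. DPAA2_MAGIC */

struct buf {
    _Alignas(uint64_t) unsigned char data[128]; /* headroom holds the annotation */
};

struct fa {
    uint32_t magic;
    struct buf *buf; /* back-pointer to the owning buffer */
};

static void
seed(struct buf *b)
{
    struct fa *fa = (struct fa *)b->data;

    fa->magic = MAGIC;
    fa->buf = b;
}

static struct buf *
lookup(void *hw_addr)
{
    struct fa *fa = hw_addr;

    assert(fa->magic == MAGIC); /* catches a foreign or stale buffer */
    return (fa->buf);
}

int
main(void)
{
    struct buf b;

    seed(&b);
    assert(lookup(b.data) == &b);
    return (0);
}

In the driver the address conversion is PHYS_TO_DMAP() and the magic check is a KASSERT, so the verification costs nothing outside INVARIANTS kernels.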
*/ switch (dpaa2_ni_fd_err(fd)) { case 1: /* Enqueue rejected by QMan */ sc->rx_enq_rej_frames++; break; case 2: /* QMan IEOI error */ sc->rx_ieoi_err_frames++; break; default: break; } switch (dpaa2_ni_fd_format(fd)) { case DPAA2_FD_SINGLE: sc->rx_single_buf_frames++; break; case DPAA2_FD_SG: sc->rx_sg_buf_frames++; break; default: break; } - m = buf->rx.m; - buf->rx.m = NULL; - bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap); + mtx_assert(&bch->dma_mtx, MA_NOTOWNED); + mtx_lock(&bch->dma_mtx); + bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(buf->dmat, buf->dmap); + m = buf->m; buf_len = dpaa2_ni_fd_data_len(fd); - buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd); + buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd); + /* Prepare buffer to be re-cycled */ + buf->m = NULL; + buf->paddr = 0; + buf->vaddr = NULL; + buf->seg.ds_addr = 0; + buf->seg.ds_len = 0; + buf->nseg = 0; - /* Prefetch mbuf data. */ - __builtin_prefetch(buf_data); + mtx_unlock(&bch->dma_mtx); - /* Write value to mbuf (avoid reading). */ m->m_flags |= M_PKTHDR; m->m_data = buf_data; m->m_len = buf_len; m->m_pkthdr.len = buf_len; - m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.rcvif = sc->ifp; m->m_pkthdr.flowid = fq->fqid; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); - if_input(ifp, m); + if (ctx->head == NULL) { + KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__)); + ctx->head = m; + ctx->tail = m; + } else { + KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__)); + ctx->tail->m_nextpkt = m; + ctx->tail = m; + } + ctx->cnt++; + + if (ctx->last) { + ctx->tail->m_nextpkt = NULL; + if_input(sc->ifp, ctx->head); + } + + /* Keep the buffer to be recycled */ + ch->recycled[ch->recycled_n++] = buf; - /* Keep the buffer to be recycled. */ - chan->recycled[chan->recycled_n++] = buf; - KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD, - ("%s: too many buffers to recycle", __func__)); + /* Re-seed and release recycled buffers back to the pool */ + if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) { + /* Release new buffers to the pool if needed */ + taskqueue_enqueue(sc->bp_taskq, &ch->bp_task); - /* Re-seed and release recycled buffers back to the pool. */ - if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) { - /* Release new buffers to the pool if needed. */ - taskqueue_enqueue(sc->bp_taskq, &sc->bp_task); + for (int i = 0; i < ch->recycled_n; i++) { + buf = ch->recycled[i]; + bch = (struct dpaa2_channel *)buf->opt; - for (int i = 0; i < chan->recycled_n; i++) { - buf = chan->recycled[i]; + mtx_assert(&bch->dma_mtx, MA_NOTOWNED); + mtx_lock(&bch->dma_mtx); + error = dpaa2_buf_seed_rxb(sc->dev, buf, + DPAA2_RX_BUF_SIZE, &bch->dma_mtx); + mtx_unlock(&bch->dma_mtx); - /* Seed recycled buffer. */ - error = dpaa2_ni_seed_rxbuf(sc, buf); - KASSERT(error == 0, ("%s: failed to seed recycled " - "buffer: error=%d", __func__, error)); if (__predict_false(error != 0)) { - device_printf(sc->dev, "%s: failed to seed " - "recycled buffer: error=%d\n", __func__, - error); - continue; + /* TODO: What else to do with the buffer? */ + panic("%s: failed to recycle buffer: error=%d", + __func__, error); } - /* Prepare buffer to be released in a single command. */ - released[released_n++] = buf->rx.paddr; + /* Prepare buffer to be released in a single command */ + released[released_n++] = buf->paddr; } - /* There's only one buffer pool for now. 
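Note: the rx context above implements exactly what the removed TODO comment asked for: packets pulled by one volatile dequeue are linked through m_nextpkt and handed to if_input() once, when the last response arrives. The head/tail append logic reduces to this model, with deliver() standing in for if_input() (the Ethernet input path accepts such chains):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pkt {
    int id;
    struct pkt *nextpkt;
};

struct rx_ctx {
    struct pkt *head;
    struct pkt *tail;
    int cnt;
    bool last;
};

static void
deliver(struct pkt *head)
{
    for (struct pkt *p = head; p != NULL; p = p->nextpkt)
        printf("input pkt %d\n", p->id);
}

static void
rx_one(struct rx_ctx *ctx, struct pkt *m)
{
    if (ctx->head == NULL) {
        ctx->head = m;
        ctx->tail = m;
    } else {
        ctx->tail->nextpkt = m;
        ctx->tail = m;
    }
    ctx->cnt++;

    if (ctx->last) {
        ctx->tail->nextpkt = NULL; /* terminate the chain */
        deliver(ctx->head);
    }
}

int
main(void)
{
    struct pkt p[3] = {{0, NULL}, {1, NULL}, {2, NULL}};
    struct rx_ctx ctx = {NULL, NULL, 0, false};

    rx_one(&ctx, &p[0]);
    rx_one(&ctx, &p[1]);
    ctx.last = true; /* consume_frames() saw the last VDQ response */
    rx_one(&ctx, &p[2]);
    assert(ctx.cnt == 3);
    return (0);
}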
*/ - bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); - bpsc = device_get_softc(bp_dev); + /* There's only one buffer pool for now */ + bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]); + bpsc = device_get_softc(bpdev); - error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, + error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, released, released_n); if (__predict_false(error != 0)) { device_printf(sc->dev, "%s: failed to release buffers " "to the pool: error=%d\n", __func__, error); return (error); } - - /* Be ready to recycle the next portion of the buffers. */ - chan->recycled_n = 0; + ch->recycled_n = 0; } return (0); } /** * @brief Receive Rx error frames. */ static int -dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq, +dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd) { - device_t bp_dev; - struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev); + bus_addr_t paddr = (bus_addr_t)fd->addr; + struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr); + struct dpaa2_buf *buf = fa->buf; + struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt; + struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev); + device_t bpdev; struct dpaa2_bp_softc *bpsc; - struct dpaa2_buf *buf; - struct dpaa2_fa *fa; - bus_addr_t paddr = (bus_addr_t) fd->addr; int error; - fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr); - buf = fa->buf; - KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__)); - KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__)); - if (__predict_false(paddr != buf->rx.paddr)) { + /* + * NOTE: Current channel might not be the same as the "buffer" channel + * and it's fine. It must not be NULL though. + */ + KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__)); + + if (__predict_false(paddr != buf->paddr)) { panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)", - __func__, paddr, buf->rx.paddr); + __func__, paddr, buf->paddr); } - /* There's only one buffer pool for now. */ - bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); - bpsc = device_get_softc(bp_dev); + /* There's only one buffer pool for now */ + bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]); + bpsc = device_get_softc(bpdev); - /* Release buffer to QBMan buffer pool. */ - error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1); + /* Release buffer to QBMan buffer pool */ + error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1); if (error != 0) { device_printf(sc->dev, "%s: failed to release frame buffer to " "the pool: error=%d\n", __func__, error); return (error); } return (0); } /** * @brief Receive Tx confirmation frames. 
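Note: recycled Rx buffers above are not released one at a time. The channel collects them until a full QBMan release command can be issued, DPAA2_SWP_BUFS_PER_CMD physical addresses at once, then resets the counter for the next batch. The batching in isolation (seven per command mirrors the QBMan command capacity the driver uses; release_cmd() is a stand-in for DPAA2_SWP_RELEASE_BUFS):

#include <stdint.h>
#include <stdio.h>

#define BUFS_PER_CMD 7 /* cf. DPAA2_SWP_BUFS_PER_CMD */

static uint64_t pending[BUFS_PER_CMD];
static int npending;

/* Models DPAA2_SWP_RELEASE_BUFS(): one command, many buffers. */
static void
release_cmd(const uint64_t *paddr, int n)
{
    printf("release command: %d buffer(s), first=%#jx\n", n,
        (uintmax_t)paddr[0]);
}

static void
recycle(uint64_t paddr)
{
    pending[npending++] = paddr;
    if (npending == BUFS_PER_CMD) {
        release_cmd(pending, npending);
        npending = 0; /* ready for the next batch */
    }
}

int
main(void)
{
    for (uint64_t i = 0; i < 10; i++)
        recycle(0x80000000u + i * 0x1000);
    /* Seven went out in one command; three wait for the next pass. */
    printf("still pending: %d\n", npending);
    return (0);
}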
*/ static int -dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq, +dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd) { - struct dpaa2_ni_tx_ring *tx; - struct dpaa2_buf *buf; - struct dpaa2_fa *fa; - bus_addr_t paddr = (bus_addr_t) fd->addr; - - fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr); - buf = fa->buf; - tx = fa->tx; + bus_addr_t paddr = (bus_addr_t)fd->addr; + struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr); + struct dpaa2_buf *buf = fa->buf; + struct dpaa2_buf *sgt = buf->sgt; + struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt; + struct dpaa2_channel *bch = tx->fq->chan; KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__)); - KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__)); - if (paddr != buf->tx.paddr) { + KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__)); + KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__)); + /* + * NOTE: Current channel might not be the same as the "buffer" channel + * and it's fine. It must not be NULL though. + */ + KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__)); + + if (paddr != buf->paddr) { panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)", - __func__, paddr, buf->tx.paddr); + __func__, paddr, buf->paddr); } - bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_POSTWRITE); - bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap); - bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap); - m_freem(buf->tx.m); + mtx_assert(&bch->dma_mtx, MA_NOTOWNED); + mtx_lock(&bch->dma_mtx); - /* Return Tx buffer index back to the ring. */ - buf_ring_enqueue(tx->idx_br, (void *) buf->tx.idx); + bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE); + bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(buf->dmat, buf->dmap); + bus_dmamap_unload(sgt->dmat, sgt->dmap); + m_freem(buf->m); + buf->m = NULL; + buf->paddr = 0; + buf->vaddr = NULL; + sgt->paddr = 0; + + mtx_unlock(&bch->dma_mtx); + + /* Return Tx buffer back to the ring */ + buf_ring_enqueue(tx->br, buf); return (0); } /** * @brief Compare versions of the DPAA2 network interface API. */ static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major, uint16_t minor) { - if (sc->api_major == major) + if (sc->api_major == major) { return sc->api_minor - minor; - return sc->api_major - major; -} - -/** - * @brief Allocate Rx buffers visible to QBMan and release them to the pool. - */ -static int -dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn) -{ - device_t bp_dev; - struct dpaa2_bp_softc *bpsc; - struct dpaa2_buf *buf; - bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD]; - const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num); - int i, error, bufn = 0; - - KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not " - "created?", __func__)); - - /* There's only one buffer pool for now. */ - bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]); - bpsc = device_get_softc(bp_dev); - - /* Limit # of buffers released to the pool. */ - if (allocated + seedn > DPAA2_NI_BUFS_MAX) - seedn = DPAA2_NI_BUFS_MAX - allocated; - - /* Release "seedn" buffers to the pool. */ - for (i = allocated; i < (allocated + seedn); i++) { - /* Enough buffers were allocated for a single command. 
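Note: dpaa2_ni_cmp_api_version() above is a two-level comparison: majors decide unless they are equal, then minors do, so the result is negative, zero, or positive, like strcmp(). A self-checking copy of the logic with a typical "at least version X.Y" use (the version numbers are made up):

#include <assert.h>
#include <stdint.h>

static int
cmp_api_version(uint16_t api_major, uint16_t api_minor, uint16_t major,
    uint16_t minor)
{
    if (api_major == major)
        return ((int)api_minor - (int)minor);
    return ((int)api_major - (int)major);
}

int
main(void)
{
    /* Firmware reports 7.8: new enough for a feature gated on 7.3. */
    assert(cmp_api_version(7, 8, 7, 3) > 0);
    /* 6.9 is older than 7.3 even though 9 > 3. */
    assert(cmp_api_version(6, 9, 7, 3) < 0);
    assert(cmp_api_version(7, 3, 7, 3) == 0);
    return (0);
}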
*/ - if (bufn == DPAA2_SWP_BUFS_PER_CMD) { - error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev, - bpsc->attr.bpid, paddr, bufn); - if (error) { - device_printf(sc->dev, "%s: failed to release " - "buffers to the pool (1)\n", __func__); - return (error); - } - DPAA2_ATOMIC_ADD(&sc->buf_num, bufn); - bufn = 0; - } - - buf = &sc->buf[i]; - buf->type = DPAA2_BUF_RX; - buf->rx.m = NULL; - buf->rx.dmap = NULL; - buf->rx.paddr = 0; - buf->rx.vaddr = NULL; - error = dpaa2_ni_seed_rxbuf(sc, buf); - if (error != 0) { - break; - } - paddr[bufn] = buf->rx.paddr; - bufn++; - } - - /* Release if there are buffers left. */ - if (bufn > 0) { - error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev, - bpsc->attr.bpid, paddr, bufn); - if (error) { - device_printf(sc->dev, "%s: failed to release " - "buffers to the pool (2)\n", __func__); - return (error); - } - DPAA2_ATOMIC_ADD(&sc->buf_num, bufn); - } - - return (0); -} - -/** - * @brief Prepare Rx buffer to be released to the buffer pool. - */ -static int -dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf) -{ - struct mbuf *m; - struct dpaa2_fa *fa; - bus_dmamap_t dmap; - bus_dma_segment_t segs; - int error, nsegs; - - KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not " - "allocated?", __func__)); - KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__)); - - /* Keep DMA tag for this buffer. */ - if (__predict_false(buf->rx.dmat == NULL)) - buf->rx.dmat = sc->bp_dmat; - - /* Create a DMA map for the giving buffer if it doesn't exist yet. */ - if (__predict_false(buf->rx.dmap == NULL)) { - error = bus_dmamap_create(buf->rx.dmat, 0, &dmap); - if (error) { - device_printf(sc->dev, "%s: failed to create DMA map " - "for buffer: error=%d\n", __func__, error); - return (error); - } - buf->rx.dmap = dmap; - } - - /* Allocate mbuf if needed. */ - if (__predict_false(buf->rx.m == NULL)) { - m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE); - if (__predict_false(m == NULL)) { - device_printf(sc->dev, "%s: failed to allocate mbuf for " - "buffer\n", __func__); - return (ENOMEM); - } - m->m_len = m->m_ext.ext_size; - m->m_pkthdr.len = m->m_ext.ext_size; - buf->rx.m = m; - } else - m = buf->rx.m; - - error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap, - m, &segs, &nsegs, BUS_DMA_NOWAIT); - KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs)); - KASSERT(error == 0, ("failed to map mbuf: error=%d", error)); - if (__predict_false(error != 0 || nsegs != 1)) { - device_printf(sc->dev, "%s: failed to map mbuf: error=%d, " - "nsegs=%d\n", __func__, error, nsegs); - bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap); - m_freem(m); - return (error); - } - buf->rx.paddr = segs.ds_addr; - buf->rx.vaddr = m->m_data; - - /* Populate frame annotation for future use. */ - fa = (struct dpaa2_fa *) m->m_data; - fa->magic = DPAA2_MAGIC; - fa->buf = buf; - - bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_PREREAD); - - return (0); -} - -/** - * @brief Prepare Tx buffer to be added to the Tx ring. - */ -static int -dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf) -{ - bus_dmamap_t dmap; - int error; - - KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?", - __func__)); - KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?", - __func__)); - KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__)); - - /* Keep DMA tags for this buffer. 
*/ - if (__predict_true(buf->tx.dmat == NULL)) - buf->tx.dmat = sc->tx_dmat; - if (__predict_true(buf->tx.sgt_dmat == NULL)) - buf->tx.sgt_dmat = sc->sgt_dmat; - - /* Create a DMA map for the giving buffer if it doesn't exist yet. */ - if (__predict_true(buf->tx.dmap == NULL)) { - error = bus_dmamap_create(buf->tx.dmat, 0, &dmap); - if (error != 0) { - device_printf(sc->dev, "%s: failed to create " - "Tx DMA map: error=%d\n", __func__, error); - return (error); - } - buf->tx.dmap = dmap; - } - - /* Allocate a buffer to store scatter/gather table. */ - if (__predict_true(buf->tx.sgt_vaddr == NULL)) { - error = bus_dmamem_alloc(buf->tx.sgt_dmat, - &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT, - &buf->tx.sgt_dmap); - if (error != 0) { - device_printf(sc->dev, "%s: failed to allocate " - "S/G table: error=%d\n", __func__, error); - return (error); - } - } - - return (0); -} - -/** - * @brief Allocate channel storage visible to QBMan. - */ -static int -dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc, - struct dpaa2_ni_channel *chan) -{ - struct dpaa2_buf *buf = &chan->store; - int error; - - KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not " - "allocated?", __func__)); - KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer", - __func__)); - - /* Keep DMA tag for this buffer. */ - if (__predict_false(buf->store.dmat == NULL)) { - buf->store.dmat = sc->st_dmat; - } - - if (__predict_false(buf->store.vaddr == NULL)) { - error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr, - BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap); - if (error) { - device_printf(sc->dev, "%s: failed to allocate channel " - "storage\n", __func__); - return (error); - } - } - - if (__predict_false(buf->store.paddr == 0)) { - error = bus_dmamap_load(buf->store.dmat, buf->store.dmap, - buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb, - &buf->store.paddr, BUS_DMA_NOWAIT); - if (error) { - device_printf(sc->dev, "%s: failed to map channel " - "storage\n", __func__); - return (error); - } } - - chan->store_sz = ETH_STORE_FRAMES; - chan->store_idx = 0; - - return (0); + return sc->api_major - major; } /** * @brief Build a DPAA2 frame descriptor. */ static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx, - struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs, - struct dpaa2_fd *fd) + struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd) { - struct dpaa2_sg_entry *sgt; + struct dpaa2_buf *sgt = buf->sgt; + struct dpaa2_sg_entry *sge; struct dpaa2_fa *fa; int i, error; - KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, " - "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT)); - KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__)); - KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?", - __func__)); + KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__)); + KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__)); + KASSERT(sgt != NULL, ("%s: no S/G table?", __func__)); + KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__)); - /* Reset frame descriptor fields. */ memset(fd, 0, sizeof(*fd)); - if (__predict_true(txnsegs <= DPAA2_TX_SEGLIMIT)) { - /* Populate S/G table. 
*/ - sgt = (struct dpaa2_sg_entry *) buf->tx.sgt_vaddr + - sc->tx_data_off; - for (i = 0; i < txnsegs; i++) { - sgt[i].addr = (uint64_t) txsegs[i].ds_addr; - sgt[i].len = (uint32_t) txsegs[i].ds_len; - sgt[i].offset_fmt = 0u; + /* Populate and map S/G table */ + if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) { + sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off; + for (i = 0; i < nsegs; i++) { + sge[i].addr = (uint64_t)segs[i].ds_addr; + sge[i].len = (uint32_t)segs[i].ds_len; + sge[i].offset_fmt = 0u; } - sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */ + sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */ - KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0", - __func__, buf->tx.sgt_paddr)); + KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__, + sgt->paddr)); - /* Load S/G table. */ - error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap, - buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb, - &buf->tx.sgt_paddr, BUS_DMA_NOWAIT); + error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr, + DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr, + BUS_DMA_NOWAIT); if (__predict_false(error != 0)) { - device_printf(sc->dev, "%s: failed to map S/G table: " + device_printf(sc->dev, "%s: bus_dmamap_load() failed: " "error=%d\n", __func__, error); return (error); } - buf->tx.paddr = buf->tx.sgt_paddr; - buf->tx.vaddr = buf->tx.sgt_vaddr; + buf->paddr = sgt->paddr; + buf->vaddr = sgt->vaddr; sc->tx_sg_frames++; /* for sysctl(9) */ } else { return (EINVAL); } - fa = (struct dpaa2_fa *) buf->tx.sgt_vaddr; + fa = (struct dpaa2_fa *)sgt->vaddr; fa->magic = DPAA2_MAGIC; fa->buf = buf; - fa->tx = tx; - fd->addr = buf->tx.paddr; - fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len; + fd->addr = buf->paddr; + fd->data_length = (uint32_t)buf->m->m_pkthdr.len; fd->bpid_ivp_bmt = 0; fd->offset_fmt_sl = 0x2000u | sc->tx_data_off; fd->ctrl = 0x00800000u; return (0); } static int dpaa2_ni_fd_err(struct dpaa2_fd *fd) { return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK); } static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *fd) { - if (dpaa2_ni_fd_short_len(fd)) + if (dpaa2_ni_fd_short_len(fd)) { return (fd->data_length & DPAA2_NI_FD_LEN_MASK); - + } return (fd->data_length); } static int dpaa2_ni_fd_format(struct dpaa2_fd *fd) { return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >> DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK)); } static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *fd) { return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT) & DPAA2_NI_FD_SL_MASK) == 1); } static int dpaa2_ni_fd_offset(struct dpaa2_fd *fd) { return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK); } /** * @brief Collect statistics of the network interface. 
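Note: the dpaa2_ni_fd_*() helpers above unpack subfields of the frame descriptor's offset_fmt_sl word with shift-and-mask pairs. A self-checking model with the same structure; the field positions below are illustrative, the authoritative DPAA2_NI_FD_* shift and mask macros live in the driver headers:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FD_OFFSET_MASK 0x0FFFu /* bits 0..11: data offset */
#define FD_FMT_SHIFT   12      /* bits 12..13: frame format */
#define FD_FMT_MASK    0x3u
#define FD_SL_SHIFT    14      /* bit 14: short-length flag */
#define FD_SL_MASK     0x1u

static unsigned
fd_offset(uint16_t w)
{
    return (w & FD_OFFSET_MASK);
}

static unsigned
fd_format(uint16_t w)
{
    return ((w >> FD_FMT_SHIFT) & FD_FMT_MASK);
}

static bool
fd_short_len(uint16_t w)
{
    return (((w >> FD_SL_SHIFT) & FD_SL_MASK) == 1);
}

int
main(void)
{
    /* Compose a word: offset 0x40, format 2 (S/G), short length set. */
    uint16_t w = 0x040 | (2u << FD_FMT_SHIFT) | (1u << FD_SL_SHIFT);

    assert(fd_offset(w) == 0x40);
    assert(fd_format(w) == 2);
    assert(fd_short_len(w));
    return (0);
}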
*/ static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1; struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number]; device_t pdev = device_get_parent(sc->dev); device_t dev = sc->dev; device_t child = dev; struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpaa2_cmd cmd; uint64_t cnt[DPAA2_NI_STAT_COUNTERS]; uint64_t result = 0; uint16_t rc_token, ni_token; int error; DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource container: " "id=%d, error=%d\n", __func__, rcinfo->id, error); goto exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network interface: " "id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt); if (!error) { result = cnt[stat->cnt]; } (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); exit: return (sysctl_handle_64(oidp, &result, 0, req)); } static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1; uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num); return (sysctl_handle_32(oidp, &buf_num, 0, req)); } static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS) { struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1; uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free); return (sysctl_handle_32(oidp, &buf_free, 0, req)); } static int dpaa2_ni_set_hash(device_t dev, uint64_t flags) { struct dpaa2_ni_softc *sc = device_get_softc(dev); uint64_t key = 0; int i; if (!(sc->attr.num.queues > 1)) { return (EOPNOTSUPP); } for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { if (dist_fields[i].rxnfc_field & flags) { key |= dist_fields[i].id; } } return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key)); } /** * @brief Set Rx distribution (hash or flow classification) key flags is a * combination of RXH_ bits. */ static int dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags) { device_t pdev = device_get_parent(dev); device_t child = dev; struct dpaa2_ni_softc *sc = device_get_softc(dev); struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); struct dpaa2_devinfo *dinfo = device_get_ivars(dev); struct dpkg_profile_cfg cls_cfg; struct dpkg_extract *key; struct dpaa2_buf *buf = &sc->rxd_kcfg; struct dpaa2_cmd cmd; uint16_t rc_token, ni_token; int i, error = 0; - KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer", - __func__)); - if (__predict_true(buf->store.dmat == NULL)) { - buf->store.dmat = sc->rxd_dmat; + if (__predict_true(buf->dmat == NULL)) { + buf->dmat = sc->rxd_dmat; } memset(&cls_cfg, 0, sizeof(cls_cfg)); /* Configure extracts according to the given flags. 
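Note: dpaa2_ni_collect_stats() above uses the usual open/close unwind ladder: open the resource container, then the network interface, fetch the counter page, and close the two handles in reverse order through goto labels. The shape of that error handling in isolation (the open_/close_/query_ helpers are hypothetical placeholders):

#include <stdio.h>

static int open_rc(void)   { return (0); }
static int open_ni(void)   { return (0); }
static void close_ni(void) { }
static void close_rc(void) { }

static int
query_stats(unsigned long long *v)
{
    *v = 42;
    return (0);
}

static int
collect(unsigned long long *result)
{
    unsigned long long cnt;
    int error;

    error = open_rc();
    if (error != 0)
        goto exit;
    error = open_ni();
    if (error != 0)
        goto close_rc_label;

    error = query_stats(&cnt);
    if (error == 0)
        *result = cnt;

    close_ni();
close_rc_label:
    close_rc();
exit:
    return (error);
}

int
main(void)
{
    unsigned long long v = 0;

    if (collect(&v) == 0)
        printf("counter=%llu\n", v);
    return (0);
}

The real handler always hands the (possibly still zero) result to sysctl_handle_64() at its exit label, so a failed query simply reports zero rather than propagating the error to sysctl(9).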
*/ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { key = &cls_cfg.extracts[cls_cfg.num_extracts]; if (!(flags & dist_fields[i].id)) { continue; } if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { device_printf(dev, "%s: failed to add key extraction " "rule\n", __func__); return (E2BIG); } key->type = DPKG_EXTRACT_FROM_HDR; key->extract.from_hdr.prot = dist_fields[i].cls_prot; key->extract.from_hdr.type = DPKG_FULL_FIELD; key->extract.from_hdr.field = dist_fields[i].cls_field; cls_cfg.num_extracts++; } - error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr, - BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap); + error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr, + BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap); if (error != 0) { device_printf(dev, "%s: failed to allocate a buffer for Rx " "traffic distribution key configuration\n", __func__); return (error); } - error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr); + error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr); if (error != 0) { device_printf(dev, "%s: failed to prepare key configuration: " "error=%d\n", __func__, error); return (error); } /* Prepare for setting the Rx dist. */ - error = bus_dmamap_load(buf->store.dmat, buf->store.dmap, - buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb, - &buf->store.paddr, BUS_DMA_NOWAIT); + error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, + DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr, + BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->dev, "%s: failed to map a buffer for Rx " "traffic distribution key configuration\n", __func__); return (error); } if (type == DPAA2_NI_DIST_MODE_HASH) { DPAA2_CMD_INIT(&cmd); error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); if (error) { device_printf(dev, "%s: failed to open resource " "container: id=%d, error=%d\n", __func__, rcinfo->id, error); goto err_exit; } error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); if (error) { device_printf(dev, "%s: failed to open network " "interface: id=%d, error=%d\n", __func__, dinfo->id, error); goto close_rc; } error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd, - sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, - buf->store.paddr); + sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr); if (error != 0) { device_printf(dev, "%s: failed to set distribution mode " "and size for the traffic class\n", __func__); } (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); close_rc: (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); } err_exit: return (error); } /** * @brief Prepares extract parameters. * * cfg: Defining a full Key Generation profile. * key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA. 
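Note: hash distribution is configured in two table-driven steps visible above: dpaa2_ni_set_hash() translates the caller's flags into a bitmask of field ids, and dpaa2_ni_set_dist_key() then emits one key-extraction rule per selected field, bailing out with E2BIG once DPKG_MAX_NUM_OF_EXTRACTS is reached. The selection logic reduces to the following (the table contents, cap, and flag values are illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_EXTRACTS 4 /* cf. DPKG_MAX_NUM_OF_EXTRACTS */
#define E2BIG_ERR    7

struct dist_field {
    uint64_t rxnfc_field; /* flag requested by the caller */
    uint64_t id;          /* field id used in the key */
};

static const struct dist_field dist_fields[] = {
    {0x01, 0x0100}, /* e.g. IP source address */
    {0x02, 0x0200}, /* e.g. IP destination address */
    {0x04, 0x0400}, /* e.g. L4 source port */
    {0x08, 0x0800}, /* e.g. L4 destination port */
};

#define NFIELDS (sizeof(dist_fields) / sizeof(dist_fields[0]))

/* Step 1: flags -> key, cf. dpaa2_ni_set_hash(). */
static uint64_t
flags_to_key(uint64_t flags)
{
    uint64_t key = 0;

    for (size_t i = 0; i < NFIELDS; i++)
        if (dist_fields[i].rxnfc_field & flags)
            key |= dist_fields[i].id;
    return (key);
}

/* Step 2: key -> number of extraction rules, capped. */
static int
count_extracts(uint64_t key, int *nextr)
{
    *nextr = 0;
    for (size_t i = 0; i < NFIELDS; i++) {
        if ((key & dist_fields[i].id) == 0)
            continue;
        if (*nextr >= MAX_EXTRACTS)
            return (E2BIG_ERR);
        (*nextr)++;
    }
    return (0);
}

int
main(void)
{
    uint64_t key = flags_to_key(0x01 | 0x04);
    int n;

    assert(key == 0x0500);
    assert(count_extracts(key, &n) == 0 && n == 2);
    return (0);
}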
*/ static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf) { struct dpni_ext_set_rx_tc_dist *dpni_ext; struct dpni_dist_extract *extr; int i, j; if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS) return (EINVAL); dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf; dpni_ext->num_extracts = cfg->num_extracts; for (i = 0; i < cfg->num_extracts; i++) { extr = &dpni_ext->extracts[i]; switch (cfg->extracts[i].type) { case DPKG_EXTRACT_FROM_HDR: extr->prot = cfg->extracts[i].extract.from_hdr.prot; extr->efh_type = cfg->extracts[i].extract.from_hdr.type & 0x0Fu; extr->size = cfg->extracts[i].extract.from_hdr.size; extr->offset = cfg->extracts[i].extract.from_hdr.offset; extr->field = cfg->extracts[i].extract.from_hdr.field; extr->hdr_index = cfg->extracts[i].extract.from_hdr.hdr_index; break; case DPKG_EXTRACT_FROM_DATA: extr->size = cfg->extracts[i].extract.from_data.size; extr->offset = cfg->extracts[i].extract.from_data.offset; break; case DPKG_EXTRACT_FROM_PARSE: extr->size = cfg->extracts[i].extract.from_parse.size; extr->offset = cfg->extracts[i].extract.from_parse.offset; break; default: return (EINVAL); } extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks; extr->extract_type = cfg->extracts[i].type & 0x0Fu; for (j = 0; j < DPKG_NUM_OF_MASKS; j++) { extr->masks[j].mask = cfg->extracts[i].masks[j].mask; extr->masks[j].offset = cfg->extracts[i].masks[j].offset; } } return (0); } -/** - * @brief Obtain the next dequeue response from the channel storage. - */ -static int -dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq) -{ - struct dpaa2_buf *buf = &chan->store; - struct dpaa2_dq *msgs = buf->store.vaddr; - struct dpaa2_dq *msg = &msgs[chan->store_idx]; - int rc = EINPROGRESS; - - chan->store_idx++; - - if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) { - rc = EALREADY; /* VDQ command is expired */ - chan->store_idx = 0; - if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) - msg = NULL; /* Null response, FD is invalid */ - } - if (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY) { - rc = ENOENT; /* FQ is empty */ - chan->store_idx = 0; - } - - if (dq != NULL) - *dq = msg; - - return (rc); -} - static device_method_t dpaa2_ni_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dpaa2_ni_probe), DEVMETHOD(device_attach, dpaa2_ni_attach), DEVMETHOD(device_detach, dpaa2_ni_detach), /* mii via memac_mdio */ DEVMETHOD(miibus_statchg, dpaa2_ni_miibus_statchg), DEVMETHOD_END }; static driver_t dpaa2_ni_driver = { "dpaa2_ni", dpaa2_ni_methods, sizeof(struct dpaa2_ni_softc), }; DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0); DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0); MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1); #ifdef DEV_ACPI MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1); #endif #ifdef FDT MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1); #endif diff --git a/sys/dev/dpaa2/dpaa2_ni.h b/sys/dev/dpaa2/dpaa2_ni.h index 523775a9365c..6fb0673fac09 100644 --- a/sys/dev/dpaa2/dpaa2_ni.h +++ b/sys/dev/dpaa2/dpaa2_ni.h @@ -1,602 +1,514 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * - * Copyright © 2021-2022 Dmitry Salychev + * Copyright © 2021-2023 Dmitry Salychev * Copyright © 2022 Mathew McBride * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_NI_H #define _DPAA2_NI_H #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include "dpaa2_types.h" #include "dpaa2_mcp.h" #include "dpaa2_swp.h" #include "dpaa2_io.h" #include "dpaa2_mac.h" #include "dpaa2_ni_dpkg.h" +#include "dpaa2_channel.h" /* Name of the DPAA2 network interface. */ #define DPAA2_NI_IFNAME "dpni" /* Maximum resources per DPNI: 16 DPIOs + 16 DPCONs + 1 DPBP + 1 DPMCP. */ #define DPAA2_NI_MAX_RESOURCES 34 #define DPAA2_NI_MSI_COUNT 1 /* MSIs per DPNI */ -#define DPAA2_NI_MAX_CHANNELS 16 /* to distribute ingress traffic to cores */ -#define DPAA2_NI_MAX_TCS 8 /* traffic classes per DPNI */ #define DPAA2_NI_MAX_POOLS 8 /* buffer pools per DPNI */ -/* Maximum number of Rx buffers. */ -#define DPAA2_NI_BUFS_INIT (50u * DPAA2_SWP_BUFS_PER_CMD) -#define DPAA2_NI_BUFS_MAX (1 << 15) /* 15 bits for buffer index max. */ +#define DPAA2_NI_BUFS_INIT (5u * DPAA2_SWP_BUFS_PER_CMD) /* Maximum number of buffers allocated per Tx ring. */ #define DPAA2_NI_BUFS_PER_TX (1 << 7) -#define DPAA2_NI_MAX_BPTX (1 << 8) /* 8 bits for buffer index max. */ +#define DPAA2_NI_MAX_BPTX (1 << 8) /* Number of the DPNI statistics counters. */ #define DPAA2_NI_STAT_COUNTERS 7u #define DPAA2_NI_STAT_SYSCTLS 9u /* Error and status bits in the frame annotation status word. */ #define DPAA2_NI_FAS_DISC 0x80000000 /* debug frame */ #define DPAA2_NI_FAS_MS 0x40000000 /* MACSEC frame */ #define DPAA2_NI_FAS_PTP 0x08000000 #define DPAA2_NI_FAS_MC 0x04000000 /* Ethernet multicast frame */ #define DPAA2_NI_FAS_BC 0x02000000 /* Ethernet broadcast frame */ #define DPAA2_NI_FAS_KSE 0x00040000 #define DPAA2_NI_FAS_EOFHE 0x00020000 #define DPAA2_NI_FAS_MNLE 0x00010000 #define DPAA2_NI_FAS_TIDE 0x00008000 #define DPAA2_NI_FAS_PIEE 0x00004000 #define DPAA2_NI_FAS_FLE 0x00002000 /* Frame length error */ #define DPAA2_NI_FAS_FPE 0x00001000 /* Frame physical error */ #define DPAA2_NI_FAS_PTE 0x00000080 #define DPAA2_NI_FAS_ISP 0x00000040 #define DPAA2_NI_FAS_PHE 0x00000020 #define DPAA2_NI_FAS_BLE 0x00000010 #define DPAA2_NI_FAS_L3CV 0x00000008 /* L3 csum validation performed */ #define DPAA2_NI_FAS_L3CE 0x00000004 /* L3 csum error */ #define DPAA2_NI_FAS_L4CV 0x00000002 /* L4 csum validation performed */ #define DPAA2_NI_FAS_L4CE 0x00000001 /* L4 csum error */ /* Mask for errors on the ingress path. 
*/ #define DPAA2_NI_FAS_RX_ERR_MASK (DPAA2_NI_FAS_KSE | \ DPAA2_NI_FAS_EOFHE | \ DPAA2_NI_FAS_MNLE | \ DPAA2_NI_FAS_TIDE | \ DPAA2_NI_FAS_PIEE | \ DPAA2_NI_FAS_FLE | \ DPAA2_NI_FAS_FPE | \ DPAA2_NI_FAS_PTE | \ DPAA2_NI_FAS_ISP | \ DPAA2_NI_FAS_PHE | \ DPAA2_NI_FAS_BLE | \ DPAA2_NI_FAS_L3CE | \ DPAA2_NI_FAS_L4CE \ ) /* Option bits to select specific queue configuration options to apply. */ #define DPAA2_NI_QUEUE_OPT_USER_CTX 0x00000001 #define DPAA2_NI_QUEUE_OPT_DEST 0x00000002 #define DPAA2_NI_QUEUE_OPT_FLC 0x00000004 #define DPAA2_NI_QUEUE_OPT_HOLD_ACTIVE 0x00000008 #define DPAA2_NI_QUEUE_OPT_SET_CGID 0x00000040 #define DPAA2_NI_QUEUE_OPT_CLEAR_CGID 0x00000080 /* DPNI link configuration options. */ #define DPAA2_NI_LINK_OPT_AUTONEG ((uint64_t) 0x01u) #define DPAA2_NI_LINK_OPT_HALF_DUPLEX ((uint64_t) 0x02u) #define DPAA2_NI_LINK_OPT_PAUSE ((uint64_t) 0x04u) #define DPAA2_NI_LINK_OPT_ASYM_PAUSE ((uint64_t) 0x08u) #define DPAA2_NI_LINK_OPT_PFC_PAUSE ((uint64_t) 0x10u) /* * Number of times to retry a frame enqueue before giving up. Value determined * empirically, in order to minimize the number of frames dropped on Tx. */ #define DPAA2_NI_ENQUEUE_RETRIES 10 -enum dpaa2_ni_queue_type { - DPAA2_NI_QUEUE_RX = 0, - DPAA2_NI_QUEUE_TX, - DPAA2_NI_QUEUE_TX_CONF, - DPAA2_NI_QUEUE_RX_ERR -}; +/* Channel storage buffer configuration */ +#define DPAA2_ETH_STORE_FRAMES 16u +#define DPAA2_ETH_STORE_SIZE \ + ((DPAA2_ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq)) + +/* + * NOTE: Don't forget to update dpaa2_ni_spec in case of any changes in macros! + */ +/* DPMCP resources */ +#define DPAA2_NI_MCP_RES_NUM (1u) +#define DPAA2_NI_MCP_RID_OFF (0u) +#define DPAA2_NI_MCP_RID(rid) ((rid) + DPAA2_NI_MCP_RID_OFF) +/* DPIO resources (software portals) */ +#define DPAA2_NI_IO_RES_NUM (16u) +#define DPAA2_NI_IO_RID_OFF (DPAA2_NI_MCP_RID_OFF + DPAA2_NI_MCP_RES_NUM) +#define DPAA2_NI_IO_RID(rid) ((rid) + DPAA2_NI_IO_RID_OFF) +/* DPBP resources (buffer pools) */ +#define DPAA2_NI_BP_RES_NUM (1u) +#define DPAA2_NI_BP_RID_OFF (DPAA2_NI_IO_RID_OFF + DPAA2_NI_IO_RES_NUM) +#define DPAA2_NI_BP_RID(rid) ((rid) + DPAA2_NI_BP_RID_OFF) +/* DPCON resources (channels) */ +#define DPAA2_NI_CON_RES_NUM (16u) +#define DPAA2_NI_CON_RID_OFF (DPAA2_NI_BP_RID_OFF + DPAA2_NI_BP_RES_NUM) +#define DPAA2_NI_CON_RID(rid) ((rid) + DPAA2_NI_CON_RID_OFF) enum dpaa2_ni_dest_type { DPAA2_NI_DEST_NONE = 0, DPAA2_NI_DEST_DPIO, DPAA2_NI_DEST_DPCON }; enum dpaa2_ni_ofl_type { DPAA2_NI_OFL_RX_L3_CSUM = 0, DPAA2_NI_OFL_RX_L4_CSUM, DPAA2_NI_OFL_TX_L3_CSUM, DPAA2_NI_OFL_TX_L4_CSUM, DPAA2_NI_OFL_FLCTYPE_HASH /* FD flow context for AIOP/CTLU */ }; /** * @brief DPNI ingress traffic distribution mode. */ enum dpaa2_ni_dist_mode { DPAA2_NI_DIST_MODE_NONE = 0, DPAA2_NI_DIST_MODE_HASH, DPAA2_NI_DIST_MODE_FS }; /** * @brief DPNI behavior in case of errors. */ enum dpaa2_ni_err_action { DPAA2_NI_ERR_DISCARD = 0, DPAA2_NI_ERR_CONTINUE, DPAA2_NI_ERR_SEND_TO_ERROR_QUEUE }; -struct dpaa2_ni_channel; +struct dpaa2_channel; struct dpaa2_ni_fq; /** * @brief Attributes of the DPNI object. * * options: ... * wriop_ver: Revision of the underlying WRIOP hardware block. */ struct dpaa2_ni_attr { uint32_t options; uint16_t wriop_ver; struct { uint16_t fs; uint8_t mac; uint8_t vlan; uint8_t qos; } entries; struct { uint8_t queues; uint8_t rx_tcs; uint8_t tx_tcs; uint8_t channels; uint8_t cgs; } num; struct { uint8_t fs; uint8_t qos; } key_size; }; -/** - * @brief Tx ring. - * - * fq: Parent (TxConf) frame queue. - * fqid: ID of the logical Tx queue. 
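Note: the new DPAA2_NI_*_RID_OFF and _RID() macros above lay the DPMCP, DPIO, DPBP and DPCON resource blocks out back to back inside the softc's res[] array, so an absolute resource index is just a per-type offset plus a relative id. The arithmetic can be checked at compile time, and the total reproduces DPAA2_NI_MAX_RESOURCES = 34 (1 DPMCP + 16 DPIO + 1 DPBP + 16 DPCON); the short macro names below are local stand-ins for the DPAA2_NI_* originals:

#include <assert.h>

#define MCP_RES_NUM 1u
#define MCP_RID_OFF 0u
#define IO_RES_NUM  16u
#define IO_RID_OFF  (MCP_RID_OFF + MCP_RES_NUM)
#define BP_RES_NUM  1u
#define BP_RID_OFF  (IO_RID_OFF + IO_RES_NUM)
#define CON_RES_NUM 16u
#define CON_RID_OFF (BP_RID_OFF + BP_RES_NUM)

#define IO_RID(rid) ((rid) + IO_RID_OFF)
#define BP_RID(rid) ((rid) + BP_RID_OFF)

_Static_assert(IO_RID_OFF == 1u, "DPIO block follows the single DPMCP");
_Static_assert(BP_RID_OFF == 17u, "DPBP block follows 16 DPIOs");
_Static_assert(CON_RID_OFF == 18u, "DPCON block follows the single DPBP");
_Static_assert(CON_RID_OFF + CON_RES_NUM == 34u,
    "total matches DPAA2_NI_MAX_RESOURCES");

int
main(void)
{
    assert(IO_RID(0) == 1 && IO_RID(15) == 16);
    assert(BP_RID(0) == 17); /* cf. sc->res[DPAA2_NI_BP_RID(0)] above */
    return (0);
}

This is why the header warns that dpaa2_ni_spec must be updated whenever these macros change: the resource spec and the index math have to describe the same layout.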
- * mbuf_br: Ring buffer for mbufs to transmit. - * mbuf_lock: Lock for the ring buffer. - */ -struct dpaa2_ni_tx_ring { - struct dpaa2_ni_fq *fq; - uint32_t fqid; - uint32_t txid; /* Tx ring index */ - - /* Ring buffer for indexes in "buf" array. */ - struct buf_ring *idx_br; - struct mtx lock; - - /* Buffers to DMA load/unload Tx mbufs. */ - struct dpaa2_buf buf[DPAA2_NI_BUFS_PER_TX]; -}; - -/** - * @brief A Frame Queue is the basic queuing structure used by the QMan. - * - * It comprises a list of frame descriptors (FDs), so it can be thought of - * as a queue of frames. - * - * NOTE: When frames on a FQ are ready to be processed, the FQ is enqueued - * onto a work queue (WQ). - * - * fqid: Frame queue ID, can be used to enqueue/dequeue or execute other - * commands on the queue through DPIO. - * txq_n: Number of configured Tx queues. - * tx_fqid: Frame queue IDs of the Tx queues which belong to the same flowid. - * Note that Tx queues are logical queues and not all management - * commands are available on these queue types. - * qdbin: Queue destination bin. Can be used with the DPIO enqueue - * operation based on QDID, QDBIN and QPRI. Note that all Tx queues - * with the same flowid have the same destination bin. - */ -struct dpaa2_ni_fq { - int (*consume)(struct dpaa2_ni_channel *, - struct dpaa2_ni_fq *, struct dpaa2_fd *); - - struct dpaa2_ni_channel *chan; - uint32_t fqid; - uint16_t flowid; - uint8_t tc; - enum dpaa2_ni_queue_type type; - - /* Optional fields (for TxConf queue). */ - struct dpaa2_ni_tx_ring tx_rings[DPAA2_NI_MAX_TCS]; - uint32_t tx_qdbin; -} __aligned(CACHE_LINE_SIZE); - -/** - * @brief QBMan channel to process ingress traffic (Rx, Tx conf). - * - * NOTE: Several WQs are organized into a single WQ Channel. - */ -struct dpaa2_ni_channel { - device_t ni_dev; - device_t io_dev; - device_t con_dev; - uint16_t id; - uint16_t flowid; - - /* For debug purposes only! */ - uint64_t tx_frames; - uint64_t tx_dropped; - - /* Context to configure CDAN. */ - struct dpaa2_io_notif_ctx ctx; - - /* Channel storage (to keep responses from VDQ command). */ - struct dpaa2_buf store; - uint32_t store_sz; /* in frames */ - uint32_t store_idx; /* frame index */ - - /* Recycled buffers to release back to the pool. */ - uint32_t recycled_n; - struct dpaa2_buf *recycled[DPAA2_SWP_BUFS_PER_CMD]; - - /* Frame queues */ - uint32_t rxq_n; - struct dpaa2_ni_fq rx_queues[DPAA2_NI_MAX_TCS]; - struct dpaa2_ni_fq txc_queue; -}; - /** * @brief Configuration of the network interface queue. * * NOTE: This configuration is used to obtain information of a queue by * DPNI_GET_QUEUE command and update it by DPNI_SET_QUEUE one. * * It includes binding of the queue to a DPIO or DPCON object to receive * notifications and traffic on the CPU. * * user_ctx: (r/w) User defined data, presented along with the frames * being dequeued from this queue. * flow_ctx: (r/w) Set default FLC value for traffic dequeued from this queue. * Please check description of FD structure for more information. * Note that FLC values set using DPNI_ADD_FS_ENTRY, if any, take * precedence over values per queue. * dest_id: (r/w) The ID of a DPIO or DPCON object, depending on * DEST_TYPE (in flags) value. This field is ignored for DEST_TYPE * set to 0 (DPNI_DEST_NONE). * fqid: (r) Frame queue ID, can be used to enqueue/dequeue or execute * other commands on the queue through DPIO. Note that Tx queues * are logical queues and not all management commands are available * on these queue types. * qdbin: (r) Queue destination bin. 
Can be used with the DPIO enqueue * operation based on QDID, QDBIN and QPRI. * type: Type of the queue to set configuration to. * tc: Traffic class. Ignored for QUEUE_TYPE 2 and 3 (Tx confirmation * and Rx error queues). * idx: Selects a specific queue out of the set of queues in a TC. * Accepted values are in range 0 to NUM_QUEUES–1. This field is * ignored for QUEUE_TYPE 3 (Rx error queue). For access to the * shared Tx confirmation queue (for Tx confirmation mode 1), this * field must be set to 0xff. * cgid: (r/w) Congestion group ID. * chan_id: (w) Channel index to be configured. Used only when QUEUE_TYPE is * set to DPNI_QUEUE_TX. * priority: (r/w) Sets the priority in the destination DPCON or DPIO for * dequeued traffic. Supported values are 0 to # of priorities in * destination DPCON or DPIO - 1. This field is ignored for * DEST_TYPE set to 0 (DPNI_DEST_NONE), except if this DPNI is in * AIOP context. In that case the DPNI_SET_QUEUE can be used to * override the default assigned priority of the FQ from the TC. * options: Option bits selecting specific configuration options to apply. * See DPAA2_NI_QUEUE_OPT_* for details. * dest_type: Type of destination for dequeued traffic. * cgid_valid: (r) Congestion group ID is valid. * stash_control: (r/w) If true, lowest 6 bits of FLC are used for stash control. * Please check description of FD structure for more information. * hold_active: (r/w) If true, this flag prevents the queue from being * rescheduled between DPIOs while it carries traffic and is active * on one DPIO. Can help reduce reordering if one queue is services * on multiple CPUs, but the queue is also more likely to be trapped * in one DPIO, especially when congested. */ struct dpaa2_ni_queue_cfg { uint64_t user_ctx; uint64_t flow_ctx; uint32_t dest_id; uint32_t fqid; uint16_t qdbin; enum dpaa2_ni_queue_type type; uint8_t tc; uint8_t idx; uint8_t cgid; uint8_t chan_id; uint8_t priority; uint8_t options; enum dpaa2_ni_dest_type dest_type; bool cgid_valid; bool stash_control; bool hold_active; }; /** * @brief Buffer layout attributes. * * pd_size: Size kept for private data (in bytes). * fd_align: Frame data alignment. * head_size: Data head room. * tail_size: Data tail room. * options: ... * pass_timestamp: Timestamp is included in the buffer layout. * pass_parser_result: Parsing results are included in the buffer layout. * pass_frame_status: Frame status is included in the buffer layout. * pass_sw_opaque: SW annotation is activated. * queue_type: Type of a queue this configuration applies to. */ struct dpaa2_ni_buf_layout { uint16_t pd_size; uint16_t fd_align; uint16_t head_size; uint16_t tail_size; uint16_t options; bool pass_timestamp; bool pass_parser_result; bool pass_frame_status; bool pass_sw_opaque; enum dpaa2_ni_queue_type queue_type; }; /** * @brief Buffer pools configuration for a network interface. */ struct dpaa2_ni_pools_cfg { uint8_t pools_num; struct { uint32_t bp_obj_id; uint16_t buf_sz; int backup_flag; /* 0 - regular pool, 1 - backup pool */ } pools[DPAA2_NI_MAX_POOLS]; }; /** * @brief Errors behavior configuration for a network interface. * * err_mask: The errors mask to configure. * action: Desired action for the errors selected in the mask. * set_err_fas: Set to true to mark the errors in frame annotation * status (FAS); relevant for non-discard actions only. */ struct dpaa2_ni_err_cfg { uint32_t err_mask; enum dpaa2_ni_err_action action; bool set_err_fas; }; /** * @brief Link configuration. * * options: Mask of available options. 
* adv_speeds: Speeds that are advertised for autoneg. * rate: Rate in Mbps. */ struct dpaa2_ni_link_cfg { uint64_t options; uint64_t adv_speeds; uint32_t rate; }; /** * @brief Link state. * * options: Mask of available options. * adv_speeds: Speeds that are advertised for autoneg. * sup_speeds: Speeds capability of the PHY. * rate: Rate in Mbps. * link_up: Link state (true if link is up, false otherwise). * state_valid: Ignore/Update the state of the link. */ struct dpaa2_ni_link_state { uint64_t options; uint64_t adv_speeds; uint64_t sup_speeds; uint32_t rate; bool link_up; bool state_valid; }; /** * @brief QoS table configuration. * * kcfg_busaddr: Address of the buffer in I/O virtual address space which * holds the QoS table key configuration. * default_tc: Default traffic class to use in case of a lookup miss in * the QoS table. * discard_on_miss: Set to true to discard frames in case of no match. * Default traffic class will be used otherwise. * keep_entries: Set to true to keep existing QoS table entries. This * option will work properly only for DPNI objects created * with DPNI_OPT_HAS_KEY_MASKING option. */ struct dpaa2_ni_qos_table { uint64_t kcfg_busaddr; uint8_t default_tc; bool discard_on_miss; bool keep_entries; }; /** * @brief Context to add multicast physical addresses to the filter table. * * ifp: Network interface associated with the context. * error: Result of the last MC command. * nent: Number of entries added. */ struct dpaa2_ni_mcaddr_ctx { struct ifnet *ifp; int error; int nent; }; struct dpaa2_eth_dist_fields { uint64_t rxnfc_field; enum net_prot cls_prot; int cls_field; int size; uint64_t id; }; struct dpni_mask_cfg { uint8_t mask; uint8_t offset; } __packed; struct dpni_dist_extract { uint8_t prot; uint8_t efh_type; /* EFH type is in the 4 LSBs. */ uint8_t size; uint8_t offset; uint32_t field; uint8_t hdr_index; uint8_t constant; uint8_t num_of_repeats; uint8_t num_of_byte_masks; uint8_t extract_type; /* Extraction type is in the 4 LSBs */ uint8_t _reserved[3]; struct dpni_mask_cfg masks[4]; } __packed; struct dpni_ext_set_rx_tc_dist { uint8_t num_extracts; uint8_t _reserved[7]; struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; } __packed; /** * @brief Software context for the DPAA2 Network Interface driver. */ struct dpaa2_ni_softc { device_t dev; struct resource *res[DPAA2_NI_MAX_RESOURCES]; uint16_t api_major; uint16_t api_minor; uint64_t rx_hash_fields; uint16_t tx_data_off; uint16_t tx_qdid; uint32_t link_options; int link_state; uint16_t buf_align; uint16_t buf_sz; - /* For debug purposes only! */ uint64_t rx_anomaly_frames; uint64_t rx_single_buf_frames; uint64_t rx_sg_buf_frames; uint64_t rx_enq_rej_frames; uint64_t rx_ieoi_err_frames; uint64_t tx_single_buf_frames; uint64_t tx_sg_frames; - /* Attributes of the DPAA2 network interface. */ struct dpaa2_ni_attr attr; - /* For network interface and miibus. */ struct ifnet *ifp; uint32_t if_flags; struct mtx lock; device_t miibus; struct mii_data *mii; bool fixed_link; struct ifmedia fixed_ifmedia; int media_status; - /* DMA resources */ - bus_dma_tag_t bp_dmat; /* for buffer pool */ - bus_dma_tag_t tx_dmat; /* for Tx buffers */ - bus_dma_tag_t st_dmat; /* for channel storage */ bus_dma_tag_t rxd_dmat; /* for Rx distribution key */ bus_dma_tag_t qos_dmat; /* for QoS table key */ - bus_dma_tag_t sgt_dmat; /* for scatter/gather tables */ - struct dpaa2_buf qos_kcfg; /* QoS table key config. */ - struct dpaa2_buf rxd_kcfg; /* Rx distribution key config. 
*/ + struct dpaa2_buf qos_kcfg; /* QoS table key config */ + struct dpaa2_buf rxd_kcfg; /* Rx distribution key config */ - /* Channels and RxError frame queue */ uint32_t chan_n; - struct dpaa2_ni_channel *channels[DPAA2_NI_MAX_CHANNELS]; - struct dpaa2_ni_fq rxe_queue; /* one per network interface */ + struct dpaa2_channel *channels[DPAA2_MAX_CHANNELS]; + struct dpaa2_ni_fq rxe_queue; /* one per DPNI */ - /* Rx buffers for buffer pool. */ struct dpaa2_atomic buf_num; struct dpaa2_atomic buf_free; /* for sysctl(9) only */ - struct dpaa2_buf buf[DPAA2_NI_BUFS_MAX]; - /* Interrupts */ int irq_rid[DPAA2_NI_MSI_COUNT]; struct resource *irq_res; - void *intr; /* interrupt handle */ + void *intr; - /* Tasks */ struct taskqueue *bp_taskq; - struct task bp_task; - /* Callouts */ struct callout mii_callout; struct { uint32_t dpmac_id; uint8_t addr[ETHER_ADDR_LEN]; device_t phy_dev; int phy_loc; - } mac; /* Info about connected DPMAC (if exists). */ + } mac; /* Info about connected DPMAC (if exists) */ }; extern struct resource_spec dpaa2_ni_spec[]; #endif /* _DPAA2_NI_H */ diff --git a/sys/dev/dpaa2/dpaa2_swp.c b/sys/dev/dpaa2/dpaa2_swp.c index 3fa66ca8880f..5800d7fedd82 100644 --- a/sys/dev/dpaa2/dpaa2_swp.c +++ b/sys/dev/dpaa2/dpaa2_swp.c @@ -1,1167 +1,1164 @@ /*- * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause * * Copyright © 2014-2016 Freescale Semiconductor, Inc. * Copyright © 2016-2019 NXP * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Original source file obtained from: * drivers/soc/fsl/dpio/qbman-portal.c * * Commit: 4c86114194e644b6da9107d75910635c9e87179e * Repository: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git */ /* * Copyright © 2021-2022 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * DPAA2 QBMan software portal. */ #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #include "dpaa2_swp.h" #include "dpaa2_mc.h" #include "dpaa2_bp.h" #define CMD_SPIN_TIMEOUT 100u /* us */ #define CMD_SPIN_ATTEMPTS 2000u /* 200 ms max. */ #define CMD_VERB_MASK 0x7Fu /* Shifts in the VERB byte of the enqueue command descriptor. */ #define ENQ_CMD_ORP_ENABLE_SHIFT 2 #define ENQ_CMD_IRQ_ON_DISPATCH_SHIFT 3 #define ENQ_CMD_TARGET_TYPE_SHIFT 4 #define ENQ_CMD_DCA_EN_SHIFT 7 /* VERB byte options of the enqueue command descriptor. */ #define ENQ_CMD_EMPTY 0u #define ENQ_CMD_RESPONSE_ALWAYS 1u #define ENQ_CMD_REJECTS_TO_FQ 2u #define ENQ_DESC_FD_OFFSET 32u #define ENQ_DCA_IDXMASK 0x0Fu #define ENQ_FLAG_DCA (1ull << 31) /* QBMan portal command codes. */ #define CMDID_SWP_MC_ACQUIRE 0x30 #define CMDID_SWP_BP_QUERY 0x32 #define CMDID_SWP_WQCHAN_CONFIGURE 0x46 /* QBMan portal command result codes. */ #define QBMAN_CMD_RC_OK 0xF0 /* SDQCR attribute codes */ #define QB_SDQCR_FC_SHIFT 29u #define QB_SDQCR_FC_MASK 0x1u #define QB_SDQCR_DCT_SHIFT 24u #define QB_SDQCR_DCT_MASK 0x3u #define QB_SDQCR_TOK_SHIFT 16u #define QB_SDQCR_TOK_MASK 0xFFu #define QB_SDQCR_SRC_SHIFT 0u #define QB_SDQCR_SRC_MASK 0xFFFFu /* Shifts in the VERB byte of the volatile dequeue command. */ #define QB_VDQCR_VERB_DCT0_SHIFT 0 #define QB_VDQCR_VERB_DCT1_SHIFT 1 #define QB_VDQCR_VERB_DT0_SHIFT 2 #define QB_VDQCR_VERB_DT1_SHIFT 3 #define QB_VDQCR_VERB_RLS_SHIFT 4 #define QB_VDQCR_VERB_WAE_SHIFT 5 #define QB_VDQCR_VERB_RAD_SHIFT 6 /* Maximum timeout period for the DQRR interrupt. */ #define DQRR_MAX_ITP 4096u #define DQRR_PI_MASK 0x0Fu /* Release Array Allocation register helpers. */ #define RAR_IDX(rar) ((rar) & 0x7u) #define RAR_VB(rar) ((rar) & 0x80u) #define RAR_SUCCESS(rar) ((rar) & 0x100u) MALLOC_DEFINE(M_DPAA2_SWP, "dpaa2_swp", "DPAA2 QBMan Software Portal"); enum qbman_sdqcr_dct { qbman_sdqcr_dct_null = 0, qbman_sdqcr_dct_prio_ics, qbman_sdqcr_dct_active_ics, qbman_sdqcr_dct_active }; enum qbman_sdqcr_fc { qbman_sdqcr_fc_one = 0, qbman_sdqcr_fc_up_to_3 = 1 }; /* Routines to execute software portal commands. */ static int dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *, struct dpaa2_swp_cmd *, struct dpaa2_swp_rsp *, uint8_t); static int dpaa2_swp_exec_br_command(struct dpaa2_swp *, struct dpaa2_swp_cmd *, uint32_t); static int dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *, struct dpaa2_swp_cmd *); /* Management Commands helpers. 
*/ static int dpaa2_swp_send_mgmt_command(struct dpaa2_swp *, struct dpaa2_swp_cmd *, uint8_t); static int dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *, struct dpaa2_swp_rsp *); /* Helper subroutines. */ static int dpaa2_swp_cyc_diff(uint8_t, uint8_t, uint8_t); int dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc, uint16_t flags) { struct dpaa2_swp *p; uint32_t reg, mask_size, eqcr_pi; /* EQCR producer index */ if (!swp || !desc) return (DPAA2_SWP_STAT_EINVAL); p = malloc(sizeof(struct dpaa2_swp), M_DPAA2_SWP, flags & DPAA2_SWP_NOWAIT_ALLOC ? (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO)); if (!p) return (DPAA2_SWP_STAT_NO_MEMORY); mtx_init(&p->lock, "swp_sleep_lock", NULL, MTX_DEF); p->cfg.mem_backed = false; p->cfg.writes_cinh = true; p->desc = desc; p->flags = flags; p->mc.valid_bit = DPAA2_SWP_VALID_BIT; p->mr.valid_bit = DPAA2_SWP_VALID_BIT; /* FIXME: Memory-backed mode doesn't work now. Why? */ p->cena_res = desc->cena_res; p->cena_map = desc->cena_map; p->cinh_res = desc->cinh_res; p->cinh_map = desc->cinh_map; /* Static Dequeue Command Register configuration. */ p->sdq = 0; p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT; p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT; p->sdq |= DPAA2_SWP_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT; /* Volatile Dequeue Command configuration. */ p->vdq.valid_bit = DPAA2_SWP_VALID_BIT; /* Dequeue Response Ring configuration */ p->dqrr.next_idx = 0; p->dqrr.valid_bit = DPAA2_SWP_VALID_BIT; if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_4100) { p->dqrr.ring_size = 4; p->dqrr.reset_bug = 1; } else { p->dqrr.ring_size = 8; p->dqrr.reset_bug = 0; } if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_5000) { reg = dpaa2_swp_set_cfg( p->dqrr.ring_size, /* max. entries QMan writes to DQRR */ 1, /* writes enabled in the CINH memory only */ 0, /* EQCR_CI stashing threshold */ 3, /* RPM: RCR in array mode */ 2, /* DCM: Discrete consumption ack */ 2, /* EPM: EQCR in ring mode (FIFO) */ 1, /* mem stashing drop enable */ 1, /* mem stashing priority enable */ 1, /* mem stashing enable */ 1, /* dequeue stashing priority enable */ 0, /* dequeue stashing enable */ 0 /* EQCR_CI stashing priority enable */ ); reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */ } else { bus_set_region_4(p->cena_map, 0, 0, rman_get_size(p->cena_res) / 4); reg = dpaa2_swp_set_cfg( p->dqrr.ring_size, /* max. entries QMan writes to DQRR */ /* DQRR_MF */ 1, /* writes enabled in the CINH memory only */ /* WN */ 0, /* EQCR_CI stashing is disabled */ /* EST */ 3, /* RPM: RCR in array mode */ /* RPM */ 2, /* DCM: Discrete consumption ack */ /* DCM */ 2, /* EPM: EQCR in ring mode (FIFO) */ /* EPM */ 1, /* Dequeued frame data, annotation, and FQ context stashing drop enable */ /* SD */ 1, /* Dequeued frame data, annotation, and FQ context stashing priority */ /* SP */ 1, /* Dequeued frame data, annotation, and FQ context stashing enable */ /* SE */ 1, /* Dequeue response ring (DQRR) entry stashing priority */ /* DP */ 0, /* Dequeue response ring (DQRR) entry, or cacheable portal area, stashing enable. */ /* DE */ 0 /* EQCR_CI stashing priority */ /* EP */ ); /* TODO: Switch to memory-backed mode.
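 * Keeping the CPBS bit cleared below leaves the portal QMan-backed; setting it is presumably what selects the memory-backed mode (an assumption based on the register layout, not verified here).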
*/ reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */ } dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_CFG, reg); reg = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_CFG); if (!reg) { free(p, M_DPAA2_SWP); return (DPAA2_SWP_STAT_PORTAL_DISABLED); } /* * Static Dequeue Command Register needs to be initialized to 0 when no * channels are being dequeued from or else the QMan HW will indicate an * error. The values that were calculated above will be applied when * dequeues from a specific channel are enabled. */ dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_SDQCR, 0); p->eqcr.pi_ring_size = 8; /* if ((desc->swp_version & DPAA2_SWP_REV_MASK) >= DPAA2_SWP_REV_5000) */ /* p->eqcr.pi_ring_size = 32; */ for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1) p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1; eqcr_pi = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_PI); p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask; p->eqcr.pi_vb = eqcr_pi & DPAA2_SWP_VALID_BIT; p->eqcr.ci = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_CI) & p->eqcr.pi_ci_mask; p->eqcr.available = p->eqcr.pi_ring_size; - /* Initialize the portal with an IRQ threshold and timeout of 0us. */ - dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 0); + /* TODO: sysctl(9) for the IRQ timeout? */ + /* Initialize the portal with an IRQ threshold and timeout of 120us. */ + dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 120); *swp = p; return (0); } void dpaa2_swp_free_portal(struct dpaa2_swp *swp) { uint16_t flags; KASSERT(swp != NULL, ("%s: swp is NULL", __func__)); DPAA2_SWP_LOCK(swp, &flags); swp->flags |= DPAA2_SWP_DESTROYED; DPAA2_SWP_UNLOCK(swp); /* Let threads stop using this portal. */ DELAY(DPAA2_SWP_TIMEOUT); mtx_destroy(&swp->lock); free(swp, M_DPAA2_SWP); } uint32_t dpaa2_swp_set_cfg(uint8_t max_fill, uint8_t wn, uint8_t est, uint8_t rpm, uint8_t dcm, uint8_t epm, int sd, int sp, int se, int dp, int de, int ep) { return ( max_fill << DPAA2_SWP_CFG_DQRR_MF_SHIFT | est << DPAA2_SWP_CFG_EST_SHIFT | wn << DPAA2_SWP_CFG_WN_SHIFT | rpm << DPAA2_SWP_CFG_RPM_SHIFT | dcm << DPAA2_SWP_CFG_DCM_SHIFT | epm << DPAA2_SWP_CFG_EPM_SHIFT | sd << DPAA2_SWP_CFG_SD_SHIFT | sp << DPAA2_SWP_CFG_SP_SHIFT | se << DPAA2_SWP_CFG_SE_SHIFT | dp << DPAA2_SWP_CFG_DP_SHIFT | de << DPAA2_SWP_CFG_DE_SHIFT | ep << DPAA2_SWP_CFG_EP_SHIFT ); } /* Read/write registers of a software portal. */ void dpaa2_swp_write_reg(struct dpaa2_swp *swp, uint32_t o, uint32_t v) { bus_write_4(swp->cinh_map, o, v); } uint32_t dpaa2_swp_read_reg(struct dpaa2_swp *swp, uint32_t o) { return (bus_read_4(swp->cinh_map, o)); } /* Helper routines. */ /** * @brief Set enqueue descriptor without Order Point Record ID. * * ed: Enqueue descriptor. * resp_always: Enqueue with response always (1); FD from a rejected enqueue * will be returned on a FQ (0). */ void dpaa2_swp_set_ed_norp(struct dpaa2_eq_desc *ed, bool resp_always) { ed->verb &= ~(1 << ENQ_CMD_ORP_ENABLE_SHIFT); if (resp_always) ed->verb |= ENQ_CMD_RESPONSE_ALWAYS; else ed->verb |= ENQ_CMD_REJECTS_TO_FQ; } /** * @brief Set FQ of the enqueue descriptor. */ void dpaa2_swp_set_ed_fq(struct dpaa2_eq_desc *ed, uint32_t fqid) { ed->verb &= ~(1 << ENQ_CMD_TARGET_TYPE_SHIFT); ed->tgtid = fqid; } /** * @brief Enable interrupts for a software portal. */ void dpaa2_swp_set_intr_trigger(struct dpaa2_swp *swp, uint32_t mask) { if (swp != NULL) dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_IER, mask); } /** * @brief Return the value in the SWP_IER register. 
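 *
 * A hypothetical use, together with dpaa2_swp_set_intr_trigger(), to additionally enable DQRR interrupts: mask = dpaa2_swp_get_intr_trigger(swp); dpaa2_swp_set_intr_trigger(swp, mask | DPAA2_SWP_INTR_DQRI);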
*/ uint32_t dpaa2_swp_get_intr_trigger(struct dpaa2_swp *swp) { if (swp != NULL) return dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_IER); return (0); } /** * @brief Return the value in the SWP_ISR register. */ uint32_t dpaa2_swp_read_intr_status(struct dpaa2_swp *swp) { if (swp != NULL) return dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_ISR); return (0); } /** * @brief Clear SWP_ISR register according to the given mask. */ void dpaa2_swp_clear_intr_status(struct dpaa2_swp *swp, uint32_t mask) { if (swp != NULL) dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ISR, mask); } /** * @brief Enable or disable push dequeue. * * swp: the software portal object * chan_idx: the channel index (0 to 15) * en: enable or disable push dequeue */ void dpaa2_swp_set_push_dequeue(struct dpaa2_swp *swp, uint8_t chan_idx, bool en) { uint16_t dqsrc; if (swp != NULL) { if (chan_idx > 15u) { device_printf(swp->desc->dpio_dev, "channel index " "should be <= 15: chan_idx=%d\n", chan_idx); return; } if (en) swp->sdq |= 1 << chan_idx; else swp->sdq &= ~(1 << chan_idx); /* * Recompute the complete src map. If no channels are enabled * the SDQCR must be 0 or else QMan will assert errors. */ dqsrc = (swp->sdq >> DPAA2_SDQCR_SRC_SHIFT) & DPAA2_SDQCR_SRC_MASK; dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_SDQCR, dqsrc != 0 ? swp->sdq : 0); } } /** * @brief Set new IRQ coalescing values. * * swp: The software portal object. * threshold: Threshold for DQRR interrupt generation. The DQRR interrupt * asserts when the ring contains greater than "threshold" entries. * holdoff: DQRR interrupt holdoff (timeout) period in us. */ int dpaa2_swp_set_irq_coalescing(struct dpaa2_swp *swp, uint32_t threshold, uint32_t holdoff) { uint32_t itp; /* Interrupt Timeout Period */ if (swp == NULL) return (EINVAL); /* * Convert "holdoff" value from us to 256 QBMAN clock cycle * increments. This depends on the QBMAN internal frequency. */ itp = (holdoff * 1000u) / swp->desc->swp_cycles_ratio; if (itp > DQRR_MAX_ITP) itp = DQRR_MAX_ITP; if (threshold >= swp->dqrr.ring_size) threshold = swp->dqrr.ring_size - 1; swp->dqrr.irq_threshold = threshold; swp->dqrr.irq_itp = itp; dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_DQRR_ITR, threshold); dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ITPR, itp); return (0); } /* * Software portal commands. */ /** * @brief Configure the channel data availability notification (CDAN) * in a particular WQ channel. */ int dpaa2_swp_conf_wq_channel(struct dpaa2_swp *swp, uint16_t chan_id, uint8_t we_mask, bool cdan_en, uint64_t ctx) { /* NOTE: 64 bytes command. */ struct __packed { uint8_t verb; uint8_t result; /* in response only! */ uint16_t chan_id; uint8_t we; uint8_t ctrl; uint16_t _reserved2; uint64_t ctx; uint8_t _reserved3[48]; } cmd = {0}; struct __packed { uint8_t verb; uint8_t result; uint16_t chan_id; uint8_t _reserved[60]; } rsp; int error; if (swp == NULL) return (EINVAL); cmd.chan_id = chan_id; cmd.we = we_mask; cmd.ctrl = cdan_en ? 1u : 0u; cmd.ctx = ctx; error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd, (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_WQCHAN_CONFIGURE); if (error) return (error); if (rsp.result != QBMAN_CMD_RC_OK) { device_printf(swp->desc->dpio_dev, "WQ channel configuration " "error: channel_id=%d, result=0x%02x\n", chan_id, rsp.result); return (EIO); } return (0); } /** * @brief Query current configuration/state of the buffer pool. */ int dpaa2_swp_query_bp(struct dpaa2_swp *swp, uint16_t bpid, struct dpaa2_bp_conf *conf) { /* NOTE: 64 bytes command.
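 *
 * As with the other management commands here, reserved fields pad the structure to the fixed 64-byte command size; an equivalent check (illustrative only) would be CTASSERT(sizeof(cmd) == DPAA2_SWP_CMD_SIZE).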
*/ struct __packed { uint8_t verb; uint8_t _reserved1; uint16_t bpid; uint8_t _reserved2[60]; } cmd = {0}; struct __packed { uint8_t verb; uint8_t result; uint32_t _reserved1; uint8_t bdi; uint8_t state; uint32_t fill; /* TODO: Support the other fields as well. */ uint8_t _reserved2[52]; } rsp; int error; if (swp == NULL || conf == NULL) return (EINVAL); cmd.bpid = bpid; error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd, (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_BP_QUERY); if (error) return (error); if (rsp.result != QBMAN_CMD_RC_OK) { device_printf(swp->desc->dpio_dev, "BP query error: bpid=%d, " "result=0x%02x\n", bpid, rsp.result); return (EIO); } conf->bdi = rsp.bdi; conf->state = rsp.state; conf->free_bufn = rsp.fill; return (0); } int dpaa2_swp_release_bufs(struct dpaa2_swp *swp, uint16_t bpid, bus_addr_t *buf, uint32_t buf_num) { /* NOTE: 64 bytes command. */ struct __packed { uint8_t verb; uint8_t _reserved1; uint16_t bpid; uint32_t _reserved2; uint64_t buf[DPAA2_SWP_BUFS_PER_CMD]; } cmd = {0}; int error; if (swp == NULL || buf == NULL || buf_num == 0u || buf_num > DPAA2_SWP_BUFS_PER_CMD) return (EINVAL); for (uint32_t i = 0; i < buf_num; i++) cmd.buf[i] = buf[i]; cmd.bpid = bpid; cmd.verb |= 1 << 5; /* Switch release buffer command to valid. */ error = dpaa2_swp_exec_br_command(swp, (struct dpaa2_swp_cmd *) &cmd, buf_num); if (error) { device_printf(swp->desc->dpio_dev, "buffer release command " "failed\n"); return (error); } return (0); } int dpaa2_swp_dqrr_next_locked(struct dpaa2_swp *swp, struct dpaa2_dq *dq, uint32_t *idx) { struct resource_map *map = swp->cinh_map; struct dpaa2_swp_rsp *rsp = (struct dpaa2_swp_rsp *) dq; uint32_t verb, pi; /* producer index */ uint32_t offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_DQRR_MEM(swp->dqrr.next_idx) : DPAA2_SWP_CENA_DQRR(swp->dqrr.next_idx); if (swp == NULL || dq == NULL) return (EINVAL); /* * Before using valid-bit to detect if something is there, we have to * handle the case of the DQRR reset bug... */ if (swp->dqrr.reset_bug) { /* * We pick up new entries by cache-inhibited producer index, * which means that a non-coherent mapping would require us to * invalidate and read *only* once that PI has indicated that * there's an entry here. The first trip around the DQRR ring * will be much less efficient than all subsequent trips around * it... */ pi = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_DQPI) & DQRR_PI_MASK; /* There are new entries if pi != next_idx */ if (pi == swp->dqrr.next_idx) return (ENOENT); /* * If next_idx is/was the last ring index, and 'pi' is * different, we can disable the workaround as all the ring * entries have now been DMA'd to, so valid-bit checking is * repaired. * * NOTE: This logic needs to be based on next_idx (which * increments one at a time), rather than on pi (which * can burst and wrap-around between our snapshots of it). */ if (swp->dqrr.next_idx == (swp->dqrr.ring_size - 1)) swp->dqrr.reset_bug = 0; } verb = bus_read_4(map, offset); if ((verb & DPAA2_SWP_VALID_BIT) != swp->dqrr.valid_bit) return (ENOENT); /* Read dequeue response message. */ for (int i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++) rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t)); /* Return index of the current entry (if requested). */ if (idx != NULL) *idx = swp->dqrr.next_idx; /* * There's something there. Move "next_idx" attention to the next ring * entry before returning what we found.
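 *
 * For example, with the 8-entry ring "next_idx" cycles through 0..7 and the expected valid bit is flipped on every wrap-around, so stale entries left over from the previous lap fail the valid-bit test above.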
*/ swp->dqrr.next_idx++; swp->dqrr.next_idx &= swp->dqrr.ring_size - 1; /* wrap around */ if (swp->dqrr.next_idx == 0u) swp->dqrr.valid_bit ^= DPAA2_SWP_VALID_BIT; return (0); } int dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id, struct dpaa2_buf *buf, uint32_t frames_n) { /* NOTE: 64 bytes command. */ struct __packed { uint8_t verb; uint8_t numf; uint8_t tok; uint8_t _reserved; uint32_t dq_src; uint64_t rsp_addr; uint64_t _reserved1[6]; } cmd = {0}; struct dpaa2_dq *msg; uint16_t flags; int i, error; - KASSERT(swp != NULL, ("%s: swp is NULL", __func__)); KASSERT(frames_n != 0u, ("%s: cannot pull zero frames", __func__)); KASSERT(frames_n <= 16u, ("%s: too many frames to pull", __func__)); - KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage " - "buffer", __func__)); cmd.numf = frames_n - 1; cmd.tok = DPAA2_SWP_VDQCR_TOKEN; cmd.dq_src = chan_id; - cmd.rsp_addr = (uint64_t) buf->store.paddr; + cmd.rsp_addr = (uint64_t)buf->paddr; /* Dequeue command type */ cmd.verb &= ~(1 << QB_VDQCR_VERB_DCT0_SHIFT); cmd.verb |= (1 << QB_VDQCR_VERB_DCT1_SHIFT); /* Dequeue from a specific software portal channel (ID's in DQ_SRC). */ cmd.verb &= ~(1 << QB_VDQCR_VERB_DT0_SHIFT); cmd.verb &= ~(1 << QB_VDQCR_VERB_DT1_SHIFT); /* Write the response to this command into memory (at the RSP_ADDR). */ cmd.verb |= (1 << QB_VDQCR_VERB_RLS_SHIFT); /* Response writes won't attempt to allocate into a cache. */ cmd.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT); /* Allow the FQ to remain active in the portal after dequeue. */ cmd.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT); DPAA2_SWP_LOCK(swp, &flags); if (flags & DPAA2_SWP_DESTROYED) { /* Terminate operation if portal is destroyed. */ DPAA2_SWP_UNLOCK(swp); return (ENOENT); } error = dpaa2_swp_exec_vdc_command_locked(swp, (struct dpaa2_swp_cmd *) &cmd); if (error != 0) { DPAA2_SWP_UNLOCK(swp); return (error); } /* Let's sync before reading VDQ response from QBMan. */ - bus_dmamap_sync(buf->store.dmat, buf->store.dmap, BUS_DMASYNC_POSTREAD); + bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD); /* Read VDQ response from QBMan. */ - msg = (struct dpaa2_dq *) buf->store.vaddr; + msg = (struct dpaa2_dq *)buf->vaddr; for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) { if ((msg->fdr.desc.stat & DPAA2_DQ_STAT_VOLATILE) && (msg->fdr.desc.tok == DPAA2_SWP_VDQCR_TOKEN)) { /* Reset token. */ msg->fdr.desc.tok = 0; break; } DELAY(CMD_SPIN_TIMEOUT); } DPAA2_SWP_UNLOCK(swp); /* Return an error on expired timeout. */ return (i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0); } /** * @brief Issue a command to enqueue a frame using one enqueue descriptor. * * swp: Software portal used to send this command. * ed: Enqueue command descriptor. * fd: Frame descriptor to enqueue. */ int dpaa2_swp_enq(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed, struct dpaa2_fd *fd) { uint32_t flags = 0; int rc = dpaa2_swp_enq_mult(swp, ed, fd, &flags, 1); return (rc >= 0 ? 0 : EBUSY); } /** * @brief Issue a command to enqueue frames using one enqueue descriptor. * * swp: Software portal used to send this command. * ed: Enqueue command descriptor. * fd: Frame descriptor to enqueue. * flags: Pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags (not used if NULL). * frames_n: Number of FDs to enqueue. * * NOTE: Enqueue command (64 bytes): 32 (eq. descriptor) + 32 (frame descriptor).
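 *
 * A hypothetical single-frame call: uint32_t f = 0; int sent = dpaa2_swp_enq_mult(swp, &ed, &fd, &f, 1); where "sent" is the number of frames actually enqueued (0 if the EQCR had no free entries), which is exactly how dpaa2_swp_enq() wraps it.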
*/ int dpaa2_swp_enq_mult(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed, struct dpaa2_fd *fd, uint32_t *flags, int frames_n) { const uint8_t *ed_pdat8 = (const uint8_t *) ed; const uint32_t *ed_pdat32 = (const uint32_t *) ed; const uint64_t *ed_pdat64 = (const uint64_t *) ed; const uint64_t *fd_pdat64 = (const uint64_t *) fd; struct resource_map *map; uint32_t eqcr_ci, eqcr_pi; /* EQCR consumer/producer index */ uint32_t half_mask, full_mask, val, ci_offset; uint16_t swp_flags; int num_enq = 0; if (swp == NULL || ed == NULL || fd == NULL || flags == NULL || frames_n == 0) return (EINVAL); DPAA2_SWP_LOCK(swp, &swp_flags); if (swp_flags & DPAA2_SWP_DESTROYED) { /* Terminate operation if portal is destroyed. */ DPAA2_SWP_UNLOCK(swp); return (ENOENT); } map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map; ci_offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_EQCR_CI_MEMBACK : DPAA2_SWP_CENA_EQCR_CI; half_mask = swp->eqcr.pi_ci_mask >> 1; full_mask = swp->eqcr.pi_ci_mask; if (swp->eqcr.available == 0) { val = dpaa2_swp_read_reg(swp, ci_offset); eqcr_ci = swp->eqcr.ci; swp->eqcr.ci = val & full_mask; swp->eqcr.available = dpaa2_swp_cyc_diff(swp->eqcr.pi_ring_size, eqcr_ci, swp->eqcr.ci); if (swp->eqcr.available == 0) { DPAA2_SWP_UNLOCK(swp); return (0); } } eqcr_pi = swp->eqcr.pi; num_enq = swp->eqcr.available < frames_n ? swp->eqcr.available : frames_n; swp->eqcr.available -= num_enq; KASSERT(num_enq >= 0 && num_enq <= swp->eqcr.pi_ring_size, ("%s: unexpected num_enq=%d", __func__, num_enq)); KASSERT(swp->eqcr.available >= 0 && swp->eqcr.available <= swp->eqcr.pi_ring_size, ("%s: unexpected eqcr.available=%d", __func__, swp->eqcr.available)); /* Fill in the EQCR ring. */ for (int i = 0; i < num_enq; i++) { /* Write enq. desc. without the VERB, DCA, SEQNUM and OPRID. */ for (int j = 1; j <= 3; j++) bus_write_8(map, DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + sizeof(uint64_t) * j, ed_pdat64[j]); /* Write OPRID. */ bus_write_4(map, DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + sizeof(uint32_t), ed_pdat32[1]); /* Write DCA and SEQNUM without VERB byte. */ for (int j = 1; j <= 3; j++) bus_write_1(map, DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + sizeof(uint8_t) * j, ed_pdat8[j]); /* Write frame descriptor. */ for (int j = 0; j <= 3; j++) bus_write_8(map, DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + ENQ_DESC_FD_OFFSET + sizeof(uint64_t) * j, fd_pdat64[j]); eqcr_pi++; } wmb(); /* Write the VERB byte of enqueue descriptor. */ eqcr_pi = swp->eqcr.pi; for (int i = 0; i < num_enq; i++) { bus_write_1(map, DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask), ed_pdat8[0] | swp->eqcr.pi_vb); if (flags && (flags[i] & ENQ_FLAG_DCA)) { /* Update DCA byte. */ bus_write_1(map, DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + 1, (1 << ENQ_CMD_DCA_EN_SHIFT) | (flags[i] & ENQ_DCA_IDXMASK)); } eqcr_pi++; if (!(eqcr_pi & half_mask)) swp->eqcr.pi_vb ^= DPAA2_SWP_VALID_BIT; } swp->eqcr.pi = eqcr_pi & full_mask; DPAA2_SWP_UNLOCK(swp); return (num_enq); } static int dpaa2_swp_cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last) { /* 'first' is included, 'last' is excluded */ return ((first <= last) ? (last - first) : ((2 * ringsize) - (first - last))); } /** * @brief Execute Buffer Release Command (BRC). 
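 *
 * The RAR read below allocates a slot in the release command ring: RAR_SUCCESS() reports whether a slot was granted, RAR_IDX() gives the ring index to write the command to, and RAR_VB() carries the valid bit merged into the VERB byte.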
*/ static int dpaa2_swp_exec_br_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd, uint32_t buf_num) { struct __packed with_verb { uint8_t verb; uint8_t _reserved[63]; } *c; const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params; const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params; struct resource_map *map; uint32_t offset, rar; /* Release Array Allocation register */ uint16_t flags; if (!swp || !cmd) return (EINVAL); DPAA2_SWP_LOCK(swp, &flags); if (flags & DPAA2_SWP_DESTROYED) { /* Terminate operation if portal is destroyed. */ DPAA2_SWP_UNLOCK(swp); return (ENOENT); } rar = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_RAR); if (!RAR_SUCCESS(rar)) { DPAA2_SWP_UNLOCK(swp); return (EBUSY); } map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map; offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_RCR_MEM(RAR_IDX(rar)) : DPAA2_SWP_CENA_RCR(RAR_IDX(rar)); c = (struct with_verb *) cmd; /* Write command bytes (without VERB byte). */ for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++) bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]); bus_write_4(map, offset + 4, cmd_pdat32[1]); for (uint32_t i = 1; i <= 3; i++) bus_write_1(map, offset + i, cmd_pdat8[i]); /* Write VERB byte and trigger command execution. */ if (swp->cfg.mem_backed) { bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num); wmb(); dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_RCR_AM_RT + RAR_IDX(rar) * 4, DPAA2_SWP_RT_MODE); } else { wmb(); bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num); } DPAA2_SWP_UNLOCK(swp); return (0); } /** * @brief Execute Volatile Dequeue Command (VDC). * * This command will be executed by QBMan only once in order to deliver the * requested number of frames (1-16 or 1-32 depending on QBMan version) to the * driver via DQRR or arbitrary DMA-mapped memory. * * NOTE: There is a counterpart to the volatile dequeue command called static * dequeue command (SDQC) which is executed periodically for as long as the * command is present in the SDQCR register. */ static int dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd) { struct __packed with_verb { uint8_t verb; uint8_t _reserved[63]; } *c; const uint8_t *p8 = (const uint8_t *) cmd->params; const uint32_t *p32 = (const uint32_t *) cmd->params; struct resource_map *map; uint32_t offset; map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map; offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_VDQCR_MEM : DPAA2_SWP_CENA_VDQCR; c = (struct with_verb *) cmd; /* Write command bytes (without VERB byte). */ for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++) bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]); bus_write_4(map, offset + 4, p32[1]); for (uint32_t i = 1; i <= 3; i++) bus_write_1(map, offset + i, p8[i]); /* Write VERB byte and trigger command execution. */ if (swp->cfg.mem_backed) { bus_write_1(map, offset, c->verb | swp->vdq.valid_bit); swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT; wmb(); dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_VDQCR_RT, DPAA2_SWP_RT_MODE); } else { wmb(); bus_write_1(map, offset, c->verb | swp->vdq.valid_bit); swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT; } return (0); } /** * @brief Execute a QBMan management command.
*/ static int dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd, struct dpaa2_swp_rsp *rsp, uint8_t cmdid) { #if (defined(_KERNEL) && defined(INVARIANTS)) struct __packed with_verb { uint8_t verb; uint8_t _reserved[63]; } *r; #endif uint16_t flags; int error; if (swp == NULL || cmd == NULL || rsp == NULL) return (EINVAL); DPAA2_SWP_LOCK(swp, &flags); if (flags & DPAA2_SWP_DESTROYED) { /* Terminate operation if portal is destroyed. */ DPAA2_SWP_UNLOCK(swp); return (ENOENT); } /* * Send a command to QBMan using Management Command register and wait * for response from the Management Response registers. */ dpaa2_swp_send_mgmt_command(swp, cmd, cmdid); error = dpaa2_swp_wait_for_mgmt_response(swp, rsp); if (error) { DPAA2_SWP_UNLOCK(swp); return (error); } DPAA2_SWP_UNLOCK(swp); #if (defined(_KERNEL) && defined(INVARIANTS)) r = (struct with_verb *) rsp; KASSERT((r->verb & CMD_VERB_MASK) == cmdid, ("wrong VERB byte in response: resp=0x%02x, expected=0x%02x", r->verb, cmdid)); #endif return (0); } static int dpaa2_swp_send_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd, uint8_t cmdid) { const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params; const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params; struct resource_map *map; uint32_t offset; map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map; offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_CR_MEM : DPAA2_SWP_CENA_CR; /* Write command bytes (without VERB byte). */ for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++) bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]); bus_write_4(map, offset + 4, cmd_pdat32[1]); for (uint32_t i = 1; i <= 3; i++) bus_write_1(map, offset + i, cmd_pdat8[i]); /* Write VERB byte and trigger command execution. */ if (swp->cfg.mem_backed) { bus_write_1(map, offset, cmdid | swp->mr.valid_bit); wmb(); dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_CR_RT, DPAA2_SWP_RT_MODE); } else { wmb(); bus_write_1(map, offset, cmdid | swp->mc.valid_bit); } return (0); } static int dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *swp, struct dpaa2_swp_rsp *rsp) { struct resource_map *map = swp->cfg.mem_backed ? swp->cena_map : swp->cinh_map; /* Management command response to be read either from the single memory-backed RR register or from RR0/RR1. */ const uint32_t offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_RR_MEM : DPAA2_SWP_CENA_RR(swp->mc.valid_bit); uint32_t i, verb, ret; int rc; /* Wait for a command response from QBMan. */ for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) { if (swp->cfg.mem_backed) { verb = (uint32_t) (bus_read_4(map, offset) & 0xFFu); if (swp->mr.valid_bit != (verb & DPAA2_SWP_VALID_BIT)) goto wait; if (!(verb & ~DPAA2_SWP_VALID_BIT)) goto wait; swp->mr.valid_bit ^= DPAA2_SWP_VALID_BIT; } else { ret = bus_read_4(map, offset); verb = ret & ~DPAA2_SWP_VALID_BIT; /* remove valid bit */ if (verb == 0u) goto wait; swp->mc.valid_bit ^= DPAA2_SWP_VALID_BIT; } break; wait: DELAY(CMD_SPIN_TIMEOUT); } /* Return an error on expired timeout. */ rc = i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0; /* Read command response.
*/ for (i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++) rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t)); return (rc); } diff --git a/sys/dev/dpaa2/dpaa2_swp.h b/sys/dev/dpaa2/dpaa2_swp.h index 986ade601149..1b1383b4241f 100644 --- a/sys/dev/dpaa2/dpaa2_swp.h +++ b/sys/dev/dpaa2/dpaa2_swp.h @@ -1,531 +1,532 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright © 2021-2023 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_SWP_H #define _DPAA2_SWP_H #include #include #include #include "dpaa2_types.h" +#include "dpaa2_buf.h" #include "dpaa2_bp.h" /* * DPAA2 QBMan software portal. */ /* All QBMan commands and result structures use this "valid bit" encoding. */ #define DPAA2_SWP_VALID_BIT ((uint32_t) 0x80) #define DPAA2_SWP_TIMEOUT 100000 /* in us */ #define DPAA2_SWP_CMD_PARAMS_N 8u #define DPAA2_SWP_RSP_PARAMS_N 8u /* * Maximum number of buffers that can be acquired/released through a single * QBMan command. */ #define DPAA2_SWP_BUFS_PER_CMD 7u /* * Number of times to retry DPIO portal operations while waiting for portal to * finish executing current command and become available. * * We want to avoid being stuck in a while loop in case hardware becomes * unresponsive, but not give up too easily if the portal really is busy for * valid reasons. */ #define DPAA2_SWP_BUSY_RETRIES 1000 /* Versions of the QBMan software portals. */ #define DPAA2_SWP_REV_4000 0x04000000 #define DPAA2_SWP_REV_4100 0x04010000 #define DPAA2_SWP_REV_4101 0x04010001 #define DPAA2_SWP_REV_5000 0x05000000 #define DPAA2_SWP_REV_MASK 0xFFFF0000 /* Registers in the cache-inhibited area of the software portal. */ #define DPAA2_SWP_CINH_CR 0x600 /* Management Command reg.*/ #define DPAA2_SWP_CINH_EQCR_PI 0x800 /* Enqueue Ring, Producer Index */ #define DPAA2_SWP_CINH_EQCR_CI 0x840 /* Enqueue Ring, Consumer Index */ #define DPAA2_SWP_CINH_CR_RT 0x900 /* CR Read Trigger */ #define DPAA2_SWP_CINH_VDQCR_RT 0x940 /* VDQCR Read Trigger */ #define DPAA2_SWP_CINH_EQCR_AM_RT 0x980 #define DPAA2_SWP_CINH_RCR_AM_RT 0x9C0 #define DPAA2_SWP_CINH_DQPI 0xA00 /* DQRR Producer Index reg. */ #define DPAA2_SWP_CINH_DQRR_ITR 0xA80 /* DQRR interrupt timeout reg. */ #define DPAA2_SWP_CINH_DCAP 0xAC0 /* DQRR Consumption Ack. reg. */ #define DPAA2_SWP_CINH_SDQCR 0xB00 /* Static Dequeue Command reg. 
*/ #define DPAA2_SWP_CINH_EQCR_AM_RT2 0xB40 #define DPAA2_SWP_CINH_RCR_PI 0xC00 /* Release Ring, Producer Index */ #define DPAA2_SWP_CINH_RAR 0xCC0 /* Release Array Allocation reg. */ #define DPAA2_SWP_CINH_CFG 0xD00 #define DPAA2_SWP_CINH_ISR 0xE00 #define DPAA2_SWP_CINH_IER 0xE40 #define DPAA2_SWP_CINH_ISDR 0xE80 #define DPAA2_SWP_CINH_IIR 0xEC0 #define DPAA2_SWP_CINH_ITPR 0xF40 /* Registers in the cache-enabled area of the software portal. */ #define DPAA2_SWP_CENA_EQCR(n) (0x000 + ((uint32_t)(n) << 6)) #define DPAA2_SWP_CENA_DQRR(n) (0x200 + ((uint32_t)(n) << 6)) #define DPAA2_SWP_CENA_RCR(n) (0x400 + ((uint32_t)(n) << 6)) #define DPAA2_SWP_CENA_CR (0x600) /* Management Command reg. */ #define DPAA2_SWP_CENA_RR(vb) (0x700 + ((uint32_t)(vb) >> 1)) #define DPAA2_SWP_CENA_VDQCR (0x780) #define DPAA2_SWP_CENA_EQCR_CI (0x840) /* Registers in the cache-enabled area of the software portal (memory-backed). */ #define DPAA2_SWP_CENA_DQRR_MEM(n) (0x0800 + ((uint32_t)(n) << 6)) #define DPAA2_SWP_CENA_RCR_MEM(n) (0x1400 + ((uint32_t)(n) << 6)) #define DPAA2_SWP_CENA_CR_MEM (0x1600) /* Management Command reg. */ #define DPAA2_SWP_CENA_RR_MEM (0x1680) /* Management Response reg. */ #define DPAA2_SWP_CENA_VDQCR_MEM (0x1780) #define DPAA2_SWP_CENA_EQCR_CI_MEMBACK (0x1840) /* Shifts in the portal's configuration register. */ #define DPAA2_SWP_CFG_DQRR_MF_SHIFT 20 #define DPAA2_SWP_CFG_EST_SHIFT 16 #define DPAA2_SWP_CFG_CPBS_SHIFT 15 #define DPAA2_SWP_CFG_WN_SHIFT 14 #define DPAA2_SWP_CFG_RPM_SHIFT 12 #define DPAA2_SWP_CFG_DCM_SHIFT 10 #define DPAA2_SWP_CFG_EPM_SHIFT 8 #define DPAA2_SWP_CFG_VPM_SHIFT 7 #define DPAA2_SWP_CFG_CPM_SHIFT 6 #define DPAA2_SWP_CFG_SD_SHIFT 5 #define DPAA2_SWP_CFG_SP_SHIFT 4 #define DPAA2_SWP_CFG_SE_SHIFT 3 #define DPAA2_SWP_CFG_DP_SHIFT 2 #define DPAA2_SWP_CFG_DE_SHIFT 1 #define DPAA2_SWP_CFG_EP_SHIFT 0 /* Static Dequeue Command Register attribute codes */ #define DPAA2_SDQCR_FC_SHIFT 29 /* Dequeue Command Frame Count */ #define DPAA2_SDQCR_FC_MASK 0x1 #define DPAA2_SDQCR_DCT_SHIFT 24 /* Dequeue Command Type */ #define DPAA2_SDQCR_DCT_MASK 0x3 #define DPAA2_SDQCR_TOK_SHIFT 16 /* Dequeue Command Token */ #define DPAA2_SDQCR_TOK_MASK 0xff #define DPAA2_SDQCR_SRC_SHIFT 0 /* Dequeue Source */ #define DPAA2_SDQCR_SRC_MASK 0xffff /* * Read trigger bit is used to trigger QMan to read a command from memory, * without having software perform a cache flush to force a write of the command * to QMan. * * NOTE: Implemented in QBMan 5.0 or above. */ #define DPAA2_SWP_RT_MODE ((uint32_t)0x100) /* Interrupt Enable Register bits. */ #define DPAA2_SWP_INTR_EQRI 0x01 #define DPAA2_SWP_INTR_EQDI 0x02 #define DPAA2_SWP_INTR_DQRI 0x04 #define DPAA2_SWP_INTR_RCRI 0x08 #define DPAA2_SWP_INTR_RCDI 0x10 #define DPAA2_SWP_INTR_VDCI 0x20 /* "Write Enable" bitmask for a command to configure SWP WQ Channel.*/ #define DPAA2_WQCHAN_WE_EN (0x1u) /* Enable CDAN generation */ #define DPAA2_WQCHAN_WE_ICD (0x2u) /* Interrupt Coalescing Disable */ #define DPAA2_WQCHAN_WE_CTX (0x4u) /* Definitions for parsing DQRR entries. 
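 *
 * The result type is expected to be extracted from a DQRR entry's VERB byte, e.g. (verb & DPAA2_DQRR_RESULT_MASK) == DPAA2_DQRR_RESULT_DQ for a frame dequeue response (illustrative usage, not from this change).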
*/ #define DPAA2_DQRR_RESULT_MASK (0x7Fu) #define DPAA2_DQRR_RESULT_DQ (0x60u) #define DPAA2_DQRR_RESULT_FQRN (0x21u) #define DPAA2_DQRR_RESULT_FQRNI (0x22u) #define DPAA2_DQRR_RESULT_FQPN (0x24u) #define DPAA2_DQRR_RESULT_FQDAN (0x25u) #define DPAA2_DQRR_RESULT_CDAN (0x26u) #define DPAA2_DQRR_RESULT_CSCN_MEM (0x27u) #define DPAA2_DQRR_RESULT_CGCU (0x28u) #define DPAA2_DQRR_RESULT_BPSCN (0x29u) #define DPAA2_DQRR_RESULT_CSCN_WQ (0x2au) /* Frame dequeue statuses */ #define DPAA2_DQ_STAT_FQEMPTY (0x80u) /* FQ is empty */ #define DPAA2_DQ_STAT_HELDACTIVE (0x40u) /* FQ is held active */ #define DPAA2_DQ_STAT_FORCEELIGIBLE (0x20u) /* FQ force eligible */ #define DPAA2_DQ_STAT_VALIDFRAME (0x10u) /* valid frame */ #define DPAA2_DQ_STAT_ODPVALID (0x04u) /* FQ ODP enable */ #define DPAA2_DQ_STAT_VOLATILE (0x02u) /* volatile dequeue (VDC) */ #define DPAA2_DQ_STAT_EXPIRED (0x01u) /* VDC is expired */ /* * Portal flags. * * TODO: Use the same flags for both MC and software portals. */ #define DPAA2_SWP_DEF 0x0u #define DPAA2_SWP_NOWAIT_ALLOC 0x2u /* Do not sleep during init */ #define DPAA2_SWP_LOCKED 0x4000u /* Wait till portal's unlocked */ #define DPAA2_SWP_DESTROYED 0x8000u /* Terminate any operations */ /* Command return codes. */ #define DPAA2_SWP_STAT_OK 0x0 #define DPAA2_SWP_STAT_NO_MEMORY 0x9 /* No memory available */ #define DPAA2_SWP_STAT_PORTAL_DISABLED 0xFD /* QBMan portal disabled */ #define DPAA2_SWP_STAT_EINVAL 0xFE /* Invalid argument */ #define DPAA2_SWP_STAT_ERR 0xFF /* General error */ #define DPAA2_EQ_DESC_SIZE 32u /* Enqueue Command Descriptor */ #define DPAA2_FDR_DESC_SIZE 32u /* Descriptor of the FDR */ #define DPAA2_FD_SIZE 32u /* Frame Descriptor */ #define DPAA2_FDR_SIZE 64u /* Frame Dequeue Response */ #define DPAA2_SCN_SIZE 16u /* State Change Notification */ #define DPAA2_FA_SIZE 64u /* SW Frame Annotation */ #define DPAA2_SGE_SIZE 16u /* S/G table entry */ #define DPAA2_DQ_SIZE 64u /* Dequeue Response */ #define DPAA2_SWP_CMD_SIZE 64u /* SWP Command */ #define DPAA2_SWP_RSP_SIZE 64u /* SWP Command Response */ /* Opaque token for static dequeues. */ #define DPAA2_SWP_SDQCR_TOKEN 0xBBu /* Opaque token for volatile dequeues. */ #define DPAA2_SWP_VDQCR_TOKEN 0xCCu #define DPAA2_SWP_LOCK(__swp, __flags) do { \ mtx_assert(&(__swp)->lock, MA_NOTOWNED); \ mtx_lock(&(__swp)->lock); \ *(__flags) = (__swp)->flags; \ (__swp)->flags |= DPAA2_SWP_LOCKED; \ } while (0) #define DPAA2_SWP_UNLOCK(__swp) do { \ mtx_assert(&(__swp)->lock, MA_OWNED); \ (__swp)->flags &= ~DPAA2_SWP_LOCKED; \ mtx_unlock(&(__swp)->lock); \ } while (0) enum dpaa2_fd_format { DPAA2_FD_SINGLE = 0, DPAA2_FD_LIST, DPAA2_FD_SG }; /** * @brief Enqueue command descriptor. */ struct dpaa2_eq_desc { uint8_t verb; uint8_t dca; uint16_t seqnum; uint16_t orpid; uint16_t _reserved; uint32_t tgtid; uint32_t tag; uint16_t qdbin; uint8_t qpri; uint8_t _reserved1[3]; uint8_t wae; uint8_t rspid; uint64_t rsp_addr; } __packed; CTASSERT(sizeof(struct dpaa2_eq_desc) == DPAA2_EQ_DESC_SIZE); /** * @brief Frame Dequeue Response (FDR) descriptor. */ struct dpaa2_fdr_desc { uint8_t verb; uint8_t stat; uint16_t seqnum; uint16_t oprid; uint8_t _reserved; uint8_t tok; uint32_t fqid; uint32_t _reserved1; uint32_t fq_byte_cnt; uint32_t fq_frm_cnt; uint64_t fqd_ctx; } __packed; CTASSERT(sizeof(struct dpaa2_fdr_desc) == DPAA2_FDR_DESC_SIZE); /** * @brief State Change Notification Message (SCNM).
*/ struct dpaa2_scn { uint8_t verb; uint8_t stat; uint8_t state; uint8_t _reserved; uint32_t rid_tok; uint64_t ctx; } __packed; CTASSERT(sizeof(struct dpaa2_scn) == DPAA2_SCN_SIZE); /** * @brief DPAA2 frame descriptor. * * addr: Memory address of the start of the buffer holding the * frame data or the buffer containing the scatter/gather * list. * data_length: Length of the frame data (in bytes). * bpid_ivp_bmt: Buffer pool ID (14 bit + BMT bit + IVP bit) * offset_fmt_sl: Frame data offset, frame format and short-length fields. * frame_ctx: Frame context. This field allows the sender of a frame * to communicate some out-of-band information to the * receiver of the frame. * ctrl: Control bits (ERR, CBMT, ASAL, PTAC, DROPP, SC, DD). * flow_ctx: Frame flow context. Associates the frame with a flow * structure. QMan may use the FLC field for 3 purposes: * stashing control, order definition point identification, * and enqueue replication control. */ struct dpaa2_fd { uint64_t addr; uint32_t data_length; uint16_t bpid_ivp_bmt; uint16_t offset_fmt_sl; uint32_t frame_ctx; uint32_t ctrl; uint64_t flow_ctx; } __packed; CTASSERT(sizeof(struct dpaa2_fd) == DPAA2_FD_SIZE); /** * @brief DPAA2 frame annotation. */ struct dpaa2_fa { uint32_t magic; struct dpaa2_buf *buf; +#ifdef __notyet__ union { struct { /* Tx frame annotation */ struct dpaa2_ni_tx_ring *tx; }; -#ifdef __notyet__ struct { /* Rx frame annotation */ uint64_t _notused; }; -#endif }; +#endif } __packed; CTASSERT(sizeof(struct dpaa2_fa) <= DPAA2_FA_SIZE); /** * @brief DPAA2 scatter/gather entry. */ struct dpaa2_sg_entry { uint64_t addr; uint32_t len; uint16_t bpid; uint16_t offset_fmt; } __packed; CTASSERT(sizeof(struct dpaa2_sg_entry) == DPAA2_SGE_SIZE); /** * @brief Frame Dequeue Response (FDR). */ struct dpaa2_fdr { struct dpaa2_fdr_desc desc; struct dpaa2_fd fd; } __packed; CTASSERT(sizeof(struct dpaa2_fdr) == DPAA2_FDR_SIZE); /** * @brief Dequeue Response Message. */ struct dpaa2_dq { union { struct { uint8_t verb; uint8_t _reserved[63]; } common; struct dpaa2_fdr fdr; /* Frame Dequeue Response */ struct dpaa2_scn scn; /* State Change Notification */ }; } __packed; CTASSERT(sizeof(struct dpaa2_dq) == DPAA2_DQ_SIZE); /** * @brief Descriptor of the QBMan software portal. * * cena_res: Unmapped cache-enabled part of the portal's I/O memory. * cena_map: Mapped cache-enabled part of the portal's I/O memory. * cinh_res: Unmapped cache-inhibited part of the portal's I/O memory. * cinh_map: Mapped cache-inhibited part of the portal's I/O memory. * * dpio_dev: Device associated with the DPIO object to manage this * portal. * swp_version: Hardware IP version of the software portal. * swp_clk: QBMAN clock frequency value in Hz. * swp_cycles_ratio: How many 256 QBMAN cycles fit into one ns. * swp_id: Software portal ID. * * has_notif: True if the notification mode is used. * has_8prio: True for a channel with 8 priority WQs. Ignored unless * "has_notif" is true. */ struct dpaa2_swp_desc { struct resource *cena_res; struct resource_map *cena_map; struct resource *cinh_res; struct resource_map *cinh_map; device_t dpio_dev; uint32_t swp_version; uint32_t swp_clk; uint32_t swp_cycles_ratio; uint16_t swp_id; bool has_notif; bool has_8prio; }; /** * @brief Command holds data to be written to the software portal. */ struct dpaa2_swp_cmd { uint64_t params[DPAA2_SWP_CMD_PARAMS_N]; }; CTASSERT(sizeof(struct dpaa2_swp_cmd) == DPAA2_SWP_CMD_SIZE); /** * @brief Command response holds data received from the software portal. 
*/ struct dpaa2_swp_rsp { uint64_t params[DPAA2_SWP_RSP_PARAMS_N]; }; CTASSERT(sizeof(struct dpaa2_swp_rsp) == DPAA2_SWP_RSP_SIZE); /** * @brief QBMan software portal. * * res: Unmapped cache-enabled and cache-inhibited parts of the portal. * map: Mapped cache-enabled and cache-inhibited parts of the portal. * desc: Descriptor of the QBMan software portal. * lock: Lock to guard access to the portal. * flags: Current state of the object. * sdq: Push dequeues status. * mc: Management commands data. * mr: Management response data. * dqrr: Dequeue Response Ring is used to issue frame dequeue responses * from the QBMan to the driver. * eqcr: Enqueue Command Ring is used to issue frame enqueue commands * from the driver to the QBMan. */ struct dpaa2_swp { struct resource *cena_res; struct resource_map *cena_map; struct resource *cinh_res; struct resource_map *cinh_map; struct mtx lock; struct dpaa2_swp_desc *desc; uint16_t flags; /* Static Dequeue Command Register value (to obtain CDANs). */ uint32_t sdq; /* Volatile Dequeue Command (to obtain frames). */ struct { uint32_t valid_bit; /* 0x00 or 0x80 */ } vdq; struct { bool atomic; bool writes_cinh; bool mem_backed; } cfg; /* Software portal configuration. */ struct { uint32_t valid_bit; /* 0x00 or 0x80 */ } mc; struct { uint32_t valid_bit; /* 0x00 or 0x80 */ } mr; struct { uint32_t next_idx; uint32_t valid_bit; uint8_t ring_size; bool reset_bug; /* dqrr reset workaround */ uint32_t irq_threshold; uint32_t irq_itp; } dqrr; struct { uint32_t pi; /* producer index */ uint32_t pi_vb; /* PI valid bits */ uint32_t pi_ring_size; uint32_t pi_ci_mask; uint32_t ci; int available; uint32_t pend; uint32_t no_pfdr; } eqcr; }; /* Management routines. */ int dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc, uint16_t flags); void dpaa2_swp_free_portal(struct dpaa2_swp *swp); uint32_t dpaa2_swp_set_cfg(uint8_t max_fill, uint8_t wn, uint8_t est, uint8_t rpm, uint8_t dcm, uint8_t epm, int sd, int sp, int se, int dp, int de, int ep); /* Read/write registers of a software portal. */ void dpaa2_swp_write_reg(struct dpaa2_swp *swp, uint32_t o, uint32_t v); uint32_t dpaa2_swp_read_reg(struct dpaa2_swp *swp, uint32_t o); /* Helper routines. */ void dpaa2_swp_set_ed_norp(struct dpaa2_eq_desc *ed, bool resp_always); void dpaa2_swp_set_ed_fq(struct dpaa2_eq_desc *ed, uint32_t fqid); void dpaa2_swp_set_intr_trigger(struct dpaa2_swp *swp, uint32_t mask); uint32_t dpaa2_swp_get_intr_trigger(struct dpaa2_swp *swp); uint32_t dpaa2_swp_read_intr_status(struct dpaa2_swp *swp); void dpaa2_swp_clear_intr_status(struct dpaa2_swp *swp, uint32_t mask); void dpaa2_swp_set_push_dequeue(struct dpaa2_swp *swp, uint8_t chan_idx, bool en); int dpaa2_swp_set_irq_coalescing(struct dpaa2_swp *swp, uint32_t threshold, uint32_t holdoff); /* Software portal commands.
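 *
 * A typical Rx flow (illustrative only): request CDANs on a channel with dpaa2_swp_conf_wq_channel(), seed the buffer pool via dpaa2_swp_release_bufs(), and then consume frames either with dpaa2_swp_pull() (volatile dequeue) or via push dequeue with dpaa2_swp_set_push_dequeue() and dpaa2_swp_dqrr_next_locked().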
*/ int dpaa2_swp_conf_wq_channel(struct dpaa2_swp *swp, uint16_t chan_id, uint8_t we_mask, bool cdan_en, uint64_t ctx); int dpaa2_swp_query_bp(struct dpaa2_swp *swp, uint16_t bpid, struct dpaa2_bp_conf *conf); int dpaa2_swp_release_bufs(struct dpaa2_swp *swp, uint16_t bpid, bus_addr_t *buf, uint32_t buf_num); int dpaa2_swp_dqrr_next_locked(struct dpaa2_swp *swp, struct dpaa2_dq *dq, uint32_t *idx); int dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id, struct dpaa2_buf *buf, uint32_t frames_n); int dpaa2_swp_enq(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed, struct dpaa2_fd *fd); int dpaa2_swp_enq_mult(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed, struct dpaa2_fd *fd, uint32_t *flags, int frames_n); #endif /* _DPAA2_SWP_H */ diff --git a/sys/dev/dpaa2/dpaa2_types.c b/sys/dev/dpaa2/dpaa2_types.c new file mode 100644 index 000000000000..c2fac0ea426d --- /dev/null +++ b/sys/dev/dpaa2/dpaa2_types.c @@ -0,0 +1,114 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright © 2023 Dmitry Salychev + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include + +#include + +#include "dpaa2_types.h" + +#define COMPARE_TYPE(t, v) (strncmp((v), (t), strlen((v))) == 0) + +/** + * @brief Convert DPAA2 device type to string. + */ +const char * +dpaa2_ttos(enum dpaa2_dev_type type) +{ + switch (type) { + case DPAA2_DEV_MC: + return ("mc"); /* NOTE: to print as information only. */ + case DPAA2_DEV_RC: + return ("dprc"); + case DPAA2_DEV_IO: + return ("dpio"); + case DPAA2_DEV_NI: + return ("dpni"); + case DPAA2_DEV_MCP: + return ("dpmcp"); + case DPAA2_DEV_BP: + return ("dpbp"); + case DPAA2_DEV_CON: + return ("dpcon"); + case DPAA2_DEV_MAC: + return ("dpmac"); + case DPAA2_DEV_MUX: + return ("dpdmux"); + case DPAA2_DEV_SW: + return ("dpsw"); + default: + break; + } + + return ("notype"); +} + +/** + * @brief Convert string to DPAA2 device type. 
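+ *
+ * For example, dpaa2_stot("dpni") returns DPAA2_DEV_NI; any string that
+ * matches none of the known prefixes falls through to DPAA2_DEV_NOTYPE.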
+ */ +enum dpaa2_dev_type +dpaa2_stot(const char *str) +{ + if (COMPARE_TYPE(str, "dprc")) { + return (DPAA2_DEV_RC); + } else if (COMPARE_TYPE(str, "dpio")) { + return (DPAA2_DEV_IO); + } else if (COMPARE_TYPE(str, "dpni")) { + return (DPAA2_DEV_NI); + } else if (COMPARE_TYPE(str, "dpmcp")) { + return (DPAA2_DEV_MCP); + } else if (COMPARE_TYPE(str, "dpbp")) { + return (DPAA2_DEV_BP); + } else if (COMPARE_TYPE(str, "dpcon")) { + return (DPAA2_DEV_CON); + } else if (COMPARE_TYPE(str, "dpmac")) { + return (DPAA2_DEV_MAC); + } else if (COMPARE_TYPE(str, "dpdmux")) { + return (DPAA2_DEV_MUX); + } else if (COMPARE_TYPE(str, "dpsw")) { + return (DPAA2_DEV_SW); + } + + return (DPAA2_DEV_NOTYPE); +} + +/** + * @brief Callback to obtain a physical address of the only DMA segment mapped. + */ +void +dpaa2_dmamap_oneseg_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + if (error == 0) { + KASSERT(nseg == 1, ("%s: too many segments: nseg=%d\n", + __func__, nseg)); + *(bus_addr_t *)arg = segs[0].ds_addr; + } else { + panic("%s: error=%d\n", __func__, error); + } +} diff --git a/sys/dev/dpaa2/dpaa2_types.h b/sys/dev/dpaa2/dpaa2_types.h index 5e7ccad15e1c..dbfac9ce0a40 100644 --- a/sys/dev/dpaa2/dpaa2_types.h +++ b/sys/dev/dpaa2/dpaa2_types.h @@ -1,116 +1,135 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * - * Copyright © 2021-2022 Dmitry Salychev + * Copyright © 2021-2023 Dmitry Salychev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _DPAA2_TYPES_H #define _DPAA2_TYPES_H +#include +#include +#include + #include +#include #define DPAA2_MAGIC ((uint32_t) 0xD4AA2C0Du) +#define DPAA2_MAX_CHANNELS 16 /* CPU cores */ +#define DPAA2_MAX_TCS 8 /* Traffic classes */ + /** * @brief Types of the DPAA2 devices. */ enum dpaa2_dev_type { DPAA2_DEV_MC = 7500, /* Management Complex (firmware bus) */ DPAA2_DEV_RC, /* Resource Container (firmware bus) */ DPAA2_DEV_IO, /* I/O object (to work with QBMan portal) */ DPAA2_DEV_NI, /* Network Interface */ DPAA2_DEV_MCP, /* MC portal */ DPAA2_DEV_BP, /* Buffer Pool */ DPAA2_DEV_CON, /* Concentrator */ DPAA2_DEV_MAC, /* MAC object */ DPAA2_DEV_MUX, /* MUX (Datacenter bridge) object */ DPAA2_DEV_SW, /* Ethernet Switch */ DPAA2_DEV_NOTYPE /* Shouldn't be assigned to any DPAA2 device. */ }; /** - * @brief Types of the DPAA2 buffers. + * @brief Types of the DPNI queues. 
*/ -enum dpaa2_buf_type { - DPAA2_BUF_RX = 75, /* Rx buffer */ - DPAA2_BUF_TX, /* Tx buffer */ - DPAA2_BUF_STORE /* Channel storage, key configuration */ -}; - -/** - * @brief DMA-mapped buffer (for Rx/Tx buffers, channel storage, etc.). - */ -struct dpaa2_buf { - enum dpaa2_buf_type type; - union { - struct { - bus_dma_tag_t dmat; /* DMA tag for this buffer */ - bus_dmamap_t dmap; - bus_addr_t paddr; - void *vaddr; - - struct mbuf *m; /* associated mbuf */ - } rx; - struct { - bus_dma_tag_t dmat; /* DMA tag for this buffer */ - bus_dmamap_t dmap; - bus_addr_t paddr; - void *vaddr; - - struct mbuf *m; /* associated mbuf */ - uint64_t idx; - - /* for scatter/gather table */ - bus_dma_tag_t sgt_dmat; - bus_dmamap_t sgt_dmap; - bus_addr_t sgt_paddr; - void *sgt_vaddr; - } tx; - struct { - bus_dma_tag_t dmat; /* DMA tag for this buffer */ - bus_dmamap_t dmap; - bus_addr_t paddr; - void *vaddr; - } store; - }; +enum dpaa2_ni_queue_type { + DPAA2_NI_QUEUE_RX = 0, + DPAA2_NI_QUEUE_TX, + DPAA2_NI_QUEUE_TX_CONF, + DPAA2_NI_QUEUE_RX_ERR }; struct dpaa2_atomic { volatile int counter; }; +/** + * @brief Tx ring. + * + * fq: Parent (TxConf) frame queue. + * fqid: ID of the logical Tx queue. + * br: Ring buffer for mbufs to transmit. + * lock: Lock for the ring buffer. + */ +struct dpaa2_ni_tx_ring { + struct dpaa2_ni_fq *fq; + uint32_t fqid; + uint32_t txid; /* Tx ring index */ + + struct buf_ring *br; + struct mtx lock; +} __aligned(CACHE_LINE_SIZE); + +/** + * @brief Frame Queue is the basic queuing structure used by the QMan. + * + * It comprises a list of frame descriptors (FDs), so it can be thought of + * as a queue of frames. + * + * NOTE: When frames on a FQ are ready to be processed, the FQ is enqueued + * onto a work queue (WQ). + * + * fqid: Frame queue ID, can be used to enqueue/dequeue or execute other + * commands on the queue through DPIO. + * txq_n: Number of configured Tx queues. + * tx_fqid: Frame queue IDs of the Tx queues which belong to the same flowid. + * Note that Tx queues are logical queues and not all management + * commands are available on these queue types. + * qdbin: Queue destination bin. Can be used with the DPIO enqueue + * operation based on QDID, QDBIN and QPRI. Note that all Tx queues + * with the same flowid have the same destination bin. + */ +struct dpaa2_ni_fq { + struct dpaa2_channel *chan; + uint32_t fqid; + uint16_t flowid; + uint8_t tc; + enum dpaa2_ni_queue_type type; + + /* Optional fields (for TxConf queue). */ + struct dpaa2_ni_tx_ring tx_rings[DPAA2_MAX_TCS]; + uint32_t tx_qdbin; +} __aligned(CACHE_LINE_SIZE); + /* Handy wrappers over atomic operations. */ #define DPAA2_ATOMIC_XCHG(a, val) \ (atomic_swap_int(&(a)->counter, (val))) #define DPAA2_ATOMIC_READ(a) \ (atomic_load_acq_int(&(a)->counter)) #define DPAA2_ATOMIC_ADD(a, val) \ (atomic_add_acq_int(&(a)->counter, (val))) -/* Convert DPAA2 type to/from string. 
*/ -const char *dpaa2_ttos(enum dpaa2_dev_type type); -enum dpaa2_dev_type dpaa2_stot(const char *str); +const char *dpaa2_ttos(enum dpaa2_dev_type); +enum dpaa2_dev_type dpaa2_stot(const char *); +void dpaa2_dmamap_oneseg_cb(void *, bus_dma_segment_t *, int, int); #endif /* _DPAA2_TYPES_H */ diff --git a/sys/modules/dpaa2/Makefile b/sys/modules/dpaa2/Makefile index 64b73cb20723..816d6fa5cf4a 100644 --- a/sys/modules/dpaa2/Makefile +++ b/sys/modules/dpaa2/Makefile @@ -1,42 +1,48 @@ .PATH: ${SRCTOP}/sys/dev/dpaa2 KMOD= dpaa2 SRCS= dpaa2_mc.c SRCS+= dpaa2_rc.c SRCS+= dpaa2_io.c SRCS+= dpaa2_bp.c SRCS+= dpaa2_ni.c SRCS+= dpaa2_mcp.c SRCS+= dpaa2_swp.c SRCS+= dpaa2_mac.c SRCS+= dpaa2_con.c +SRCS+= dpaa2_buf.c +SRCS+= dpaa2_channel.c +SRCS+= dpaa2_types.c SRCS+= dpaa2_cmd_if.c dpaa2_cmd_if.h SRCS+= dpaa2_swp_if.c dpaa2_swp_if.h SRCS+= dpaa2_mc_if.c dpaa2_mc_if.h SRCS+= memac_mdio_common.c memac_mdio_if.c memac_mdio_if.h SRCS+= dpaa2_console.c SRCS+= bus_if.h device_if.h miibus_if.h SRCS+= pcib_if.h pci_if.h -SRCS+= opt_acpi.h opt_platform.h + +SRCS+= opt_acpi.h +SRCS+= opt_platform.h +SRCS+= opt_rss.h SRCS.DEV_ACPI= dpaa2_mc_acpi.c \ - memac_mdio_acpi.c \ - acpi_if.h \ - acpi_bus_if.h + memac_mdio_acpi.c \ + acpi_if.h \ + acpi_bus_if.h .if !empty(OPT_FDT) SRCS+= dpaa2_mc_fdt.c \ memac_mdio_fdt.c \ ofw_bus_if.h .endif MFILES= dev/dpaa2/dpaa2_cmd_if.m \ dev/dpaa2/dpaa2_swp_if.m \ dev/dpaa2/dpaa2_mc_if.m \ dev/dpaa2/memac_mdio_if.m .include