Index: head/sys/conf/files
===================================================================
--- head/sys/conf/files (revision 332422)
+++ head/sys/conf/files (revision 332423)
@@ -1,4975 +1,4976 @@
# $FreeBSD$
#
# The long compile-with and dependency lines are required because of
# limitations in config: backslash-newline doesn't work in strings, and
# dependency lines other than the first are silently ignored.
#
acpi_quirks.h optional acpi \
	dependency "$S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \
	compile-with "${AWK} -f $S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \
	no-obj no-implicit-rule before-depend \
	clean "acpi_quirks.h"
bhnd_nvram_map.h optional bhnd \
	dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
	compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -h" \
	no-obj no-implicit-rule before-depend \
	clean "bhnd_nvram_map.h"
bhnd_nvram_map_data.h optional bhnd \
	dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
	compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -d" \
	no-obj no-implicit-rule before-depend \
	clean "bhnd_nvram_map_data.h"
#
# The 'fdt_dtb_file' target covers an actual DTB file name, which is derived
# from the specified source (DTS) file: .dts -> .dtb
#
fdt_dtb_file optional fdt fdt_dtb_static \
	compile-with "sh -c 'MACHINE=${MACHINE} $S/tools/fdt/make_dtb.sh $S ${FDT_DTS_FILE} ${.CURDIR}'" \
	no-obj no-implicit-rule before-depend \
	clean "${FDT_DTS_FILE:R}.dtb"
fdt_static_dtb.h optional fdt fdt_dtb_static \
	compile-with "sh -c 'MACHINE=${MACHINE} $S/tools/fdt/make_dtbh.sh ${FDT_DTS_FILE} ${.CURDIR}'" \
	dependency "fdt_dtb_file" \
	no-obj no-implicit-rule before-depend \
	clean "fdt_static_dtb.h"
feeder_eq_gen.h optional sound \
	dependency "$S/tools/sound/feeder_eq_mkfilter.awk" \
	compile-with "${AWK} -f $S/tools/sound/feeder_eq_mkfilter.awk -- ${FEEDER_EQ_PRESETS} > feeder_eq_gen.h" \
	no-obj no-implicit-rule before-depend \
	clean "feeder_eq_gen.h"
feeder_rate_gen.h optional sound \
	dependency "$S/tools/sound/feeder_rate_mkfilter.awk" \
	compile-with "${AWK} -f $S/tools/sound/feeder_rate_mkfilter.awk -- ${FEEDER_RATE_PRESETS} > feeder_rate_gen.h" \
	no-obj no-implicit-rule before-depend \
	clean "feeder_rate_gen.h"
snd_fxdiv_gen.h optional sound \
	dependency "$S/tools/sound/snd_fxdiv_gen.awk" \
	compile-with "${AWK} -f $S/tools/sound/snd_fxdiv_gen.awk -- > snd_fxdiv_gen.h" \
	no-obj no-implicit-rule before-depend \
	clean "snd_fxdiv_gen.h"
miidevs.h optional miibus | mii \
	dependency "$S/tools/miidevs2h.awk $S/dev/mii/miidevs" \
	compile-with "${AWK} -f $S/tools/miidevs2h.awk $S/dev/mii/miidevs" \
	no-obj no-implicit-rule before-depend \
	clean "miidevs.h"
pccarddevs.h standard \
	dependency "$S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \
	compile-with "${AWK} -f $S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \
	no-obj no-implicit-rule before-depend \
	clean "pccarddevs.h"
kbdmuxmap.h optional kbdmux_dflt_keymap \
	compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${KBDMUX_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > kbdmuxmap.h" \
	no-obj no-implicit-rule before-depend \
	clean "kbdmuxmap.h"
teken_state.h optional sc | vt \
	dependency "$S/teken/gensequences $S/teken/sequences" \
	compile-with "${AWK} -f $S/teken/gensequences $S/teken/sequences > teken_state.h" \
	no-obj no-implicit-rule before-depend \
	clean "teken_state.h"
usbdevs.h optional usb \
	dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \
	compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -h" \
	no-obj no-implicit-rule before-depend \
	clean "usbdevs.h"
usbdevs_data.h optional usb \
	dependency "$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs" \
	compile-with "${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -d" \
	no-obj no-implicit-rule before-depend \
	clean "usbdevs_data.h"
cam/cam.c optional scbus
cam/cam_compat.c optional scbus
cam/cam_iosched.c optional scbus
cam/cam_periph.c optional scbus
cam/cam_queue.c optional scbus
cam/cam_sim.c optional scbus
cam/cam_xpt.c optional scbus
cam/ata/ata_all.c optional scbus
cam/ata/ata_xpt.c optional scbus
cam/ata/ata_pmp.c optional scbus
cam/nvme/nvme_all.c optional scbus nvme
cam/nvme/nvme_da.c optional scbus nvme da
cam/nvme/nvme_xpt.c optional scbus nvme
cam/scsi/scsi_xpt.c optional scbus
cam/scsi/scsi_all.c optional scbus
cam/scsi/scsi_cd.c optional cd
cam/scsi/scsi_ch.c optional ch
cam/ata/ata_da.c optional ada | da
cam/ctl/ctl.c optional ctl
cam/ctl/ctl_backend.c optional ctl
cam/ctl/ctl_backend_block.c optional ctl
cam/ctl/ctl_backend_ramdisk.c optional ctl
cam/ctl/ctl_cmd_table.c optional ctl
cam/ctl/ctl_frontend.c optional ctl
cam/ctl/ctl_frontend_cam_sim.c optional ctl
cam/ctl/ctl_frontend_ioctl.c optional ctl
cam/ctl/ctl_frontend_iscsi.c optional ctl cfiscsi
cam/ctl/ctl_ha.c optional ctl
cam/ctl/ctl_scsi_all.c optional ctl
cam/ctl/ctl_tpc.c optional ctl
cam/ctl/ctl_tpc_local.c optional ctl
cam/ctl/ctl_error.c optional ctl
cam/ctl/ctl_util.c optional ctl
cam/ctl/scsi_ctl.c optional ctl
cam/mmc/mmc_xpt.c optional scbus mmccam
cam/mmc/mmc_da.c optional scbus mmccam da
cam/scsi/scsi_da.c optional da
cam/scsi/scsi_low.c optional ncv | nsp | stg
cam/scsi/scsi_pass.c optional pass
cam/scsi/scsi_pt.c optional pt
cam/scsi/scsi_sa.c optional sa
cam/scsi/scsi_enc.c optional ses
cam/scsi/scsi_enc_ses.c optional ses
cam/scsi/scsi_enc_safte.c optional ses
cam/scsi/scsi_sg.c optional sg
cam/scsi/scsi_targ_bh.c optional targbh
cam/scsi/scsi_target.c optional targ
cam/scsi/smp_all.c optional scbus
# shared between zfs and dtrace
cddl/compat/opensolaris/kern/opensolaris.c optional zfs | dtrace compile-with "${CDDL_C}"
cddl/compat/opensolaris/kern/opensolaris_cmn_err.c optional zfs | dtrace compile-with "${CDDL_C}"
cddl/compat/opensolaris/kern/opensolaris_kmem.c optional zfs | dtrace compile-with "${CDDL_C}"
cddl/compat/opensolaris/kern/opensolaris_misc.c optional zfs | dtrace compile-with "${CDDL_C}"
cddl/compat/opensolaris/kern/opensolaris_proc.c optional zfs | dtrace compile-with "${CDDL_C}"
cddl/compat/opensolaris/kern/opensolaris_sunddi.c optional zfs | dtrace compile-with "${CDDL_C}"
cddl/compat/opensolaris/kern/opensolaris_taskq.c optional zfs | dtrace compile-with "${CDDL_C}"
# zfs specific
cddl/compat/opensolaris/kern/opensolaris_acl.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_dtrace.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_kobj.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_kstat.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_lookup.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_policy.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_string.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_sysevent.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_uio.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_vfs.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_vm.c optional zfs compile-with "${ZFS_C}" cddl/compat/opensolaris/kern/opensolaris_zone.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/acl/acl_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/avl/avl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/opensolaris_fnvpair.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/opensolaris_nvpair.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/nvpair/opensolaris_nvpair_alloc_fixed.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/unicode/u8_textprep.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfeature_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_comutil.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_deleg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_fletcher.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_ioctl_compat.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_namecheck.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zfs_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zpool_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/common/zfs/zprop_common.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/vnode.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/abd.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/aggsum.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/blkptr.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bplist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bptree.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/bqueue.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/cityhash.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/ddt.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/ddt_zap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c optional zfs compile-with 
"${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_zfetch.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c optional zfs compile-with "${ZFS_C}" \ warning "kernel contains CDDL licensed ZFS filesystem" cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_bookmark.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deadlist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deleg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_destroy.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_pool.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_prop.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_userhold.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_synctask.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/gzip.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lz4.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lzjb.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/multilist.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/range_tree.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/refcount.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/rrwlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/sha256.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/skein_zfs.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_checkpoint.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_config.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_errlog.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_history.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/space_reftree.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/trim_map.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/txg.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/uberblock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/unique.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev.c optional zfs compile-with "${ZFS_C}" 
cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_cache.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_file.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_indirect.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_indirect_births.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_indirect_mapping.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_label.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_missing.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_queue.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_raidz.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_removal.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_root.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap_leaf.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zap_micro.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp_get.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp_global.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp_iter.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zcp_synctask.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfeature.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_acl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_byteswap.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_debug.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_dir.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fuid.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_log.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_onexit.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_replay.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_rlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_sa.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c optional zfs compile-with "${ZFS_C}" 
cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_checksum.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_compress.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zio_inject.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zle.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zrlock.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zthr.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/callb.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/fm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/list.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/os/nvpair_alloc_system.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/adler32.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/deflate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inffast.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inflate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/inftrees.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/opensolaris_crc32.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/trees.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zmod.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zmod_subr.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/zmod/zutil.c optional zfs compile-with "${ZFS_C}" # zfs lua support cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lapi.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lauxlib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lbaselib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lbitlib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lcode.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lcompat.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lcorolib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lctype.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ldebug.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ldo.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ldump.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lfunc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lgc.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/llex.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lmem.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lobject.c optional zfs compile-with "${ZFS_C}" 
cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lopcodes.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lparser.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lstate.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lstring.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lstrlib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ltable.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ltablib.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/ltm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lundump.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lvm.c optional zfs compile-with "${ZFS_C}" cddl/contrib/opensolaris/uts/common/fs/zfs/lua/lzio.c optional zfs compile-with "${ZFS_C}" # dtrace specific cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c optional dtrace compile-with "${DTRACE_C}" \ warning "kernel contains CDDL licensed DTRACE" cddl/contrib/opensolaris/uts/common/dtrace/dtrace_xoroshiro128_plus.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/dtmalloc/dtmalloc.c optional dtmalloc | dtraceall compile-with "${CDDL_C}" cddl/dev/profile/profile.c optional dtrace_profile | dtraceall compile-with "${CDDL_C}" cddl/dev/sdt/sdt.c optional dtrace_sdt | dtraceall compile-with "${CDDL_C}" cddl/dev/fbt/fbt.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/systrace/systrace.c optional dtrace_systrace | dtraceall compile-with "${CDDL_C}" cddl/dev/prototype.c optional dtrace_prototype | dtraceall compile-with "${CDDL_C}" fs/nfsclient/nfs_clkdtrace.c optional dtnfscl nfscl | dtraceall nfscl compile-with "${CDDL_C}" compat/cloudabi/cloudabi_clock.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_errno.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_fd.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_file.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_futex.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_mem.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_proc.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_random.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_sock.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_thread.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi/cloudabi_vdso.c optional compat_cloudabi32 | compat_cloudabi64 compat/cloudabi32/cloudabi32_fd.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_module.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_poll.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_sock.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_syscalls.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_sysent.c optional compat_cloudabi32 compat/cloudabi32/cloudabi32_thread.c optional compat_cloudabi32 compat/cloudabi64/cloudabi64_fd.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_module.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_poll.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_sock.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_syscalls.c optional compat_cloudabi64 
compat/cloudabi64/cloudabi64_sysent.c optional compat_cloudabi64 compat/cloudabi64/cloudabi64_thread.c optional compat_cloudabi64 compat/freebsd32/freebsd32_capability.c optional compat_freebsd32 compat/freebsd32/freebsd32_ioctl.c optional compat_freebsd32 compat/freebsd32/freebsd32_misc.c optional compat_freebsd32 compat/freebsd32/freebsd32_syscalls.c optional compat_freebsd32 compat/freebsd32/freebsd32_sysent.c optional compat_freebsd32 contrib/ck/src/ck_array.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_centralized.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_combining.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_dissemination.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_mcs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_barrier_tournament.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_epoch.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_hp.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_hs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_ht.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/ck/src/ck_rhs.c standard compile-with "${NORMAL_C} -I$S/contrib/ck/include" contrib/dev/acpica/common/ahids.c optional acpi acpi_debug contrib/dev/acpica/common/ahuuids.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbcmds.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbconvert.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbdisply.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbexec.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbhistry.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbinput.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbmethod.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbnames.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbobject.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbstats.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbtest.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbutils.c optional acpi acpi_debug contrib/dev/acpica/components/debugger/dbxface.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmbuffer.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmcstyle.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmdeferred.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmnames.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmopcode.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrc.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcl2.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmresrcs.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmutils.c optional acpi acpi_debug contrib/dev/acpica/components/disassembler/dmwalk.c optional acpi acpi_debug contrib/dev/acpica/components/dispatcher/dsargs.c optional acpi contrib/dev/acpica/components/dispatcher/dscontrol.c optional acpi 
contrib/dev/acpica/components/dispatcher/dsdebug.c optional acpi contrib/dev/acpica/components/dispatcher/dsfield.c optional acpi contrib/dev/acpica/components/dispatcher/dsinit.c optional acpi contrib/dev/acpica/components/dispatcher/dsmethod.c optional acpi contrib/dev/acpica/components/dispatcher/dsmthdat.c optional acpi contrib/dev/acpica/components/dispatcher/dsobject.c optional acpi contrib/dev/acpica/components/dispatcher/dsopcode.c optional acpi contrib/dev/acpica/components/dispatcher/dspkginit.c optional acpi contrib/dev/acpica/components/dispatcher/dsutils.c optional acpi contrib/dev/acpica/components/dispatcher/dswexec.c optional acpi contrib/dev/acpica/components/dispatcher/dswload.c optional acpi contrib/dev/acpica/components/dispatcher/dswload2.c optional acpi contrib/dev/acpica/components/dispatcher/dswscope.c optional acpi contrib/dev/acpica/components/dispatcher/dswstate.c optional acpi contrib/dev/acpica/components/events/evevent.c optional acpi contrib/dev/acpica/components/events/evglock.c optional acpi contrib/dev/acpica/components/events/evgpe.c optional acpi contrib/dev/acpica/components/events/evgpeblk.c optional acpi contrib/dev/acpica/components/events/evgpeinit.c optional acpi contrib/dev/acpica/components/events/evgpeutil.c optional acpi contrib/dev/acpica/components/events/evhandler.c optional acpi contrib/dev/acpica/components/events/evmisc.c optional acpi contrib/dev/acpica/components/events/evregion.c optional acpi contrib/dev/acpica/components/events/evrgnini.c optional acpi contrib/dev/acpica/components/events/evsci.c optional acpi contrib/dev/acpica/components/events/evxface.c optional acpi contrib/dev/acpica/components/events/evxfevnt.c optional acpi contrib/dev/acpica/components/events/evxfgpe.c optional acpi contrib/dev/acpica/components/events/evxfregn.c optional acpi contrib/dev/acpica/components/executer/exconcat.c optional acpi contrib/dev/acpica/components/executer/exconfig.c optional acpi contrib/dev/acpica/components/executer/exconvrt.c optional acpi contrib/dev/acpica/components/executer/excreate.c optional acpi contrib/dev/acpica/components/executer/exdebug.c optional acpi contrib/dev/acpica/components/executer/exdump.c optional acpi contrib/dev/acpica/components/executer/exfield.c optional acpi contrib/dev/acpica/components/executer/exfldio.c optional acpi contrib/dev/acpica/components/executer/exmisc.c optional acpi contrib/dev/acpica/components/executer/exmutex.c optional acpi contrib/dev/acpica/components/executer/exnames.c optional acpi contrib/dev/acpica/components/executer/exoparg1.c optional acpi contrib/dev/acpica/components/executer/exoparg2.c optional acpi contrib/dev/acpica/components/executer/exoparg3.c optional acpi contrib/dev/acpica/components/executer/exoparg6.c optional acpi contrib/dev/acpica/components/executer/exprep.c optional acpi contrib/dev/acpica/components/executer/exregion.c optional acpi contrib/dev/acpica/components/executer/exresnte.c optional acpi contrib/dev/acpica/components/executer/exresolv.c optional acpi contrib/dev/acpica/components/executer/exresop.c optional acpi contrib/dev/acpica/components/executer/exstore.c optional acpi contrib/dev/acpica/components/executer/exstoren.c optional acpi contrib/dev/acpica/components/executer/exstorob.c optional acpi contrib/dev/acpica/components/executer/exsystem.c optional acpi contrib/dev/acpica/components/executer/extrace.c optional acpi contrib/dev/acpica/components/executer/exutils.c optional acpi contrib/dev/acpica/components/hardware/hwacpi.c optional acpi 
contrib/dev/acpica/components/hardware/hwesleep.c optional acpi contrib/dev/acpica/components/hardware/hwgpe.c optional acpi contrib/dev/acpica/components/hardware/hwpci.c optional acpi contrib/dev/acpica/components/hardware/hwregs.c optional acpi contrib/dev/acpica/components/hardware/hwsleep.c optional acpi contrib/dev/acpica/components/hardware/hwtimer.c optional acpi contrib/dev/acpica/components/hardware/hwvalid.c optional acpi contrib/dev/acpica/components/hardware/hwxface.c optional acpi contrib/dev/acpica/components/hardware/hwxfsleep.c optional acpi contrib/dev/acpica/components/namespace/nsaccess.c optional acpi contrib/dev/acpica/components/namespace/nsalloc.c optional acpi contrib/dev/acpica/components/namespace/nsarguments.c optional acpi contrib/dev/acpica/components/namespace/nsconvert.c optional acpi contrib/dev/acpica/components/namespace/nsdump.c optional acpi contrib/dev/acpica/components/namespace/nseval.c optional acpi contrib/dev/acpica/components/namespace/nsinit.c optional acpi contrib/dev/acpica/components/namespace/nsload.c optional acpi contrib/dev/acpica/components/namespace/nsnames.c optional acpi contrib/dev/acpica/components/namespace/nsobject.c optional acpi contrib/dev/acpica/components/namespace/nsparse.c optional acpi contrib/dev/acpica/components/namespace/nspredef.c optional acpi contrib/dev/acpica/components/namespace/nsprepkg.c optional acpi contrib/dev/acpica/components/namespace/nsrepair.c optional acpi contrib/dev/acpica/components/namespace/nsrepair2.c optional acpi contrib/dev/acpica/components/namespace/nssearch.c optional acpi contrib/dev/acpica/components/namespace/nsutils.c optional acpi contrib/dev/acpica/components/namespace/nswalk.c optional acpi contrib/dev/acpica/components/namespace/nsxfeval.c optional acpi contrib/dev/acpica/components/namespace/nsxfname.c optional acpi contrib/dev/acpica/components/namespace/nsxfobj.c optional acpi contrib/dev/acpica/components/parser/psargs.c optional acpi contrib/dev/acpica/components/parser/psloop.c optional acpi contrib/dev/acpica/components/parser/psobject.c optional acpi contrib/dev/acpica/components/parser/psopcode.c optional acpi contrib/dev/acpica/components/parser/psopinfo.c optional acpi contrib/dev/acpica/components/parser/psparse.c optional acpi contrib/dev/acpica/components/parser/psscope.c optional acpi contrib/dev/acpica/components/parser/pstree.c optional acpi contrib/dev/acpica/components/parser/psutils.c optional acpi contrib/dev/acpica/components/parser/pswalk.c optional acpi contrib/dev/acpica/components/parser/psxface.c optional acpi contrib/dev/acpica/components/resources/rsaddr.c optional acpi contrib/dev/acpica/components/resources/rscalc.c optional acpi contrib/dev/acpica/components/resources/rscreate.c optional acpi contrib/dev/acpica/components/resources/rsdump.c optional acpi acpi_debug contrib/dev/acpica/components/resources/rsdumpinfo.c optional acpi contrib/dev/acpica/components/resources/rsinfo.c optional acpi contrib/dev/acpica/components/resources/rsio.c optional acpi contrib/dev/acpica/components/resources/rsirq.c optional acpi contrib/dev/acpica/components/resources/rslist.c optional acpi contrib/dev/acpica/components/resources/rsmemory.c optional acpi contrib/dev/acpica/components/resources/rsmisc.c optional acpi contrib/dev/acpica/components/resources/rsserial.c optional acpi contrib/dev/acpica/components/resources/rsutils.c optional acpi contrib/dev/acpica/components/resources/rsxface.c optional acpi contrib/dev/acpica/components/tables/tbdata.c optional acpi 
contrib/dev/acpica/components/tables/tbfadt.c optional acpi contrib/dev/acpica/components/tables/tbfind.c optional acpi contrib/dev/acpica/components/tables/tbinstal.c optional acpi contrib/dev/acpica/components/tables/tbprint.c optional acpi contrib/dev/acpica/components/tables/tbutils.c optional acpi contrib/dev/acpica/components/tables/tbxface.c optional acpi contrib/dev/acpica/components/tables/tbxfload.c optional acpi contrib/dev/acpica/components/tables/tbxfroot.c optional acpi contrib/dev/acpica/components/utilities/utaddress.c optional acpi contrib/dev/acpica/components/utilities/utalloc.c optional acpi contrib/dev/acpica/components/utilities/utascii.c optional acpi contrib/dev/acpica/components/utilities/utbuffer.c optional acpi contrib/dev/acpica/components/utilities/utcache.c optional acpi contrib/dev/acpica/components/utilities/utcopy.c optional acpi contrib/dev/acpica/components/utilities/utdebug.c optional acpi contrib/dev/acpica/components/utilities/utdecode.c optional acpi contrib/dev/acpica/components/utilities/utdelete.c optional acpi contrib/dev/acpica/components/utilities/uterror.c optional acpi contrib/dev/acpica/components/utilities/uteval.c optional acpi contrib/dev/acpica/components/utilities/utexcep.c optional acpi contrib/dev/acpica/components/utilities/utglobal.c optional acpi contrib/dev/acpica/components/utilities/uthex.c optional acpi contrib/dev/acpica/components/utilities/utids.c optional acpi contrib/dev/acpica/components/utilities/utinit.c optional acpi contrib/dev/acpica/components/utilities/utlock.c optional acpi contrib/dev/acpica/components/utilities/utmath.c optional acpi contrib/dev/acpica/components/utilities/utmisc.c optional acpi contrib/dev/acpica/components/utilities/utmutex.c optional acpi contrib/dev/acpica/components/utilities/utnonansi.c optional acpi contrib/dev/acpica/components/utilities/utobject.c optional acpi contrib/dev/acpica/components/utilities/utosi.c optional acpi contrib/dev/acpica/components/utilities/utownerid.c optional acpi contrib/dev/acpica/components/utilities/utpredef.c optional acpi contrib/dev/acpica/components/utilities/utresdecode.c optional acpi acpi_debug contrib/dev/acpica/components/utilities/utresrc.c optional acpi contrib/dev/acpica/components/utilities/utstate.c optional acpi contrib/dev/acpica/components/utilities/utstring.c optional acpi contrib/dev/acpica/components/utilities/utstrsuppt.c optional acpi contrib/dev/acpica/components/utilities/utstrtoul64.c optional acpi contrib/dev/acpica/components/utilities/utuuid.c optional acpi acpi_debug contrib/dev/acpica/components/utilities/utxface.c optional acpi contrib/dev/acpica/components/utilities/utxferror.c optional acpi contrib/dev/acpica/components/utilities/utxfinit.c optional acpi contrib/dev/acpica/os_specific/service_layers/osgendbg.c optional acpi acpi_debug contrib/ipfilter/netinet/fil.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_auth.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_fil_freebsd.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_frag.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_log.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_nat.c optional ipfilter inet \ compile-with "${NORMAL_C} 
-Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_proxy.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_state.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_lookup.c optional ipfilter inet \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN} -Wno-unused -Wno-error -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_pool.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_htable.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_sync.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/mlfk_ipl.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_nat6.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_rules.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_scan.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_dstlist.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-unused -I$S/contrib/ipfilter" contrib/ipfilter/netinet/radix_ipf.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/libfdt/fdt.c optional fdt contrib/libfdt/fdt_ro.c optional fdt contrib/libfdt/fdt_rw.c optional fdt contrib/libfdt/fdt_strerror.c optional fdt contrib/libfdt/fdt_sw.c optional fdt contrib/libfdt/fdt_wip.c optional fdt contrib/libnv/cnvlist.c standard contrib/libnv/dnvlist.c standard contrib/libnv/nvlist.c standard contrib/libnv/nvpair.c standard contrib/ngatm/netnatm/api/cc_conn.c optional ngatm_ccatm \ compile-with "${NORMAL_C_NOWERROR} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_data.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_dump.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_port.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_sig.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_user.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/unisap.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/straddr.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/unimsg_common.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/traffic.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_ie.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_msg.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_call.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_coord.c optional ngatm_uni \ compile-with 
"${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_party.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_print.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_reset.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_unimsgcpy.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_verify.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" # Zstd contrib/zstd/lib/freebsd/zstd_kmalloc.c standard compile-with ${ZSTD_C} contrib/zstd/lib/common/zstd_common.c standard compile-with ${ZSTD_C} contrib/zstd/lib/common/fse_decompress.c standard compile-with ${ZSTD_C} contrib/zstd/lib/common/entropy_common.c standard compile-with ${ZSTD_C} contrib/zstd/lib/common/error_private.c standard compile-with ${ZSTD_C} contrib/zstd/lib/common/xxhash.c standard compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_compress.c standard compile-with ${ZSTD_C} contrib/zstd/lib/compress/fse_compress.c standard compile-with ${ZSTD_C} contrib/zstd/lib/compress/huf_compress.c standard compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_double_fast.c standard compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_fast.c standard compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_lazy.c standard compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_ldm.c standard compile-with ${ZSTD_C} contrib/zstd/lib/compress/zstd_opt.c standard compile-with ${ZSTD_C} contrib/zstd/lib/decompress/zstd_decompress.c standard compile-with ${ZSTD_C} contrib/zstd/lib/decompress/huf_decompress.c standard compile-with ${ZSTD_C} # Blake 2 contrib/libb2/blake2b-ref.c optional crypto | ipsec | ipsec_support \ compile-with "${NORMAL_C} -I$S/crypto/blake2 ${NO_WCAST_QUAL} -DSUFFIX=_ref" contrib/libb2/blake2s-ref.c optional crypto | ipsec | ipsec_support \ compile-with "${NORMAL_C} -I$S/crypto/blake2 ${NO_WCAST_QUAL} -DSUFFIX=_ref" crypto/blake2/blake2-sw.c optional crypto | ipsec | ipsec_support \ compile-with "${NORMAL_C} -I$S/crypto/blake2 ${NO_WCAST_QUAL}" crypto/blowfish/bf_ecb.c optional ipsec | ipsec_support crypto/blowfish/bf_skey.c optional crypto | ipsec | ipsec_support crypto/camellia/camellia.c optional crypto | ipsec | ipsec_support crypto/camellia/camellia-api.c optional crypto | ipsec | ipsec_support crypto/chacha20/chacha.c optional crypto | ipsec | ipsec_support crypto/chacha20/chacha-sw.c optional crypto | ipsec | ipsec_support crypto/des/des_ecb.c optional crypto | ipsec | ipsec_support | netsmb crypto/des/des_setkey.c optional crypto | ipsec | ipsec_support | netsmb crypto/rc4/rc4.c optional netgraph_mppc_encryption | kgssapi crypto/rijndael/rijndael-alg-fst.c optional crypto | ekcd | geom_bde | \ ipsec | ipsec_support | random !random_loadable | wlan_ccmp crypto/rijndael/rijndael-api-fst.c optional ekcd | geom_bde | random !random_loadable crypto/rijndael/rijndael-api.c optional crypto | ipsec | ipsec_support | \ wlan_ccmp crypto/sha1.c optional carp | crypto | ipsec | \ ipsec_support | netgraph_mppc_encryption | sctp crypto/sha2/sha256c.c optional crypto | ekcd | geom_bde | ipsec | \ ipsec_support | random !random_loadable | sctp | zfs crypto/sha2/sha512c.c optional crypto | geom_bde | ipsec | \ ipsec_support | zfs crypto/skein/skein.c optional crypto | zfs crypto/skein/skein_block.c optional crypto | zfs 
crypto/siphash/siphash.c optional inet | inet6 crypto/siphash/siphash_test.c optional inet | inet6 ddb/db_access.c optional ddb ddb/db_break.c optional ddb ddb/db_capture.c optional ddb ddb/db_command.c optional ddb ddb/db_examine.c optional ddb ddb/db_expr.c optional ddb ddb/db_input.c optional ddb ddb/db_lex.c optional ddb ddb/db_main.c optional ddb ddb/db_output.c optional ddb ddb/db_print.c optional ddb ddb/db_ps.c optional ddb ddb/db_run.c optional ddb ddb/db_script.c optional ddb ddb/db_sym.c optional ddb ddb/db_thread.c optional ddb ddb/db_textdump.c optional ddb ddb/db_variables.c optional ddb ddb/db_watch.c optional ddb ddb/db_write_cmd.c optional ddb dev/aac/aac.c optional aac dev/aac/aac_cam.c optional aacp aac dev/aac/aac_debug.c optional aac dev/aac/aac_disk.c optional aac dev/aac/aac_linux.c optional aac compat_linux dev/aac/aac_pci.c optional aac pci dev/aacraid/aacraid.c optional aacraid dev/aacraid/aacraid_cam.c optional aacraid scbus dev/aacraid/aacraid_debug.c optional aacraid dev/aacraid/aacraid_linux.c optional aacraid compat_linux dev/aacraid/aacraid_pci.c optional aacraid pci dev/acpi_support/acpi_wmi.c optional acpi_wmi acpi dev/acpi_support/acpi_asus.c optional acpi_asus acpi dev/acpi_support/acpi_asus_wmi.c optional acpi_asus_wmi acpi dev/acpi_support/acpi_fujitsu.c optional acpi_fujitsu acpi dev/acpi_support/acpi_hp.c optional acpi_hp acpi dev/acpi_support/acpi_ibm.c optional acpi_ibm acpi dev/acpi_support/acpi_panasonic.c optional acpi_panasonic acpi dev/acpi_support/acpi_sony.c optional acpi_sony acpi dev/acpi_support/acpi_toshiba.c optional acpi_toshiba acpi dev/acpi_support/atk0110.c optional aibs acpi dev/acpica/Osd/OsdDebug.c optional acpi dev/acpica/Osd/OsdHardware.c optional acpi dev/acpica/Osd/OsdInterrupt.c optional acpi dev/acpica/Osd/OsdMemory.c optional acpi dev/acpica/Osd/OsdSchedule.c optional acpi dev/acpica/Osd/OsdStream.c optional acpi dev/acpica/Osd/OsdSynch.c optional acpi dev/acpica/Osd/OsdTable.c optional acpi dev/acpica/acpi.c optional acpi dev/acpica/acpi_acad.c optional acpi dev/acpica/acpi_battery.c optional acpi dev/acpica/acpi_button.c optional acpi dev/acpica/acpi_cmbat.c optional acpi dev/acpica/acpi_cpu.c optional acpi dev/acpica/acpi_ec.c optional acpi dev/acpica/acpi_isab.c optional acpi isa dev/acpica/acpi_lid.c optional acpi dev/acpica/acpi_package.c optional acpi dev/acpica/acpi_perf.c optional acpi dev/acpica/acpi_powerres.c optional acpi dev/acpica/acpi_quirk.c optional acpi dev/acpica/acpi_resource.c optional acpi dev/acpica/acpi_container.c optional acpi dev/acpica/acpi_smbat.c optional acpi dev/acpica/acpi_thermal.c optional acpi dev/acpica/acpi_throttle.c optional acpi dev/acpica/acpi_video.c optional acpi_video acpi dev/acpica/acpi_dock.c optional acpi_dock acpi dev/adlink/adlink.c optional adlink dev/advansys/adv_pci.c optional adv pci dev/advansys/advansys.c optional adv dev/advansys/advlib.c optional adv dev/advansys/advmcode.c optional adv dev/advansys/adw_pci.c optional adw pci dev/advansys/adwcam.c optional adw dev/advansys/adwlib.c optional adw dev/advansys/adwmcode.c optional adw dev/ae/if_ae.c optional ae pci dev/age/if_age.c optional age pci dev/agp/agp.c optional agp pci dev/agp/agp_if.m optional agp pci dev/aha/aha.c optional aha dev/aha/aha_isa.c optional aha isa dev/ahci/ahci.c optional ahci dev/ahci/ahciem.c optional ahci dev/ahci/ahci_pci.c optional ahci pci dev/aic/aic.c optional aic dev/aic/aic_pccard.c optional aic pccard dev/aic7xxx/ahc_isa.c optional ahc isa dev/aic7xxx/ahc_pci.c optional ahc pci \ 
compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/ahd_pci.c optional ahd pci \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/aic7xxx/aic7770.c optional ahc dev/aic7xxx/aic79xx.c optional ahd pci dev/aic7xxx/aic79xx_osm.c optional ahd pci dev/aic7xxx/aic79xx_pci.c optional ahd pci dev/aic7xxx/aic79xx_reg_print.c optional ahd pci ahd_reg_pretty_print dev/aic7xxx/aic7xxx.c optional ahc dev/aic7xxx/aic7xxx_93cx6.c optional ahc dev/aic7xxx/aic7xxx_osm.c optional ahc dev/aic7xxx/aic7xxx_pci.c optional ahc pci dev/aic7xxx/aic7xxx_reg_print.c optional ahc ahc_reg_pretty_print dev/al_eth/al_eth.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" dev/al_eth/al_init_eth_lm.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" dev/al_eth/al_init_eth_kr.c optional al_eth fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_iofic.c optional al_iofic \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_serdes_25g.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_serdes_hssp.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_config.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_debug.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_iofic.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_hal_udma_main.c optional al_udma \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/al_serdes.c optional al_serdes \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/eth/al_hal_eth_kr.c optional al_eth \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" contrib/alpine-hal/eth/al_hal_eth_main.c optional al_eth \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" dev/alc/if_alc.c optional alc pci dev/ale/if_ale.c optional ale pci dev/alpm/alpm.c optional alpm pci dev/altera/avgen/altera_avgen.c optional altera_avgen dev/altera/avgen/altera_avgen_fdt.c optional altera_avgen fdt dev/altera/avgen/altera_avgen_nexus.c optional altera_avgen dev/altera/sdcard/altera_sdcard.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_disk.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_io.c optional altera_sdcard dev/altera/sdcard/altera_sdcard_fdt.c optional altera_sdcard fdt 
dev/altera/sdcard/altera_sdcard_nexus.c optional altera_sdcard dev/altera/pio/pio.c optional altera_pio dev/altera/pio/pio_if.m optional altera_pio dev/amdpm/amdpm.c optional amdpm pci | nfpm pci dev/amdsmb/amdsmb.c optional amdsmb pci dev/amr/amr.c optional amr dev/amr/amr_cam.c optional amrp amr dev/amr/amr_disk.c optional amr dev/amr/amr_linux.c optional amr compat_linux dev/amr/amr_pci.c optional amr pci dev/an/if_an.c optional an dev/an/if_an_isa.c optional an isa dev/an/if_an_pccard.c optional an pccard dev/an/if_an_pci.c optional an pci # dev/ata/ata_if.m optional ata | atacore dev/ata/ata-all.c optional ata | atacore dev/ata/ata-dma.c optional ata | atacore dev/ata/ata-lowlevel.c optional ata | atacore dev/ata/ata-sata.c optional ata | atacore dev/ata/ata-card.c optional ata pccard | atapccard dev/ata/ata-isa.c optional ata isa | ataisa dev/ata/ata-pci.c optional ata pci | atapci dev/ata/chipsets/ata-acard.c optional ata pci | ataacard dev/ata/chipsets/ata-acerlabs.c optional ata pci | ataacerlabs dev/ata/chipsets/ata-amd.c optional ata pci | ataamd dev/ata/chipsets/ata-ati.c optional ata pci | ataati dev/ata/chipsets/ata-cenatek.c optional ata pci | atacenatek dev/ata/chipsets/ata-cypress.c optional ata pci | atacypress dev/ata/chipsets/ata-cyrix.c optional ata pci | atacyrix dev/ata/chipsets/ata-highpoint.c optional ata pci | atahighpoint dev/ata/chipsets/ata-intel.c optional ata pci | ataintel dev/ata/chipsets/ata-ite.c optional ata pci | ataite dev/ata/chipsets/ata-jmicron.c optional ata pci | atajmicron dev/ata/chipsets/ata-marvell.c optional ata pci | atamarvell dev/ata/chipsets/ata-micron.c optional ata pci | atamicron dev/ata/chipsets/ata-national.c optional ata pci | atanational dev/ata/chipsets/ata-netcell.c optional ata pci | atanetcell dev/ata/chipsets/ata-nvidia.c optional ata pci | atanvidia dev/ata/chipsets/ata-promise.c optional ata pci | atapromise dev/ata/chipsets/ata-serverworks.c optional ata pci | ataserverworks dev/ata/chipsets/ata-siliconimage.c optional ata pci | atasiliconimage | ataati dev/ata/chipsets/ata-sis.c optional ata pci | atasis dev/ata/chipsets/ata-via.c optional ata pci | atavia # dev/ath/if_ath_pci.c optional ath_pci pci \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/if_ath_ahb.c optional ath_ahb \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/if_ath.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_alq.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_beacon.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_btcoex.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_btcoex_mci.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_debug.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_descdma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_keycache.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_ioctl.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_led.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_lna_div.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx_edma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tx_ht.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_tdma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_sysctl.c optional ath \ compile-with 
"${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_rx.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_rx_edma.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_spectral.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ah_osdep.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/ath/ath_hal/ah.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v1.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v3.c optional ath_hal | ath_ar5211 | ath_ar5212 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v14.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_v4k.c \ optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_eeprom_9287.c \ optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_hal/ah_regdomain.c optional ath \ compile-with "${NORMAL_C} ${NO_WSHIFT_COUNT_NEGATIVE} ${NO_WSHIFT_COUNT_OVERFLOW} -I$S/dev/ath" # ar5210 dev/ath/ath_hal/ar5210/ar5210_attach.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_beacon.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_interrupts.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_keycache.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_misc.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_phy.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_power.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_recv.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_reset.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5210/ar5210_xmit.c optional ath_hal | ath_ar5210 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5211 dev/ath/ath_hal/ar5211/ar5211_attach.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_beacon.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_interrupts.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_keycache.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_misc.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_phy.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_power.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_recv.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_reset.c optional 
ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5211/ar5211_xmit.c optional ath_hal | ath_ar5211 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5212 dev/ath/ath_hal/ar5212/ar5212_ani.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_attach.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_beacon.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_eeprom.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_gpio.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_interrupts.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_keycache.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_misc.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_phy.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_power.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_recv.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_reset.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_rfgain.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5212_xmit.c \ optional ath_hal | ath_ar5212 | ath_ar5416 | ath_ar9160 | ath_ar9280 | \ ath_ar9285 ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar5416 (depends on ar5212) dev/ath/ath_hal/ar5416/ar5416_ani.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_attach.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_beacon.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} 
-I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_btcoex.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_iq.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcgain.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_cal_adcdc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_eeprom.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_gpio.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_interrupts.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_keycache.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_misc.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_phy.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_power.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_radar.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_recv.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_reset.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_spectral.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar5416_xmit.c \ optional ath_hal | ath_ar5416 | ath_ar9160 | ath_ar9280 | ath_ar9285 | \ ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9130 (depends upon ar5416) - also requires AH_SUPPORT_AR9130 # # Since this is an embedded MAC SoC, there's no need to compile it into the # default HAL. 
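# A minimal sketch, assuming an AR913x embedded board configuration (the
# device and option lines below are illustrative, not part of this change):
# the board kernel config selects this SoC HAL backend explicitly, since it
# is not built into the default ath_hal, e.g.
#	device		ath
#	device		ath_ar5416		# ar9130 depends upon the ar5416 code
#	device		ath_ar9130		# AR913x SoC attach/phy/eeprom glue
#	options 	AH_SUPPORT_AR9130	# required, as noted above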
dev/ath/ath_hal/ar9001/ar9130_attach.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9001/ar9130_phy.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9001/ar9130_eeprom.c optional ath_ar9130 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9160 (depends on ar5416) dev/ath/ath_hal/ar9001/ar9160_attach.c optional ath_hal | ath_ar9160 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9280 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9280_attach.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280_olc.c optional ath_hal | ath_ar9280 | \ ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9285 (depends on ar5416 and ar9280) dev/ath/ath_hal/ar9002/ar9285_attach.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_btcoex.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_reset.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_cal.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_phy.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285_diversity.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9287 (depends on ar5416) dev/ath/ath_hal/ar9002/ar9287_attach.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_reset.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_cal.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287_olc.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ar9300 contrib/dev/ath/ath_hal/ar9300/ar9300_ani.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_attach.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_beacon.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_eeprom.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WCONSTANT_CONVERSION}" contrib/dev/ath/ath_hal/ar9300/ar9300_freebsd.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_gpio.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_interrupts.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_keycache.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} 
-I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_mci.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_misc.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_paprd.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_phy.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_power.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_radar.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_radio.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_recv.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_recv_ds.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_reset.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal ${NO_WSOMETIMES_UNINITIALIZED} -Wno-unused-function" contrib/dev/ath/ath_hal/ar9300/ar9300_stub.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_stub_funcs.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_spectral.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_timer.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_xmit.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" contrib/dev/ath/ath_hal/ar9300/ar9300_xmit_ds.c optional ath_hal | ath_ar9300 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal -I$S/contrib/dev/ath/ath_hal" # rf backends dev/ath/ath_hal/ar5212/ar2316.c optional ath_rf2316 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2317.c optional ath_rf2317 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2413.c optional ath_hal | ath_rf2413 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar2425.c optional ath_hal | ath_rf2425 | ath_rf2417 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5111.c optional ath_hal | ath_rf5111 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5112.c optional ath_hal | ath_rf5112 \ compile-with "${NORMAL_C} -I$S/dev/ath 
-I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5212/ar5413.c optional ath_hal | ath_rf5413 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar5416/ar2133.c optional ath_hal | ath_ar5416 | \ ath_ar9130 | ath_ar9160 | ath_ar9280 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9280.c optional ath_hal | ath_ar9280 | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9285.c optional ath_hal | ath_ar9285 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" dev/ath/ath_hal/ar9002/ar9287.c optional ath_hal | ath_ar9287 \ compile-with "${NORMAL_C} -I$S/dev/ath -I$S/dev/ath/ath_hal" # ath rate control algorithms dev/ath/ath_rate/amrr/amrr.c optional ath_rate_amrr \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/onoe/onoe.c optional ath_rate_onoe \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/sample/sample.c optional ath_rate_sample \ compile-with "${NORMAL_C} -I$S/dev/ath" # ath DFS modules dev/ath/ath_dfs/null/dfs_null.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" # dev/bce/if_bce.c optional bce dev/bfe/if_bfe.c optional bfe dev/bge/if_bge.c optional bge dev/bhnd/bhnd.c optional bhnd dev/bhnd/bhnd_erom.c optional bhnd dev/bhnd/bhnd_erom_if.m optional bhnd dev/bhnd/bhnd_subr.c optional bhnd dev/bhnd/bhnd_bus_if.m optional bhnd dev/bhnd/bhndb/bhnd_bhndb.c optional bhndb bhnd dev/bhnd/bhndb/bhndb.c optional bhndb bhnd dev/bhnd/bhndb/bhndb_bus_if.m optional bhndb bhnd dev/bhnd/bhndb/bhndb_hwdata.c optional bhndb bhnd dev/bhnd/bhndb/bhndb_if.m optional bhndb bhnd dev/bhnd/bhndb/bhndb_pci.c optional bhndb_pci bhndb bhnd pci dev/bhnd/bhndb/bhndb_pci_hwdata.c optional bhndb_pci bhndb bhnd pci dev/bhnd/bhndb/bhndb_pci_sprom.c optional bhndb_pci bhndb bhnd pci dev/bhnd/bhndb/bhndb_subr.c optional bhndb bhnd dev/bhnd/bcma/bcma.c optional bcma bhnd dev/bhnd/bcma/bcma_bhndb.c optional bcma bhnd bhndb dev/bhnd/bcma/bcma_erom.c optional bcma bhnd dev/bhnd/bcma/bcma_subr.c optional bcma bhnd dev/bhnd/cores/chipc/bhnd_chipc_if.m optional bhnd dev/bhnd/cores/chipc/bhnd_sprom_chipc.c optional bhnd dev/bhnd/cores/chipc/bhnd_pmu_chipc.c optional bhnd dev/bhnd/cores/chipc/chipc.c optional bhnd dev/bhnd/cores/chipc/chipc_cfi.c optional bhnd cfi dev/bhnd/cores/chipc/chipc_gpio.c optional bhnd gpio dev/bhnd/cores/chipc/chipc_slicer.c optional bhnd cfi | bhnd spibus dev/bhnd/cores/chipc/chipc_spi.c optional bhnd spibus dev/bhnd/cores/chipc/chipc_subr.c optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.c optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_if.m optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_hostb_if.m optional bhnd dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl_subr.c optional bhnd dev/bhnd/cores/pci/bhnd_pci.c optional bhnd pci dev/bhnd/cores/pci/bhnd_pci_hostb.c optional bhndb bhnd pci dev/bhnd/cores/pci/bhnd_pcib.c optional bhnd_pcib bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2.c optional bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2_hostb.c optional bhndb bhnd pci dev/bhnd/cores/pcie2/bhnd_pcie2b.c optional bhnd_pcie2b bhnd pci dev/bhnd/cores/pmu/bhnd_pmu.c optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_core.c optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_if.m optional bhnd dev/bhnd/cores/pmu/bhnd_pmu_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_bcm.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_bcmraw.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_btxt.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_sprom.c 
optional bhnd dev/bhnd/nvram/bhnd_nvram_data_sprom_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_data_tlv.c optional bhnd dev/bhnd/nvram/bhnd_nvram_if.m optional bhnd dev/bhnd/nvram/bhnd_nvram_io.c optional bhnd dev/bhnd/nvram/bhnd_nvram_iobuf.c optional bhnd dev/bhnd/nvram/bhnd_nvram_ioptr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_iores.c optional bhnd dev/bhnd/nvram/bhnd_nvram_plist.c optional bhnd dev/bhnd/nvram/bhnd_nvram_store.c optional bhnd dev/bhnd/nvram/bhnd_nvram_store_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_subr.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_fmts.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_prf.c optional bhnd dev/bhnd/nvram/bhnd_nvram_value_subr.c optional bhnd dev/bhnd/nvram/bhnd_sprom.c optional bhnd dev/bhnd/siba/siba.c optional siba bhnd dev/bhnd/siba/siba_bhndb.c optional siba bhnd bhndb dev/bhnd/siba/siba_erom.c optional siba bhnd dev/bhnd/siba/siba_subr.c optional siba bhnd # dev/bktr/bktr_audio.c optional bktr pci dev/bktr/bktr_card.c optional bktr pci dev/bktr/bktr_core.c optional bktr pci dev/bktr/bktr_i2c.c optional bktr pci smbus dev/bktr/bktr_os.c optional bktr pci dev/bktr/bktr_tuner.c optional bktr pci dev/bktr/msp34xx.c optional bktr pci dev/bnxt/bnxt_hwrm.c optional bnxt iflib pci dev/bnxt/bnxt_sysctl.c optional bnxt iflib pci dev/bnxt/bnxt_txrx.c optional bnxt iflib pci dev/bnxt/if_bnxt.c optional bnxt iflib pci dev/buslogic/bt.c optional bt dev/buslogic/bt_isa.c optional bt isa dev/buslogic/bt_pci.c optional bt pci dev/bwi/bwimac.c optional bwi dev/bwi/bwiphy.c optional bwi dev/bwi/bwirf.c optional bwi dev/bwi/if_bwi.c optional bwi dev/bwi/if_bwi_pci.c optional bwi pci dev/bwn/if_bwn.c optional bwn bhnd dev/bwn/if_bwn_pci.c optional bwn pci bhnd bhndb bhndb_pci dev/bwn/if_bwn_phy_common.c optional bwn bhnd dev/bwn/if_bwn_phy_g.c optional bwn bhnd dev/bwn/if_bwn_phy_lp.c optional bwn bhnd dev/bwn/if_bwn_phy_n.c optional bwn bhnd dev/bwn/if_bwn_util.c optional bwn bhnd dev/cardbus/cardbus.c optional cardbus dev/cardbus/cardbus_cis.c optional cardbus dev/cardbus/cardbus_device.c optional cardbus dev/cas/if_cas.c optional cas dev/cfi/cfi_bus_fdt.c optional cfi fdt dev/cfi/cfi_bus_nexus.c optional cfi dev/cfi/cfi_core.c optional cfi dev/cfi/cfi_dev.c optional cfi dev/cfi/cfi_disk.c optional cfid dev/chromebook_platform/chromebook_platform.c optional chromebook_platform dev/ciss/ciss.c optional ciss dev/cm/smc90cx6.c optional cm dev/cmx/cmx.c optional cmx dev/cmx/cmx_pccard.c optional cmx pccard dev/cpufreq/ichss.c optional cpufreq pci dev/cs/if_cs.c optional cs dev/cs/if_cs_isa.c optional cs isa dev/cs/if_cs_pccard.c optional cs pccard dev/cxgb/cxgb_main.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/cxgb_sge.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_mc5.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_vsc7323.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_vsc8211.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_ael1002.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_aq100x.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_mv88e1xxx.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_xgmac.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_t3_hw.c optional cxgb pci \ compile-with 
"${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/common/cxgb_tn1010.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/sys/uipc_mvec.c optional cxgb pci \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgb/cxgb_t3fw.c optional cxgb cxgb_t3fw \ compile-with "${NORMAL_C} -I$S/dev/cxgb" dev/cxgbe/t4_if.m optional cxgbe pci dev/cxgbe/t4_iov.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_mp_ring.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_main.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_netmap.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_sched.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_sge.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_l2t.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_tracer.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/t4_vf.c optional cxgbev pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/common/t4_hw.c optional cxgbe pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/common/t4vf_hw.c optional cxgbev pci \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/cudbg_common.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/cudbg_flash_utils.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/cudbg_lib.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/cudbg_wtp.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/fastlz.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cxgbe/cudbg/fastlz_api.c optional cxgbe \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" t4fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t4fw_cfg.fw:t4fw_cfg t4fw_cfg_uwire.fw:t4fw_cfg_uwire t4fw.fw:t4fw -mt4fw_cfg -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "t4fw_cfg.c" t4fw_cfg.fwo optional cxgbe \ dependency "t4fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw_cfg.fwo" t4fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t4fw_cfg.fw" t4fw_cfg_uwire.fwo optional cxgbe \ dependency "t4fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw_cfg_uwire.fwo" t4fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t4fw_cfg_uwire.fw" t4fw.fwo optional cxgbe \ dependency "t4fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t4fw.fwo" t4fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t4fw-1.16.63.0.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "t4fw.fw" t5fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t5fw_cfg.fw:t5fw_cfg t5fw_cfg_uwire.fw:t5fw_cfg_uwire t5fw.fw:t5fw -mt5fw_cfg -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "t5fw_cfg.c" t5fw_cfg.fwo optional cxgbe \ dependency "t5fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t5fw_cfg.fwo" t5fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t5fw_cfg.fw" t5fw_cfg_uwire.fwo optional cxgbe \ dependency "t5fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ 
no-implicit-rule \ clean "t5fw_cfg_uwire.fwo" t5fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t5fw_cfg_uwire.fw" t5fw.fwo optional cxgbe \ dependency "t5fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t5fw.fwo" t5fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t5fw-1.16.63.0.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "t5fw.fw" t6fw_cfg.c optional cxgbe \ compile-with "${AWK} -f $S/tools/fw_stub.awk t6fw_cfg.fw:t6fw_cfg t6fw_cfg_uwire.fw:t6fw_cfg_uwire t6fw.fw:t6fw -mt6fw_cfg -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "t6fw_cfg.c" t6fw_cfg.fwo optional cxgbe \ dependency "t6fw_cfg.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw_cfg.fwo" t6fw_cfg.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw_cfg.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t6fw_cfg.fw" t6fw_cfg_uwire.fwo optional cxgbe \ dependency "t6fw_cfg_uwire.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw_cfg_uwire.fwo" t6fw_cfg_uwire.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw_cfg_uwire.txt" \ compile-with "${CP} ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule \ clean "t6fw_cfg_uwire.fw" t6fw.fwo optional cxgbe \ dependency "t6fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "t6fw.fwo" t6fw.fw optional cxgbe \ dependency "$S/dev/cxgbe/firmware/t6fw-1.16.63.0.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "t6fw.fw" dev/cxgbe/crypto/t4_crypto.c optional ccr \ compile-with "${NORMAL_C} -I$S/dev/cxgbe" dev/cy/cy.c optional cy dev/cy/cy_isa.c optional cy isa dev/cy/cy_pci.c optional cy pci dev/cyapa/cyapa.c optional cyapa iicbus dev/dc/if_dc.c optional dc pci dev/dc/dcphy.c optional dc pci dev/dc/pnphy.c optional dc pci dev/dcons/dcons.c optional dcons dev/dcons/dcons_crom.c optional dcons_crom dev/dcons/dcons_os.c optional dcons dev/de/if_de.c optional de pci dev/dme/if_dme.c optional dme dev/dpt/dpt_pci.c optional dpt pci dev/dpt/dpt_scsi.c optional dpt dev/drm/ati_pcigart.c optional drm dev/drm/drm_agpsupport.c optional drm dev/drm/drm_auth.c optional drm dev/drm/drm_bufs.c optional drm dev/drm/drm_context.c optional drm dev/drm/drm_dma.c optional drm dev/drm/drm_drawable.c optional drm dev/drm/drm_drv.c optional drm dev/drm/drm_fops.c optional drm dev/drm/drm_hashtab.c optional drm dev/drm/drm_ioctl.c optional drm dev/drm/drm_irq.c optional drm dev/drm/drm_lock.c optional drm dev/drm/drm_memory.c optional drm dev/drm/drm_mm.c optional drm dev/drm/drm_pci.c optional drm dev/drm/drm_scatter.c optional drm dev/drm/drm_sman.c optional drm dev/drm/drm_sysctl.c optional drm dev/drm/drm_vm.c optional drm dev/drm/mach64_dma.c optional mach64drm dev/drm/mach64_drv.c optional mach64drm dev/drm/mach64_irq.c optional mach64drm dev/drm/mach64_state.c optional mach64drm dev/drm/mga_dma.c optional mgadrm dev/drm/mga_drv.c optional mgadrm dev/drm/mga_irq.c optional mgadrm dev/drm/mga_state.c optional mgadrm dev/drm/mga_warp.c optional mgadrm dev/drm/r128_cce.c optional r128drm \ compile-with "${NORMAL_C} ${NO_WCONSTANT_CONVERSION}" dev/drm/r128_drv.c optional r128drm dev/drm/r128_irq.c optional r128drm dev/drm/r128_state.c optional r128drm dev/drm/savage_bci.c optional savagedrm dev/drm/savage_drv.c optional savagedrm dev/drm/savage_state.c optional savagedrm dev/drm/sis_drv.c optional sisdrm dev/drm/sis_ds.c 
optional sisdrm dev/drm/sis_mm.c optional sisdrm dev/drm/tdfx_drv.c optional tdfxdrm dev/drm/via_dma.c optional viadrm dev/drm/via_dmablit.c optional viadrm dev/drm/via_drv.c optional viadrm dev/drm/via_irq.c optional viadrm dev/drm/via_map.c optional viadrm dev/drm/via_mm.c optional viadrm dev/drm/via_verifier.c optional viadrm dev/drm/via_video.c optional viadrm dev/drm2/drm_agpsupport.c optional drm2 dev/drm2/drm_auth.c optional drm2 dev/drm2/drm_bufs.c optional drm2 dev/drm2/drm_buffer.c optional drm2 dev/drm2/drm_context.c optional drm2 dev/drm2/drm_crtc.c optional drm2 dev/drm2/drm_crtc_helper.c optional drm2 dev/drm2/drm_dma.c optional drm2 dev/drm2/drm_dp_helper.c optional drm2 dev/drm2/drm_dp_iic_helper.c optional drm2 dev/drm2/drm_drv.c optional drm2 dev/drm2/drm_edid.c optional drm2 dev/drm2/drm_fb_helper.c optional drm2 dev/drm2/drm_fops.c optional drm2 dev/drm2/drm_gem.c optional drm2 dev/drm2/drm_gem_names.c optional drm2 dev/drm2/drm_global.c optional drm2 dev/drm2/drm_hashtab.c optional drm2 dev/drm2/drm_ioctl.c optional drm2 dev/drm2/drm_irq.c optional drm2 dev/drm2/drm_linux_list_sort.c optional drm2 dev/drm2/drm_lock.c optional drm2 dev/drm2/drm_memory.c optional drm2 dev/drm2/drm_mm.c optional drm2 dev/drm2/drm_modes.c optional drm2 dev/drm2/drm_pci.c optional drm2 dev/drm2/drm_platform.c optional drm2 dev/drm2/drm_scatter.c optional drm2 dev/drm2/drm_stub.c optional drm2 dev/drm2/drm_sysctl.c optional drm2 dev/drm2/drm_vm.c optional drm2 dev/drm2/drm_os_freebsd.c optional drm2 dev/drm2/ttm/ttm_agp_backend.c optional drm2 dev/drm2/ttm/ttm_lock.c optional drm2 dev/drm2/ttm/ttm_object.c optional drm2 dev/drm2/ttm/ttm_tt.c optional drm2 dev/drm2/ttm/ttm_bo_util.c optional drm2 dev/drm2/ttm/ttm_bo.c optional drm2 dev/drm2/ttm/ttm_bo_manager.c optional drm2 dev/drm2/ttm/ttm_execbuf_util.c optional drm2 dev/drm2/ttm/ttm_memory.c optional drm2 dev/drm2/ttm/ttm_page_alloc.c optional drm2 dev/drm2/ttm/ttm_bo_vm.c optional drm2 dev/drm2/ati_pcigart.c optional drm2 agp pci dev/ed/if_ed.c optional ed dev/ed/if_ed_novell.c optional ed dev/ed/if_ed_rtl80x9.c optional ed dev/ed/if_ed_pccard.c optional ed pccard dev/ed/if_ed_pci.c optional ed pci dev/efidev/efidev.c optional efirt dev/efidev/efirt.c optional efirt dev/efidev/efirtc.c optional efirt dev/e1000/if_em.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/em_txrx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/igb_txrx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_80003es2lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82540.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82541.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82542.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82543.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82571.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_82575.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_ich8lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_i210.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_api.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mac.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_manage.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_nvm.c optional em \ compile-with "${NORMAL_C} 
-I$S/dev/e1000" dev/e1000/e1000_phy.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_vf.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_mbx.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/e1000/e1000_osdep.c optional em \ compile-with "${NORMAL_C} -I$S/dev/e1000" dev/et/if_et.c optional et dev/ena/ena.c optional ena \ compile-with "${NORMAL_C} -I$S/contrib" dev/ena/ena_sysctl.c optional ena \ compile-with "${NORMAL_C} -I$S/contrib" contrib/ena-com/ena_com.c optional ena contrib/ena-com/ena_eth_com.c optional ena dev/ep/if_ep.c optional ep dev/ep/if_ep_isa.c optional ep isa dev/ep/if_ep_pccard.c optional ep pccard dev/esp/esp_pci.c optional esp pci dev/esp/ncr53c9x.c optional esp dev/etherswitch/arswitch/arswitch.c optional arswitch dev/etherswitch/arswitch/arswitch_reg.c optional arswitch dev/etherswitch/arswitch/arswitch_phy.c optional arswitch dev/etherswitch/arswitch/arswitch_8216.c optional arswitch dev/etherswitch/arswitch/arswitch_8226.c optional arswitch dev/etherswitch/arswitch/arswitch_8316.c optional arswitch dev/etherswitch/arswitch/arswitch_8327.c optional arswitch dev/etherswitch/arswitch/arswitch_7240.c optional arswitch dev/etherswitch/arswitch/arswitch_9340.c optional arswitch dev/etherswitch/arswitch/arswitch_vlans.c optional arswitch dev/etherswitch/etherswitch.c optional etherswitch dev/etherswitch/etherswitch_if.m optional etherswitch dev/etherswitch/ip17x/ip17x.c optional ip17x dev/etherswitch/ip17x/ip175c.c optional ip17x dev/etherswitch/ip17x/ip175d.c optional ip17x dev/etherswitch/ip17x/ip17x_phy.c optional ip17x dev/etherswitch/ip17x/ip17x_vlans.c optional ip17x dev/etherswitch/miiproxy.c optional miiproxy dev/etherswitch/rtl8366/rtl8366rb.c optional rtl8366rb dev/etherswitch/e6000sw/e6000sw.c optional e6000sw dev/etherswitch/e6000sw/e6060sw.c optional e6060sw dev/etherswitch/infineon/adm6996fc.c optional adm6996fc dev/etherswitch/micrel/ksz8995ma.c optional ksz8995ma dev/etherswitch/ukswitch/ukswitch.c optional ukswitch dev/evdev/cdev.c optional evdev dev/evdev/evdev.c optional evdev dev/evdev/evdev_mt.c optional evdev dev/evdev/evdev_utils.c optional evdev dev/evdev/uinput.c optional evdev uinput dev/ex/if_ex.c optional ex dev/ex/if_ex_isa.c optional ex isa dev/ex/if_ex_pccard.c optional ex pccard dev/exca/exca.c optional cbb dev/extres/clk/clk.c optional ext_resources clk fdt dev/extres/clk/clkdev_if.m optional ext_resources clk fdt dev/extres/clk/clknode_if.m optional ext_resources clk fdt dev/extres/clk/clk_bus.c optional ext_resources clk fdt dev/extres/clk/clk_div.c optional ext_resources clk fdt dev/extres/clk/clk_fixed.c optional ext_resources clk fdt dev/extres/clk/clk_gate.c optional ext_resources clk fdt dev/extres/clk/clk_mux.c optional ext_resources clk fdt dev/extres/phy/phy.c optional ext_resources phy fdt dev/extres/phy/phydev_if.m optional ext_resources phy fdt dev/extres/phy/phynode_if.m optional ext_resources phy fdt dev/extres/hwreset/hwreset.c optional ext_resources hwreset fdt dev/extres/hwreset/hwreset_if.m optional ext_resources hwreset fdt dev/extres/regulator/regdev_if.m optional ext_resources regulator fdt dev/extres/regulator/regnode_if.m optional ext_resources regulator fdt dev/extres/regulator/regulator.c optional ext_resources regulator fdt dev/extres/regulator/regulator_bus.c optional ext_resources regulator fdt dev/extres/regulator/regulator_fixed.c optional ext_resources regulator fdt dev/extres/syscon/syscon.c optional ext_resources syscon 
dev/extres/syscon/syscon_generic.c optional ext_resources syscon fdt dev/extres/syscon/syscon_if.m optional ext_resources syscon dev/fb/fbd.c optional fbd | vt dev/fb/fb_if.m standard dev/fb/splash.c optional sc splash dev/fdt/fdt_clock.c optional fdt fdt_clock dev/fdt/fdt_clock_if.m optional fdt fdt_clock dev/fdt/fdt_common.c optional fdt dev/fdt/fdt_pinctrl.c optional fdt fdt_pinctrl dev/fdt/fdt_pinctrl_if.m optional fdt fdt_pinctrl dev/fdt/fdt_slicer.c optional fdt cfi | fdt nand | fdt mx25l dev/fdt/fdt_static_dtb.S optional fdt fdt_dtb_static \ dependency "fdt_dtb_file" dev/fdt/simplebus.c optional fdt dev/fe/if_fe.c optional fe dev/fe/if_fe_pccard.c optional fe pccard dev/filemon/filemon.c optional filemon dev/firewire/firewire.c optional firewire dev/firewire/fwcrom.c optional firewire dev/firewire/fwdev.c optional firewire dev/firewire/fwdma.c optional firewire dev/firewire/fwmem.c optional firewire dev/firewire/fwohci.c optional firewire dev/firewire/fwohci_pci.c optional firewire pci dev/firewire/if_fwe.c optional fwe dev/firewire/if_fwip.c optional fwip dev/firewire/sbp.c optional sbp dev/firewire/sbp_targ.c optional sbp_targ dev/flash/at45d.c optional at45d dev/flash/mx25l.c optional mx25l dev/fxp/if_fxp.c optional fxp dev/fxp/inphy.c optional fxp dev/gem/if_gem.c optional gem dev/gem/if_gem_pci.c optional gem pci dev/gem/if_gem_sbus.c optional gem sbus dev/gpio/gpiobacklight.c optional gpiobacklight fdt dev/gpio/gpiokeys.c optional gpiokeys fdt dev/gpio/gpiokeys_codes.c optional gpiokeys fdt dev/gpio/gpiobus.c optional gpio \ dependency "gpiobus_if.h" dev/gpio/gpioc.c optional gpio \ dependency "gpio_if.h" dev/gpio/gpioiic.c optional gpioiic dev/gpio/gpioled.c optional gpioled !fdt dev/gpio/gpioled_fdt.c optional gpioled fdt dev/gpio/gpiopower.c optional gpiopower fdt dev/gpio/gpioregulator.c optional gpioregulator fdt ext_resources dev/gpio/gpiospi.c optional gpiospi dev/gpio/gpioths.c optional gpioths dev/gpio/gpio_if.m optional gpio dev/gpio/gpiobus_if.m optional gpio dev/gpio/gpiopps.c optional gpiopps dev/gpio/ofw_gpiobus.c optional fdt gpio dev/hifn/hifn7751.c optional hifn dev/hme/if_hme.c optional hme dev/hme/if_hme_pci.c optional hme pci dev/hme/if_hme_sbus.c optional hme sbus dev/hptiop/hptiop.c optional hptiop scbus dev/hwpmc/hwpmc_logging.c optional hwpmc dev/hwpmc/hwpmc_mod.c optional hwpmc dev/hwpmc/hwpmc_soft.c optional hwpmc dev/ichiic/ig4_acpi.c optional ig4 acpi iicbus dev/ichiic/ig4_iic.c optional ig4 iicbus dev/ichiic/ig4_pci.c optional ig4 pci iicbus dev/ichsmb/ichsmb.c optional ichsmb dev/ichsmb/ichsmb_pci.c optional ichsmb pci dev/ida/ida.c optional ida dev/ida/ida_disk.c optional ida dev/ida/ida_pci.c optional ida pci dev/iicbus/ad7418.c optional ad7418 dev/iicbus/ds1307.c optional ds1307 dev/iicbus/ds13rtc.c optional ds13rtc | ds133x | ds1374 dev/iicbus/ds1672.c optional ds1672 dev/iicbus/ds3231.c optional ds3231 dev/iicbus/rtc8583.c optional rtc8583 dev/iicbus/icee.c optional icee dev/iicbus/if_ic.c optional ic dev/iicbus/iic.c optional iic dev/iicbus/iic_recover_bus.c optional iicbus dev/iicbus/iicbb.c optional iicbb dev/iicbus/iicbb_if.m optional iicbb dev/iicbus/iicbus.c optional iicbus dev/iicbus/iicbus_if.m optional iicbus dev/iicbus/iiconf.c optional iicbus dev/iicbus/iicsmb.c optional iicsmb \ dependency "iicbus_if.h" dev/iicbus/iicoc.c optional iicoc dev/iicbus/isl12xx.c optional isl12xx dev/iicbus/lm75.c optional lm75 dev/iicbus/nxprtc.c optional nxprtc | pcf8563 dev/iicbus/ofw_iicbus.c optional fdt iicbus dev/iicbus/s35390a.c optional s35390a 
dev/iir/iir.c optional iir dev/iir/iir_ctrl.c optional iir dev/iir/iir_pci.c optional iir pci dev/intpm/intpm.c optional intpm pci # XXX Work around clang warning, until maintainer approves fix. dev/ips/ips.c optional ips \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/ips/ips_commands.c optional ips dev/ips/ips_disk.c optional ips dev/ips/ips_ioctl.c optional ips dev/ips/ips_pci.c optional ips pci dev/ipw/if_ipw.c optional ipw ipwbssfw.c optional ipwbssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_bss.fw:ipw_bss:130 -lintel_ipw -mipw_bss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwbssfw.c" ipw_bss.fwo optional ipwbssfw | ipwfw \ dependency "ipw_bss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_bss.fwo" ipw_bss.fw optional ipwbssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_bss.fw" ipwibssfw.c optional ipwibssfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_ibss.fw:ipw_ibss:130 -lintel_ipw -mipw_ibss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwibssfw.c" ipw_ibss.fwo optional ipwibssfw | ipwfw \ dependency "ipw_ibss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_ibss.fwo" ipw_ibss.fw optional ipwibssfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-i.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_ibss.fw" ipwmonitorfw.c optional ipwmonitorfw | ipwfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk ipw_monitor.fw:ipw_monitor:130 -lintel_ipw -mipw_monitor -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ipwmonitorfw.c" ipw_monitor.fwo optional ipwmonitorfw | ipwfw \ dependency "ipw_monitor.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ipw_monitor.fwo" ipw_monitor.fw optional ipwmonitorfw | ipwfw \ dependency "$S/contrib/dev/ipw/ipw2100-1.3-p.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "ipw_monitor.fw" dev/iscsi/icl.c optional iscsi dev/iscsi/icl_conn_if.m optional cfiscsi | iscsi dev/iscsi/icl_soft.c optional iscsi dev/iscsi/icl_soft_proxy.c optional iscsi dev/iscsi/iscsi.c optional iscsi scbus dev/iscsi_initiator/iscsi.c optional iscsi_initiator scbus dev/iscsi_initiator/iscsi_subr.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_cam.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_soc.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_sm.c optional iscsi_initiator scbus dev/iscsi_initiator/isc_subr.c optional iscsi_initiator scbus dev/ismt/ismt.c optional ismt dev/isl/isl.c optional isl iicbus dev/isp/isp.c optional isp dev/isp/isp_freebsd.c optional isp dev/isp/isp_library.c optional isp dev/isp/isp_pci.c optional isp pci dev/isp/isp_sbus.c optional isp sbus dev/isp/isp_target.c optional isp dev/ispfw/ispfw.c optional ispfw dev/iwi/if_iwi.c optional iwi iwibssfw.c optional iwibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_bss.fw:iwi_bss:300 -lintel_iwi -miwi_bss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwibssfw.c" iwi_bss.fwo optional iwibssfw | iwifw \ dependency "iwi_bss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_bss.fwo" iwi_bss.fw optional iwibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-bss.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_bss.fw" iwiibssfw.c optional iwiibssfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_ibss.fw:iwi_ibss:300 -lintel_iwi 
-miwi_ibss -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwiibssfw.c" iwi_ibss.fwo optional iwiibssfw | iwifw \ dependency "iwi_ibss.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_ibss.fwo" iwi_ibss.fw optional iwiibssfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-ibss.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_ibss.fw" iwimonitorfw.c optional iwimonitorfw | iwifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwi_monitor.fw:iwi_monitor:300 -lintel_iwi -miwi_monitor -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwimonitorfw.c" iwi_monitor.fwo optional iwimonitorfw | iwifw \ dependency "iwi_monitor.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwi_monitor.fwo" iwi_monitor.fw optional iwimonitorfw | iwifw \ dependency "$S/contrib/dev/iwi/ipw2200-sniffer.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwi_monitor.fw" dev/iwm/if_iwm.c optional iwm dev/iwm/if_iwm_7000.c optional iwm dev/iwm/if_iwm_8000.c optional iwm dev/iwm/if_iwm_binding.c optional iwm dev/iwm/if_iwm_fw.c optional iwm dev/iwm/if_iwm_led.c optional iwm dev/iwm/if_iwm_mac_ctxt.c optional iwm dev/iwm/if_iwm_notif_wait.c optional iwm dev/iwm/if_iwm_pcie_trans.c optional iwm dev/iwm/if_iwm_phy_ctxt.c optional iwm dev/iwm/if_iwm_phy_db.c optional iwm dev/iwm/if_iwm_power.c optional iwm dev/iwm/if_iwm_scan.c optional iwm dev/iwm/if_iwm_sf.c optional iwm dev/iwm/if_iwm_sta.c optional iwm dev/iwm/if_iwm_time_event.c optional iwm dev/iwm/if_iwm_util.c optional iwm iwm3160fw.c optional iwm3160fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3160.fw:iwm3160fw -miwm3160fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm3160fw.c" iwm3160fw.fwo optional iwm3160fw | iwmfw \ dependency "iwm3160.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm3160fw.fwo" iwm3160.fw optional iwm3160fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-3160-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm3160.fw" iwm3168fw.c optional iwm3168fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm3168.fw:iwm3168fw -miwm3168fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm3168fw.c" iwm3168fw.fwo optional iwm3168fw | iwmfw \ dependency "iwm3168.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm3168fw.fwo" iwm3168.fw optional iwm3168fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-3168-22.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm3168.fw" iwm7260fw.c optional iwm7260fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7260.fw:iwm7260fw -miwm7260fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm7260fw.c" iwm7260fw.fwo optional iwm7260fw | iwmfw \ dependency "iwm7260.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7260fw.fwo" iwm7260.fw optional iwm7260fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7260-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7260.fw" iwm7265fw.c optional iwm7265fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265.fw:iwm7265fw -miwm7265fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm7265fw.c" iwm7265fw.fwo optional iwm7265fw | iwmfw \ dependency "iwm7265.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7265fw.fwo" iwm7265.fw optional iwm7265fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7265-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj 
no-implicit-rule \ clean "iwm7265.fw" iwm7265Dfw.c optional iwm7265Dfw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm7265D.fw:iwm7265Dfw -miwm7265Dfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm7265Dfw.c" iwm7265Dfw.fwo optional iwm7265Dfw | iwmfw \ dependency "iwm7265D.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm7265Dfw.fwo" iwm7265D.fw optional iwm7265Dfw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-7265D-17.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm7265D.fw" iwm8000Cfw.c optional iwm8000Cfw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8000C.fw:iwm8000Cfw -miwm8000Cfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm8000Cfw.c" iwm8000Cfw.fwo optional iwm8000Cfw | iwmfw \ dependency "iwm8000C.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm8000Cfw.fwo" iwm8000C.fw optional iwm8000Cfw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-8000C-16.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm8000C.fw" iwm8265.fw optional iwm8265fw | iwmfw \ dependency "$S/contrib/dev/iwm/iwm-8265-22.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwm8265.fw" iwm8265fw.c optional iwm8265fw | iwmfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwm8265.fw:iwm8265fw -miwm8265fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwm8265fw.c" iwm8265fw.fwo optional iwm8265fw | iwmfw \ dependency "iwm8265.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwm8265fw.fwo" dev/iwn/if_iwn.c optional iwn iwn1000fw.c optional iwn1000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn1000.fw:iwn1000fw -miwn1000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn1000fw.c" iwn1000fw.fwo optional iwn1000fw | iwnfw \ dependency "iwn1000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn1000fw.fwo" iwn1000.fw optional iwn1000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-1000-39.31.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn1000.fw" iwn100fw.c optional iwn100fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn100.fw:iwn100fw -miwn100fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn100fw.c" iwn100fw.fwo optional iwn100fw | iwnfw \ dependency "iwn100.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn100fw.fwo" iwn100.fw optional iwn100fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-100-39.31.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn100.fw" iwn105fw.c optional iwn105fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn105.fw:iwn105fw -miwn105fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn105fw.c" iwn105fw.fwo optional iwn105fw | iwnfw \ dependency "iwn105.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn105fw.fwo" iwn105.fw optional iwn105fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-105-6-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn105.fw" iwn135fw.c optional iwn135fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn135.fw:iwn135fw -miwn135fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn135fw.c" iwn135fw.fwo optional iwn135fw | iwnfw \ dependency "iwn135.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn135fw.fwo" iwn135.fw optional iwn135fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-135-6-18.168.6.1.fw.uu" \ 
compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn135.fw" iwn2000fw.c optional iwn2000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2000.fw:iwn2000fw -miwn2000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn2000fw.c" iwn2000fw.fwo optional iwn2000fw | iwnfw \ dependency "iwn2000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn2000fw.fwo" iwn2000.fw optional iwn2000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-2000-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn2000.fw" iwn2030fw.c optional iwn2030fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn2030.fw:iwn2030fw -miwn2030fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn2030fw.c" iwn2030fw.fwo optional iwn2030fw | iwnfw \ dependency "iwn2030.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn2030fw.fwo" iwn2030.fw optional iwn2030fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwnwifi-2030-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn2030.fw" iwn4965fw.c optional iwn4965fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn4965.fw:iwn4965fw -miwn4965fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn4965fw.c" iwn4965fw.fwo optional iwn4965fw | iwnfw \ dependency "iwn4965.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn4965fw.fwo" iwn4965.fw optional iwn4965fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-4965-228.61.2.24.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn4965.fw" iwn5000fw.c optional iwn5000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5000.fw:iwn5000fw -miwn5000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn5000fw.c" iwn5000fw.fwo optional iwn5000fw | iwnfw \ dependency "iwn5000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn5000fw.fwo" iwn5000.fw optional iwn5000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5000-8.83.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn5000.fw" iwn5150fw.c optional iwn5150fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn5150.fw:iwn5150fw -miwn5150fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn5150fw.c" iwn5150fw.fwo optional iwn5150fw | iwnfw \ dependency "iwn5150.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn5150fw.fwo" iwn5150.fw optional iwn5150fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-5150-8.24.2.2.fw.uu"\ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn5150.fw" iwn6000fw.c optional iwn6000fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000.fw:iwn6000fw -miwn6000fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000fw.c" iwn6000fw.fwo optional iwn6000fw | iwnfw \ dependency "iwn6000.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000fw.fwo" iwn6000.fw optional iwn6000fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000-9.221.4.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000.fw" iwn6000g2afw.c optional iwn6000g2afw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2a.fw:iwn6000g2afw -miwn6000g2afw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000g2afw.c" iwn6000g2afw.fwo optional iwn6000g2afw | iwnfw \ dependency "iwn6000g2a.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000g2afw.fwo" iwn6000g2a.fw optional iwn6000g2afw 
| iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2a-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000g2a.fw" iwn6000g2bfw.c optional iwn6000g2bfw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6000g2b.fw:iwn6000g2bfw -miwn6000g2bfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6000g2bfw.c" iwn6000g2bfw.fwo optional iwn6000g2bfw | iwnfw \ dependency "iwn6000g2b.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6000g2bfw.fwo" iwn6000g2b.fw optional iwn6000g2bfw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6000g2b-18.168.6.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6000g2b.fw" iwn6050fw.c optional iwn6050fw | iwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk iwn6050.fw:iwn6050fw -miwn6050fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "iwn6050fw.c" iwn6050fw.fwo optional iwn6050fw | iwnfw \ dependency "iwn6050.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "iwn6050fw.fwo" iwn6050.fw optional iwn6050fw | iwnfw \ dependency "$S/contrib/dev/iwn/iwlwifi-6050-41.28.5.1.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "iwn6050.fw" dev/ixgb/if_ixgb.c optional ixgb dev/ixgb/ixgb_ee.c optional ixgb dev/ixgb/ixgb_hw.c optional ixgb dev/ixgbe/if_ix.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP" dev/ixgbe/if_ixv.c optional ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP" dev/ixgbe/if_bypass.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/if_fdir.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/if_sriov.c optional ix inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ix_txrx.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_osdep.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_phy.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_api.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_common.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_mbx.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_vf.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82598.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82599.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_x540.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_x550.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb_82598.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_dcb_82599.c optional ix inet | ixv inet \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/jedec_dimm/jedec_dimm.c optional jedec_dimm smbus dev/jedec_ts/jedec_ts.c optional jedec_ts smbus dev/jme/if_jme.c optional jme pci dev/joy/joy.c optional joy dev/joy/joy_isa.c optional joy isa dev/kbd/kbd.c optional atkbd | pckbd | sc | ukbd | vt dev/kbdmux/kbdmux.c optional kbdmux dev/ksyms/ksyms.c optional ksyms dev/le/am7990.c optional le dev/le/am79900.c optional le dev/le/if_le_pci.c optional le pci dev/le/lance.c optional le dev/led/led.c standard 
dev/lge/if_lge.c optional lge dev/liquidio/base/cn23xx_pf_device.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_console.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_ctrl.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_device.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_droq.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_mem_ops.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_request_manager.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/base/lio_response_manager.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_core.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_ioctl.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_main.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_rss.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_rxtx.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" dev/liquidio/lio_sysctl.c optional lio \ compile-with "${NORMAL_C} \ -I$S/dev/liquidio -I$S/dev/liquidio/base -DSMP" lio.c optional lio \ compile-with "${AWK} -f $S/tools/fw_stub.awk lio_23xx_nic.bin.fw:lio_23xx_nic.bin -mlio_23xx_nic.bin -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "lio.c" lio_23xx_nic.bin.fw.fwo optional lio \ dependency "lio_23xx_nic.bin.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "lio_23xx_nic.bin.fw.fwo" lio_23xx_nic.bin.fw optional lio \ dependency "$S/contrib/dev/liquidio/lio_23xx_nic.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "lio_23xx_nic.bin.fw" dev/lmc/if_lmc.c optional lmc dev/malo/if_malo.c optional malo dev/malo/if_malohal.c optional malo dev/malo/if_malo_pci.c optional malo pci dev/mc146818/mc146818.c optional mc146818 dev/md/md.c optional md dev/mdio/mdio_if.m optional miiproxy | mdio dev/mdio/mdio.c optional miiproxy | mdio dev/mem/memdev.c optional mem dev/mem/memutil.c optional mem dev/mfi/mfi.c optional mfi dev/mfi/mfi_debug.c optional mfi dev/mfi/mfi_pci.c optional mfi pci dev/mfi/mfi_disk.c optional mfi dev/mfi/mfi_syspd.c optional mfi dev/mfi/mfi_tbolt.c optional mfi dev/mfi/mfi_linux.c optional mfi compat_linux dev/mfi/mfi_cam.c optional mfip scbus dev/mii/acphy.c optional miibus | acphy dev/mii/amphy.c optional miibus | amphy dev/mii/atphy.c optional miibus | atphy dev/mii/axphy.c optional miibus | axphy dev/mii/bmtphy.c optional miibus | bmtphy dev/mii/brgphy.c optional miibus | brgphy dev/mii/ciphy.c optional miibus | ciphy dev/mii/e1000phy.c optional miibus | e1000phy dev/mii/gentbi.c optional miibus | gentbi dev/mii/icsphy.c optional miibus | icsphy dev/mii/ip1000phy.c optional miibus | ip1000phy dev/mii/jmphy.c optional miibus | jmphy dev/mii/lxtphy.c optional miibus | lxtphy dev/mii/micphy.c optional miibus fdt | micphy fdt dev/mii/mii.c optional miibus | mii dev/mii/mii_bitbang.c optional miibus | 
mii_bitbang dev/mii/mii_physubr.c optional miibus | mii dev/mii/mii_fdt.c optional miibus fdt | mii fdt dev/mii/miibus_if.m optional miibus | mii dev/mii/mlphy.c optional miibus | mlphy dev/mii/nsgphy.c optional miibus | nsgphy dev/mii/nsphy.c optional miibus | nsphy dev/mii/nsphyter.c optional miibus | nsphyter dev/mii/pnaphy.c optional miibus | pnaphy dev/mii/qsphy.c optional miibus | qsphy dev/mii/rdcphy.c optional miibus | rdcphy dev/mii/rgephy.c optional miibus | rgephy dev/mii/rlphy.c optional miibus | rlphy dev/mii/rlswitch.c optional rlswitch dev/mii/smcphy.c optional miibus | smcphy dev/mii/smscphy.c optional miibus | smscphy dev/mii/tdkphy.c optional miibus | tdkphy dev/mii/tlphy.c optional miibus | tlphy dev/mii/truephy.c optional miibus | truephy dev/mii/ukphy.c optional miibus | mii dev/mii/ukphy_subr.c optional miibus | mii dev/mii/vscphy.c optional miibus | vscphy dev/mii/xmphy.c optional miibus | xmphy dev/mk48txx/mk48txx.c optional mk48txx dev/mlx/mlx.c optional mlx dev/mlx/mlx_disk.c optional mlx dev/mlx/mlx_pci.c optional mlx pci dev/mly/mly.c optional mly dev/mmc/mmc_subr.c optional mmc | mmcsd !mmccam dev/mmc/mmc.c optional mmc !mmccam dev/mmc/mmcbr_if.m standard dev/mmc/mmcbus_if.m standard dev/mmc/mmcsd.c optional mmcsd !mmccam dev/mmcnull/mmcnull.c optional mmcnull dev/mn/if_mn.c optional mn pci dev/mpr/mpr.c optional mpr dev/mpr/mpr_config.c optional mpr # XXX Work around clang warning, until maintainer approves fix. dev/mpr/mpr_mapping.c optional mpr \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/mpr/mpr_pci.c optional mpr pci dev/mpr/mpr_sas.c optional mpr \ compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}" dev/mpr/mpr_sas_lsi.c optional mpr dev/mpr/mpr_table.c optional mpr dev/mpr/mpr_user.c optional mpr dev/mps/mps.c optional mps dev/mps/mps_config.c optional mps # XXX Work around clang warning, until maintainer approves fix. 
dev/mps/mps_mapping.c optional mps \ compile-with "${NORMAL_C} ${NO_WSOMETIMES_UNINITIALIZED}" dev/mps/mps_pci.c optional mps pci dev/mps/mps_sas.c optional mps \ compile-with "${NORMAL_C} ${NO_WUNNEEDED_INTERNAL_DECL}" dev/mps/mps_sas_lsi.c optional mps dev/mps/mps_table.c optional mps dev/mps/mps_user.c optional mps dev/mpt/mpt.c optional mpt dev/mpt/mpt_cam.c optional mpt dev/mpt/mpt_debug.c optional mpt dev/mpt/mpt_pci.c optional mpt pci dev/mpt/mpt_raid.c optional mpt dev/mpt/mpt_user.c optional mpt dev/mrsas/mrsas.c optional mrsas dev/mrsas/mrsas_cam.c optional mrsas dev/mrsas/mrsas_ioctl.c optional mrsas dev/mrsas/mrsas_fp.c optional mrsas dev/msk/if_msk.c optional msk dev/mvs/mvs.c optional mvs dev/mvs/mvs_if.m optional mvs dev/mvs/mvs_pci.c optional mvs pci dev/mwl/if_mwl.c optional mwl dev/mwl/if_mwl_pci.c optional mwl pci dev/mwl/mwlhal.c optional mwl mwlfw.c optional mwlfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk mw88W8363.fw:mw88W8363fw mwlboot.fw:mwlboot -mmwl -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "mwlfw.c" mw88W8363.fwo optional mwlfw \ dependency "mw88W8363.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "mw88W8363.fwo" mw88W8363.fw optional mwlfw \ dependency "$S/contrib/dev/mwl/mw88W8363.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "mw88W8363.fw" mwlboot.fwo optional mwlfw \ dependency "mwlboot.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "mwlboot.fwo" mwlboot.fw optional mwlfw \ dependency "$S/contrib/dev/mwl/mwlboot.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "mwlboot.fw" dev/mxge/if_mxge.c optional mxge pci dev/mxge/mxge_eth_z8e.c optional mxge pci dev/mxge/mxge_ethp_z8e.c optional mxge pci dev/mxge/mxge_rss_eth_z8e.c optional mxge pci dev/mxge/mxge_rss_ethp_z8e.c optional mxge pci dev/my/if_my.c optional my dev/nand/nand.c optional nand dev/nand/nand_bbt.c optional nand dev/nand/nand_cdev.c optional nand dev/nand/nand_generic.c optional nand dev/nand/nand_geom.c optional nand dev/nand/nand_id.c optional nand dev/nand/nandbus.c optional nand dev/nand/nandbus_if.m optional nand dev/nand/nand_if.m optional nand dev/nand/nandsim.c optional nandsim nand dev/nand/nandsim_chip.c optional nandsim nand dev/nand/nandsim_ctrl.c optional nandsim nand dev/nand/nandsim_log.c optional nandsim nand dev/nand/nandsim_swap.c optional nandsim nand dev/nand/nfc_if.m optional nand dev/ncr/ncr.c optional ncr pci dev/ncv/ncr53c500.c optional ncv dev/ncv/ncr53c500_pccard.c optional ncv pccard dev/netmap/if_ptnet.c optional netmap inet dev/netmap/netmap.c optional netmap dev/netmap/netmap_freebsd.c optional netmap dev/netmap/netmap_generic.c optional netmap dev/netmap/netmap_mbq.c optional netmap dev/netmap/netmap_mem2.c optional netmap dev/netmap/netmap_monitor.c optional netmap dev/netmap/netmap_offloadings.c optional netmap dev/netmap/netmap_pipe.c optional netmap dev/netmap/netmap_pt.c optional netmap dev/netmap/netmap_vale.c optional netmap +dev/netmap/netmap_legacy.c optional netmap # compile-with "${NORMAL_C} -Wconversion -Wextra" dev/nfsmb/nfsmb.c optional nfsmb pci dev/nge/if_nge.c optional nge dev/nxge/if_nxge.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-device.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-mm.c optional nxge dev/nxge/xgehal/xge-queue.c optional nxge dev/nxge/xgehal/xgehal-driver.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-ring.c 
optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-channel.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-fifo.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-stats.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nxge/xgehal/xgehal-config.c optional nxge dev/nxge/xgehal/xgehal-mgmt.c optional nxge \ compile-with "${NORMAL_C} ${NO_WSELF_ASSIGN}" dev/nmdm/nmdm.c optional nmdm dev/nsp/nsp.c optional nsp dev/nsp/nsp_pccard.c optional nsp pccard dev/null/null.c standard dev/nvd/nvd.c optional nvd nvme dev/nvme/nvme.c optional nvme dev/nvme/nvme_ctrlr.c optional nvme dev/nvme/nvme_ctrlr_cmd.c optional nvme dev/nvme/nvme_ns.c optional nvme dev/nvme/nvme_ns_cmd.c optional nvme dev/nvme/nvme_qpair.c optional nvme dev/nvme/nvme_sim.c optional nvme scbus dev/nvme/nvme_sysctl.c optional nvme dev/nvme/nvme_test.c optional nvme dev/nvme/nvme_util.c optional nvme dev/oce/oce_hw.c optional oce pci dev/oce/oce_if.c optional oce pci dev/oce/oce_mbox.c optional oce pci dev/oce/oce_queue.c optional oce pci dev/oce/oce_sysctl.c optional oce pci dev/oce/oce_util.c optional oce pci dev/ocs_fc/ocs_pci.c optional ocs_fc pci dev/ocs_fc/ocs_ioctl.c optional ocs_fc pci dev/ocs_fc/ocs_os.c optional ocs_fc pci dev/ocs_fc/ocs_utils.c optional ocs_fc pci dev/ocs_fc/ocs_hw.c optional ocs_fc pci dev/ocs_fc/ocs_hw_queues.c optional ocs_fc pci dev/ocs_fc/sli4.c optional ocs_fc pci dev/ocs_fc/ocs_sm.c optional ocs_fc pci dev/ocs_fc/ocs_device.c optional ocs_fc pci dev/ocs_fc/ocs_xport.c optional ocs_fc pci dev/ocs_fc/ocs_domain.c optional ocs_fc pci dev/ocs_fc/ocs_sport.c optional ocs_fc pci dev/ocs_fc/ocs_els.c optional ocs_fc pci dev/ocs_fc/ocs_fabric.c optional ocs_fc pci dev/ocs_fc/ocs_io.c optional ocs_fc pci dev/ocs_fc/ocs_node.c optional ocs_fc pci dev/ocs_fc/ocs_scsi.c optional ocs_fc pci dev/ocs_fc/ocs_unsol.c optional ocs_fc pci dev/ocs_fc/ocs_ddump.c optional ocs_fc pci dev/ocs_fc/ocs_mgmt.c optional ocs_fc pci dev/ocs_fc/ocs_cam.c optional ocs_fc pci dev/ofw/ofw_bus_if.m optional fdt dev/ofw/ofw_bus_subr.c optional fdt dev/ofw/ofw_cpu.c optional fdt dev/ofw/ofw_fdt.c optional fdt dev/ofw/ofw_if.m optional fdt dev/ofw/ofw_subr.c optional fdt dev/ofw/ofwbus.c optional fdt dev/ofw/openfirm.c optional fdt dev/ofw/openfirmio.c optional fdt dev/ow/ow.c optional ow \ dependency "owll_if.h" \ dependency "own_if.h" dev/ow/owll_if.m optional ow dev/ow/own_if.m optional ow dev/ow/ow_temp.c optional ow_temp dev/ow/owc_gpiobus.c optional owc gpio dev/pbio/pbio.c optional pbio isa dev/pccard/card_if.m standard dev/pccard/pccard.c optional pccard dev/pccard/pccard_cis.c optional pccard dev/pccard/pccard_cis_quirks.c optional pccard dev/pccard/pccard_device.c optional pccard dev/pccard/power_if.m standard dev/pccbb/pccbb.c optional cbb dev/pccbb/pccbb_isa.c optional cbb isa dev/pccbb/pccbb_pci.c optional cbb pci dev/pcf/pcf.c optional pcf dev/pci/fixup_pci.c optional pci dev/pci/hostb_pci.c optional pci dev/pci/ignore_pci.c optional pci dev/pci/isa_pci.c optional pci isa dev/pci/pci.c optional pci dev/pci/pci_if.m standard dev/pci/pci_iov.c optional pci pci_iov dev/pci/pci_iov_if.m standard dev/pci/pci_iov_schema.c optional pci pci_iov dev/pci/pci_pci.c optional pci dev/pci/pci_subr.c optional pci dev/pci/pci_user.c optional pci dev/pci/pcib_if.m standard dev/pci/pcib_support.c standard dev/pci/vga_pci.c optional pci dev/pcn/if_pcn.c optional pcn pci dev/pms/freebsd/driver/ini/src/agtiapi.c optional 
pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sadisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/mpi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saframe.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sahw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sainit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saint.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sampicmd.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sampirsp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saphy.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sasata.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sasmp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sassp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/satimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/sautil.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/saioctlcmd.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sallsdk/spc/mpidebug.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dminit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmsmp.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmdisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmtimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/discovery/dm/dmmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/sminit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smsatcb.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" 
dev/pms/RefTisa/sat/src/smsathw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/sat/src/smtimer.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdinit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdmisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdesgl.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdport.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdint.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdioctl.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdhw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/ossacmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tddmcmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdsmcmnapi.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/common/tdtimers.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdio.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdcb.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itdinit.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sas/ini/itddisc.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/sat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/ossasat.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/pms/RefTisa/tisa/sassata/sata/host/sathw.c optional pmspcv \ compile-with "${NORMAL_C} -Wunused-variable -Woverflow -Wparentheses -w" dev/ppbus/if_plip.c optional plip dev/ppbus/immio.c optional vpo dev/ppbus/lpbb.c optional lpbb dev/ppbus/lpt.c optional lpt dev/ppbus/pcfclock.c optional pcfclock dev/ppbus/ppb_1284.c optional ppbus dev/ppbus/ppb_base.c optional ppbus dev/ppbus/ppb_msq.c optional ppbus dev/ppbus/ppbconf.c optional ppbus dev/ppbus/ppbus_if.m optional ppbus dev/ppbus/ppi.c optional ppi dev/ppbus/pps.c optional pps dev/ppbus/vpo.c optional vpo dev/ppbus/vpoio.c optional vpo dev/ppc/ppc.c optional ppc dev/ppc/ppc_acpi.c optional ppc acpi dev/ppc/ppc_isa.c optional ppc isa dev/ppc/ppc_pci.c optional ppc pci dev/ppc/ppc_puc.c optional ppc puc dev/proto/proto_bus_isa.c optional proto acpi | proto isa dev/proto/proto_bus_pci.c optional proto pci dev/proto/proto_busdma.c optional proto dev/proto/proto_core.c optional proto dev/pst/pst-iop.c 
optional pst dev/pst/pst-pci.c optional pst pci dev/pst/pst-raid.c optional pst dev/pty/pty.c optional pty dev/puc/puc.c optional puc dev/puc/puc_cfg.c optional puc dev/puc/puc_pccard.c optional puc pccard dev/puc/puc_pci.c optional puc pci dev/puc/pucdata.c optional puc pci dev/quicc/quicc_core.c optional quicc dev/ral/rt2560.c optional ral dev/ral/rt2661.c optional ral dev/ral/rt2860.c optional ral dev/ral/if_ral_pci.c optional ral pci rt2561fw.c optional rt2561fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561.fw:rt2561fw -mrt2561 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2561fw.c" rt2561fw.fwo optional rt2561fw | ralfw \ dependency "rt2561.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2561fw.fwo" rt2561.fw optional rt2561fw | ralfw \ dependency "$S/contrib/dev/ral/rt2561.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2561.fw" rt2561sfw.c optional rt2561sfw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2561s.fw:rt2561sfw -mrt2561s -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2561sfw.c" rt2561sfw.fwo optional rt2561sfw | ralfw \ dependency "rt2561s.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2561sfw.fwo" rt2561s.fw optional rt2561sfw | ralfw \ dependency "$S/contrib/dev/ral/rt2561s.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2561s.fw" rt2661fw.c optional rt2661fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2661.fw:rt2661fw -mrt2661 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2661fw.c" rt2661fw.fwo optional rt2661fw | ralfw \ dependency "rt2661.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2661fw.fwo" rt2661.fw optional rt2661fw | ralfw \ dependency "$S/contrib/dev/ral/rt2661.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2661.fw" rt2860fw.c optional rt2860fw | ralfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rt2860.fw:rt2860fw -mrt2860 -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rt2860fw.c" rt2860fw.fwo optional rt2860fw | ralfw \ dependency "rt2860.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rt2860fw.fwo" rt2860.fw optional rt2860fw | ralfw \ dependency "$S/contrib/dev/ral/rt2860.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rt2860.fw" dev/random/random_infra.c optional random dev/random/random_harvestq.c optional random dev/random/randomdev.c optional random random_yarrow | \ random !random_yarrow !random_loadable dev/random/yarrow.c optional random random_yarrow dev/random/fortuna.c optional random !random_yarrow !random_loadable dev/random/hash.c optional random random_yarrow | \ random !random_yarrow !random_loadable dev/rc/rc.c optional rc dev/rccgpio/rccgpio.c optional rccgpio gpio dev/re/if_re.c optional re dev/rl/if_rl.c optional rl pci dev/rndtest/rndtest.c optional rndtest dev/rp/rp.c optional rp dev/rp/rp_isa.c optional rp isa dev/rp/rp_pci.c optional rp pci # dev/rtwn/if_rtwn.c optional rtwn dev/rtwn/if_rtwn_beacon.c optional rtwn dev/rtwn/if_rtwn_calib.c optional rtwn dev/rtwn/if_rtwn_cam.c optional rtwn dev/rtwn/if_rtwn_efuse.c optional rtwn dev/rtwn/if_rtwn_fw.c optional rtwn dev/rtwn/if_rtwn_rx.c optional rtwn dev/rtwn/if_rtwn_task.c optional rtwn dev/rtwn/if_rtwn_tx.c optional rtwn # dev/rtwn/pci/rtwn_pci_attach.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_reg.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_rx.c optional rtwn_pci pci dev/rtwn/pci/rtwn_pci_tx.c 
optional rtwn_pci pci # dev/rtwn/usb/rtwn_usb_attach.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_ep.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_reg.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_rx.c optional rtwn_usb dev/rtwn/usb/rtwn_usb_tx.c optional rtwn_usb # RTL8188E dev/rtwn/rtl8188e/r88e_beacon.c optional rtwn dev/rtwn/rtl8188e/r88e_calib.c optional rtwn dev/rtwn/rtl8188e/r88e_chan.c optional rtwn dev/rtwn/rtl8188e/r88e_fw.c optional rtwn dev/rtwn/rtl8188e/r88e_init.c optional rtwn dev/rtwn/rtl8188e/r88e_led.c optional rtwn dev/rtwn/rtl8188e/r88e_tx.c optional rtwn dev/rtwn/rtl8188e/r88e_rf.c optional rtwn dev/rtwn/rtl8188e/r88e_rom.c optional rtwn dev/rtwn/rtl8188e/r88e_rx.c optional rtwn dev/rtwn/rtl8188e/usb/r88eu_attach.c optional rtwn_usb dev/rtwn/rtl8188e/usb/r88eu_init.c optional rtwn_usb dev/rtwn/rtl8188e/usb/r88eu_rx.c optional rtwn_usb # RTL8192C dev/rtwn/rtl8192c/r92c_attach.c optional rtwn dev/rtwn/rtl8192c/r92c_beacon.c optional rtwn dev/rtwn/rtl8192c/r92c_calib.c optional rtwn dev/rtwn/rtl8192c/r92c_chan.c optional rtwn dev/rtwn/rtl8192c/r92c_fw.c optional rtwn dev/rtwn/rtl8192c/r92c_init.c optional rtwn dev/rtwn/rtl8192c/r92c_llt.c optional rtwn dev/rtwn/rtl8192c/r92c_rf.c optional rtwn dev/rtwn/rtl8192c/r92c_rom.c optional rtwn dev/rtwn/rtl8192c/r92c_rx.c optional rtwn dev/rtwn/rtl8192c/r92c_tx.c optional rtwn dev/rtwn/rtl8192c/pci/r92ce_attach.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_calib.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_fw.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_init.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_led.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_rx.c optional rtwn_pci pci dev/rtwn/rtl8192c/pci/r92ce_tx.c optional rtwn_pci pci dev/rtwn/rtl8192c/usb/r92cu_attach.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_init.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_led.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_rx.c optional rtwn_usb dev/rtwn/rtl8192c/usb/r92cu_tx.c optional rtwn_usb # RTL8192E dev/rtwn/rtl8192e/r92e_chan.c optional rtwn dev/rtwn/rtl8192e/r92e_fw.c optional rtwn dev/rtwn/rtl8192e/r92e_init.c optional rtwn dev/rtwn/rtl8192e/r92e_led.c optional rtwn dev/rtwn/rtl8192e/r92e_rf.c optional rtwn dev/rtwn/rtl8192e/r92e_rom.c optional rtwn dev/rtwn/rtl8192e/r92e_rx.c optional rtwn dev/rtwn/rtl8192e/usb/r92eu_attach.c optional rtwn_usb dev/rtwn/rtl8192e/usb/r92eu_init.c optional rtwn_usb # RTL8812A dev/rtwn/rtl8812a/r12a_beacon.c optional rtwn dev/rtwn/rtl8812a/r12a_calib.c optional rtwn dev/rtwn/rtl8812a/r12a_caps.c optional rtwn dev/rtwn/rtl8812a/r12a_chan.c optional rtwn dev/rtwn/rtl8812a/r12a_fw.c optional rtwn dev/rtwn/rtl8812a/r12a_init.c optional rtwn dev/rtwn/rtl8812a/r12a_led.c optional rtwn dev/rtwn/rtl8812a/r12a_rf.c optional rtwn dev/rtwn/rtl8812a/r12a_rom.c optional rtwn dev/rtwn/rtl8812a/r12a_rx.c optional rtwn dev/rtwn/rtl8812a/r12a_tx.c optional rtwn dev/rtwn/rtl8812a/usb/r12au_attach.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_init.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_rx.c optional rtwn_usb dev/rtwn/rtl8812a/usb/r12au_tx.c optional rtwn_usb # RTL8821A dev/rtwn/rtl8821a/r21a_beacon.c optional rtwn dev/rtwn/rtl8821a/r21a_calib.c optional rtwn dev/rtwn/rtl8821a/r21a_chan.c optional rtwn dev/rtwn/rtl8821a/r21a_fw.c optional rtwn dev/rtwn/rtl8821a/r21a_init.c optional rtwn dev/rtwn/rtl8821a/r21a_led.c optional rtwn dev/rtwn/rtl8821a/r21a_rom.c optional rtwn dev/rtwn/rtl8821a/r21a_rx.c optional rtwn dev/rtwn/rtl8821a/usb/r21au_attach.c optional rtwn_usb 
dev/rtwn/rtl8821a/usb/r21au_dfs.c optional rtwn_usb dev/rtwn/rtl8821a/usb/r21au_init.c optional rtwn_usb rtwn-rtl8188eufw.c optional rtwn-rtl8188eufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8188eufw.fw:rtwn-rtl8188eufw:111 -mrtwn-rtl8188eufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8188eufw.c" rtwn-rtl8188eufw.fwo optional rtwn-rtl8188eufw | rtwnfw \ dependency "rtwn-rtl8188eufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8188eufw.fwo" rtwn-rtl8188eufw.fw optional rtwn-rtl8188eufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8188eufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8188eufw.fw" rtwn-rtl8192cfwE.c optional rtwn-rtl8192cfwE | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE.fw:rtwn-rtl8192cfwE:111 -mrtwn-rtl8192cfwE -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwE.c" rtwn-rtl8192cfwE.fwo optional rtwn-rtl8192cfwE | rtwnfw \ dependency "rtwn-rtl8192cfwE.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwE.fwo" rtwn-rtl8192cfwE.fw optional rtwn-rtl8192cfwE | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwE.fw" rtwn-rtl8192cfwE_B.c optional rtwn-rtl8192cfwE_B | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwE_B.fw:rtwn-rtl8192cfwE_B:111 -mrtwn-rtl8192cfwE_B -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwE_B.c" rtwn-rtl8192cfwE_B.fwo optional rtwn-rtl8192cfwE_B | rtwnfw \ dependency "rtwn-rtl8192cfwE_B.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwE_B.fwo" rtwn-rtl8192cfwE_B.fw optional rtwn-rtl8192cfwE_B | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwE_B.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwE_B.fw" rtwn-rtl8192cfwT.c optional rtwn-rtl8192cfwT | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwT.fw:rtwn-rtl8192cfwT:111 -mrtwn-rtl8192cfwT -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwT.c" rtwn-rtl8192cfwT.fwo optional rtwn-rtl8192cfwT | rtwnfw \ dependency "rtwn-rtl8192cfwT.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwT.fwo" rtwn-rtl8192cfwT.fw optional rtwn-rtl8192cfwT | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwT.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwT.fw" rtwn-rtl8192cfwU.c optional rtwn-rtl8192cfwU | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192cfwU.fw:rtwn-rtl8192cfwU:111 -mrtwn-rtl8192cfwU -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192cfwU.c" rtwn-rtl8192cfwU.fwo optional rtwn-rtl8192cfwU | rtwnfw \ dependency "rtwn-rtl8192cfwU.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192cfwU.fwo" rtwn-rtl8192cfwU.fw optional rtwn-rtl8192cfwU | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192cfwU.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192cfwU.fw" rtwn-rtl8192eufw.c optional rtwn-rtl8192eufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8192eufw.fw:rtwn-rtl8192eufw:111 -mrtwn-rtl8192eufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8192eufw.c" rtwn-rtl8192eufw.fwo optional rtwn-rtl8192eufw | rtwnfw \ dependency "rtwn-rtl8192eufw.fw" \ compile-with 
"${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8192eufw.fwo" rtwn-rtl8192eufw.fw optional rtwn-rtl8192eufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8192eufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8192eufw.fw" rtwn-rtl8812aufw.c optional rtwn-rtl8812aufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8812aufw.fw:rtwn-rtl8812aufw:111 -mrtwn-rtl8812aufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8812aufw.c" rtwn-rtl8812aufw.fwo optional rtwn-rtl8812aufw | rtwnfw \ dependency "rtwn-rtl8812aufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8812aufw.fwo" rtwn-rtl8812aufw.fw optional rtwn-rtl8812aufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8812aufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8812aufw.fw" rtwn-rtl8821aufw.c optional rtwn-rtl8821aufw | rtwnfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rtwn-rtl8821aufw.fw:rtwn-rtl8821aufw:111 -mrtwn-rtl8821aufw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rtwn-rtl8821aufw.c" rtwn-rtl8821aufw.fwo optional rtwn-rtl8821aufw | rtwnfw \ dependency "rtwn-rtl8821aufw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rtwn-rtl8821aufw.fwo" rtwn-rtl8821aufw.fw optional rtwn-rtl8821aufw | rtwnfw \ dependency "$S/contrib/dev/rtwn/rtwn-rtl8821aufw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rtwn-rtl8821aufw.fw" dev/safe/safe.c optional safe dev/scc/scc_if.m optional scc dev/scc/scc_bfe_ebus.c optional scc ebus dev/scc/scc_bfe_quicc.c optional scc quicc dev/scc/scc_bfe_sbus.c optional scc fhc | scc sbus dev/scc/scc_core.c optional scc dev/scc/scc_dev_quicc.c optional scc quicc dev/scc/scc_dev_sab82532.c optional scc dev/scc/scc_dev_z8530.c optional scc dev/sdhci/sdhci.c optional sdhci dev/sdhci/sdhci_fdt.c optional sdhci fdt dev/sdhci/sdhci_fdt_gpio.c optional sdhci fdt gpio dev/sdhci/sdhci_if.m optional sdhci dev/sdhci/sdhci_acpi.c optional sdhci acpi dev/sdhci/sdhci_pci.c optional sdhci pci dev/sf/if_sf.c optional sf pci dev/sge/if_sge.c optional sge pci dev/siis/siis.c optional siis pci dev/sis/if_sis.c optional sis pci dev/sk/if_sk.c optional sk pci dev/smbus/smb.c optional smb dev/smbus/smbconf.c optional smbus dev/smbus/smbus.c optional smbus dev/smbus/smbus_if.m optional smbus dev/smc/if_smc.c optional smc dev/smc/if_smc_fdt.c optional smc fdt dev/sn/if_sn.c optional sn dev/sn/if_sn_isa.c optional sn isa dev/sn/if_sn_pccard.c optional sn pccard dev/snp/snp.c optional snp dev/sound/clone.c optional sound dev/sound/unit.c optional sound dev/sound/isa/ad1816.c optional snd_ad1816 isa dev/sound/isa/ess.c optional snd_ess isa dev/sound/isa/gusc.c optional snd_gusc isa dev/sound/isa/mss.c optional snd_mss isa dev/sound/isa/sb16.c optional snd_sb16 isa dev/sound/isa/sb8.c optional snd_sb8 isa dev/sound/isa/sbc.c optional snd_sbc isa dev/sound/isa/sndbuf_dma.c optional sound isa dev/sound/pci/als4000.c optional snd_als4000 pci dev/sound/pci/atiixp.c optional snd_atiixp pci dev/sound/pci/cmi.c optional snd_cmi pci dev/sound/pci/cs4281.c optional snd_cs4281 pci dev/sound/pci/csa.c optional snd_csa pci dev/sound/pci/csapcm.c optional snd_csa pci dev/sound/pci/ds1.c optional snd_ds1 pci dev/sound/pci/emu10k1.c optional snd_emu10k1 pci dev/sound/pci/emu10kx.c optional snd_emu10kx pci dev/sound/pci/emu10kx-pcm.c optional snd_emu10kx pci dev/sound/pci/emu10kx-midi.c optional snd_emu10kx pci dev/sound/pci/envy24.c optional snd_envy24 pci 
dev/sound/pci/envy24ht.c optional snd_envy24ht pci dev/sound/pci/es137x.c optional snd_es137x pci dev/sound/pci/fm801.c optional snd_fm801 pci dev/sound/pci/ich.c optional snd_ich pci dev/sound/pci/maestro.c optional snd_maestro pci dev/sound/pci/maestro3.c optional snd_maestro3 pci dev/sound/pci/neomagic.c optional snd_neomagic pci dev/sound/pci/solo.c optional snd_solo pci dev/sound/pci/spicds.c optional snd_spicds pci dev/sound/pci/t4dwave.c optional snd_t4dwave pci dev/sound/pci/via8233.c optional snd_via8233 pci dev/sound/pci/via82c686.c optional snd_via82c686 pci dev/sound/pci/vibes.c optional snd_vibes pci dev/sound/pci/hda/hdaa.c optional snd_hda pci dev/sound/pci/hda/hdaa_patches.c optional snd_hda pci dev/sound/pci/hda/hdac.c optional snd_hda pci dev/sound/pci/hda/hdac_if.m optional snd_hda pci dev/sound/pci/hda/hdacc.c optional snd_hda pci dev/sound/pci/hdspe.c optional snd_hdspe pci dev/sound/pci/hdspe-pcm.c optional snd_hdspe pci dev/sound/pcm/ac97.c optional sound dev/sound/pcm/ac97_if.m optional sound dev/sound/pcm/ac97_patch.c optional sound dev/sound/pcm/buffer.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/channel.c optional sound dev/sound/pcm/channel_if.m optional sound dev/sound/pcm/dsp.c optional sound dev/sound/pcm/feeder.c optional sound dev/sound/pcm/feeder_chain.c optional sound dev/sound/pcm/feeder_eq.c optional sound \ dependency "feeder_eq_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_if.m optional sound dev/sound/pcm/feeder_format.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_matrix.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_mixer.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_rate.c optional sound \ dependency "feeder_rate_gen.h" \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/feeder_volume.c optional sound \ dependency "snd_fxdiv_gen.h" dev/sound/pcm/mixer.c optional sound dev/sound/pcm/mixer_if.m optional sound dev/sound/pcm/sndstat.c optional sound dev/sound/pcm/sound.c optional sound dev/sound/pcm/vchan.c optional sound dev/sound/usb/uaudio.c optional snd_uaudio usb dev/sound/usb/uaudio_pcm.c optional snd_uaudio usb dev/sound/midi/midi.c optional sound dev/sound/midi/mpu401.c optional sound dev/sound/midi/mpu_if.m optional sound dev/sound/midi/mpufoi_if.m optional sound dev/sound/midi/sequencer.c optional sound dev/sound/midi/synth_if.m optional sound dev/spibus/ofw_spibus.c optional fdt spibus dev/spibus/spibus.c optional spibus \ dependency "spibus_if.h" dev/spibus/spigen.c optional spigen dev/spibus/spibus_if.m optional spibus dev/ste/if_ste.c optional ste pci dev/stg/tmc18c30.c optional stg dev/stg/tmc18c30_isa.c optional stg isa dev/stg/tmc18c30_pccard.c optional stg pccard dev/stg/tmc18c30_pci.c optional stg pci dev/stg/tmc18c30_subr.c optional stg dev/stge/if_stge.c optional stge dev/sym/sym_hipd.c optional sym \ dependency "$S/dev/sym/sym_{conf,defs}.h" dev/syscons/blank/blank_saver.c optional blank_saver dev/syscons/daemon/daemon_saver.c optional daemon_saver dev/syscons/dragon/dragon_saver.c optional dragon_saver dev/syscons/fade/fade_saver.c optional fade_saver dev/syscons/fire/fire_saver.c optional fire_saver dev/syscons/green/green_saver.c optional green_saver dev/syscons/logo/logo.c optional logo_saver dev/syscons/logo/logo_saver.c optional logo_saver dev/syscons/rain/rain_saver.c optional rain_saver dev/syscons/schistory.c optional sc dev/syscons/scmouse.c optional sc dev/syscons/scterm.c optional sc dev/syscons/scvidctl.c optional sc 
dev/syscons/snake/snake_saver.c optional snake_saver dev/syscons/star/star_saver.c optional star_saver dev/syscons/syscons.c optional sc dev/syscons/sysmouse.c optional sc dev/syscons/warp/warp_saver.c optional warp_saver dev/tcp_log/tcp_log_dev.c optional tcp_blackbox inet | tcp_blackbox inet6 dev/tdfx/tdfx_linux.c optional tdfx_linux tdfx compat_linux dev/tdfx/tdfx_pci.c optional tdfx pci dev/ti/if_ti.c optional ti pci dev/tl/if_tl.c optional tl pci dev/trm/trm.c optional trm dev/twa/tw_cl_init.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_intr.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_io.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_misc.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_cam.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_freebsd.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twe/twe.c optional twe dev/twe/twe_freebsd.c optional twe dev/tws/tws.c optional tws dev/tws/tws_cam.c optional tws dev/tws/tws_hdm.c optional tws dev/tws/tws_services.c optional tws dev/tws/tws_user.c optional tws dev/tx/if_tx.c optional tx dev/txp/if_txp.c optional txp dev/uart/uart_bus_acpi.c optional uart acpi dev/uart/uart_bus_ebus.c optional uart ebus dev/uart/uart_bus_fdt.c optional uart fdt dev/uart/uart_bus_isa.c optional uart isa dev/uart/uart_bus_pccard.c optional uart pccard dev/uart/uart_bus_pci.c optional uart pci dev/uart/uart_bus_puc.c optional uart puc dev/uart/uart_bus_scc.c optional uart scc dev/uart/uart_core.c optional uart dev/uart/uart_dbg.c optional uart gdb dev/uart/uart_dev_msm.c optional uart uart_msm fdt dev/uart/uart_dev_mvebu.c optional uart uart_mvebu dev/uart/uart_dev_ns8250.c optional uart uart_ns8250 | uart uart_snps dev/uart/uart_dev_pl011.c optional uart pl011 dev/uart/uart_dev_quicc.c optional uart quicc dev/uart/uart_dev_sab82532.c optional uart uart_sab82532 dev/uart/uart_dev_sab82532.c optional uart scc dev/uart/uart_dev_snps.c optional uart uart_snps fdt dev/uart/uart_dev_z8530.c optional uart uart_z8530 dev/uart/uart_dev_z8530.c optional uart scc dev/uart/uart_if.m optional uart dev/uart/uart_subr.c optional uart dev/uart/uart_tty.c optional uart dev/ubsec/ubsec.c optional ubsec # # USB controller drivers # dev/usb/controller/at91dci.c optional at91dci dev/usb/controller/at91dci_atmelarm.c optional at91dci at91rm9200 dev/usb/controller/musb_otg.c optional musb dev/usb/controller/musb_otg_atmelarm.c optional musb at91rm9200 dev/usb/controller/dwc_otg.c optional dwcotg dev/usb/controller/dwc_otg_fdt.c optional dwcotg fdt dev/usb/controller/ehci.c optional ehci dev/usb/controller/ehci_msm.c optional ehci_msm fdt dev/usb/controller/ehci_pci.c optional ehci pci dev/usb/controller/ohci.c optional ohci dev/usb/controller/ohci_pci.c optional ohci pci dev/usb/controller/uhci.c optional uhci dev/usb/controller/uhci_pci.c optional uhci pci dev/usb/controller/xhci.c optional xhci dev/usb/controller/xhci_pci.c optional xhci pci dev/usb/controller/saf1761_otg.c optional saf1761otg dev/usb/controller/saf1761_otg_fdt.c optional saf1761otg fdt dev/usb/controller/uss820dci.c optional uss820dci dev/usb/controller/uss820dci_atmelarm.c optional uss820dci at91rm9200 dev/usb/controller/usb_controller.c optional usb # # USB storage drivers # dev/usb/storage/cfumass.c optional cfumass ctl dev/usb/storage/umass.c optional umass dev/usb/storage/urio.c optional urio dev/usb/storage/ustorage_fs.c optional usfs # # USB core # dev/usb/usb_busdma.c 
optional usb dev/usb/usb_core.c optional usb dev/usb/usb_debug.c optional usb dev/usb/usb_dev.c optional usb dev/usb/usb_device.c optional usb dev/usb/usb_dynamic.c optional usb dev/usb/usb_error.c optional usb dev/usb/usb_generic.c optional usb dev/usb/usb_handle_request.c optional usb dev/usb/usb_hid.c optional usb dev/usb/usb_hub.c optional usb dev/usb/usb_if.m optional usb dev/usb/usb_lookup.c optional usb dev/usb/usb_mbuf.c optional usb dev/usb/usb_msctest.c optional usb dev/usb/usb_parse.c optional usb dev/usb/usb_pf.c optional usb dev/usb/usb_process.c optional usb dev/usb/usb_request.c optional usb dev/usb/usb_transfer.c optional usb dev/usb/usb_util.c optional usb # # USB network drivers # dev/usb/net/if_aue.c optional aue dev/usb/net/if_axe.c optional axe dev/usb/net/if_axge.c optional axge dev/usb/net/if_cdce.c optional cdce dev/usb/net/if_cue.c optional cue dev/usb/net/if_ipheth.c optional ipheth dev/usb/net/if_kue.c optional kue dev/usb/net/if_mos.c optional mos dev/usb/net/if_rue.c optional rue dev/usb/net/if_smsc.c optional smsc dev/usb/net/if_udav.c optional udav dev/usb/net/if_ure.c optional ure dev/usb/net/if_usie.c optional usie dev/usb/net/if_urndis.c optional urndis dev/usb/net/ruephy.c optional rue dev/usb/net/usb_ethernet.c optional uether | aue | axe | axge | cdce | \ cue | ipheth | kue | mos | rue | \ smsc | udav | ure | urndis dev/usb/net/uhso.c optional uhso # # USB WLAN drivers # dev/usb/wlan/if_rsu.c optional rsu rsu-rtl8712fw.c optional rsu-rtl8712fw | rsufw \ compile-with "${AWK} -f $S/tools/fw_stub.awk rsu-rtl8712fw.fw:rsu-rtl8712fw:120 -mrsu-rtl8712fw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "rsu-rtl8712fw.c" rsu-rtl8712fw.fwo optional rsu-rtl8712fw | rsufw \ dependency "rsu-rtl8712fw.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "rsu-rtl8712fw.fwo" rsu-rtl8712fw.fw optional rsu-rtl8712.fw | rsufw \ dependency "$S/contrib/dev/rsu/rsu-rtl8712fw.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "rsu-rtl8712fw.fw" dev/usb/wlan/if_rum.c optional rum dev/usb/wlan/if_run.c optional run runfw.c optional runfw \ compile-with "${AWK} -f $S/tools/fw_stub.awk run.fw:runfw -mrunfw -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "runfw.c" runfw.fwo optional runfw \ dependency "run.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "runfw.fwo" run.fw optional runfw \ dependency "$S/contrib/dev/run/rt2870.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "run.fw" dev/usb/wlan/if_uath.c optional uath dev/usb/wlan/if_upgt.c optional upgt dev/usb/wlan/if_ural.c optional ural dev/usb/wlan/if_urtw.c optional urtw dev/usb/wlan/if_zyd.c optional zyd # # USB serial and parallel port drivers # dev/usb/serial/u3g.c optional u3g dev/usb/serial/uark.c optional uark dev/usb/serial/ubsa.c optional ubsa dev/usb/serial/ubser.c optional ubser dev/usb/serial/uchcom.c optional uchcom dev/usb/serial/ucycom.c optional ucycom dev/usb/serial/ufoma.c optional ufoma dev/usb/serial/uftdi.c optional uftdi dev/usb/serial/ugensa.c optional ugensa dev/usb/serial/uipaq.c optional uipaq dev/usb/serial/ulpt.c optional ulpt dev/usb/serial/umcs.c optional umcs dev/usb/serial/umct.c optional umct dev/usb/serial/umodem.c optional umodem dev/usb/serial/umoscom.c optional umoscom dev/usb/serial/uplcom.c optional uplcom dev/usb/serial/uslcom.c optional uslcom dev/usb/serial/uvisor.c optional uvisor dev/usb/serial/uvscom.c optional uvscom dev/usb/serial/usb_serial.c optional ucom | u3g | uark | ubsa | ubser | \ 
uchcom | ucycom | ufoma | uftdi | \ ugensa | uipaq | umcs | umct | \ umodem | umoscom | uplcom | usie | \ uslcom | uvisor | uvscom # # USB misc drivers # dev/usb/misc/ufm.c optional ufm dev/usb/misc/udbp.c optional udbp dev/usb/misc/ugold.c optional ugold dev/usb/misc/uled.c optional uled # # USB input drivers # dev/usb/input/atp.c optional atp dev/usb/input/uep.c optional uep dev/usb/input/uhid.c optional uhid dev/usb/input/ukbd.c optional ukbd dev/usb/input/ums.c optional ums dev/usb/input/wmt.c optional wmt dev/usb/input/wsp.c optional wsp # # USB quirks # dev/usb/quirk/usb_quirk.c optional usb # # USB templates # dev/usb/template/usb_template.c optional usb_template dev/usb/template/usb_template_audio.c optional usb_template dev/usb/template/usb_template_cdce.c optional usb_template dev/usb/template/usb_template_kbd.c optional usb_template dev/usb/template/usb_template_modem.c optional usb_template dev/usb/template/usb_template_mouse.c optional usb_template dev/usb/template/usb_template_msc.c optional usb_template dev/usb/template/usb_template_mtp.c optional usb_template dev/usb/template/usb_template_phone.c optional usb_template dev/usb/template/usb_template_serialnet.c optional usb_template dev/usb/template/usb_template_midi.c optional usb_template # # USB video drivers # dev/usb/video/udl.c optional udl # # USB END # dev/videomode/videomode.c optional videomode dev/videomode/edid.c optional videomode dev/videomode/pickmode.c optional videomode dev/videomode/vesagtf.c optional videomode dev/vge/if_vge.c optional vge dev/viapm/viapm.c optional viapm pci dev/virtio/virtio.c optional virtio dev/virtio/virtqueue.c optional virtio dev/virtio/virtio_bus_if.m optional virtio dev/virtio/virtio_if.m optional virtio dev/virtio/pci/virtio_pci.c optional virtio_pci dev/virtio/mmio/virtio_mmio.c optional virtio_mmio fdt dev/virtio/mmio/virtio_mmio_if.m optional virtio_mmio fdt dev/virtio/network/if_vtnet.c optional vtnet dev/virtio/block/virtio_blk.c optional virtio_blk dev/virtio/balloon/virtio_balloon.c optional virtio_balloon dev/virtio/scsi/virtio_scsi.c optional virtio_scsi dev/virtio/random/virtio_random.c optional virtio_random dev/virtio/console/virtio_console.c optional virtio_console dev/vkbd/vkbd.c optional vkbd dev/vr/if_vr.c optional vr pci dev/vt/colors/vt_termcolors.c optional vt dev/vt/font/vt_font_default.c optional vt dev/vt/font/vt_mouse_cursor.c optional vt dev/vt/hw/efifb/efifb.c optional vt_efifb dev/vt/hw/fb/vt_fb.c optional vt dev/vt/hw/vga/vt_vga.c optional vt vt_vga dev/vt/logo/logo_freebsd.c optional vt splash dev/vt/logo/logo_beastie.c optional vt splash dev/vt/vt_buf.c optional vt dev/vt/vt_consolectl.c optional vt dev/vt/vt_core.c optional vt dev/vt/vt_cpulogos.c optional vt splash dev/vt/vt_font.c optional vt dev/vt/vt_sysmouse.c optional vt dev/vte/if_vte.c optional vte pci dev/vx/if_vx.c optional vx dev/vx/if_vx_pci.c optional vx pci dev/vxge/vxge.c optional vxge dev/vxge/vxgehal/vxgehal-ifmsg.c optional vxge dev/vxge/vxgehal/vxgehal-mrpcim.c optional vxge dev/vxge/vxgehal/vxge-queue.c optional vxge dev/vxge/vxgehal/vxgehal-ring.c optional vxge dev/vxge/vxgehal/vxgehal-swapper.c optional vxge dev/vxge/vxgehal/vxgehal-mgmt.c optional vxge dev/vxge/vxgehal/vxgehal-srpcim.c optional vxge dev/vxge/vxgehal/vxgehal-config.c optional vxge dev/vxge/vxgehal/vxgehal-blockpool.c optional vxge dev/vxge/vxgehal/vxgehal-doorbells.c optional vxge dev/vxge/vxgehal/vxgehal-mgmtaux.c optional vxge dev/vxge/vxgehal/vxgehal-device.c optional vxge dev/vxge/vxgehal/vxgehal-mm.c 
optional vxge dev/vxge/vxgehal/vxgehal-driver.c optional vxge dev/vxge/vxgehal/vxgehal-virtualpath.c optional vxge dev/vxge/vxgehal/vxgehal-channel.c optional vxge dev/vxge/vxgehal/vxgehal-fifo.c optional vxge dev/watchdog/watchdog.c standard dev/wb/if_wb.c optional wb pci dev/wi/if_wi.c optional wi dev/wi/if_wi_pccard.c optional wi pccard dev/wi/if_wi_pci.c optional wi pci dev/wpi/if_wpi.c optional wpi pci wpifw.c optional wpifw \ compile-with "${AWK} -f $S/tools/fw_stub.awk wpi.fw:wpifw:153229 -mwpi -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "wpifw.c" wpifw.fwo optional wpifw \ dependency "wpi.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "wpifw.fwo" wpi.fw optional wpifw \ dependency "$S/contrib/dev/wpi/iwlwifi-3945-15.32.2.9.fw.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "wpi.fw" dev/xdma/xdma.c optional xdma dev/xdma/xdma_if.m optional xdma dev/xdma/xdma_fdt_test.c optional xdma xdma_test fdt dev/xe/if_xe.c optional xe dev/xe/if_xe_pccard.c optional xe pccard dev/xen/balloon/balloon.c optional xenhvm dev/xen/blkfront/blkfront.c optional xenhvm dev/xen/blkback/blkback.c optional xenhvm dev/xen/console/xen_console.c optional xenhvm dev/xen/control/control.c optional xenhvm dev/xen/grant_table/grant_table.c optional xenhvm dev/xen/netback/netback.c optional xenhvm dev/xen/netfront/netfront.c optional xenhvm dev/xen/xenpci/xenpci.c optional xenpci dev/xen/timer/timer.c optional xenhvm dev/xen/pvcpu/pvcpu.c optional xenhvm dev/xen/xenstore/xenstore.c optional xenhvm dev/xen/xenstore/xenstore_dev.c optional xenhvm dev/xen/xenstore/xenstored_dev.c optional xenhvm dev/xen/evtchn/evtchn_dev.c optional xenhvm dev/xen/privcmd/privcmd.c optional xenhvm dev/xen/gntdev/gntdev.c optional xenhvm dev/xen/debug/debug.c optional xenhvm dev/xl/if_xl.c optional xl pci dev/xl/xlphy.c optional xl pci fs/autofs/autofs.c optional autofs fs/autofs/autofs_vfsops.c optional autofs fs/autofs/autofs_vnops.c optional autofs fs/deadfs/dead_vnops.c standard fs/devfs/devfs_devs.c standard fs/devfs/devfs_dir.c standard fs/devfs/devfs_rule.c standard fs/devfs/devfs_vfsops.c standard fs/devfs/devfs_vnops.c standard fs/fdescfs/fdesc_vfsops.c optional fdescfs fs/fdescfs/fdesc_vnops.c optional fdescfs fs/fifofs/fifo_vnops.c standard fs/cuse/cuse.c optional cuse fs/fuse/fuse_device.c optional fuse fs/fuse/fuse_file.c optional fuse fs/fuse/fuse_internal.c optional fuse fs/fuse/fuse_io.c optional fuse fs/fuse/fuse_ipc.c optional fuse fs/fuse/fuse_main.c optional fuse fs/fuse/fuse_node.c optional fuse fs/fuse/fuse_vfsops.c optional fuse fs/fuse/fuse_vnops.c optional fuse fs/msdosfs/msdosfs_conv.c optional msdosfs fs/msdosfs/msdosfs_denode.c optional msdosfs fs/msdosfs/msdosfs_fat.c optional msdosfs fs/msdosfs/msdosfs_iconv.c optional msdosfs_iconv fs/msdosfs/msdosfs_lookup.c optional msdosfs fs/msdosfs/msdosfs_vfsops.c optional msdosfs fs/msdosfs/msdosfs_vnops.c optional msdosfs fs/nandfs/bmap.c optional nandfs fs/nandfs/nandfs_alloc.c optional nandfs fs/nandfs/nandfs_bmap.c optional nandfs fs/nandfs/nandfs_buffer.c optional nandfs fs/nandfs/nandfs_cleaner.c optional nandfs fs/nandfs/nandfs_cpfile.c optional nandfs fs/nandfs/nandfs_dat.c optional nandfs fs/nandfs/nandfs_dir.c optional nandfs fs/nandfs/nandfs_ifile.c optional nandfs fs/nandfs/nandfs_segment.c optional nandfs fs/nandfs/nandfs_subr.c optional nandfs fs/nandfs/nandfs_sufile.c optional nandfs fs/nandfs/nandfs_vfsops.c optional nandfs fs/nandfs/nandfs_vnops.c optional nandfs fs/nfs/nfs_commonkrpc.c optional 
nfscl | nfsd fs/nfs/nfs_commonsubs.c optional nfscl | nfsd fs/nfs/nfs_commonport.c optional nfscl | nfsd fs/nfs/nfs_commonacl.c optional nfscl | nfsd fs/nfsclient/nfs_clcomsubs.c optional nfscl fs/nfsclient/nfs_clsubs.c optional nfscl fs/nfsclient/nfs_clstate.c optional nfscl fs/nfsclient/nfs_clkrpc.c optional nfscl fs/nfsclient/nfs_clrpcops.c optional nfscl fs/nfsclient/nfs_clvnops.c optional nfscl fs/nfsclient/nfs_clnode.c optional nfscl fs/nfsclient/nfs_clvfsops.c optional nfscl fs/nfsclient/nfs_clport.c optional nfscl fs/nfsclient/nfs_clbio.c optional nfscl fs/nfsclient/nfs_clnfsiod.c optional nfscl fs/nfsserver/nfs_fha_new.c optional nfsd inet fs/nfsserver/nfs_nfsdsocket.c optional nfsd inet fs/nfsserver/nfs_nfsdsubs.c optional nfsd inet fs/nfsserver/nfs_nfsdstate.c optional nfsd inet fs/nfsserver/nfs_nfsdkrpc.c optional nfsd inet fs/nfsserver/nfs_nfsdserv.c optional nfsd inet fs/nfsserver/nfs_nfsdport.c optional nfsd inet fs/nfsserver/nfs_nfsdcache.c optional nfsd inet fs/nullfs/null_subr.c optional nullfs fs/nullfs/null_vfsops.c optional nullfs fs/nullfs/null_vnops.c optional nullfs fs/procfs/procfs.c optional procfs fs/procfs/procfs_dbregs.c optional procfs fs/procfs/procfs_fpregs.c optional procfs fs/procfs/procfs_ioctl.c optional procfs fs/procfs/procfs_map.c optional procfs fs/procfs/procfs_mem.c optional procfs fs/procfs/procfs_note.c optional procfs fs/procfs/procfs_osrel.c optional procfs fs/procfs/procfs_regs.c optional procfs fs/procfs/procfs_rlimit.c optional procfs fs/procfs/procfs_status.c optional procfs fs/procfs/procfs_type.c optional procfs fs/pseudofs/pseudofs.c optional pseudofs fs/pseudofs/pseudofs_fileno.c optional pseudofs fs/pseudofs/pseudofs_vncache.c optional pseudofs fs/pseudofs/pseudofs_vnops.c optional pseudofs fs/smbfs/smbfs_io.c optional smbfs fs/smbfs/smbfs_node.c optional smbfs fs/smbfs/smbfs_smb.c optional smbfs fs/smbfs/smbfs_subr.c optional smbfs fs/smbfs/smbfs_vfsops.c optional smbfs fs/smbfs/smbfs_vnops.c optional smbfs fs/udf/osta.c optional udf fs/udf/udf_iconv.c optional udf_iconv fs/udf/udf_vfsops.c optional udf fs/udf/udf_vnops.c optional udf fs/unionfs/union_subr.c optional unionfs fs/unionfs/union_vfsops.c optional unionfs fs/unionfs/union_vnops.c optional unionfs fs/tmpfs/tmpfs_vnops.c optional tmpfs fs/tmpfs/tmpfs_fifoops.c optional tmpfs fs/tmpfs/tmpfs_vfsops.c optional tmpfs fs/tmpfs/tmpfs_subr.c optional tmpfs gdb/gdb_cons.c optional gdb gdb/gdb_main.c optional gdb gdb/gdb_packet.c optional gdb geom/bde/g_bde.c optional geom_bde geom/bde/g_bde_crypt.c optional geom_bde geom/bde/g_bde_lock.c optional geom_bde geom/bde/g_bde_work.c optional geom_bde geom/cache/g_cache.c optional geom_cache geom/concat/g_concat.c optional geom_concat geom/eli/g_eli.c optional geom_eli geom/eli/g_eli_crypto.c optional geom_eli geom/eli/g_eli_ctl.c optional geom_eli geom/eli/g_eli_hmac.c optional geom_eli geom/eli/g_eli_integrity.c optional geom_eli geom/eli/g_eli_key.c optional geom_eli geom/eli/g_eli_key_cache.c optional geom_eli geom/eli/g_eli_privacy.c optional geom_eli geom/eli/pkcs5v2.c optional geom_eli geom/gate/g_gate.c optional geom_gate geom/geom_bsd.c optional geom_bsd geom/geom_bsd_enc.c optional geom_bsd | geom_part_bsd geom/geom_ccd.c optional ccd | geom_ccd geom/geom_ctl.c standard geom/geom_dev.c standard geom/geom_disk.c standard geom/geom_dump.c standard geom/geom_event.c standard geom/geom_fox.c optional geom_fox geom/geom_flashmap.c optional fdt cfi | fdt nand | fdt mx25l | mmcsd geom/geom_io.c standard geom/geom_kern.c standard 
geom/geom_map.c optional geom_map geom/geom_mbr.c optional geom_mbr geom/geom_mbr_enc.c optional geom_mbr geom/geom_redboot.c optional geom_redboot geom/geom_slice.c standard geom/geom_subr.c standard geom/geom_sunlabel.c optional geom_sunlabel geom/geom_sunlabel_enc.c optional geom_sunlabel geom/geom_vfs.c standard geom/geom_vol_ffs.c optional geom_vol geom/journal/g_journal.c optional geom_journal geom/journal/g_journal_ufs.c optional geom_journal geom/label/g_label.c optional geom_label | geom_label_gpt geom/label/g_label_ext2fs.c optional geom_label geom/label/g_label_iso9660.c optional geom_label geom/label/g_label_msdosfs.c optional geom_label geom/label/g_label_ntfs.c optional geom_label geom/label/g_label_reiserfs.c optional geom_label geom/label/g_label_ufs.c optional geom_label geom/label/g_label_gpt.c optional geom_label | geom_label_gpt geom/label/g_label_disk_ident.c optional geom_label geom/linux_lvm/g_linux_lvm.c optional geom_linux_lvm geom/mirror/g_mirror.c optional geom_mirror geom/mirror/g_mirror_ctl.c optional geom_mirror geom/mountver/g_mountver.c optional geom_mountver geom/multipath/g_multipath.c optional geom_multipath geom/nop/g_nop.c optional geom_nop geom/part/g_part.c standard geom/part/g_part_if.m standard geom/part/g_part_apm.c optional geom_part_apm geom/part/g_part_bsd.c optional geom_part_bsd geom/part/g_part_bsd64.c optional geom_part_bsd64 geom/part/g_part_ebr.c optional geom_part_ebr geom/part/g_part_gpt.c optional geom_part_gpt geom/part/g_part_ldm.c optional geom_part_ldm geom/part/g_part_mbr.c optional geom_part_mbr geom/part/g_part_vtoc8.c optional geom_part_vtoc8 geom/raid/g_raid.c optional geom_raid geom/raid/g_raid_ctl.c optional geom_raid geom/raid/g_raid_md_if.m optional geom_raid geom/raid/g_raid_tr_if.m optional geom_raid geom/raid/md_ddf.c optional geom_raid geom/raid/md_intel.c optional geom_raid geom/raid/md_jmicron.c optional geom_raid geom/raid/md_nvidia.c optional geom_raid geom/raid/md_promise.c optional geom_raid geom/raid/md_sii.c optional geom_raid geom/raid/tr_concat.c optional geom_raid geom/raid/tr_raid0.c optional geom_raid geom/raid/tr_raid1.c optional geom_raid geom/raid/tr_raid1e.c optional geom_raid geom/raid/tr_raid5.c optional geom_raid geom/raid3/g_raid3.c optional geom_raid3 geom/raid3/g_raid3_ctl.c optional geom_raid3 geom/shsec/g_shsec.c optional geom_shsec geom/stripe/g_stripe.c optional geom_stripe contrib/xz-embedded/freebsd/xz_malloc.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_crc32.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_bcj.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_lzma2.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" contrib/xz-embedded/linux/lib/xz/xz_dec_stream.c \ optional xz_embedded | geom_uzip \ compile-with "${NORMAL_C} -I$S/contrib/xz-embedded/freebsd/ -I$S/contrib/xz-embedded/linux/lib/xz/ -I$S/contrib/xz-embedded/linux/include/linux/" 
geom/uzip/g_uzip.c optional geom_uzip geom/uzip/g_uzip_lzma.c optional geom_uzip geom/uzip/g_uzip_wrkthr.c optional geom_uzip geom/uzip/g_uzip_zlib.c optional geom_uzip geom/vinum/geom_vinum.c optional geom_vinum geom/vinum/geom_vinum_create.c optional geom_vinum geom/vinum/geom_vinum_drive.c optional geom_vinum geom/vinum/geom_vinum_plex.c optional geom_vinum geom/vinum/geom_vinum_volume.c optional geom_vinum geom/vinum/geom_vinum_subr.c optional geom_vinum geom/vinum/geom_vinum_raid5.c optional geom_vinum geom/vinum/geom_vinum_share.c optional geom_vinum geom/vinum/geom_vinum_list.c optional geom_vinum geom/vinum/geom_vinum_rm.c optional geom_vinum geom/vinum/geom_vinum_init.c optional geom_vinum geom/vinum/geom_vinum_state.c optional geom_vinum geom/vinum/geom_vinum_rename.c optional geom_vinum geom/vinum/geom_vinum_move.c optional geom_vinum geom/vinum/geom_vinum_events.c optional geom_vinum geom/virstor/binstream.c optional geom_virstor geom/virstor/g_virstor.c optional geom_virstor geom/virstor/g_virstor_md.c optional geom_virstor geom/zero/g_zero.c optional geom_zero fs/ext2fs/ext2_acl.c optional ext2fs fs/ext2fs/ext2_alloc.c optional ext2fs fs/ext2fs/ext2_balloc.c optional ext2fs fs/ext2fs/ext2_bmap.c optional ext2fs fs/ext2fs/ext2_csum.c optional ext2fs fs/ext2fs/ext2_extattr.c optional ext2fs fs/ext2fs/ext2_extents.c optional ext2fs fs/ext2fs/ext2_inode.c optional ext2fs fs/ext2fs/ext2_inode_cnv.c optional ext2fs fs/ext2fs/ext2_hash.c optional ext2fs fs/ext2fs/ext2_htree.c optional ext2fs fs/ext2fs/ext2_lookup.c optional ext2fs fs/ext2fs/ext2_subr.c optional ext2fs fs/ext2fs/ext2_vfsops.c optional ext2fs fs/ext2fs/ext2_vnops.c optional ext2fs # isa/isa_if.m standard isa/isa_common.c optional isa isa/isahint.c optional isa isa/pnp.c optional isa isapnp isa/pnpparse.c optional isa isapnp fs/cd9660/cd9660_bmap.c optional cd9660 fs/cd9660/cd9660_lookup.c optional cd9660 fs/cd9660/cd9660_node.c optional cd9660 fs/cd9660/cd9660_rrip.c optional cd9660 fs/cd9660/cd9660_util.c optional cd9660 fs/cd9660/cd9660_vfsops.c optional cd9660 fs/cd9660/cd9660_vnops.c optional cd9660 fs/cd9660/cd9660_iconv.c optional cd9660_iconv kern/bus_if.m standard kern/clock_if.m standard kern/cpufreq_if.m standard kern/device_if.m standard kern/imgact_binmisc.c optional imagact_binmisc kern/imgact_elf.c standard kern/imgact_elf32.c optional compat_freebsd32 kern/imgact_shell.c standard kern/inflate.c optional gzip kern/init_main.c standard kern/init_sysent.c standard kern/ksched.c optional _kposix_priority_scheduling kern/kern_acct.c standard kern/kern_alq.c optional alq kern/kern_clock.c standard kern/kern_condvar.c standard kern/kern_conf.c standard kern/kern_cons.c standard kern/kern_cpu.c standard kern/kern_cpuset.c standard kern/kern_context.c standard kern/kern_descrip.c standard kern/kern_dtrace.c optional kdtrace_hooks kern/kern_dump.c standard kern/kern_environment.c standard kern/kern_et.c standard kern/kern_event.c standard kern/kern_exec.c standard kern/kern_exit.c standard kern/kern_fail.c standard kern/kern_ffclock.c standard kern/kern_fork.c standard kern/kern_hhook.c standard kern/kern_idle.c standard kern/kern_intr.c standard kern/kern_jail.c standard kern/kern_khelp.c standard kern/kern_kthread.c standard kern/kern_ktr.c optional ktr kern/kern_ktrace.c standard kern/kern_linker.c standard kern/kern_lock.c standard kern/kern_lockf.c standard kern/kern_lockstat.c optional kdtrace_hooks kern/kern_loginclass.c standard kern/kern_malloc.c standard kern/kern_mbuf.c standard kern/kern_mib.c 
standard kern/kern_module.c standard kern/kern_mtxpool.c standard kern/kern_mutex.c standard kern/kern_ntptime.c standard kern/kern_osd.c standard kern/kern_physio.c standard kern/kern_pmc.c standard kern/kern_poll.c optional device_polling kern/kern_priv.c standard kern/kern_proc.c standard kern/kern_procctl.c standard kern/kern_prot.c standard kern/kern_racct.c standard kern/kern_rangelock.c standard kern/kern_rctl.c standard kern/kern_resource.c standard kern/kern_rmlock.c standard kern/kern_rwlock.c standard kern/kern_sdt.c optional kdtrace_hooks kern/kern_sema.c standard kern/kern_sendfile.c standard kern/kern_sharedpage.c standard kern/kern_shutdown.c standard kern/kern_sig.c standard kern/kern_switch.c standard kern/kern_sx.c standard kern/kern_synch.c standard kern/kern_syscalls.c standard kern/kern_sysctl.c standard kern/kern_tc.c standard kern/kern_thr.c standard kern/kern_thread.c standard kern/kern_time.c standard kern/kern_timeout.c standard kern/kern_tslog.c optional tslog kern/kern_umtx.c standard kern/kern_uuid.c standard kern/kern_xxx.c standard kern/link_elf.c standard kern/linker_if.m standard kern/md4c.c optional netsmb kern/md5c.c standard kern/p1003_1b.c standard kern/posix4_mib.c standard kern/sched_4bsd.c optional sched_4bsd kern/sched_ule.c optional sched_ule kern/serdev_if.m standard kern/stack_protector.c standard \ compile-with "${NORMAL_C:N-fstack-protector*}" kern/subr_acl_nfs4.c optional ufs_acl | zfs kern/subr_acl_posix1e.c optional ufs_acl kern/subr_autoconf.c standard kern/subr_blist.c standard kern/subr_bus.c standard kern/subr_bus_dma.c standard kern/subr_bufring.c standard kern/subr_capability.c standard kern/subr_clock.c standard kern/subr_compressor.c standard \ compile-with "${NORMAL_C} -I$S/contrib/zstd/lib/freebsd" kern/subr_counter.c standard kern/subr_devstat.c standard kern/subr_disk.c standard kern/subr_eventhandler.c standard kern/subr_fattime.c standard kern/subr_firmware.c optional firmware kern/subr_gtaskqueue.c standard kern/subr_hash.c standard kern/subr_hints.c standard kern/subr_kdb.c standard kern/subr_kobj.c standard kern/subr_lock.c standard kern/subr_log.c standard kern/subr_mchain.c optional libmchain kern/subr_module.c standard kern/subr_msgbuf.c standard kern/subr_param.c standard kern/subr_pcpu.c standard kern/subr_pctrie.c standard kern/subr_pidctrl.c standard kern/subr_power.c standard kern/subr_prf.c standard kern/subr_prof.c standard kern/subr_rman.c standard kern/subr_rtc.c standard kern/subr_sbuf.c standard kern/subr_scanf.c standard kern/subr_sglist.c standard kern/subr_sleepqueue.c standard kern/subr_smp.c standard kern/subr_stack.c optional ddb | stack | ktr kern/subr_taskqueue.c standard kern/subr_terminal.c optional vt kern/subr_trap.c standard kern/subr_turnstile.c standard kern/subr_uio.c standard kern/subr_unit.c standard kern/subr_vmem.c standard kern/subr_witness.c optional witness kern/sys_capability.c standard kern/sys_generic.c standard kern/sys_getrandom.c standard kern/sys_pipe.c standard kern/sys_procdesc.c standard kern/sys_process.c standard kern/sys_socket.c standard kern/syscalls.c standard kern/sysv_ipc.c standard kern/sysv_msg.c optional sysvmsg kern/sysv_sem.c optional sysvsem kern/sysv_shm.c optional sysvshm kern/tty.c standard kern/tty_compat.c optional compat_43tty kern/tty_info.c standard kern/tty_inq.c standard kern/tty_outq.c standard kern/tty_pts.c standard kern/tty_tty.c standard kern/tty_ttydisc.c standard kern/uipc_accf.c standard kern/uipc_debug.c optional ddb kern/uipc_domain.c standard 
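#
# Note on kern/stack_protector.c above: ${NORMAL_C:N-fstack-protector*} is a
# make(1) variable modifier; :N strips every word matching the pattern, so
# this one file is built with the normal flags minus any -fstack-protector
# option.  The file supplies the stack-smashing runtime (__stack_chk_guard,
# __stack_chk_fail) and therefore must not itself be instrumented.
#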
kern/uipc_mbuf.c standard kern/uipc_mbuf2.c standard kern/uipc_mbufhash.c standard kern/uipc_mqueue.c optional p1003_1b_mqueue kern/uipc_sem.c optional p1003_1b_semaphores kern/uipc_shm.c standard kern/uipc_sockbuf.c standard kern/uipc_socket.c standard kern/uipc_syscalls.c standard kern/uipc_usrreq.c standard kern/vfs_acl.c standard kern/vfs_aio.c standard kern/vfs_bio.c standard kern/vfs_cache.c standard kern/vfs_cluster.c standard kern/vfs_default.c standard kern/vfs_export.c standard kern/vfs_extattr.c standard kern/vfs_hash.c standard kern/vfs_init.c standard kern/vfs_lookup.c standard kern/vfs_mount.c standard kern/vfs_mountroot.c standard kern/vfs_subr.c standard kern/vfs_syscalls.c standard kern/vfs_vnops.c standard # # Kernel GSS-API # gssd.h optional kgssapi \ dependency "$S/kgssapi/gssd.x" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -hM $S/kgssapi/gssd.x | grep -v pthread.h > gssd.h" \ no-obj no-implicit-rule before-depend local \ clean "gssd.h" gssd_xdr.c optional kgssapi \ dependency "$S/kgssapi/gssd.x gssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -c $S/kgssapi/gssd.x -o gssd_xdr.c" \ no-implicit-rule before-depend local \ clean "gssd_xdr.c" gssd_clnt.c optional kgssapi \ dependency "$S/kgssapi/gssd.x gssd.h" \ compile-with "RPCGEN_CPP='${CPP}' rpcgen -lM $S/kgssapi/gssd.x | grep -v string.h > gssd_clnt.c" \ no-implicit-rule before-depend local \ clean "gssd_clnt.c" kgssapi/gss_accept_sec_context.c optional kgssapi kgssapi/gss_add_oid_set_member.c optional kgssapi kgssapi/gss_acquire_cred.c optional kgssapi kgssapi/gss_canonicalize_name.c optional kgssapi kgssapi/gss_create_empty_oid_set.c optional kgssapi kgssapi/gss_delete_sec_context.c optional kgssapi kgssapi/gss_display_status.c optional kgssapi kgssapi/gss_export_name.c optional kgssapi kgssapi/gss_get_mic.c optional kgssapi kgssapi/gss_init_sec_context.c optional kgssapi kgssapi/gss_impl.c optional kgssapi kgssapi/gss_import_name.c optional kgssapi kgssapi/gss_names.c optional kgssapi kgssapi/gss_pname_to_uid.c optional kgssapi kgssapi/gss_release_buffer.c optional kgssapi kgssapi/gss_release_cred.c optional kgssapi kgssapi/gss_release_name.c optional kgssapi kgssapi/gss_release_oid_set.c optional kgssapi kgssapi/gss_set_cred_option.c optional kgssapi kgssapi/gss_test_oid_set_member.c optional kgssapi kgssapi/gss_unwrap.c optional kgssapi kgssapi/gss_verify_mic.c optional kgssapi kgssapi/gss_wrap.c optional kgssapi kgssapi/gss_wrap_size_limit.c optional kgssapi kgssapi/gssd_prot.c optional kgssapi kgssapi/krb5/krb5_mech.c optional kgssapi kgssapi/krb5/kcrypto.c optional kgssapi kgssapi/krb5/kcrypto_aes.c optional kgssapi kgssapi/krb5/kcrypto_arcfour.c optional kgssapi kgssapi/krb5/kcrypto_des.c optional kgssapi kgssapi/krb5/kcrypto_des3.c optional kgssapi kgssapi/kgss_if.m optional kgssapi kgssapi/gsstest.c optional kgssapi_debug # These files in libkern/ are those needed by all architectures. Some # of the files in libkern/ are only needed on some architectures, e.g., # libkern/divdi3.c is needed by i386 but not alpha. Also, some of these # routines may be optimized for a particular platform. In either case, # the file should be moved to conf/files. from here. 
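#
# Note on the kernel GSS-API entries above: gssd.h, gssd_xdr.c and gssd_clnt.c
# are not checked-in sources; they are generated at build time from
# $S/kgssapi/gssd.x by rpcgen (the "compile-with" command), with "dependency"
# naming the inputs, "before-depend" forcing generation before the dependency
# list is computed, and "clean" listing the products to remove.  A sketch of
# the same pattern for a hypothetical protocol description:
#	foo.h	optional foo \
#		dependency "$S/foo/foo.x" \
#		compile-with "rpcgen -h $S/foo/foo.x > foo.h" \
#		no-obj no-implicit-rule before-depend local \
#		clean "foo.h"
#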
# libkern/arc4random.c standard crypto/chacha20/chacha.c standard libkern/asprintf.c standard libkern/bcd.c standard libkern/bsearch.c standard libkern/crc32.c standard libkern/explicit_bzero.c standard libkern/fnmatch.c standard libkern/iconv.c optional libiconv libkern/iconv_converter_if.m optional libiconv libkern/iconv_ucs.c optional libiconv libkern/iconv_xlat.c optional libiconv libkern/iconv_xlat16.c optional libiconv libkern/inet_aton.c standard libkern/inet_ntoa.c standard libkern/inet_ntop.c standard libkern/inet_pton.c standard libkern/jenkins_hash.c standard libkern/murmur3_32.c standard libkern/mcount.c optional profiling-routine libkern/memcchr.c standard libkern/memchr.c standard libkern/memcmp.c standard libkern/memmem.c optional gdb libkern/qsort.c standard libkern/qsort_r.c standard libkern/random.c standard libkern/scanc.c standard libkern/strcasecmp.c standard libkern/strcat.c standard libkern/strchr.c standard libkern/strcmp.c standard libkern/strcpy.c standard libkern/strcspn.c standard libkern/strdup.c standard libkern/strndup.c standard libkern/strlcat.c standard libkern/strlcpy.c standard libkern/strlen.c standard libkern/strncat.c standard libkern/strncmp.c standard libkern/strncpy.c standard libkern/strnlen.c standard libkern/strrchr.c standard libkern/strsep.c standard libkern/strspn.c standard libkern/strstr.c standard libkern/strtol.c standard libkern/strtoq.c standard libkern/strtoul.c standard libkern/strtouq.c standard libkern/strvalid.c standard libkern/timingsafe_bcmp.c standard libkern/zlib.c optional crypto | geom_uzip | ipsec | \ ipsec_support | mxge | netgraph_deflate | ddb_ctf | gzio net/altq/altq_cbq.c optional altq net/altq/altq_cdnr.c optional altq net/altq/altq_codel.c optional altq net/altq/altq_hfsc.c optional altq net/altq/altq_fairq.c optional altq net/altq/altq_priq.c optional altq net/altq/altq_red.c optional altq net/altq/altq_rio.c optional altq net/altq/altq_rmclass.c optional altq net/altq/altq_subr.c optional altq net/bpf.c standard net/bpf_buffer.c optional bpf net/bpf_jitter.c optional bpf_jitter net/bpf_filter.c optional bpf | netgraph_bpf net/bpf_zerocopy.c optional bpf net/bridgestp.c optional bridge | if_bridge net/flowtable.c optional flowtable inet | flowtable inet6 net/ieee8023ad_lacp.c optional lagg net/if.c standard net/if_arcsubr.c optional arcnet net/if_bridge.c optional bridge inet | if_bridge inet net/if_clone.c standard net/if_dead.c standard net/if_debug.c optional ddb net/if_disc.c optional disc net/if_edsc.c optional edsc net/if_enc.c optional enc inet | enc inet6 net/if_epair.c optional epair net/if_ethersubr.c optional ether net/if_fwsubr.c optional fwip net/if_gif.c optional gif inet | gif inet6 | \ netgraph_gif inet | netgraph_gif inet6 net/if_gre.c optional gre inet | gre inet6 net/if_ipsec.c optional inet ipsec | inet6 ipsec net/if_lagg.c optional lagg net/if_loop.c optional loop net/if_llatbl.c standard net/if_me.c optional me inet net/if_media.c standard net/if_mib.c standard net/if_spppfr.c optional sppp | netgraph_sppp net/if_spppsubr.c optional sppp | netgraph_sppp net/if_stf.c optional stf inet inet6 net/if_tun.c optional tun net/if_tap.c optional tap net/if_vlan.c optional vlan net/if_vxlan.c optional vxlan inet | vxlan inet6 net/ifdi_if.m optional ether pci net/iflib.c optional ether pci net/mp_ring.c optional ether net/mppcc.c optional netgraph_mppc_compression net/mppcd.c optional netgraph_mppc_compression net/netisr.c standard net/pfil.c optional ether | inet net/radix.c standard net/radix_mpath.c 
standard net/raw_cb.c standard net/raw_usrreq.c standard net/route.c standard net/rss_config.c optional inet rss | inet6 rss net/rtsock.c standard net/slcompress.c optional netgraph_vjc | sppp | \ netgraph_sppp net/toeplitz.c optional inet rss | inet6 rss net/vnet.c optional vimage net80211/ieee80211.c optional wlan net80211/ieee80211_acl.c optional wlan wlan_acl net80211/ieee80211_action.c optional wlan net80211/ieee80211_adhoc.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_ageq.c optional wlan net80211/ieee80211_amrr.c optional wlan | wlan_amrr net80211/ieee80211_crypto.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_crypto_ccmp.c optional wlan wlan_ccmp net80211/ieee80211_crypto_none.c optional wlan net80211/ieee80211_crypto_tkip.c optional wlan wlan_tkip net80211/ieee80211_crypto_wep.c optional wlan wlan_wep net80211/ieee80211_ddb.c optional wlan ddb net80211/ieee80211_dfs.c optional wlan net80211/ieee80211_freebsd.c optional wlan net80211/ieee80211_hostap.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_ht.c optional wlan net80211/ieee80211_hwmp.c optional wlan ieee80211_support_mesh net80211/ieee80211_input.c optional wlan net80211/ieee80211_ioctl.c optional wlan net80211/ieee80211_mesh.c optional wlan ieee80211_support_mesh \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_monitor.c optional wlan net80211/ieee80211_node.c optional wlan net80211/ieee80211_output.c optional wlan net80211/ieee80211_phy.c optional wlan net80211/ieee80211_power.c optional wlan net80211/ieee80211_proto.c optional wlan net80211/ieee80211_radiotap.c optional wlan net80211/ieee80211_ratectl.c optional wlan net80211/ieee80211_ratectl_none.c optional wlan net80211/ieee80211_regdomain.c optional wlan net80211/ieee80211_rssadapt.c optional wlan wlan_rssadapt net80211/ieee80211_scan.c optional wlan net80211/ieee80211_scan_sta.c optional wlan net80211/ieee80211_sta.c optional wlan \ compile-with "${NORMAL_C} -Wno-unused-function" net80211/ieee80211_superg.c optional wlan ieee80211_support_superg net80211/ieee80211_scan_sw.c optional wlan net80211/ieee80211_tdma.c optional wlan ieee80211_support_tdma net80211/ieee80211_vht.c optional wlan net80211/ieee80211_wds.c optional wlan net80211/ieee80211_xauth.c optional wlan wlan_xauth net80211/ieee80211_alq.c optional wlan ieee80211_alq netgraph/atm/ccatm/ng_ccatm.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/ngatmbase.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscfu/ng_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscop/ng_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/uni/ng_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/bluetooth/common/ng_bluetooth.c optional netgraph_bluetooth netgraph/bluetooth/drivers/bt3c/ng_bt3c_pccard.c optional netgraph_bluetooth_bt3c netgraph/bluetooth/drivers/h4/ng_h4.c optional netgraph_bluetooth_h4 netgraph/bluetooth/drivers/ubt/ng_ubt.c optional netgraph_bluetooth_ubt usb netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c optional netgraph_bluetooth_ubtbcmfw usb netgraph/bluetooth/hci/ng_hci_cmds.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_evnt.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_main.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_misc.c optional 
netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_ulpi.c optional netgraph_bluetooth_hci netgraph/bluetooth/l2cap/ng_l2cap_cmds.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_evnt.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_llpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_main.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_misc.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_ulpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/socket/ng_btsocket.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_hci_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_rfcomm.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_sco.c optional netgraph_bluetooth_socket netgraph/netflow/netflow.c optional netgraph_netflow netgraph/netflow/netflow_v9.c optional netgraph_netflow netgraph/netflow/ng_netflow.c optional netgraph_netflow netgraph/ng_UI.c optional netgraph_UI netgraph/ng_async.c optional netgraph_async netgraph/ng_atmllc.c optional netgraph_atmllc netgraph/ng_base.c optional netgraph netgraph/ng_bpf.c optional netgraph_bpf netgraph/ng_bridge.c optional netgraph_bridge netgraph/ng_car.c optional netgraph_car netgraph/ng_cisco.c optional netgraph_cisco netgraph/ng_deflate.c optional netgraph_deflate netgraph/ng_device.c optional netgraph_device netgraph/ng_echo.c optional netgraph_echo netgraph/ng_eiface.c optional netgraph_eiface netgraph/ng_ether.c optional netgraph_ether netgraph/ng_ether_echo.c optional netgraph_ether_echo netgraph/ng_frame_relay.c optional netgraph_frame_relay netgraph/ng_gif.c optional netgraph_gif inet6 | netgraph_gif inet netgraph/ng_gif_demux.c optional netgraph_gif_demux netgraph/ng_hole.c optional netgraph_hole netgraph/ng_iface.c optional netgraph_iface netgraph/ng_ip_input.c optional netgraph_ip_input netgraph/ng_ipfw.c optional netgraph_ipfw inet ipfirewall netgraph/ng_ksocket.c optional netgraph_ksocket netgraph/ng_l2tp.c optional netgraph_l2tp netgraph/ng_lmi.c optional netgraph_lmi netgraph/ng_mppc.c optional netgraph_mppc_compression | \ netgraph_mppc_encryption netgraph/ng_nat.c optional netgraph_nat inet libalias netgraph/ng_one2many.c optional netgraph_one2many netgraph/ng_parse.c optional netgraph netgraph/ng_patch.c optional netgraph_patch netgraph/ng_pipe.c optional netgraph_pipe netgraph/ng_ppp.c optional netgraph_ppp netgraph/ng_pppoe.c optional netgraph_pppoe netgraph/ng_pptpgre.c optional netgraph_pptpgre netgraph/ng_pred1.c optional netgraph_pred1 netgraph/ng_rfc1490.c optional netgraph_rfc1490 netgraph/ng_socket.c optional netgraph_socket netgraph/ng_split.c optional netgraph_split netgraph/ng_sppp.c optional netgraph_sppp netgraph/ng_tag.c optional netgraph_tag netgraph/ng_tcpmss.c optional netgraph_tcpmss netgraph/ng_tee.c optional netgraph_tee netgraph/ng_tty.c optional netgraph_tty netgraph/ng_vjc.c optional netgraph_vjc netgraph/ng_vlan.c optional netgraph_vlan netinet/accf_data.c optional accept_filter_data inet netinet/accf_dns.c optional accept_filter_dns inet netinet/accf_http.c optional accept_filter_http inet netinet/if_ether.c optional inet ether netinet/igmp.c optional inet netinet/in.c optional inet netinet/in_debug.c optional inet ddb netinet/in_kdtrace.c optional inet | inet6 
netinet/ip_carp.c optional inet carp | inet6 carp netinet/in_fib.c optional inet netinet/in_gif.c optional gif inet | netgraph_gif inet netinet/ip_gre.c optional gre inet netinet/ip_id.c optional inet netinet/in_jail.c optional inet netinet/in_mcast.c optional inet netinet/in_pcb.c optional inet | inet6 netinet/in_pcbgroup.c optional inet pcbgroup | inet6 pcbgroup netinet/in_prot.c optional inet | inet6 netinet/in_proto.c optional inet | inet6 netinet/in_rmx.c optional inet netinet/in_rss.c optional inet rss netinet/ip_divert.c optional inet ipdivert ipfirewall netinet/ip_ecn.c optional inet | inet6 netinet/ip_encap.c optional inet | inet6 netinet/ip_fastfwd.c optional inet netinet/ip_icmp.c optional inet | inet6 netinet/ip_input.c optional inet netinet/ip_mroute.c optional mrouting inet netinet/ip_options.c optional inet netinet/ip_output.c optional inet netinet/ip_reass.c optional inet netinet/raw_ip.c optional inet | inet6 netinet/cc/cc.c optional inet | inet6 netinet/cc/cc_newreno.c optional inet | inet6 netinet/sctp_asconf.c optional inet sctp | inet6 sctp netinet/sctp_auth.c optional inet sctp | inet6 sctp netinet/sctp_bsd_addr.c optional inet sctp | inet6 sctp netinet/sctp_cc_functions.c optional inet sctp | inet6 sctp netinet/sctp_crc32.c optional inet | inet6 netinet/sctp_indata.c optional inet sctp | inet6 sctp netinet/sctp_input.c optional inet sctp | inet6 sctp netinet/sctp_output.c optional inet sctp | inet6 sctp netinet/sctp_pcb.c optional inet sctp | inet6 sctp netinet/sctp_peeloff.c optional inet sctp | inet6 sctp netinet/sctp_ss_functions.c optional inet sctp | inet6 sctp netinet/sctp_syscalls.c optional inet sctp | inet6 sctp netinet/sctp_sysctl.c optional inet sctp | inet6 sctp netinet/sctp_timer.c optional inet sctp | inet6 sctp netinet/sctp_usrreq.c optional inet sctp | inet6 sctp netinet/sctputil.c optional inet sctp | inet6 sctp netinet/siftr.c optional inet siftr alq | inet6 siftr alq netinet/tcp_debug.c optional tcpdebug netinet/tcp_fastopen.c optional inet tcp_rfc7413 | inet6 tcp_rfc7413 netinet/tcp_hostcache.c optional inet | inet6 netinet/tcp_input.c optional inet | inet6 netinet/tcp_log_buf.c optional tcp_blackbox inet | tcp_blackbox inet6 netinet/tcp_lro.c optional inet | inet6 netinet/tcp_output.c optional inet | inet6 netinet/tcp_offload.c optional tcp_offload inet | tcp_offload inet6 netinet/tcp_pcap.c optional inet tcppcap | inet6 tcppcap netinet/tcp_reass.c optional inet | inet6 netinet/tcp_sack.c optional inet | inet6 netinet/tcp_subr.c optional inet | inet6 netinet/tcp_syncache.c optional inet | inet6 netinet/tcp_timer.c optional inet | inet6 netinet/tcp_timewait.c optional inet | inet6 netinet/tcp_usrreq.c optional inet | inet6 netinet/udp_usrreq.c optional inet | inet6 netinet/libalias/alias.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_db.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_mod.c optional libalias | netgraph_nat netinet/libalias/alias_proxy.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_util.c optional libalias inet | netgraph_nat inet netinet/libalias/alias_sctp.c optional libalias inet | netgraph_nat inet netinet6/dest6.c optional inet6 netinet6/frag6.c optional inet6 netinet6/icmp6.c optional inet6 netinet6/in6.c optional inet6 netinet6/in6_cksum.c optional inet6 netinet6/in6_fib.c optional inet6 netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6 netinet6/in6_ifattach.c optional inet6 netinet6/in6_jail.c optional inet6 netinet6/in6_mcast.c optional inet6 
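#
# Note on entries such as the sctp ones above: within a single "optional"
# clause the whitespace-separated names must all be configured, while "|"
# introduces an alternative clause.  "optional inet sctp | inet6 sctp" thus
# reads as (inet AND sctp) OR (inet6 AND sctp).  A hypothetical entry
#	net/foo.c	optional foo inet | foo inet6
# would be built only when foo is present together with inet or inet6.
#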
netinet6/in6_pcb.c optional inet6 netinet6/in6_pcbgroup.c optional inet6 pcbgroup netinet6/in6_proto.c optional inet6 netinet6/in6_rmx.c optional inet6 netinet6/in6_rss.c optional inet6 rss netinet6/in6_src.c optional inet6 netinet6/ip6_fastfwd.c optional inet6 netinet6/ip6_forward.c optional inet6 netinet6/ip6_gre.c optional gre inet6 netinet6/ip6_id.c optional inet6 netinet6/ip6_input.c optional inet6 netinet6/ip6_mroute.c optional mrouting inet6 netinet6/ip6_output.c optional inet6 netinet6/mld6.c optional inet6 netinet6/nd6.c optional inet6 netinet6/nd6_nbr.c optional inet6 netinet6/nd6_rtr.c optional inet6 netinet6/raw_ip6.c optional inet6 netinet6/route6.c optional inet6 netinet6/scope6.c optional inet6 netinet6/sctp6_usrreq.c optional inet6 sctp netinet6/udp6_usrreq.c optional inet6 netipsec/ipsec.c optional ipsec inet | ipsec inet6 netipsec/ipsec_input.c optional ipsec inet | ipsec inet6 netipsec/ipsec_mbuf.c optional ipsec inet | ipsec inet6 netipsec/ipsec_mod.c optional ipsec inet | ipsec inet6 netipsec/ipsec_output.c optional ipsec inet | ipsec inet6 netipsec/ipsec_pcb.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/key.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/key_debug.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/keysock.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/subr_ipsec.c optional ipsec inet | ipsec inet6 | \ ipsec_support inet | ipsec_support inet6 netipsec/udpencap.c optional ipsec inet netipsec/xform_ah.c optional ipsec inet | ipsec inet6 netipsec/xform_esp.c optional ipsec inet | ipsec inet6 netipsec/xform_ipcomp.c optional ipsec inet | ipsec inet6 netipsec/xform_tcp.c optional ipsec inet tcp_signature | \ ipsec inet6 tcp_signature | ipsec_support inet tcp_signature | \ ipsec_support inet6 tcp_signature netpfil/ipfw/dn_aqm_codel.c optional inet dummynet netpfil/ipfw/dn_aqm_pie.c optional inet dummynet netpfil/ipfw/dn_heap.c optional inet dummynet netpfil/ipfw/dn_sched_fifo.c optional inet dummynet netpfil/ipfw/dn_sched_fq_codel.c optional inet dummynet netpfil/ipfw/dn_sched_fq_pie.c optional inet dummynet netpfil/ipfw/dn_sched_prio.c optional inet dummynet netpfil/ipfw/dn_sched_qfq.c optional inet dummynet netpfil/ipfw/dn_sched_rr.c optional inet dummynet netpfil/ipfw/dn_sched_wf2q.c optional inet dummynet netpfil/ipfw/ip_dummynet.c optional inet dummynet netpfil/ipfw/ip_dn_io.c optional inet dummynet netpfil/ipfw/ip_dn_glue.c optional inet dummynet netpfil/ipfw/ip_fw2.c optional inet ipfirewall netpfil/ipfw/ip_fw_bpf.c optional inet ipfirewall netpfil/ipfw/ip_fw_dynamic.c optional inet ipfirewall \ compile-with "${NORMAL_C} -I$S/contrib/ck/include" netpfil/ipfw/ip_fw_eaction.c optional inet ipfirewall netpfil/ipfw/ip_fw_log.c optional inet ipfirewall netpfil/ipfw/ip_fw_pfil.c optional inet ipfirewall netpfil/ipfw/ip_fw_sockopt.c optional inet ipfirewall netpfil/ipfw/ip_fw_table.c optional inet ipfirewall netpfil/ipfw/ip_fw_table_algo.c optional inet ipfirewall netpfil/ipfw/ip_fw_table_value.c optional inet ipfirewall netpfil/ipfw/ip_fw_iface.c optional inet ipfirewall netpfil/ipfw/ip_fw_nat.c optional inet ipfirewall_nat netpfil/ipfw/nat64/ip_fw_nat64.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64lsn.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64lsn_control.c optional inet inet6 ipfirewall \ ipfirewall_nat64 
netpfil/ipfw/nat64/nat64stl.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64stl_control.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nat64/nat64_translate.c optional inet inet6 ipfirewall \ ipfirewall_nat64 netpfil/ipfw/nptv6/ip_fw_nptv6.c optional inet inet6 ipfirewall \ ipfirewall_nptv6 netpfil/ipfw/nptv6/nptv6.c optional inet inet6 ipfirewall \ ipfirewall_nptv6 netpfil/ipfw/pmod/ip_fw_pmod.c optional inet ipfirewall_pmod netpfil/ipfw/pmod/tcpmod.c optional inet ipfirewall_pmod netpfil/pf/if_pflog.c optional pflog pf inet netpfil/pf/if_pfsync.c optional pfsync pf inet netpfil/pf/pf.c optional pf inet netpfil/pf/pf_if.c optional pf inet netpfil/pf/pf_ioctl.c optional pf inet netpfil/pf/pf_lb.c optional pf inet netpfil/pf/pf_norm.c optional pf inet netpfil/pf/pf_osfp.c optional pf inet netpfil/pf/pf_ruleset.c optional pf inet netpfil/pf/pf_table.c optional pf inet netpfil/pf/in4_cksum.c optional pf inet netsmb/smb_conn.c optional netsmb netsmb/smb_crypt.c optional netsmb netsmb/smb_dev.c optional netsmb netsmb/smb_iod.c optional netsmb netsmb/smb_rq.c optional netsmb netsmb/smb_smb.c optional netsmb netsmb/smb_subr.c optional netsmb netsmb/smb_trantcp.c optional netsmb netsmb/smb_usr.c optional netsmb nfs/bootp_subr.c optional bootp nfscl nfs/krpc_subr.c optional bootp nfscl nfs/nfs_diskless.c optional nfscl nfs_root nfs/nfs_fha.c optional nfsd nfs/nfs_lock.c optional nfscl | nfslockd | nfsd nfs/nfs_nfssvc.c optional nfscl | nfsd nlm/nlm_advlock.c optional nfslockd | nfsd nlm/nlm_prot_clnt.c optional nfslockd | nfsd nlm/nlm_prot_impl.c optional nfslockd | nfsd nlm/nlm_prot_server.c optional nfslockd | nfsd nlm/nlm_prot_svc.c optional nfslockd | nfsd nlm/nlm_prot_xdr.c optional nfslockd | nfsd nlm/sm_inter_xdr.c optional nfslockd | nfsd # Linux Kernel Programming Interface compat/linuxkpi/common/src/linux_kmod.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_compat.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_current.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_hrtimer.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_kthread.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_lock.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_page.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_pci.c optional compat_linuxkpi pci \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_tasklet.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_idr.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_radix.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_rcu.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C} -I$S/contrib/ck/include" compat/linuxkpi/common/src/linux_schedule.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_slab.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_usb.c optional compat_linuxkpi usb \ compile-with "${LINUXKPI_C}" compat/linuxkpi/common/src/linux_work.c optional compat_linuxkpi \ compile-with "${LINUXKPI_C}" # OpenFabrics Enterprise Distribution (Infiniband) ofed/drivers/infiniband/core/ib_addr.c optional ofed \ compile-with 
"${OFED_C}" ofed/drivers/infiniband/core/ib_agent.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cache.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cm.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cma.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_cq.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_device.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_fmr_pool.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_iwcm.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_iwpm_msg.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_iwpm_util.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_mad.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_mad_rmpp.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_multicast.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_packer.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_sa_query.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_smi.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_sysfs.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_ucm.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_ucma.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_ud_header.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_umem.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_user_mad.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_cmd.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_main.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_uverbs_marshall.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/core/ib_verbs.c optional ofed \ compile-with "${OFED_C}" ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_fs.c optional ipoib \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_multicast.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c optional ipoib \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" #ofed/drivers/infiniband/ulp/ipoib/ipoib_vlan.c optional ipoib \ # compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/ipoib/" ofed/drivers/infiniband/ulp/sdp/sdp_bcopy.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_main.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_rx.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" 
ofed/drivers/infiniband/ulp/sdp/sdp_cma.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" ofed/drivers/infiniband/ulp/sdp/sdp_tx.c optional sdp inet \ compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/" dev/mthca/mthca_allocator.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_av.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_catas.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_cmd.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_cq.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_eq.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_mad.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_main.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_mcg.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_memfree.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_mr.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_pd.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_profile.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_provider.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_qp.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_reset.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_srq.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mthca/mthca_uar.c optional mthca pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mcg.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_cm.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_ah.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_cq.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_doorbell.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mad.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_main.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_mr.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_qp.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_srq.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_ib/mlx4_ib_wc.c optional mlx4ib pci ofed \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_alloc.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_catas.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_cmd.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_cq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_eq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_fw.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_fw_qos.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_icm.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_intf.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_main.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_mcg.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_mr.c optional mlx4 pci \ 
compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_pd.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_port.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_profile.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_qp.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_reset.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_sense.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_srq.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_core/mlx4_resource_tracker.c optional mlx4 pci \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_cq.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_main.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_netdev.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_port.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_resources.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_rx.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx4/mlx4_en/mlx4_en_tx.c optional mlx4en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_ah.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_cong.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_cq.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_gsi.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_mad.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_main.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_mem.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_mr.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_qp.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_srq.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_ib/mlx5_ib_virt.c optional mlx5ib pci ofed \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_alloc.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_cmd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_cq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_crspace.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_diagnostics.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_eq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fs_cmd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fs_tree.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fw.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fwdump.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_fwdump_regmaps.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_health.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mad.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_main.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mcg.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_mr.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_pagealloc.c 
optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_pd.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_port.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_qp.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_srq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_transobj.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_uar.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_vport.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_vsc.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_core/mlx5_wq.c optional mlx5 pci \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_ethtool.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_main.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_tx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_flow_table.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_rx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" dev/mlx5/mlx5_en/mlx5_en_txrx.c optional mlx5en pci inet inet6 \ compile-with "${OFED_C}" # crypto support opencrypto/cast.c optional crypto | ipsec | ipsec_support opencrypto/criov.c optional crypto | ipsec | ipsec_support opencrypto/crypto.c optional crypto | ipsec | ipsec_support opencrypto/cryptodev.c optional cryptodev opencrypto/cryptodev_if.m optional crypto | ipsec | ipsec_support opencrypto/cryptosoft.c optional crypto | ipsec | ipsec_support opencrypto/cryptodeflate.c optional crypto | ipsec | ipsec_support opencrypto/gmac.c optional crypto | ipsec | ipsec_support opencrypto/gfmult.c optional crypto | ipsec | ipsec_support opencrypto/rmd160.c optional crypto | ipsec | ipsec_support opencrypto/skipjack.c optional crypto | ipsec | ipsec_support opencrypto/xform.c optional crypto | ipsec | ipsec_support rpc/auth_none.c optional krpc | nfslockd | nfscl | nfsd rpc/auth_unix.c optional krpc | nfslockd | nfscl | nfsd rpc/authunix_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_bck.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_dg.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_rc.c optional krpc | nfslockd | nfscl | nfsd rpc/clnt_vc.c optional krpc | nfslockd | nfscl | nfsd rpc/getnetconfig.c optional krpc | nfslockd | nfscl | nfsd rpc/replay.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_callmsg.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_generic.c optional krpc | nfslockd | nfscl | nfsd rpc/rpc_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcb_clnt.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcb_prot.c optional krpc | nfslockd | nfscl | nfsd rpc/svc.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_auth.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_auth_unix.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_dg.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_generic.c optional krpc | nfslockd | nfscl | nfsd rpc/svc_vc.c optional krpc | nfslockd | nfscl | nfsd rpc/rpcsec_gss/rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_conf.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_misc.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi rpc/rpcsec_gss/rpcsec_gss_prot.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi 
rpc/rpcsec_gss/svc_rpcsec_gss.c optional krpc kgssapi | nfslockd kgssapi | nfscl kgssapi | nfsd kgssapi security/audit/audit.c optional audit security/audit/audit_arg.c optional audit security/audit/audit_bsm.c optional audit security/audit/audit_bsm_db.c optional audit security/audit/audit_bsm_klib.c optional audit security/audit/audit_dtrace.c optional dtaudit audit | dtraceall audit compile-with "${CDDL_C}" security/audit/audit_pipe.c optional audit security/audit/audit_syscalls.c standard security/audit/audit_trigger.c optional audit security/audit/audit_worker.c optional audit security/audit/bsm_domain.c optional audit security/audit/bsm_errno.c optional audit security/audit/bsm_fcntl.c optional audit security/audit/bsm_socket_type.c optional audit security/audit/bsm_token.c optional audit security/mac/mac_audit.c optional mac audit security/mac/mac_cred.c optional mac security/mac/mac_framework.c optional mac security/mac/mac_inet.c optional mac inet | mac inet6 security/mac/mac_inet6.c optional mac inet6 security/mac/mac_label.c optional mac security/mac/mac_net.c optional mac security/mac/mac_pipe.c optional mac security/mac/mac_posix_sem.c optional mac security/mac/mac_posix_shm.c optional mac security/mac/mac_priv.c optional mac security/mac/mac_process.c optional mac security/mac/mac_socket.c optional mac security/mac/mac_syscalls.c standard security/mac/mac_system.c optional mac security/mac/mac_sysv_msg.c optional mac security/mac/mac_sysv_sem.c optional mac security/mac/mac_sysv_shm.c optional mac security/mac/mac_vfs.c optional mac security/mac_biba/mac_biba.c optional mac_biba security/mac_bsdextended/mac_bsdextended.c optional mac_bsdextended security/mac_bsdextended/ugidfw_system.c optional mac_bsdextended security/mac_bsdextended/ugidfw_vnode.c optional mac_bsdextended security/mac_ifoff/mac_ifoff.c optional mac_ifoff security/mac_lomac/mac_lomac.c optional mac_lomac security/mac_mls/mac_mls.c optional mac_mls security/mac_none/mac_none.c optional mac_none security/mac_partition/mac_partition.c optional mac_partition security/mac_portacl/mac_portacl.c optional mac_portacl security/mac_seeotheruids/mac_seeotheruids.c optional mac_seeotheruids security/mac_stub/mac_stub.c optional mac_stub security/mac_test/mac_test.c optional mac_test teken/teken.c optional sc | vt ufs/ffs/ffs_alloc.c optional ffs ufs/ffs/ffs_balloc.c optional ffs ufs/ffs/ffs_inode.c optional ffs ufs/ffs/ffs_snapshot.c optional ffs ufs/ffs/ffs_softdep.c optional ffs ufs/ffs/ffs_subr.c optional ffs | geom_label ufs/ffs/ffs_tables.c optional ffs | geom_label ufs/ffs/ffs_vfsops.c optional ffs ufs/ffs/ffs_vnops.c optional ffs ufs/ffs/ffs_rawread.c optional ffs directio ufs/ffs/ffs_suspend.c optional ffs ufs/ufs/ufs_acl.c optional ffs ufs/ufs/ufs_bmap.c optional ffs ufs/ufs/ufs_dirhash.c optional ffs ufs/ufs/ufs_extattr.c optional ffs ufs/ufs/ufs_gjournal.c optional ffs UFS_GJOURNAL ufs/ufs/ufs_inode.c optional ffs ufs/ufs/ufs_lookup.c optional ffs ufs/ufs/ufs_quota.c optional ffs ufs/ufs/ufs_vfsops.c optional ffs ufs/ufs/ufs_vnops.c optional ffs vm/default_pager.c standard vm/device_pager.c standard vm/phys_pager.c standard vm/redzone.c optional DEBUG_REDZONE vm/sg_pager.c standard vm/swap_pager.c standard vm/uma_core.c standard vm/uma_dbg.c standard vm/memguard.c optional DEBUG_MEMGUARD vm/vm_domainset.c standard vm/vm_fault.c standard vm/vm_glue.c standard vm/vm_init.c standard vm/vm_kern.c standard vm/vm_map.c standard vm/vm_meter.c standard vm/vm_mmap.c standard vm/vm_object.c standard vm/vm_page.c 
standard vm/vm_pageout.c standard vm/vm_pager.c standard vm/vm_phys.c standard vm/vm_radix.c standard vm/vm_reserv.c standard vm/vm_swapout.c optional !NO_SWAPPING vm/vm_swapout_dummy.c optional NO_SWAPPING vm/vm_unix.c standard vm/vnode_pager.c standard xen/features.c optional xenhvm xen/xenbus/xenbus_if.m optional xenhvm xen/xenbus/xenbus.c optional xenhvm xen/xenbus/xenbusb_if.m optional xenhvm xen/xenbus/xenbusb.c optional xenhvm xen/xenbus/xenbusb_front.c optional xenhvm xen/xenbus/xenbusb_back.c optional xenhvm xen/xenmem/xenmem_if.m optional xenhvm xdr/xdr.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_array.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_mbuf.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_mem.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_reference.c optional krpc | nfslockd | nfscl | nfsd xdr/xdr_sizeof.c optional krpc | nfslockd | nfscl | nfsd Index: head/sys/dev/cxgbe/t4_netmap.c =================================================================== --- head/sys/dev/cxgbe/t4_netmap.c (revision 332422) +++ head/sys/dev/cxgbe/t4_netmap.c (revision 332423) @@ -1,1039 +1,1039 @@ /*- * Copyright (c) 2014 Chelsio Communications, Inc. * All rights reserved. * Written by: Navdeep Parhar * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #ifdef DEV_NETMAP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common/common.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" extern int fl_pad; /* XXXNM */ SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters"); /* * 0 = normal netmap rx * 1 = black hole * 2 = supermassive black hole (buffer packing enabled) */ int black_hole = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0, "Sink incoming packets."); int rx_ndesc = 256; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN, &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated."); int holdoff_tmr_idx = 2; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN, &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues."); /* * Congestion drops. 
* -1: no congestion feedback (not recommended). * 0: backpressure the channel instead of dropping packets right away. * 1: no backpressure, drop packets for the congested queue immediately. */ static int nm_cong_drop = 1; TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop); static int alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong) { int rc, cntxt_id, i; __be32 v; struct adapter *sc = vi->pi->adapter; struct sge_params *sp = &sc->params.sge; struct netmap_adapter *na = NA(vi->ifp); struct fw_iq_cmd c; MPASS(na != NULL); MPASS(nm_rxq->iq_desc != NULL); MPASS(nm_rxq->fl_desc != NULL); bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE); bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len); bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | V_FW_IQ_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | FW_LEN16(c)); MPASS(!forwarding_intr_to_fwq(sc)); KASSERT(nm_rxq->intr_idx < sc->intr_count, ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx)); v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx); c.type_to_iqandstindex = htobe32(v | V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | V_FW_IQ_CMD_VIID(vi->viid) | V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) | F_FW_IQ_CMD_IQGTSMODE | V_FW_IQ_CMD_IQINTCNTTHRESH(0) | V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); c.iqsize = htobe16(vi->qsize_rxq); c.iqaddr = htobe64(nm_rxq->iq_ba); if (cong >= 0) { c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN | V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | F_FW_IQ_CMD_FL0CONGEN); } c.iqns_to_fl0congen |= htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0)); c.fl0dcaen_to_fl0cidxfthresh = htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) | V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 
X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE); c.fl0addr = htobe64(nm_rxq->fl_ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(sc->dev, "failed to create netmap ingress queue: %d\n", rc); return (rc); } nm_rxq->iq_cidx = 0; MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE); nm_rxq->iq_gen = F_RSPD_GEN; nm_rxq->iq_cntxt_id = be16toh(c.iqid); nm_rxq->iq_abs_id = be16toh(c.physiqid); cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start; if (cntxt_id >= sc->sge.niq) { panic ("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.niq - 1); } sc->sge.iqmap[cntxt_id] = (void *)nm_rxq; nm_rxq->fl_cntxt_id = be16toh(c.fl0id); nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; MPASS(nm_rxq->fl_sidx == na->num_rx_desc); cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) { panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); } sc->sge.eqmap[cntxt_id] = (void *)nm_rxq; nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) | sc->chip_params->sge_fl_db; if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) { uint32_t param, val; param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id); param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id); if (cong == 0) val = 1 << 19; else { val = 2 << 19; for (i = 0; i < 4; i++) { if (cong & (1 << i)) val |= 1 << (i << 2); } } rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); if (rc != 0) { /* report error but carry on */ device_printf(sc->dev, "failed to set congestion manager context for " "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc); } } t4_write_reg(sc, sc->sge_gts_reg, V_INGRESSQID(nm_rxq->iq_cntxt_id) | V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); return (rc); } static int free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) { struct adapter *sc = vi->pi->adapter; int rc; rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); if (rc != 0) device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n", __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc); nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; return (rc); } static int alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) { int rc, cntxt_id; size_t len; struct adapter *sc = vi->pi->adapter; struct netmap_adapter *na = NA(vi->ifp); struct fw_eq_eth_cmd c; MPASS(na != NULL); MPASS(nm_txq->desc != NULL); len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; bzero(nm_txq->desc, len); bzero(&c, sizeof(c)); c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | V_FW_EQ_ETH_CMD_VFN(0)); c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); c.fetchszm_to_iqid = htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id)); c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE)); c.eqaddr = 
htobe64(nm_txq->ba); rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); if (rc != 0) { device_printf(vi->dev, "failed to create netmap egress queue: %d\n", rc); return (rc); } nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start; if (cntxt_id >= sc->sge.neq) panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__, cntxt_id, sc->sge.neq - 1); sc->sge.eqmap[cntxt_id] = (void *)nm_txq; nm_txq->pidx = nm_txq->cidx = 0; MPASS(nm_txq->sidx == na->num_tx_desc); nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0; nm_txq->doorbells = sc->doorbells; if (isset(&nm_txq->doorbells, DOORBELL_UDB) || isset(&nm_txq->doorbells, DOORBELL_UDBWC) || isset(&nm_txq->doorbells, DOORBELL_WCWR)) { uint32_t s_qpp = sc->params.sge.eq_s_qpp; uint32_t mask = (1 << s_qpp) - 1; volatile uint8_t *udb; udb = sc->udbs_base + UDBS_DB_OFFSET; udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT; nm_txq->udb_qid = nm_txq->cntxt_id & mask; if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) clrbit(&nm_txq->doorbells, DOORBELL_WCWR); else { udb += nm_txq->udb_qid << UDBS_SEG_SHIFT; nm_txq->udb_qid = 0; } nm_txq->udb = (volatile void *)udb; } return (rc); } static int free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) { struct adapter *sc = vi->pi->adapter; int rc; rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); if (rc != 0) device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__, nm_txq->cntxt_id, rc); nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; return (rc); } static int cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, struct netmap_adapter *na) { struct netmap_slot *slot; struct netmap_kring *kring; struct sge_nm_rxq *nm_rxq; struct sge_nm_txq *nm_txq; int rc, i, j, hwidx; struct hw_buf_info *hwb; ASSERT_SYNCHRONIZED_OP(sc); if ((vi->flags & VI_INIT_DONE) == 0 || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return (EAGAIN); hwb = &sc->sge.hw_buf_info[0]; for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { if (hwb->size == NETMAP_BUF_SIZE(na)) break; } if (i >= SGE_FLBUF_SIZES) { if_printf(ifp, "no hwidx for netmap buffer size %d.\n", NETMAP_BUF_SIZE(na)); return (ENXIO); } hwidx = i; /* Must set caps before calling netmap_reset */ nm_set_native_flags(na); for_each_nm_rxq(vi, i, nm_rxq) { struct irq *irq = &sc->irq[vi->first_intr + i]; - kring = &na->rx_rings[nm_rxq->nid]; + kring = na->rx_rings[nm_rxq->nid]; if (!nm_kring_pending_on(kring) || nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID) continue; alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop)); nm_rxq->fl_hwidx = hwidx; slot = netmap_reset(na, NR_RX, i, 0); MPASS(slot != NULL); /* XXXNM: error check, not assert */ /* We deal with 8 bufs at a time */ MPASS((na->num_rx_desc & 7) == 0); MPASS(na->num_rx_desc == nm_rxq->fl_sidx); for (j = 0; j < nm_rxq->fl_sidx; j++) { uint64_t ba; PNMB(na, &slot[j], &ba); MPASS(ba != 0); nm_rxq->fl_desc[j] = htobe64(ba | hwidx); } j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8; MPASS((j & 7) == 0); j /= 8; /* driver pidx to hardware pidx */ wmb(); t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(j)); atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON); } for_each_nm_txq(vi, i, nm_txq) { - kring = &na->tx_rings[nm_txq->nid]; + kring = na->tx_rings[nm_txq->nid]; if (!nm_kring_pending_on(kring) || nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID) continue; alloc_nm_txq_hwq(vi, nm_txq); slot = netmap_reset(na, NR_TX, i, 0); MPASS(slot != NULL); /* XXXNM: error check, not assert */ } if (vi->nm_rss == NULL) { 
vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE, M_ZERO | M_WAITOK); } for (i = 0; i < vi->rss_size;) { for_each_nm_rxq(vi, j, nm_rxq) { vi->nm_rss[i++] = nm_rxq->iq_abs_id; if (i == vi->rss_size) break; } } rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, vi->nm_rss, vi->rss_size); if (rc != 0) if_printf(ifp, "netmap rss_config failed: %d\n", rc); return (rc); } static int cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp, struct netmap_adapter *na) { struct netmap_kring *kring; int rc, i; struct sge_nm_txq *nm_txq; struct sge_nm_rxq *nm_rxq; ASSERT_SYNCHRONIZED_OP(sc); if ((vi->flags & VI_INIT_DONE) == 0) return (0); rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, vi->rss, vi->rss_size); if (rc != 0) if_printf(ifp, "failed to restore RSS config: %d\n", rc); nm_clear_native_flags(na); for_each_nm_txq(vi, i, nm_txq) { struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx]; - kring = &na->tx_rings[nm_txq->nid]; + kring = na->tx_rings[nm_txq->nid]; if (!nm_kring_pending_off(kring) || nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) continue; /* Wait for hw pidx to catch up ... */ while (be16toh(nm_txq->pidx) != spg->pidx) pause("nmpidx", 1); /* ... and then for the cidx. */ while (spg->pidx != spg->cidx) pause("nmcidx", 1); free_nm_txq_hwq(vi, nm_txq); } for_each_nm_rxq(vi, i, nm_rxq) { struct irq *irq = &sc->irq[vi->first_intr + i]; - kring = &na->rx_rings[nm_rxq->nid]; + kring = na->rx_rings[nm_rxq->nid]; if (!nm_kring_pending_off(kring) || nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID) continue; while (!atomic_cmpset_int(&irq->nm_state, NM_ON, NM_OFF)) pause("nmst", 1); free_nm_rxq_hwq(vi, nm_rxq); } return (rc); } static int cxgbe_netmap_reg(struct netmap_adapter *na, int on) { struct ifnet *ifp = na->ifp; struct vi_info *vi = ifp->if_softc; struct adapter *sc = vi->pi->adapter; int rc; rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg"); if (rc != 0) return (rc); if (on) rc = cxgbe_netmap_on(sc, vi, ifp, na); else rc = cxgbe_netmap_off(sc, vi, ifp, na); end_synchronized_op(sc, 0); return (rc); } /* How many packets can a single type1 WR carry in n descriptors */ static inline int ndesc_to_npkt(const int n) { MPASS(n > 0 && n <= SGE_MAX_WR_NDESC); return (n * 2 - 1); } #define MAX_NPKT_IN_TYPE1_WR (ndesc_to_npkt(SGE_MAX_WR_NDESC)) /* Space (in descriptors) needed for a type1 WR that carries n packets */ static inline int npkt_to_ndesc(const int n) { MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); return ((n + 2) / 2); } /* Space (in 16B units) needed for a type1 WR that carries n packets */ static inline int npkt_to_len16(const int n) { MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR); return (n * 2 + 1); } #define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx) static void ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq) { int n; u_int db = nm_txq->doorbells; MPASS(nm_txq->pidx != nm_txq->dbidx); n = NMIDXDIFF(nm_txq, dbidx); if (n > 1) clrbit(&db, DOORBELL_WCWR); wmb(); switch (ffs(db) - 1) { case DOORBELL_UDB: *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); break; case DOORBELL_WCWR: { volatile uint64_t *dst, *src; /* * Queues whose 128B doorbell segment fits in the page do not * use relative qid (udb_qid is always 0). Only queues with * doorbell segments can do WCWR. 
*/ KASSERT(nm_txq->udb_qid == 0 && n == 1, ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p", __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq)); dst = (volatile void *)((uintptr_t)nm_txq->udb + UDBS_WR_OFFSET - UDBS_DB_OFFSET); src = (void *)&nm_txq->desc[nm_txq->dbidx]; while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1]) *dst++ = *src++; wmb(); break; } case DOORBELL_UDBWC: *nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n)); wmb(); break; case DOORBELL_KDB: t4_write_reg(sc, sc->sge_kdoorbell_reg, V_QID(nm_txq->cntxt_id) | V_PIDX(n)); break; } nm_txq->dbidx = nm_txq->pidx; } int lazy_tx_credit_flush = 1; /* * Write work requests to send 'npkt' frames and ring the doorbell to send them * on their way. No need to check for wraparound. */ static void cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq, struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum) { struct netmap_ring *ring = kring->ring; struct netmap_slot *slot; const u_int lim = kring->nkr_num_slots - 1; struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx]; uint16_t len; uint64_t ba; struct cpl_tx_pkt_core *cpl; struct ulptx_sgl *usgl; int i, n; while (npkt) { n = min(npkt, MAX_NPKT_IN_TYPE1_WR); len = 0; wr = (void *)&nm_txq->desc[nm_txq->pidx]; wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n))); wr->npkt = n; wr->r3 = 0; wr->type = 1; cpl = (void *)(wr + 1); for (i = 0; i < n; i++) { slot = &ring->slot[kring->nr_hwcur]; PNMB(kring->na, slot, &ba); MPASS(ba != 0); cpl->ctrl0 = nm_txq->cpl_ctrl0; cpl->pack = 0; cpl->len = htobe16(slot->len); /* * netmap(4) says "netmap does not use features such as * checksum offloading, TCP segmentation offloading, * encryption, VLAN encapsulation/decapsulation, etc." * * So the ncxl interfaces have tx hardware checksumming * disabled by default. But you can override netmap by * enabling IFCAP_TXCSUM on the interface manually. */ cpl->ctrl1 = txcsum ? 0 : htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); usgl = (void *)(cpl + 1); usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(1)); usgl->len0 = htobe32(slot->len); usgl->addr0 = htobe64(ba); slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); cpl = (void *)(usgl + 1); MPASS(slot->len + len <= UINT16_MAX); len += slot->len; kring->nr_hwcur = nm_next(kring->nr_hwcur, lim); } wr->plen = htobe16(len); npkt -= n; nm_txq->pidx += npkt_to_ndesc(n); MPASS(nm_txq->pidx <= nm_txq->sidx); if (__predict_false(nm_txq->pidx == nm_txq->sidx)) { /* * This routine doesn't know how to write WRs that wrap * around. Make sure it wasn't asked to. */ MPASS(npkt == 0); nm_txq->pidx = 0; } if (npkt == 0 && npkt_remaining == 0) { /* All done. */ if (lazy_tx_credit_flush == 0) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); nm_txq->equeqidx = nm_txq->pidx; nm_txq->equiqidx = nm_txq->pidx; } ring_nm_txq_db(sc, nm_txq); return; } if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); nm_txq->equeqidx = nm_txq->pidx; nm_txq->equiqidx = nm_txq->pidx; } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) { wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); nm_txq->equeqidx = nm_txq->pidx; } if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC) ring_nm_txq_db(sc, nm_txq); } /* Will get called again.
*/ MPASS(npkt_remaining); } /* How many contiguous free descriptors starting at pidx */ static inline int contiguous_ndesc_available(struct sge_nm_txq *nm_txq) { if (nm_txq->cidx > nm_txq->pidx) return (nm_txq->cidx - nm_txq->pidx - 1); else if (nm_txq->cidx > 0) return (nm_txq->sidx - nm_txq->pidx); else return (nm_txq->sidx - nm_txq->pidx - 1); } static int reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq) { struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx]; uint16_t hw_cidx = spg->cidx; /* snapshot */ struct fw_eth_tx_pkts_wr *wr; int n = 0; hw_cidx = be16toh(hw_cidx); while (nm_txq->cidx != hw_cidx) { wr = (void *)&nm_txq->desc[nm_txq->cidx]; MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR))); MPASS(wr->type == 1); MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR); n += wr->npkt; nm_txq->cidx += npkt_to_ndesc(wr->npkt); /* * We never sent a WR that wrapped around so the credits coming * back, WR by WR, should never cause the cidx to wrap around * either. */ MPASS(nm_txq->cidx <= nm_txq->sidx); if (__predict_false(nm_txq->cidx == nm_txq->sidx)) nm_txq->cidx = 0; } return (n); } static int cxgbe_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct vi_info *vi = ifp->if_softc; struct adapter *sc = vi->pi->adapter; struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id]; const u_int head = kring->rhead; u_int reclaimed = 0; int n, d, npkt_remaining, ndesc_remaining, txcsum; /* * Tx was at kring->nr_hwcur last time around and now we need to advance * to kring->rhead. Note that the driver's pidx moves independent of * netmap's kring->nr_hwcur (pidx counts descriptors and the relation * between descriptors and frames isn't 1:1). */ npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur : kring->nkr_num_slots - kring->nr_hwcur + head; txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6); while (npkt_remaining) { reclaimed += reclaim_nm_tx_desc(nm_txq); ndesc_remaining = contiguous_ndesc_available(nm_txq); /* Can't run out of descriptors with packets still remaining */ MPASS(ndesc_remaining > 0); /* # of desc needed to tx all remaining packets */ d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC; if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR) d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR); if (d <= ndesc_remaining) n = npkt_remaining; else { /* Can't send all, calculate how many can be sent */ n = (ndesc_remaining / SGE_MAX_WR_NDESC) * MAX_NPKT_IN_TYPE1_WR; if (ndesc_remaining % SGE_MAX_WR_NDESC) n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC); } /* Send n packets and update nm_txq->pidx and kring->nr_hwcur */ npkt_remaining -= n; cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum); } MPASS(npkt_remaining == 0); MPASS(kring->nr_hwcur == head); MPASS(nm_txq->dbidx == nm_txq->pidx); /* * Second part: reclaim buffers for completed transmissions. 
*/ if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) { reclaimed += reclaim_nm_tx_desc(nm_txq); kring->nr_hwtail += reclaimed; if (kring->nr_hwtail >= kring->nkr_num_slots) kring->nr_hwtail -= kring->nkr_num_slots; } return (0); } static int cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; struct ifnet *ifp = na->ifp; struct vi_info *vi = ifp->if_softc; struct adapter *sc = vi->pi->adapter; struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id]; u_int const head = kring->rhead; u_int n; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; if (black_hole) return (0); /* No updates ever. */ if (netmap_no_pendintr || force_update) { kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx); kring->nr_kflags &= ~NKR_PENDINTR; } /* Userspace done with buffers from kring->nr_hwcur to head */ n = head >= kring->nr_hwcur ? head - kring->nr_hwcur : kring->nkr_num_slots - kring->nr_hwcur + head; n &= ~7U; if (n > 0) { u_int fl_pidx = nm_rxq->fl_pidx; struct netmap_slot *slot = &ring->slot[fl_pidx]; uint64_t ba; int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx; /* * We always deal with 8 buffers at a time. We must have * stopped at an 8B boundary (fl_pidx) last time around and we * must have a multiple of 8B buffers to give to the freelist. */ MPASS((fl_pidx & 7) == 0); MPASS((n & 7) == 0); IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots); IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx); while (n > 0) { for (i = 0; i < 8; i++, fl_pidx++, slot++) { PNMB(na, slot, &ba); MPASS(ba != 0); nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx); slot->flags &= ~NS_BUF_CHANGED; MPASS(fl_pidx <= nm_rxq->fl_sidx); } n -= 8; if (fl_pidx == nm_rxq->fl_sidx) { fl_pidx = 0; slot = &ring->slot[0]; } if (++dbinc == 8 && n >= 32) { wmb(); t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(dbinc)); dbinc = 0; } } MPASS(nm_rxq->fl_pidx == fl_pidx); if (dbinc > 0) { wmb(); t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(dbinc)); } } return (0); } void cxgbe_nm_attach(struct vi_info *vi) { struct port_info *pi; struct adapter *sc; struct netmap_adapter na; MPASS(vi->nnmrxq > 0); MPASS(vi->ifp != NULL); pi = vi->pi; sc = pi->adapter; bzero(&na, sizeof(na)); na.ifp = vi->ifp; na.na_flags = NAF_BDG_MAYSLEEP; /* Netmap doesn't know about the space reserved for the status page. */ na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE; /* * The freelist's cidx/pidx drives netmap's rx cidx/pidx. So * num_rx_desc is based on the number of buffers that can be held in the * freelist, and not the number of entries in the iq. (These two are * not exactly the same due to the space taken up by the status page). 
*/ na.num_rx_desc = rounddown(vi->qsize_rxq, 8); na.nm_txsync = cxgbe_netmap_txsync; na.nm_rxsync = cxgbe_netmap_rxsync; na.nm_register = cxgbe_netmap_reg; na.num_tx_rings = vi->nnmtxq; na.num_rx_rings = vi->nnmrxq; netmap_attach(&na); } void cxgbe_nm_detach(struct vi_info *vi) { MPASS(vi->nnmrxq > 0); MPASS(vi->ifp != NULL); netmap_detach(vi->ifp); } static inline const void * unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl) { MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL); /* data[0] is RSS header */ return (&cpl->data[1]); } static void handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp, const struct cpl_sge_egr_update *egr) { uint32_t oq; struct sge_nm_txq *nm_txq; oq = be32toh(egr->opcode_qid); MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE); nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start]; netmap_tx_irq(ifp, nm_txq->nid); } void t4_nm_intr(void *arg) { struct sge_nm_rxq *nm_rxq = arg; struct vi_info *vi = nm_rxq->vi; struct adapter *sc = vi->pi->adapter; struct ifnet *ifp = vi->ifp; struct netmap_adapter *na = NA(ifp); - struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid]; + struct netmap_kring *kring = na->rx_rings[nm_rxq->nid]; struct netmap_ring *ring = kring->ring; struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx]; const void *cpl; uint32_t lq; u_int n = 0, work = 0; uint8_t opcode; uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx); u_int fl_credits = fl_cidx & 7; while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) { rmb(); lq = be32toh(d->rsp.pldbuflen_qid); opcode = d->rss.opcode; cpl = &d->cpl[0]; switch (G_RSPD_TYPE(d->rsp.u.type_gen)) { case X_RSPD_TYPE_FLBUF: if (black_hole != 2) { /* No buffer packing so new buf every time */ MPASS(lq & F_RSPD_NEWBUF); } /* fall through */ case X_RSPD_TYPE_CPL: MPASS(opcode < NUM_CPL_CMDS); switch (opcode) { case CPL_FW4_MSG: case CPL_FW6_MSG: cpl = unwrap_nm_fw6_msg(cpl); /* fall through */ case CPL_SGE_EGR_UPDATE: handle_nm_sge_egr_update(sc, ifp, cpl); break; case CPL_RX_PKT: ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - sc->params.sge.fl_pktshift; ring->slot[fl_cidx].flags = 0; fl_cidx += (lq & F_RSPD_NEWBUF) ? 1 : 0; fl_credits += (lq & F_RSPD_NEWBUF) ? 1 : 0; if (__predict_false(fl_cidx == nm_rxq->fl_sidx)) fl_cidx = 0; break; default: panic("%s: unexpected opcode 0x%x on nm_rxq %p", __func__, opcode, nm_rxq); } break; case X_RSPD_TYPE_INTR: /* Not equipped to handle forwarded interrupts. 
*/ panic("%s: netmap queue received interrupt for iq %u\n", __func__, lq); default: panic("%s: illegal response type %d on nm_rxq %p", __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq); } d++; if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) { nm_rxq->iq_cidx = 0; d = &nm_rxq->iq_desc[0]; nm_rxq->iq_gen ^= F_RSPD_GEN; } if (__predict_false(++n == rx_ndesc)) { atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); if (black_hole && fl_credits >= 8) { fl_credits /= 8; IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx); t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(fl_credits)); fl_credits = fl_cidx & 7; } else if (!black_hole) { netmap_rx_irq(ifp, nm_rxq->nid, &work); MPASS(work != 0); } t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) | V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); n = 0; } } atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx); if (black_hole) { fl_credits /= 8; IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx); t4_write_reg(sc, sc->sge_kdoorbell_reg, nm_rxq->fl_db_val | V_PIDX(fl_credits)); } else netmap_rx_irq(ifp, nm_rxq->nid, &work); t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(n) | V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) | V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); } #endif Index: head/sys/dev/ixgbe/if_ixv.c =================================================================== --- head/sys/dev/ixgbe/if_ixv.c (revision 332422) +++ head/sys/dev/ixgbe/if_ixv.c (revision 332423) @@ -1,1920 +1,1920 @@ /****************************************************************************** Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "opt_inet.h" #include "opt_inet6.h" #include "ixgbe.h" #include "ifdi_if.h" #include #include /************************************************************************ * Driver version ************************************************************************/ char ixv_driver_version[] = "2.0.1-k"; /************************************************************************ * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixv_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } ************************************************************************/ static pci_vendor_info_t ixv_vendor_info_array[] = { PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), /* required last entry */ PVID_END }; /************************************************************************ * Function prototypes ************************************************************************/ static void *ixv_register(device_t dev); static int ixv_if_attach_pre(if_ctx_t ctx); static int ixv_if_attach_post(if_ctx_t ctx); static int ixv_if_detach(if_ctx_t ctx); static int ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid); static int ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); static int ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); static void ixv_if_queues_free(if_ctx_t ctx); static void ixv_identify_hardware(if_ctx_t ctx); static void ixv_init_device_features(struct adapter *); static int ixv_allocate_pci_resources(if_ctx_t ctx); static void ixv_free_pci_resources(if_ctx_t ctx); static int ixv_setup_interface(if_ctx_t ctx); static void ixv_if_media_status(if_ctx_t , struct ifmediareq *); static int ixv_if_media_change(if_ctx_t ctx); static void ixv_if_update_admin_status(if_ctx_t ctx); static int ixv_if_msix_intr_assign(if_ctx_t ctx, int msix); static int ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void ixv_if_init(if_ctx_t ctx); static void ixv_if_local_timer(if_ctx_t ctx, uint16_t qid); static void ixv_if_stop(if_ctx_t ctx); static int ixv_negotiate_api(struct adapter *); static void ixv_initialize_transmit_units(if_ctx_t ctx); static void ixv_initialize_receive_units(if_ctx_t ctx); static void ixv_initialize_rss_mapping(struct adapter *); static void ixv_setup_vlan_support(if_ctx_t ctx); static void ixv_configure_ivars(struct adapter *); static void ixv_if_enable_intr(if_ctx_t ctx); static void ixv_if_disable_intr(if_ctx_t ctx); static void ixv_if_multi_set(if_ctx_t ctx); static void ixv_if_register_vlan(if_ctx_t, u16); static void ixv_if_unregister_vlan(if_ctx_t, u16); static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter); static void ixv_save_stats(struct adapter *); static void ixv_init_stats(struct adapter *); static void ixv_update_stats(struct adapter *); static void ixv_add_stats_sysctls(struct adapter *adapter); static int 
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS); static void ixv_set_ivar(struct adapter *, u8, u8, s8); static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); /* The MSI-X Interrupt handlers */ static int ixv_msix_que(void *); static int ixv_msix_mbx(void *); /************************************************************************ * FreeBSD Device Interface Entry Points ************************************************************************/ static device_method_t ixv_methods[] = { /* Device interface */ DEVMETHOD(device_register, ixv_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD_END }; static driver_t ixv_driver = { "ixv", ixv_methods, sizeof(struct adapter), }; devclass_t ixv_devclass; DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); MODULE_DEPEND(ixv, pci, 1, 1, 1); MODULE_DEPEND(ixv, ether, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(ixv, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ static device_method_t ixv_if_methods[] = { DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre), DEVMETHOD(ifdi_attach_post, ixv_if_attach_post), DEVMETHOD(ifdi_detach, ixv_if_detach), DEVMETHOD(ifdi_init, ixv_if_init), DEVMETHOD(ifdi_stop, ixv_if_stop), DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr), DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr), DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, ixv_if_queues_free), DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status), DEVMETHOD(ifdi_multi_set, ixv_if_multi_set), DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set), DEVMETHOD(ifdi_media_status, ixv_if_media_status), DEVMETHOD(ifdi_media_change, ixv_if_media_change), DEVMETHOD(ifdi_timer, ixv_if_local_timer), DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan), DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan), DEVMETHOD(ifdi_get_counter, ixv_if_get_counter), DEVMETHOD_END }; static driver_t ixv_if_driver = { "ixv_if", ixv_if_methods, sizeof(struct adapter) }; /* * TUNEABLE PARAMETERS: */ /* Flow control setting, default to full */ static int ixv_flow_control = ixgbe_fc_full; TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control); /* * Header split: this causes the hardware to DMA * the header into a separate mbuf from the payload, * it can be a performance win in some workloads, but * in others it actually hurts, its off by default. */ static int ixv_header_split = FALSE; TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split); /* * Shadow VFTA table, this is needed because * the real filter table gets cleared during * a soft reset and we need to repopulate it. 
*/ static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE]; extern struct if_txrx ixgbe_txrx; static struct if_shared_ctx ixv_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ .isc_tx_maxsize = IXGBE_TSO_SIZE, .isc_tx_maxsegsize = PAGE_SIZE, .isc_rx_maxsize = MJUM16BYTES, .isc_rx_nsegments = 1, .isc_rx_maxsegsize = MJUM16BYTES, .isc_nfl = 1, .isc_ntxqs = 1, .isc_nrxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = ixv_vendor_info_array, .isc_driver_version = ixv_driver_version, .isc_driver = &ixv_if_driver, .isc_nrxd_min = {MIN_RXD}, .isc_ntxd_min = {MIN_TXD}, .isc_nrxd_max = {MAX_RXD}, .isc_ntxd_max = {MAX_TXD}, .isc_nrxd_default = {DEFAULT_RXD}, .isc_ntxd_default = {DEFAULT_TXD}, }; if_shared_ctx_t ixv_sctx = &ixv_sctx_init; static void * ixv_register(device_t dev) { return (ixv_sctx); } /************************************************************************ * ixv_if_tx_queues_alloc ************************************************************************/ static int ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct ix_tx_queue *que; int i, j, error; MPASS(adapter->num_tx_queues == ntxqsets); MPASS(ntxqs == 1); /* Allocate queue structure memory */ adapter->tx_queues = (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (!adapter->tx_queues) { device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); return (ENOMEM); } for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) { struct tx_ring *txr = &que->txr; txr->me = i; txr->adapter = que->adapter = adapter; adapter->active_queues |= (u64)1 << txr->me; /* Allocate report status array */ if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { error = ENOMEM; goto fail; } for (j = 0; j < scctx->isc_ntxd[0]; j++) txr->tx_rsq[j] = QIDX_INVALID; /* get the virtual and physical address of the hardware queues */ txr->tail = IXGBE_VFTDT(txr->me); txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs]; txr->tx_paddr = paddrs[i*ntxqs]; txr->bytes = 0; txr->total_packets = 0; } device_printf(iflib_get_dev(ctx), "allocated for %d queues\n", adapter->num_tx_queues); return (0); fail: ixv_if_queues_free(ctx); return (error); } /* ixv_if_tx_queues_alloc */ /************************************************************************ * ixv_if_rx_queues_alloc ************************************************************************/ static int ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *que; int i, error; MPASS(adapter->num_rx_queues == nrxqsets); MPASS(nrxqs == 1); /* Allocate queue structure memory */ adapter->rx_queues = (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (!adapter->rx_queues) { device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); error = ENOMEM; goto fail; } for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { struct rx_ring *rxr = &que->rxr; rxr->me = i; rxr->adapter = que->adapter = adapter; /* get the virtual and physical address of the hw queues */ rxr->tail = IXGBE_VFRDT(rxr->me); rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; rxr->rx_paddr = paddrs[i*nrxqs]; rxr->bytes = 0; rxr->que = que; } 
device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n", adapter->num_rx_queues); return (0); fail: ixv_if_queues_free(ctx); return (error); } /* ixv_if_rx_queues_alloc */ /************************************************************************ * ixv_if_queues_free ************************************************************************/ static void ixv_if_queues_free(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_tx_queue *que = adapter->tx_queues; int i; if (que == NULL) goto free; for (i = 0; i < adapter->num_tx_queues; i++, que++) { struct tx_ring *txr = &que->txr; if (txr->tx_rsq == NULL) break; free(txr->tx_rsq, M_DEVBUF); txr->tx_rsq = NULL; } if (adapter->tx_queues != NULL) free(adapter->tx_queues, M_DEVBUF); free: if (adapter->rx_queues != NULL) free(adapter->rx_queues, M_DEVBUF); adapter->tx_queues = NULL; adapter->rx_queues = NULL; } /* ixv_if_queues_free */ /************************************************************************ * ixv_if_attach_pre - Device initialization routine * * Called when the driver is being loaded. * Identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure ************************************************************************/ static int ixv_if_attach_pre(if_ctx_t ctx) { struct adapter *adapter; device_t dev; if_softc_ctx_t scctx; struct ixgbe_hw *hw; int error = 0; INIT_DEBUGOUT("ixv_attach: begin"); /* Allocate, clear, and link in our adapter structure */ dev = iflib_get_dev(ctx); adapter = iflib_get_softc(ctx); adapter->dev = dev; adapter->ctx = ctx; adapter->hw.back = adapter; scctx = adapter->shared = iflib_get_softc_ctx(ctx); adapter->media = iflib_get_media(ctx); hw = &adapter->hw; /* Do base PCI setup - map BAR0 */ if (ixv_allocate_pci_resources(ctx)) { device_printf(dev, "ixv_allocate_pci_resources() failed!\n"); error = ENXIO; goto err_out; } /* SYSCTL APIs */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I", "Debug Info"); /* Determine hardware revision */ ixv_identify_hardware(ctx); ixv_init_device_features(adapter); /* Initialize the shared code */ error = ixgbe_init_ops_vf(hw); if (error) { device_printf(dev, "ixgbe_init_ops_vf() failed!\n"); error = EIO; goto err_out; } /* Setup the mailbox */ ixgbe_init_mbx_params_vf(hw); error = hw->mac.ops.reset_hw(hw); if (error == IXGBE_ERR_RESET_FAILED) device_printf(dev, "...reset_hw() failure: Reset Failed!\n"); else if (error) device_printf(dev, "...reset_hw() failed with error %d\n", error); if (error) { error = EIO; goto err_out; } error = hw->mac.ops.init_hw(hw); if (error) { device_printf(dev, "...init_hw() failed with error %d\n", error); error = EIO; goto err_out; } /* Negotiate mailbox API version */ error = ixv_negotiate_api(adapter); if (error) { device_printf(dev, "Mailbox API negotiation failed during attach!\n"); goto err_out; } /* If no mac address was assigned, make a random one */ if (!ixv_check_ether_addr(hw->mac.addr)) { u8 addr[ETHER_ADDR_LEN]; arc4rand(&addr, sizeof(addr), 0); addr[0] &= 0xFE; addr[0] |= 0x02; bcopy(addr, hw->mac.addr, sizeof(addr)); bcopy(addr, hw->mac.perm_addr, sizeof(addr)); } /* Most of the iflib initialization... 
*/ iflib_set_mac(ctx, hw->mac.addr); switch (adapter->hw.mac.type) { case ixgbe_mac_X550_vf: case ixgbe_mac_X550EM_x_vf: case ixgbe_mac_X550EM_a_vf: scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2; break; default: scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1; } scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + sizeof(u32), DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); /* XXX */ scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO; scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR); scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; scctx->isc_tx_tso_segsize_max = PAGE_SIZE; scctx->isc_txrx = &ixgbe_txrx; /* * Tell the upper layer(s) we support everything the PF * driver does except... * hardware stats * Wake-on-LAN */ scctx->isc_capenable = IXGBE_CAPS; scctx->isc_capenable ^= IFCAP_HWSTATS | IFCAP_WOL; INIT_DEBUGOUT("ixv_if_attach_pre: end"); return (0); err_out: ixv_free_pci_resources(ctx); return (error); } /* ixv_if_attach_pre */ static int ixv_if_attach_post(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); int error = 0; /* Setup OS specific network interface */ error = ixv_setup_interface(ctx); if (error) { device_printf(dev, "Interface setup failed: %d\n", error); goto end; } /* Do the stats setup */ ixv_save_stats(adapter); ixv_init_stats(adapter); ixv_add_stats_sysctls(adapter); end: return error; } /* ixv_if_attach_post */ /************************************************************************ * ixv_detach - Device removal routine * * Called when the driver is being removed. * Stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure ************************************************************************/ static int ixv_if_detach(if_ctx_t ctx) { INIT_DEBUGOUT("ixv_detach: begin"); ixv_free_pci_resources(ctx); return (0); } /* ixv_if_detach */ /************************************************************************ * ixv_if_mtu_set ************************************************************************/ static int ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); int error = 0; IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) { error = EINVAL; } else { ifp->if_mtu = mtu; adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; } return error; } /* ixv_if_mtu_set */ /************************************************************************ * ixv_if_init - Init entry point * * Used in two ways: It is used by the stack as an init entry * point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get * to a consistent state. * * return 0 on success, positive on failure ************************************************************************/ static void ixv_if_init(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); device_t dev = iflib_get_dev(ctx); struct ixgbe_hw *hw = &adapter->hw; int error = 0; INIT_DEBUGOUT("ixv_if_init: begin"); hw->adapter_stopped = FALSE; hw->mac.ops.stop_adapter(hw); /* reprogram the RAR[0] in case user changed it. 
*/ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1); /* Reset VF and renegotiate mailbox API version */ hw->mac.ops.reset_hw(hw); hw->mac.ops.start_hw(hw); error = ixv_negotiate_api(adapter); if (error) { device_printf(dev, "Mailbox API negotiation failed in if_init!\n"); return; } ixv_initialize_transmit_units(ctx); /* Setup Multicast table */ ixv_if_multi_set(ctx); /* * Determine the correct mbuf pool * for doing jumbo/headersplit */ if (ifp->if_mtu > ETHERMTU) adapter->rx_mbuf_sz = MJUMPAGESIZE; else adapter->rx_mbuf_sz = MCLBYTES; /* Configure RX settings */ ixv_initialize_receive_units(ctx); /* Set up VLAN offload and filter */ ixv_setup_vlan_support(ctx); /* Set up MSI-X routing */ ixv_configure_ivars(adapter); /* Set up auto-mask */ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE); /* Set moderation on the Link interrupt */ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR); /* Stats init */ ixv_init_stats(adapter); /* Config/Enable Link */ hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up, FALSE); /* And now turn on interrupts */ ixv_if_enable_intr(ctx); return; } /* ixv_if_init */ /************************************************************************ * ixv_enable_queue ************************************************************************/ static inline void ixv_enable_queue(struct adapter *adapter, u32 vector) { struct ixgbe_hw *hw = &adapter->hw; u32 queue = 1 << vector; u32 mask; mask = (IXGBE_EIMS_RTX_QUEUE & queue); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); } /* ixv_enable_queue */ /************************************************************************ * ixv_disable_queue ************************************************************************/ static inline void ixv_disable_queue(struct adapter *adapter, u32 vector) { struct ixgbe_hw *hw = &adapter->hw; u64 queue = (u64)(1 << vector); u32 mask; mask = (IXGBE_EIMS_RTX_QUEUE & queue); IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); } /* ixv_disable_queue */ /************************************************************************ * ixv_msix_que - MSI-X Queue Interrupt Service routine ************************************************************************/ static int ixv_msix_que(void *arg) { struct ix_rx_queue *que = arg; struct adapter *adapter = que->adapter; ixv_disable_queue(adapter, que->msix); ++que->irqs; return (FILTER_SCHEDULE_THREAD); } /* ixv_msix_que */ /************************************************************************ * ixv_msix_mbx ************************************************************************/ static int ixv_msix_mbx(void *arg) { struct adapter *adapter = arg; struct ixgbe_hw *hw = &adapter->hw; u32 reg; ++adapter->link_irq; /* First get the cause */ reg = IXGBE_READ_REG(hw, IXGBE_VTEICS); /* Clear interrupt with write */ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg); /* Link status change */ if (reg & IXGBE_EICR_LSC) iflib_admin_intr_deferred(adapter->ctx); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER); return (FILTER_HANDLED); } /* ixv_msix_mbx */ /************************************************************************ * ixv_media_status - Media Ioctl callback * * Called whenever the user queries the status of * the interface using ifconfig. 
************************************************************************/ static void ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) { struct adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("ixv_media_status: begin"); iflib_admin_intr_deferred(ctx); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) return; ifmr->ifm_status |= IFM_ACTIVE; switch (adapter->link_speed) { case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_T | IFM_FDX; break; case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_T | IFM_FDX; break; case IXGBE_LINK_SPEED_100_FULL: ifmr->ifm_active |= IFM_100_TX | IFM_FDX; break; case IXGBE_LINK_SPEED_10_FULL: ifmr->ifm_active |= IFM_10_T | IFM_FDX; break; } } /* ixv_if_media_status */ /************************************************************************ * ixv_if_media_change - Media Ioctl callback * * Called when the user changes speed/duplex using * media/mediopt option with ifconfig. ************************************************************************/ static int ixv_if_media_change(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); INIT_DEBUGOUT("ixv_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; default: device_printf(adapter->dev, "Only auto media type\n"); return (EINVAL); } return (0); } /* ixv_if_media_change */ /************************************************************************ * ixv_negotiate_api * * Negotiate the Mailbox API with the PF; * start with the most featured API first. ************************************************************************/ static int ixv_negotiate_api(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int mbx_api[] = { ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; int i = 0; while (mbx_api[i] != ixgbe_mbox_api_unknown) { if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) return (0); i++; } return (EINVAL); } /* ixv_negotiate_api */ /************************************************************************ * ixv_if_multi_set - Multicast Update * * Called whenever multicast address list is updated. ************************************************************************/ static void ixv_if_multi_set(if_ctx_t ctx) { u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; struct adapter *adapter = iflib_get_softc(ctx); u8 *update_ptr; struct ifmultiaddr *ifma; if_t ifp = iflib_get_ifp(ctx); int mcnt = 0; IOCTL_DEBUGOUT("ixv_if_multi_set: begin"); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], IXGBE_ETH_LENGTH_OF_ADDRESS); mcnt++; } update_ptr = mta; adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt, ixv_mc_array_itr, TRUE); } /* ixv_if_multi_set */ /************************************************************************ * ixv_mc_array_itr * * An iterator function needed by the multicast shared code. * It feeds the shared code routine the addresses in the * array of ixv_set_multi() one by one. 
************************************************************************/ static u8 * ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) { u8 *addr = *update_ptr; u8 *newptr; *vmdq = 0; newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; *update_ptr = newptr; return addr; } /* ixv_mc_array_itr */ /************************************************************************ * ixv_if_local_timer - Timer routine * * Checks for link status, updates statistics, * and runs the watchdog check. ************************************************************************/ static void ixv_if_local_timer(if_ctx_t ctx, uint16_t qid) { if (qid != 0) return; /* Fire off the adminq task */ iflib_admin_intr_deferred(ctx); } /* ixv_if_local_timer */ /************************************************************************ * ixv_if_update_admin_status - Update OS on link state * * Note: Only updates the OS on the cached link state. * The real check of the hardware only happens with * a link interrupt. ************************************************************************/ static void ixv_if_update_admin_status(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); s32 status; adapter->hw.mac.get_link_status = TRUE; status = ixgbe_check_link(&adapter->hw, &adapter->link_speed, &adapter->link_up, FALSE); if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) { /* Mailbox's Clear To Send status is lost or timeout occurred. * We need reinitialization. */ iflib_get_ifp(ctx)->if_init(ctx); } if (adapter->link_up) { if (adapter->link_active == FALSE) { if (bootverbose) device_printf(dev, "Link is up %d Gbps %s \n", ((adapter->link_speed == 128) ? 10 : 1), "Full Duplex"); adapter->link_active = TRUE; iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); } } else { /* Link down */ if (adapter->link_active == TRUE) { if (bootverbose) device_printf(dev, "Link is Down\n"); iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); adapter->link_active = FALSE; } } /* Stats Update */ ixv_update_stats(adapter); } /* ixv_if_update_admin_status */ /************************************************************************ * ixv_if_stop - Stop the hardware * * Disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. ************************************************************************/ static void ixv_if_stop(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; INIT_DEBUGOUT("ixv_stop: begin\n"); ixv_if_disable_intr(ctx); hw->mac.ops.reset_hw(hw); adapter->hw.adapter_stopped = FALSE; hw->mac.ops.stop_adapter(hw); /* Update the stack */ adapter->link_up = FALSE; ixv_if_update_admin_status(ctx); /* reprogram the RAR[0] in case user changed it. */ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } /* ixv_if_stop */ /************************************************************************ * ixv_identify_hardware - Determine hardware revision. 
************************************************************************/ static void ixv_identify_hardware(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); struct ixgbe_hw *hw = &adapter->hw; /* Save off the information about this board */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_get_revid(dev); hw->subsystem_vendor_id = pci_get_subvendor(dev); hw->subsystem_device_id = pci_get_subdevice(dev); /* A subset of set_mac_type */ switch (hw->device_id) { case IXGBE_DEV_ID_82599_VF: hw->mac.type = ixgbe_mac_82599_vf; break; case IXGBE_DEV_ID_X540_VF: hw->mac.type = ixgbe_mac_X540_vf; break; case IXGBE_DEV_ID_X550_VF: hw->mac.type = ixgbe_mac_X550_vf; break; case IXGBE_DEV_ID_X550EM_X_VF: hw->mac.type = ixgbe_mac_X550EM_x_vf; break; case IXGBE_DEV_ID_X550EM_A_VF: hw->mac.type = ixgbe_mac_X550EM_a_vf; break; default: device_printf(dev, "unknown mac type\n"); hw->mac.type = ixgbe_mac_unknown; break; } } /* ixv_identify_hardware */ /************************************************************************ * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers ************************************************************************/ static int ixv_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); struct ix_rx_queue *rx_que = adapter->rx_queues; struct ix_tx_queue *tx_que; int error, rid, vector = 0; char buf[16]; for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) { rid = vector + 1; snprintf(buf, sizeof(buf), "rxq%d", i); error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf); if (error) { device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error); adapter->num_rx_queues = i + 1; goto fail; } rx_que->msix = vector; adapter->active_queues |= (u64)(1 << rx_que->msix); } for (int i = 0; i < adapter->num_tx_queues; i++) { snprintf(buf, sizeof(buf), "txq%d", i); tx_que = &adapter->tx_queues[i]; tx_que->msix = i % adapter->num_rx_queues; iflib_softirq_alloc_generic(ctx, &adapter->rx_queues[tx_que->msix].que_irq, IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); } rid = vector + 1; error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register admin handler"); return (error); } adapter->vector = vector; /* * Due to a broken design QEMU will fail to properly * enable the guest for MSIX unless the vectors in * the table are all set up, so we must rewrite the * ENABLE in the MSIX control register again at this * point to cause it to successfully initialize us. 
*/ if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { int msix_ctrl; pci_find_cap(dev, PCIY_MSIX, &rid); rid += PCIR_MSIX_CTRL; msix_ctrl = pci_read_config(dev, rid, 2); msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(dev, rid, msix_ctrl, 2); } return (0); fail: iflib_irq_free(ctx, &adapter->irq); rx_que = adapter->rx_queues; for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) iflib_irq_free(ctx, &rx_que->que_irq); return (error); } /* ixv_if_msix_intr_assign */ /************************************************************************ * ixv_allocate_pci_resources ************************************************************************/ static int ixv_allocate_pci_resources(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); int rid; rid = PCIR_BAR(0); adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(adapter->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->pci_mem); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; return (0); } /* ixv_allocate_pci_resources */ /************************************************************************ * ixv_free_pci_resources ************************************************************************/ static void ixv_free_pci_resources(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *que = adapter->rx_queues; device_t dev = iflib_get_dev(ctx); /* Release all msix queue resources */ if (adapter->intr_type == IFLIB_INTR_MSIX) iflib_irq_free(ctx, &adapter->irq); if (que != NULL) { for (int i = 0; i < adapter->num_rx_queues; i++, que++) { iflib_irq_free(ctx, &que->que_irq); } } /* Clean the Legacy or Link interrupt last */ if (adapter->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->pci_mem); } /* ixv_free_pci_resources */ /************************************************************************ * ixv_setup_interface * * Setup networking device structure and register an interface. 
************************************************************************/ static int ixv_setup_interface(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct ifnet *ifp = iflib_get_ifp(ctx); INIT_DEBUGOUT("ixv_setup_interface: begin"); if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setbaudrate(ifp, IF_Gbps(10)); ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2; adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); return 0; } /* ixv_setup_interface */ /************************************************************************ * ixv_if_get_counter ************************************************************************/ static uint64_t ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct adapter *adapter = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_IPACKETS: return (adapter->ipackets); case IFCOUNTER_OPACKETS: return (adapter->opackets); case IFCOUNTER_IBYTES: return (adapter->ibytes); case IFCOUNTER_OBYTES: return (adapter->obytes); case IFCOUNTER_IMCASTS: return (adapter->imcasts); default: return (if_get_counter_default(ifp, cnt)); } } /* ixv_if_get_counter */ /************************************************************************ * ixv_initialize_transmit_units - Enable transmit unit. ************************************************************************/ static void ixv_initialize_transmit_units(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; if_softc_ctx_t scctx = adapter->shared; struct ix_tx_queue *que = adapter->tx_queues; int i; for (i = 0; i < adapter->num_tx_queues; i++, que++) { struct tx_ring *txr = &que->txr; u64 tdba = txr->tx_paddr; u32 txctrl, txdctl; int j = txr->me; /* Set WTHRESH to 8, burst writeback */ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); txdctl |= (8 << 16); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); /* Set the HW Tx Head and Tail indices */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0); /* Set Tx Tail register */ txr->tail = IXGBE_VFTDT(j); txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0; for (int k = 0; k < scctx->isc_ntxd[0]; k++) txr->tx_rsq[k] = QIDX_INVALID; /* Set Ring parameters */ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), (tdba & 0x00000000ffffffffULL)); IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc)); txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); /* Now enable */ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); txdctl |= IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); } return; } /* ixv_initialize_transmit_units */ /************************************************************************ * ixv_initialize_rss_mapping ************************************************************************/ static void ixv_initialize_rss_mapping(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 reta = 0, mrqc, rss_key[10]; int queue_id; int i, j; u32 rss_hash_config; if (adapter->feat_en & IXGBE_FEATURE_RSS) { /* Fetch the configured RSS key */ rss_getkey((uint8_t *)&rss_key); } else { /* set up random bits */ arc4rand(&rss_key, sizeof(rss_key), 0); } /* Now fill out hash function seeds 
*/ for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); /* Set up the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { if (j == adapter->num_rx_queues) j = 0; if (adapter->feat_en & IXGBE_FEATURE_RSS) { /* * Fetch the RSS bucket id for the given indirection * entry. Cap it at the number of configured buckets * (which is num_rx_queues.) */ queue_id = rss_get_indirection_to_bucket(i); queue_id = queue_id % adapter->num_rx_queues; } else queue_id = j; /* * The low 8 bits are for hash value (n+0); * The next 8 bits are for hash value (n+1), etc. */ reta >>= 8; reta |= ((uint32_t)queue_id) << 24; if ((i & 3) == 3) { IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta); reta = 0; } } /* Perform hash on these packet types */ if (adapter->feat_en & IXGBE_FEATURE_RSS) rss_hash_config = rss_gethashconfig(); else { /* * Disable UDP - IP fragments aren't currently being handled * and so we end up with a mix of 2-tuple and 4-tuple * traffic. */ rss_hash_config = RSS_HASHTYPE_RSS_IPV4 | RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_TCP_IPV6; } mrqc = IXGBE_MRQC_RSSEN; if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n", __func__); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n", __func__); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n", __func__); IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc); } /* ixv_initialize_rss_mapping */ /************************************************************************ * ixv_initialize_receive_units - Setup receive registers and features. ************************************************************************/ static void ixv_initialize_receive_units(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx; struct ixgbe_hw *hw = &adapter->hw; struct ifnet *ifp = iflib_get_ifp(ctx); struct ix_rx_queue *que = adapter->rx_queues; u32 bufsz, psrtype; if (ifp->if_mtu > ETHERMTU) bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; else bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | IXGBE_PSRTYPE_L2HDR; if (adapter->num_rx_queues > 1) psrtype |= 1 << 29; IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); /* Tell PF our max_frame size */ if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) { device_printf(adapter->dev, "There is a problem with the PF setup. 
It is likely the receive unit for this VF will not function correctly.\n"); } scctx = adapter->shared; for (int i = 0; i < adapter->num_rx_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; u64 rdba = rxr->rx_paddr; u32 reg, rxdctl; int j = rxr->me; /* Disable the queue */ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); for (int k = 0; k < 10; k++) { if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) msec_delay(1); else break; } wmb(); /* Setup the Base and Length of the Rx Descriptor Ring */ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), (rdba & 0x00000000ffffffffULL)); IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc)); /* Reset the ring indices */ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0); /* Set up the SRRCTL register */ reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j)); reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; reg |= bufsz; reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg); /* Capture Rx Tail index */ rxr->tail = IXGBE_VFRDT(rxr->me); /* Do the queue enabling last */ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); for (int l = 0; l < 10; l++) { if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) break; msec_delay(1); } wmb(); /* Set the Tail Pointer */ #ifdef DEV_NETMAP /* * In netmap mode, we must preserve the buffers made * available to userspace before the if_init() * (this is true by default on the TX side, because * init makes all buffers available to userspace). * * netmap_reset() and the device specific routines * (e.g. ixgbe_setup_receive_rings()) map these * buffers at the end of the NIC ring, so here we * must set the RDT (tail) register to make sure * they are not overwritten. * * In this driver the NIC ring starts at RDH = 0, * RDT points to the last slot available for reception (?), * so RDT = num_rx_desc - 1 means the whole ring is available. */ if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(ifp); - struct netmap_kring *kring = &na->rx_rings[j]; + struct netmap_kring *kring = na->rx_rings[j]; int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t); } else #endif /* DEV_NETMAP */ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), scctx->isc_nrxd[0] - 1); } ixv_initialize_rss_mapping(adapter); } /* ixv_initialize_receive_units */ /************************************************************************ * ixv_setup_vlan_support ************************************************************************/ static void ixv_setup_vlan_support(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; u32 ctrl, vid, vfta, retry; /* * We get here thru if_init, meaning * a soft reset, this has already cleared * the VFTA and other state, so if there * have been no vlan's registered do nothing. */ if (adapter->num_vlans == 0) return; /* Enable the queues */ for (int i = 0; i < adapter->num_rx_queues; i++) { ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); ctrl |= IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl); /* * Let Rx path know that it needs to store VLAN tag * as part of extra mbuf info. */ adapter->rx_queues[i].rxr.vtag_strip = TRUE; } /* * A soft reset zero's out the VFTA, so * we need to repopulate it now. 
*/ for (int i = 0; i < IXGBE_VFTA_SIZE; i++) { if (ixv_shadow_vfta[i] == 0) continue; vfta = ixv_shadow_vfta[i]; /* * Reconstruct the vlan id's * based on the bits set in each * of the array ints. */ for (int j = 0; j < 32; j++) { retry = 0; if ((vfta & (1 << j)) == 0) continue; vid = (i * 32) + j; /* Call the shared code mailbox routine */ while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) { if (++retry > 5) break; } } } } /* ixv_setup_vlan_support */ /************************************************************************ * ixv_if_register_vlan * * Run via a vlan config EVENT, it enables us to use the * HW Filter table since we can get the vlan id. This just * creates the entry in the soft version of the VFTA, init * will repopulate the real table. ************************************************************************/ static void ixv_if_register_vlan(if_ctx_t ctx, u16 vtag) { struct adapter *adapter = iflib_get_softc(ctx); u16 index, bit; index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; ixv_shadow_vfta[index] |= (1 << bit); ++adapter->num_vlans; } /* ixv_if_register_vlan */ /************************************************************************ * ixv_if_unregister_vlan * * Run via a vlan unconfig EVENT, remove our entry * in the soft vfta. ************************************************************************/ static void ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag) { struct adapter *adapter = iflib_get_softc(ctx); u16 index, bit; index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; ixv_shadow_vfta[index] &= ~(1 << bit); --adapter->num_vlans; } /* ixv_if_unregister_vlan */ /************************************************************************ * ixv_if_enable_intr ************************************************************************/ static void ixv_if_enable_intr(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; struct ix_rx_queue *que = adapter->rx_queues; u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); mask = IXGBE_EIMS_ENABLE_MASK; mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); for (int i = 0; i < adapter->num_rx_queues; i++, que++) ixv_enable_queue(adapter, que->msix); IXGBE_WRITE_FLUSH(hw); } /* ixv_if_enable_intr */ /************************************************************************ * ixv_if_disable_intr ************************************************************************/ static void ixv_if_disable_intr(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0); IXGBE_WRITE_FLUSH(&adapter->hw); } /* ixv_if_disable_intr */ /************************************************************************ * ixv_if_rx_queue_intr_enable ************************************************************************/ static int ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *que = &adapter->rx_queues[rxqid]; ixv_enable_queue(adapter, que->rxr.me); return (0); } /* ixv_if_rx_queue_intr_enable */ /************************************************************************ * ixv_set_ivar * * Setup the correct IVAR register for a particular MSI-X interrupt * - entry is the register array entry * - vector is the MSI-X vector for this queue * - type is RX/TX/MISC ************************************************************************/ static void ixv_set_ivar(struct 
adapter *adapter, u8 entry, u8 vector, s8 type) { struct ixgbe_hw *hw = &adapter->hw; u32 ivar, index; vector |= IXGBE_IVAR_ALLOC_VAL; if (type == -1) { /* MISC IVAR */ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); ivar &= ~0xFF; ivar |= vector; IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); } else { /* RX/TX IVARS */ index = (16 * (entry & 1)) + (8 * type); ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); ivar &= ~(0xFF << index); ivar |= (vector << index); IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); } } /* ixv_set_ivar */ /************************************************************************ * ixv_configure_ivars ************************************************************************/ static void ixv_configure_ivars(struct adapter *adapter) { struct ix_rx_queue *que = adapter->rx_queues; MPASS(adapter->num_rx_queues == adapter->num_tx_queues); for (int i = 0; i < adapter->num_rx_queues; i++, que++) { /* First the RX queue entry */ ixv_set_ivar(adapter, i, que->msix, 0); /* ... and the TX */ ixv_set_ivar(adapter, i, que->msix, 1); /* Set an initial value in EITR */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix), IXGBE_EITR_DEFAULT); } /* For the mailbox interrupt */ ixv_set_ivar(adapter, 1, adapter->vector, -1); } /* ixv_configure_ivars */ /************************************************************************ * ixv_save_stats * * The VF stats registers never have a truly virgin * starting point, so this routine tries to make an * artificial one, marking ground zero on attach as * it were. ************************************************************************/ static void ixv_save_stats(struct adapter *adapter) { if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) { adapter->stats.vf.saved_reset_vfgprc += adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc; adapter->stats.vf.saved_reset_vfgptc += adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc; adapter->stats.vf.saved_reset_vfgorc += adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc; adapter->stats.vf.saved_reset_vfgotc += adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc; adapter->stats.vf.saved_reset_vfmprc += adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc; } } /* ixv_save_stats */ /************************************************************************ * ixv_init_stats ************************************************************************/ static void ixv_init_stats(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); adapter->stats.vf.last_vfgorc |= (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); adapter->stats.vf.last_vfgotc |= (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc; adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc; adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc; adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc; adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc; } /* ixv_init_stats */ #define UPDATE_STAT_32(reg, last, count) \ { \ u32 current = IXGBE_READ_REG(hw, reg); \ if (current < last) \ count += 0x100000000LL; \ last = current; \ count &= 0xFFFFFFFF00000000LL; \ count |= 
current; \ } #define UPDATE_STAT_36(lsb, msb, last, count) \ { \ u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \ u64 cur_msb = IXGBE_READ_REG(hw, msb); \ u64 current = ((cur_msb << 32) | cur_lsb); \ if (current < last) \ count += 0x1000000000LL; \ last = current; \ count &= 0xFFFFFFF000000000LL; \ count |= current; \ } /************************************************************************ * ixv_update_stats - Update the board statistics counters. ************************************************************************/ void ixv_update_stats(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbevf_hw_stats *stats = &adapter->stats.vf; UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc, adapter->stats.vf.vfgprc); UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc, adapter->stats.vf.vfgptc); UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc); UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc); UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc, adapter->stats.vf.vfmprc); /* Fill out the OS statistics structure */ IXGBE_SET_IPACKETS(adapter, stats->vfgprc); IXGBE_SET_OPACKETS(adapter, stats->vfgptc); IXGBE_SET_IBYTES(adapter, stats->vfgorc); IXGBE_SET_OBYTES(adapter, stats->vfgotc); IXGBE_SET_IMCASTS(adapter, stats->vfmprc); } /* ixv_update_stats */ /************************************************************************ * ixv_add_stats_sysctls - Add statistic sysctls for the VF. ************************************************************************/ static void ixv_add_stats_sysctls(struct adapter *adapter) { device_t dev = adapter->dev; struct ix_tx_queue *tx_que = adapter->tx_queues; struct ix_rx_queue *rx_que = adapter->rx_queues; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct ixgbevf_hw_stats *stats = &adapter->stats.vf; struct sysctl_oid *stat_node, *queue_node; struct sysctl_oid_list *stat_list, *queue_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &(txr->tso_tx), "TSO Packets"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &(txr->total_packets), "TX Packets"); } for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &(rxr->rx_packets), "RX packets"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, 
"rx_discarded", CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets"); } stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", CTLFLAG_RD, &stats->vfgprc, "Good Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", CTLFLAG_RD, &stats->vfgorc, "Good Octets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted"); } /* ixv_add_stats_sysctls */ /************************************************************************ * ixv_print_debug_info * * Called only when em_display_debug_stats is enabled. * Provides a way to take a look at important statistics * maintained by the driver and hardware. ************************************************************************/ static void ixv_print_debug_info(struct adapter *adapter) { device_t dev = adapter->dev; struct ixgbe_hw *hw = &adapter->hw; device_printf(dev, "Error Byte Count = %u \n", IXGBE_READ_REG(hw, IXGBE_ERRBC)); device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq); } /* ixv_print_debug_info */ /************************************************************************ * ixv_sysctl_debug ************************************************************************/ static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; ixv_print_debug_info(adapter); } return error; } /* ixv_sysctl_debug */ /************************************************************************ * ixv_init_device_features ************************************************************************/ static void ixv_init_device_features(struct adapter *adapter) { adapter->feat_cap = IXGBE_FEATURE_NETMAP | IXGBE_FEATURE_VF | IXGBE_FEATURE_RSS | IXGBE_FEATURE_LEGACY_TX; /* A tad short on feature flags for VFs, atm. */ switch (adapter->hw.mac.type) { case ixgbe_mac_82599_vf: break; case ixgbe_mac_X540_vf: break; case ixgbe_mac_X550_vf: case ixgbe_mac_X550EM_x_vf: case ixgbe_mac_X550EM_a_vf: adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; break; default: break; } /* Enabled by default... */ /* Is a virtual function (VF) */ if (adapter->feat_cap & IXGBE_FEATURE_VF) adapter->feat_en |= IXGBE_FEATURE_VF; /* Netmap */ if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) adapter->feat_en |= IXGBE_FEATURE_NETMAP; /* Receive-Side Scaling (RSS) */ if (adapter->feat_cap & IXGBE_FEATURE_RSS) adapter->feat_en |= IXGBE_FEATURE_RSS; /* Needs advanced context descriptor regardless of offloads req'd */ if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; } /* ixv_init_device_features */ Index: head/sys/dev/ixl/ixl_pf_main.c =================================================================== --- head/sys/dev/ixl/ixl_pf_main.c (revision 332422) +++ head/sys/dev/ixl/ixl_pf_main.c (revision 332423) @@ -1,6090 +1,6090 @@ /****************************************************************************** Copyright (c) 2013-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf.h" #ifdef PCI_IOV #include "ixl_pf_iov.h" #endif #ifdef IXL_IW #include "ixl_iw.h" #include "ixl_iw_int.h" #endif #ifdef DEV_NETMAP #include #include #include #endif /* DEV_NETMAP */ static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int); static u64 ixl_max_aq_speed_to_value(u8); static u8 ixl_convert_sysctl_aq_link_speed(u8, bool); /* Sysctls */ static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS); static int ixl_set_advertise(SYSCTL_HANDLER_ARGS); static int ixl_current_speed(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); /* Debug Sysctls */ static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); #ifdef IXL_DEBUG static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); #endif #ifdef IXL_IW extern int ixl_enable_iwarp; #endif const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; const char * const ixl_fc_string[6] = { "None", "Rx", "Tx", "Full", 
"Priority", "Default" }; MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); void ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...) { va_list args; if (!(mask & pf->dbg_mask)) return; /* Re-implement device_printf() */ device_print_prettyname(pf->dev); va_start(args, fmt); vprintf(fmt, args); va_end(args); } /* ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string */ void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf) { u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24); u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF); u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF); sbuf_printf(buf, "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> IXL_NVM_VERSION_HI_SHIFT, (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> IXL_NVM_VERSION_LO_SHIFT, hw->nvm.eetrack, oem_ver, oem_build, oem_patch); } void ixl_print_nvm_version(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *sbuf; sbuf = sbuf_new_auto(); ixl_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); device_printf(dev, "%s\n", sbuf_data(sbuf)); sbuf_delete(sbuf); } static void ixl_configure_tx_itr(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; vsi->tx_itr_setting = pf->tx_itr; for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; txr->latency = IXL_AVE_LATENCY; } } static void ixl_configure_rx_itr(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; vsi->rx_itr_setting = pf->rx_itr; for (int i = 0; i < vsi->num_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; rxr->latency = IXL_AVE_LATENCY; } } /* * Write PF ITR values to queue ITR registers. */ void ixl_configure_itr(struct ixl_pf *pf) { ixl_configure_tx_itr(pf); ixl_configure_rx_itr(pf); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ void ixl_init_locked(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; device_t dev = pf->dev; struct i40e_filter_control_settings filter; u8 tmpaddr[ETHER_ADDR_LEN]; int ret; INIT_DEBUGOUT("ixl_init_locked: begin"); IXL_PF_LOCK_ASSERT(pf); ixl_stop_locked(pf); /* * If the aq is dead here, it probably means something outside of the driver * did something to the adapter, like a PF reset. * So rebuild the driver's state here if that occurs. */ if (!i40e_check_asq_alive(&pf->hw)) { device_printf(dev, "Admin Queue is down; resetting...\n"); ixl_teardown_hw_structs(pf); ixl_reset(pf); } /* Get the latest mac address... 
The user might have set a LAA (locally administered address) */ bcopy(IF_LLADDR(vsi->ifp), tmpaddr, I40E_ETH_LENGTH_OF_ADDRESS); if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); bcopy(tmpaddr, hw->mac.addr, I40E_ETH_LENGTH_OF_ADDRESS); ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY, hw->mac.addr, NULL); if (ret) { device_printf(dev, "LLA address change failed!!\n"); return; } } ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); /* Set the various hardware offload abilities */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TSO) ifp->if_hwassist |= CSUM_TSO; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); /* Set up the device filtering */ bzero(&filter, sizeof(filter)); filter.enable_ethtype = TRUE; filter.enable_macvlan = TRUE; filter.enable_fdir = FALSE; filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; if (i40e_set_filter_control(hw, &filter)) device_printf(dev, "i40e_set_filter_control() failed\n"); /* Prepare the VSI: rings, hmc contexts, etc... */ if (ixl_initialize_vsi(vsi)) { device_printf(dev, "initialize vsi failed!!\n"); return; } /* Set up RSS */ ixl_config_rss(pf); /* Add protocol filters to list */ ixl_init_filters(vsi); /* Set up VLANs if needed */ ixl_setup_vlan_filters(vsi); /* Set up MSI/X routing and the ITR settings */ if (pf->msix > 1) { ixl_configure_queue_intr_msix(pf); ixl_configure_itr(pf); } else ixl_configure_legacy(pf); ixl_enable_rings(vsi); i40e_aq_set_default_vsi(hw, vsi->seid, NULL); ixl_reconfigure_filters(vsi); /* And now turn on interrupts */ ixl_enable_intr(vsi); /* Get link info */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); ixl_update_link_status(pf); /* Start the local timer */ callout_reset(&pf->timer, hz, ixl_local_timer, pf); /* Now inform the stack we're ready */ ifp->if_drv_flags |= IFF_DRV_RUNNING; #ifdef IXL_IW if (ixl_enable_iwarp && pf->iw_enabled) { ret = ixl_iw_pf_init(pf); if (ret) device_printf(dev, "initialize iwarp failed, code %d\n", ret); } #endif } /********************************************************************* * * Get the hardware capabilities * **********************************************************************/ int ixl_get_hw_capabilities(struct ixl_pf *pf) { struct i40e_aqc_list_capabilities_element_resp *buf; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int error, len; u16 needed; bool again = TRUE; len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); retry: if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate cap memory\n"); return (ENOMEM); } /* This populates the hw struct */ error = i40e_aq_discover_capabilities(hw, buf, len, &needed, i40e_aqc_opc_list_func_capabilities, NULL); free(buf, M_DEVBUF); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && (again == TRUE)) { /* retry once with a larger buffer */ again = FALSE; len = needed; goto retry; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { device_printf(dev, "capability discovery failed: %d\n", pf->hw.aq.asq_last_status); return (ENODEV); } /* Capture this PF's starting queue pair */ pf->qbase = hw->func_caps.base_queue; #ifdef IXL_DEBUG device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, " "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n", hw->pf_id, hw->func_caps.num_vfs, 
hw->func_caps.num_msix_vectors, hw->func_caps.num_msix_vectors_vf, hw->func_caps.fd_filters_guaranteed, hw->func_caps.fd_filters_best_effort, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, hw->func_caps.base_queue); #endif /* Print a subset of the capability information. */ device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n", hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, (hw->func_caps.mdio_port_mode == 2) ? "I2C" : (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" : "MDIO shared"); struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back; osdep->i2c_intfc_num = ixl_find_i2c_interface(pf); if (osdep->i2c_intfc_num != -1) pf->has_i2c = true; return (error); } void ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) { device_t dev = vsi->dev; /* Enable/disable TXCSUM/TSO4 */ if (!(ifp->if_capenable & IFCAP_TXCSUM) && !(ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { ifp->if_capenable |= IFCAP_TXCSUM; /* enable TXCSUM, restore TSO if previously enabled */ if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; ifp->if_capenable |= IFCAP_TSO4; } } else if (mask & IFCAP_TSO4) { ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; device_printf(dev, "TSO4 requires txcsum, enabling both...\n"); } } else if((ifp->if_capenable & IFCAP_TXCSUM) && !(ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) ifp->if_capenable &= ~IFCAP_TXCSUM; else if (mask & IFCAP_TSO4) ifp->if_capenable |= IFCAP_TSO4; } else if((ifp->if_capenable & IFCAP_TXCSUM) && (ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { vsi->flags |= IXL_FLAGS_KEEP_TSO4; ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); device_printf(dev, "TSO4 requires txcsum, disabling both...\n"); } else if (mask & IFCAP_TSO4) ifp->if_capenable &= ~IFCAP_TSO4; } /* Enable/disable TXCSUM_IPV6/TSO6 */ if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6) && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable |= IFCAP_TXCSUM_IPV6; if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; ifp->if_capenable |= IFCAP_TSO6; } } else if (mask & IFCAP_TSO6) { ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; device_printf(dev, "TSO6 requires txcsum6, enabling both...\n"); } } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6; else if (mask & IFCAP_TSO6) ifp->if_capenable |= IFCAP_TSO6; } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && (ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { vsi->flags |= IXL_FLAGS_KEEP_TSO6; ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); device_printf(dev, "TSO6 requires txcsum6, disabling both...\n"); } else if (mask & IFCAP_TSO6) ifp->if_capenable &= ~IFCAP_TSO6; } } /* For the set_advertise sysctl */ void ixl_get_initial_advertised_speeds(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; enum i40e_status_code status; struct i40e_aq_get_phy_abilities_resp abilities; /* Set initial sysctl values */ status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities, NULL); if (status) { /* Non-fatal error */ device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n", __func__, status); return; } pf->advertised_speed = ixl_convert_sysctl_aq_link_speed(abilities.link_speed, 
false); } int ixl_teardown_hw_structs(struct ixl_pf *pf) { enum i40e_status_code status = 0; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; /* Shutdown LAN HMC */ if (hw->hmc.hmc_obj) { status = i40e_shutdown_lan_hmc(hw); if (status) { device_printf(dev, "init: LAN HMC shutdown failure; status %d\n", status); goto err_out; } } // XXX: This gets called when we know the adminq is inactive; // so we already know it's setup when we get here. /* Shutdown admin queue */ status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, "init: Admin Queue shutdown failure; status %d\n", status); err_out: return (status); } int ixl_reset(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u8 set_fc_err_mask; int error = 0; // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary i40e_clear_hw(hw); error = i40e_pf_reset(hw); if (error) { device_printf(dev, "init: PF reset failure"); error = EIO; goto err_out; } error = i40e_init_adminq(hw); if (error) { device_printf(dev, "init: Admin queue init failure;" " status code %d", error); error = EIO; goto err_out; } i40e_clear_pxe_mode(hw); error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "init: Error retrieving HW capabilities;" " status code %d\n", error); goto err_out; } error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (error) { device_printf(dev, "init: LAN HMC init failed; status code %d\n", error); error = EIO; goto err_out; } error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (error) { device_printf(dev, "init: LAN HMC config failed; status code %d\n", error); error = EIO; goto err_out; } // XXX: possible fix for panic, but our failure recovery is still broken error = ixl_switch_config(pf); if (error) { device_printf(dev, "init: ixl_switch_config() failed: %d\n", error); goto err_out; } error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL); if (error) { device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d," " aq_err %d\n", error, hw->aq.asq_last_status); error = EIO; goto err_out; } error = i40e_set_fc(hw, &set_fc_err_mask, true); if (error) { device_printf(dev, "init: setting link flow control failed; retcode %d," " fc_err_mask 0x%02x\n", error, set_fc_err_mask); goto err_out; } // XXX: (Rebuild VSIs?) 
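	/*
	 * The check below matches firmware older than 4.33.  On those
	 * versions the driver pauses for 75 ms after the PF reset and then
	 * asks the firmware to restart link autonegotiation, apparently so
	 * the link comes back up on its own after the reset.
	 */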
/* Firmware delay workaround */ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) { i40e_msec_delay(75); error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); if (error) { device_printf(dev, "init: link restart failed, aq_err %d\n", hw->aq.asq_last_status); goto err_out; } } err_out: return (error); } /* ** MSIX Interrupt Handlers and Tasklets */ void ixl_handle_que(void *context, int pending) { struct ixl_queue *que = context; struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; struct ifnet *ifp = vsi->ifp; bool more; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { more = ixl_rxeof(que, IXL_RX_LIMIT); IXL_TX_LOCK(txr); ixl_txeof(que); if (!drbr_empty(ifp, txr->br)) ixl_mq_start_locked(ifp, txr); IXL_TX_UNLOCK(txr); if (more) { taskqueue_enqueue(que->tq, &que->task); return; } } /* Reenable this interrupt - hmmm */ ixl_enable_queue(hw, que->me); return; } /********************************************************************* * * Legacy Interrupt Service routine * **********************************************************************/ void ixl_intr(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; struct ifnet *ifp = vsi->ifp; struct tx_ring *txr = &que->txr; u32 icr0; bool more_tx, more_rx; pf->admin_irq++; /* Protect against spurious interrupts */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; icr0 = rd32(hw, I40E_PFINT_ICR0); #ifdef PCI_IOV if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) taskqueue_enqueue(pf->tq, &pf->vflr_task); #endif if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { taskqueue_enqueue(pf->tq, &pf->adminq); } if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { ++que->irqs; more_rx = ixl_rxeof(que, IXL_RX_LIMIT); IXL_TX_LOCK(txr); more_tx = ixl_txeof(que); if (!drbr_empty(vsi->ifp, txr->br)) more_tx = 1; IXL_TX_UNLOCK(txr); } ixl_enable_intr0(hw); } /********************************************************************* * * MSIX VSI Interrupt Service routine * **********************************************************************/ void ixl_msix_que(void *arg) { struct ixl_queue *que = arg; struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; bool more_tx, more_rx; /* Protect against spurious interrupts */ if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) return; ++que->irqs; more_rx = ixl_rxeof(que, IXL_RX_LIMIT); IXL_TX_LOCK(txr); more_tx = ixl_txeof(que); /* ** Make certain that if the stack ** has anything queued the task gets ** scheduled to handle it. 
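** When either ring still has work pending the handler defers to the
** per-queue taskqueue instead of re-enabling the interrupt; the queue
** interrupt is only re-armed (ixl_enable_queue()) once the backlog has
** been drained.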
*/ if (!drbr_empty(vsi->ifp, txr->br)) more_tx = 1; IXL_TX_UNLOCK(txr); ixl_set_queue_rx_itr(que); ixl_set_queue_tx_itr(que); if (more_tx || more_rx) taskqueue_enqueue(que->tq, &que->task); else ixl_enable_queue(hw, que->me); return; } /********************************************************************* * * MSIX Admin Queue Interrupt Service routine * **********************************************************************/ void ixl_msix_adminq(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u32 reg, mask, rstat_reg; bool do_task = FALSE; ++pf->admin_irq; reg = rd32(hw, I40E_PFINT_ICR0); mask = rd32(hw, I40E_PFINT_ICR0_ENA); /* Check on the cause */ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) { mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK; do_task = TRUE; } if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) { ixl_handle_mdd_event(pf); mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK; } if (reg & I40E_PFINT_ICR0_GRST_MASK) { device_printf(dev, "Reset Requested!\n"); rstat_reg = rd32(hw, I40E_GLGEN_RSTAT); rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; device_printf(dev, "Reset type: "); switch (rstat_reg) { /* These others might be handled similarly to an EMPR reset */ case I40E_RESET_CORER: printf("CORER\n"); break; case I40E_RESET_GLOBR: printf("GLOBR\n"); break; case I40E_RESET_EMPR: printf("EMPR\n"); atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); break; default: printf("POR\n"); break; } /* overload admin queue task to check reset progress */ do_task = TRUE; } if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) { device_printf(dev, "ECC Error detected!\n"); } if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) { reg = rd32(hw, I40E_PFHMC_ERRORINFO); if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) { device_printf(dev, "HMC Error detected!\n"); device_printf(dev, "INFO 0x%08x\n", reg); reg = rd32(hw, I40E_PFHMC_ERRORDATA); device_printf(dev, "DATA 0x%08x\n", reg); wr32(hw, I40E_PFHMC_ERRORINFO, 0); } } if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) { device_printf(dev, "PCI Exception detected!\n"); } #ifdef PCI_IOV if (reg & I40E_PFINT_ICR0_VFLR_MASK) { mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; taskqueue_enqueue(pf->tq, &pf->vflr_task); } #endif if (do_task) taskqueue_enqueue(pf->tq, &pf->adminq); else ixl_enable_intr0(hw); } void ixl_set_promisc(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; int err, mcnt = 0; bool uni = FALSE, multi = FALSE; if (ifp->if_flags & IFF_ALLMULTI) multi = TRUE; else { /* Need to count the multicast addresses */ struct ifmultiaddr *ifma; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_MULTICAST_ADDR) break; mcnt++; } if_maddr_runlock(ifp); } if (mcnt >= MAX_MULTICAST_ADDR) multi = TRUE; if (ifp->if_flags & IFF_PROMISC) uni = TRUE; err = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, uni, NULL, TRUE); err = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, multi, NULL); return; } /********************************************************************* * Filter Routines * * Routines for multicast and vlan filter management. 
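 *
 * ixl_add_multi() first counts the addresses hanging off the ifnet; if
 * the count reaches MAX_MULTICAST_ADDR it drops the individual filters
 * and falls back to multicast promiscuous mode, otherwise it adds one
 * filter entry per address via ixl_add_hw_filters().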
* *********************************************************************/ void ixl_add_multi(struct ixl_vsi *vsi) { struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; int mcnt = 0, flags; IOCTL_DEBUGOUT("ixl_add_multi: begin"); if_maddr_rlock(ifp); /* ** First just get a count, to decide if we ** we simply use multicast promiscuous. */ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcnt++; } if_maddr_runlock(ifp); if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete existing MC filters */ ixl_del_hw_filters(vsi, mcnt); i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); return; } mcnt = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; ixl_add_mc_filter(vsi, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); mcnt++; } if_maddr_runlock(ifp); if (mcnt > 0) { flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); ixl_add_hw_filters(vsi, flags, mcnt); } IOCTL_DEBUGOUT("ixl_add_multi: end"); return; } void ixl_del_multi(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct ifmultiaddr *ifma; struct ixl_mac_filter *f; int mcnt = 0; bool match = FALSE; IOCTL_DEBUGOUT("ixl_del_multi: begin"); /* Search for removed multicast addresses */ if_maddr_rlock(ifp); SLIST_FOREACH(f, &vsi->ftl, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { match = FALSE; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); if (cmp_etheraddr(f->macaddr, mc_addr)) { match = TRUE; break; } } if (match == FALSE) { f->flags |= IXL_FILTER_DEL; mcnt++; } } } if_maddr_runlock(ifp); if (mcnt > 0) ixl_del_hw_filters(vsi, mcnt); } /********************************************************************* * Timer routine * * This routine checks for link status,updates statistics, * and runs the watchdog check. * * Only runs when the driver is configured UP and RUNNING. * **********************************************************************/ void ixl_local_timer(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; device_t dev = pf->dev; struct tx_ring *txr; int hung = 0; u32 mask; s32 timer, new_timer; IXL_PF_LOCK_ASSERT(pf); /* Fire off the adminq task */ taskqueue_enqueue(pf->tq, &pf->adminq); /* Update stats */ ixl_update_stats_counters(pf); /* Check status of the queues */ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); for (int i = 0; i < vsi->num_queues; i++, que++) { txr = &que->txr; timer = atomic_load_acq_32(&txr->watchdog_timer); if (timer > 0) { new_timer = timer - hz; if (new_timer <= 0) { atomic_store_rel_32(&txr->watchdog_timer, -1); device_printf(dev, "WARNING: queue %d " "appears to be hung!\n", que->me); ++hung; } else { /* * If this fails, that means something in the TX path has updated * the watchdog, so it means the TX path is still working and * the watchdog doesn't need to countdown. 
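 *
 * The bookkeeping works in ticks: the TX path arms watchdog_timer and
 * every pass of this once-per-second callout subtracts hz, so a queue
 * armed with, say, 5 * hz is declared hung after roughly five seconds
 * without any TX progress, at which point the interface is
 * reinitialized.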
*/ atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer); /* Any queues with outstanding work get a sw irq */ wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask); } } } /* Reset when a queue shows hung */ if (hung) goto hung; callout_reset(&pf->timer, hz, ixl_local_timer, pf); return; hung: device_printf(dev, "WARNING: Resetting!\n"); pf->watchdog_events++; ixl_init_locked(pf); } void ixl_link_up_msg(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ifnet *ifp = pf->vsi.ifp; log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n", ifp->if_xname, ixl_aq_speed_to_str(hw->phy.link_info.link_speed), (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ? "Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ? "Clause 108 RS-FEC" : "None", (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? ixl_fc_string[1] : ixl_fc_string[0]); } /* ** Note: this routine updates the OS on the link state ** the real check of the hardware only happens with ** a link interrupt. */ void ixl_update_link_status(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; device_t dev = pf->dev; if (pf->link_up) { if (vsi->link_active == FALSE) { vsi->link_active = TRUE; ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed); if_link_state_change(ifp, LINK_STATE_UP); ixl_link_up_msg(pf); } } else { /* Link down */ if (vsi->link_active == TRUE) { if (bootverbose) device_printf(dev, "Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); vsi->link_active = FALSE; } } return; } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. 
* **********************************************************************/ void ixl_stop_locked(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; INIT_DEBUGOUT("ixl_stop: begin\n"); IXL_PF_LOCK_ASSERT(pf); #ifdef IXL_IW /* Stop iWARP device */ if (ixl_enable_iwarp && pf->iw_enabled) ixl_iw_pf_stop(pf); #endif /* Stop the local timer */ callout_stop(&pf->timer); ixl_disable_rings_intr(vsi); ixl_disable_rings(vsi); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING); } void ixl_stop(struct ixl_pf *pf) { IXL_PF_LOCK(pf); ixl_stop_locked(pf); IXL_PF_UNLOCK(pf); } /********************************************************************* * * Setup MSIX Interrupt resources and handlers for the VSI * **********************************************************************/ int ixl_setup_legacy(struct ixl_pf *pf) { device_t dev = pf->dev; int error, rid = 0; if (pf->msix == 1) rid = 1; pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (pf->res == NULL) { device_printf(dev, "bus_alloc_resource_any() for" " legacy/msi interrupt\n"); return (ENXIO); } /* Set the handler function */ error = bus_setup_intr(dev, pf->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixl_intr, pf, &pf->tag); if (error) { pf->res = NULL; device_printf(dev, "bus_setup_intr() for legacy/msi" " interrupt handler failed, error %d\n", error); return (ENXIO); } error = bus_describe_intr(dev, pf->res, pf->tag, "irq"); if (error) { /* non-fatal */ device_printf(dev, "bus_describe_intr() for Admin Queue" " interrupt name failed, error %d\n", error); } return (0); } int ixl_setup_adminq_tq(struct ixl_pf *pf) { device_t dev = pf->dev; int error = 0; /* Tasklet for Admin Queue interrupts */ TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); #ifdef PCI_IOV /* VFLR Tasklet */ TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); #endif /* Create and start Admin Queue taskqueue */ pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT, taskqueue_thread_enqueue, &pf->tq); if (!pf->tq) { device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n"); return (ENOMEM); } error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq", device_get_nameunit(dev)); if (error) { device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n", error); taskqueue_free(pf->tq); return (error); } return (0); } int ixl_setup_queue_tqs(struct ixl_vsi *vsi) { struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; #ifdef RSS int cpu_id = 0; cpuset_t cpu_mask; #endif /* Create queue tasks and start queue taskqueues */ for (int i = 0; i < vsi->num_queues; i++, que++) { TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); TASK_INIT(&que->task, 0, ixl_handle_que, que); que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); #ifdef RSS CPU_SETOF(cpu_id, &cpu_mask); taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, &cpu_mask, "%s (bucket %d)", device_get_nameunit(dev), cpu_id); #else taskqueue_start_threads(&que->tq, 1, PI_NET, "%s (que %d)", device_get_nameunit(dev), que->me); #endif } return (0); } void ixl_free_adminq_tq(struct ixl_pf *pf) { if (pf->tq) { taskqueue_free(pf->tq); pf->tq = NULL; } } void ixl_free_queue_tqs(struct ixl_vsi *vsi) { struct ixl_queue *que = vsi->queues; for (int i = 0; i < vsi->num_queues; i++, que++) { if (que->tq) { taskqueue_free(que->tq); que->tq = NULL; } } } int ixl_setup_adminq_msix(struct ixl_pf *pf) { device_t dev = pf->dev; int rid, error = 0; /* Admin IRQ rid is 1, vector is 0 */ rid 
= 1; /* Get interrupt resource from bus */ pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!pf->res) { device_printf(dev, "bus_alloc_resource_any() for Admin Queue" " interrupt failed [rid=%d]\n", rid); return (ENXIO); } /* Then associate interrupt with handler */ error = bus_setup_intr(dev, pf->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixl_msix_adminq, pf, &pf->tag); if (error) { pf->res = NULL; device_printf(dev, "bus_setup_intr() for Admin Queue" " interrupt handler failed, error %d\n", error); return (ENXIO); } error = bus_describe_intr(dev, pf->res, pf->tag, "aq"); if (error) { /* non-fatal */ device_printf(dev, "bus_describe_intr() for Admin Queue" " interrupt name failed, error %d\n", error); } pf->admvec = 0; return (0); } /* * Allocate interrupt resources from bus and associate an interrupt handler * to those for the VSI's queues. */ int ixl_setup_queue_msix(struct ixl_vsi *vsi) { device_t dev = vsi->dev; struct ixl_queue *que = vsi->queues; struct tx_ring *txr; int error, rid, vector = 1; /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */ for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { int cpu_id = i; rid = vector + 1; txr = &que->txr; que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!que->res) { device_printf(dev, "bus_alloc_resource_any() for" " Queue %d interrupt failed [rid=%d]\n", que->me, rid); return (ENXIO); } /* Set the handler function */ error = bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixl_msix_que, que, &que->tag); if (error) { device_printf(dev, "bus_setup_intr() for Queue %d" " interrupt handler failed, error %d\n", que->me, error); bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); return (error); } error = bus_describe_intr(dev, que->res, que->tag, "q%d", i); if (error) { device_printf(dev, "bus_describe_intr() for Queue %d" " interrupt name failed, error %d\n", que->me, error); } /* Bind the vector to a CPU */ #ifdef RSS cpu_id = rss_getcpu(i % rss_getnumbuckets()); #endif error = bus_bind_intr(dev, que->res, cpu_id); if (error) { device_printf(dev, "bus_bind_intr() for Queue %d" " to CPU %d failed, error %d\n", que->me, cpu_id, error); } que->msix = vector; } return (0); } /* * When used in a virtualized environment PCI BUSMASTER capability may not be set * so explicity set it here and rewrite the ENABLE in the MSIX control register * at this point to cause the host to successfully initialize us. */ void ixl_set_busmaster(device_t dev) { u16 pci_cmd_word; pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); pci_cmd_word |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); } /* * rewrite the ENABLE in the MSIX control register * to cause the host to successfully initialize us. */ void ixl_set_msix_enable(device_t dev) { int msix_ctrl, rid; pci_find_cap(dev, PCIY_MSIX, &rid); rid += PCIR_MSIX_CTRL; msix_ctrl = pci_read_config(dev, rid, 2); msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(dev, rid, msix_ctrl, 2); } /* * Allocate MSI/X vectors from the OS. * Returns 0 for legacy, 1 for MSI, >1 for MSIX. 
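 *
 * The budget worked out below is one vector per LAN queue pair plus one
 * for the admin queue / "other" causes, so e.g. 8 queues ask for 9
 * vectors (vector 0 handles the admin queue, vectors 1-8 the queues).
 * The queue count itself is clamped beforehand: no more than the
 * available vectors minus one, no more than the number of CPUs, no more
 * than the hardware limit, and no more than ixl_max_queues (or 8 when
 * that tunable is left unset).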
*/ int ixl_init_msix(struct ixl_pf *pf) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; int auto_max_queues; int rid, want, vectors, queues, available; #ifdef IXL_IW int iw_want, iw_vectors; pf->iw_msix = 0; #endif /* Override by tuneable */ if (!pf->enable_msix) goto no_msix; /* Ensure proper operation in virtualized environment */ ixl_set_busmaster(dev); /* First try MSI/X */ rid = PCIR_BAR(IXL_MSIX_BAR); pf->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!pf->msix_mem) { /* May not be enabled */ device_printf(pf->dev, "Unable to map MSIX table\n"); goto no_msix; } available = pci_msix_count(dev); if (available < 2) { /* system has msix disabled (0), or only one vector (1) */ bus_release_resource(dev, SYS_RES_MEMORY, rid, pf->msix_mem); pf->msix_mem = NULL; goto no_msix; } /* Clamp max number of queues based on: * - # of MSI-X vectors available * - # of cpus available * - # of queues that can be assigned to the LAN VSI */ auto_max_queues = min(mp_ncpus, available - 1); if (hw->mac.type == I40E_MAC_X722) auto_max_queues = min(auto_max_queues, 128); else auto_max_queues = min(auto_max_queues, 64); /* Override with tunable value if tunable is less than autoconfig count */ if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues)) queues = pf->max_queues; /* Use autoconfig amount if that's lower */ else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) { device_printf(dev, "ixl_max_queues (%d) is too large, using " "autoconfig amount (%d)...\n", pf->max_queues, auto_max_queues); queues = auto_max_queues; } /* Limit maximum auto-configured queues to 8 if no user value is set */ else queues = min(auto_max_queues, 8); #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif /* ** Want one vector (RX/TX pair) per queue ** plus an additional for the admin queue. */ want = queues + 1; if (want <= available) /* Have enough */ vectors = want; else { device_printf(pf->dev, "MSIX Configuration Problem, " "%d vectors available but %d wanted!\n", available, want); pf->msix_mem = NULL; goto no_msix; /* Will go to Legacy setup */ } #ifdef IXL_IW if (ixl_enable_iwarp) { /* iWARP wants additional vector for CQP */ iw_want = mp_ncpus + 1; available -= vectors; if (available > 0) { iw_vectors = (available >= iw_want) ? iw_want : available; vectors += iw_vectors; } else iw_vectors = 0; } #endif ixl_set_msix_enable(dev); if (pci_alloc_msix(dev, &vectors) == 0) { device_printf(pf->dev, "Using MSIX interrupts with %d vectors\n", vectors); pf->msix = vectors; #ifdef IXL_IW if (ixl_enable_iwarp) pf->iw_msix = iw_vectors; #endif pf->vsi.num_queues = queues; #ifdef RSS /* * If we're doing RSS, the number of queues needs to * match the number of RSS buckets that are configured. * * + If there's more queues than RSS buckets, we'll end * up with queues that get no traffic. * * + If there's more RSS buckets than queues, we'll end * up having multiple RSS buckets map to the same queue, * so there'll be some contention. 
*/ if (queues != rss_getnumbuckets()) { device_printf(dev, "%s: queues (%d) != RSS buckets (%d)" "; performance will be impacted.\n", __func__, queues, rss_getnumbuckets()); } #endif return (vectors); } no_msix: vectors = pci_msi_count(dev); pf->vsi.num_queues = 1; pf->max_queues = 1; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) device_printf(pf->dev, "Using an MSI interrupt\n"); else { vectors = 0; device_printf(pf->dev, "Using a Legacy interrupt\n"); } return (vectors); } /* * Configure admin queue/misc interrupt cause registers in hardware. */ void ixl_configure_intr0_msix(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg; /* First set up the adminq - vector 0 */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ rd32(hw, I40E_PFINT_ICR0); /* read to clear */ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); /* * 0x7FF is the end of the queue list. * This means we won't use MSI-X vector 0 for a queue interrupt * in MSIX mode. */ wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E); wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); wr32(hw, I40E_PFINT_STAT_CTL0, 0); } /* * Configure queue interrupt cause registers in hardware. */ void ixl_configure_queue_intr_msix(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; u32 reg; u16 vector = 1; for (int i = 0; i < vsi->num_queues; i++, vector++) { wr32(hw, I40E_PFINT_DYN_CTLN(i), 0); /* First queue type is RX / 0 */ wr32(hw, I40E_PFINT_LNKLSTN(i), i); reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(i), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_TQCTL(i), reg); } } /* * Configure for MSI single vector operation */ void ixl_configure_legacy(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; struct rx_ring *rxr = &que->rxr; struct tx_ring *txr = &que->txr; u32 reg; /* Configure ITR */ vsi->tx_itr_setting = pf->tx_itr; wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; vsi->rx_itr_setting = pf->rx_itr; wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; /* Setup "other" causes */ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_GPIO_MASK | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK ; wr32(hw, I40E_PFINT_ICR0_ENA, reg); /* No ITR for non-queue interrupts */ wr32(hw, I40E_PFINT_STAT_CTL0, IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT); /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) 
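   -- i.e. the vector's queue list starts at queue 0, an RX queue.  In this
   single-vector mode the causes are chained RX queue 0 -> TX queue 0 ->
   end of list (IXL_QUEUE_EOL), as set up by the RQCTL/TQCTL writes below,
   so one interrupt covers both rings plus the "other" causes enabled above.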
*/ wr32(hw, I40E_PFINT_LNKLST0, 0); /* Associate the queue pair to the vector and enable the q int */ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(0), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(0), reg); } int ixl_allocate_pci_resources(struct ixl_pf *pf) { int rid; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; /* Map BAR0 */ rid = PCIR_BAR(0); pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(pf->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); return (ENXIO); } /* Save off the PCI information */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); /* Save off register access information */ pf->osdep.mem_bus_space_tag = rman_get_bustag(pf->pci_mem); pf->osdep.mem_bus_space_handle = rman_get_bushandle(pf->pci_mem); pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); pf->osdep.flush_reg = I40E_GLGEN_STAT; pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; pf->hw.back = &pf->osdep; return (0); } /* * Teardown and release the admin queue/misc vector * interrupt. */ int ixl_teardown_adminq_msix(struct ixl_pf *pf) { device_t dev = pf->dev; int rid, error = 0; if (pf->admvec) /* we are doing MSIX */ rid = pf->admvec + 1; else (pf->msix != 0) ? 
(rid = 1):(rid = 0); if (pf->tag != NULL) { bus_teardown_intr(dev, pf->res, pf->tag); if (error) { device_printf(dev, "bus_teardown_intr() for" " interrupt 0 failed\n"); // return (ENXIO); } pf->tag = NULL; } if (pf->res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res); if (error) { device_printf(dev, "bus_release_resource() for" " interrupt 0 failed [rid=%d]\n", rid); // return (ENXIO); } pf->res = NULL; } return (0); } int ixl_teardown_queue_msix(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; int rid, error = 0; /* We may get here before stations are setup */ if ((pf->msix < 2) || (que == NULL)) return (0); /* Release all MSIX queue resources */ for (int i = 0; i < vsi->num_queues; i++, que++) { rid = que->msix + 1; if (que->tag != NULL) { error = bus_teardown_intr(dev, que->res, que->tag); if (error) { device_printf(dev, "bus_teardown_intr() for" " Queue %d interrupt failed\n", que->me); // return (ENXIO); } que->tag = NULL; } if (que->res != NULL) { error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); if (error) { device_printf(dev, "bus_release_resource() for" " Queue %d interrupt failed [rid=%d]\n", que->me, rid); // return (ENXIO); } que->res = NULL; } } return (0); } void ixl_free_pci_resources(struct ixl_pf *pf) { device_t dev = pf->dev; int memrid; ixl_teardown_queue_msix(&pf->vsi); ixl_teardown_adminq_msix(pf); if (pf->msix > 0) pci_release_msi(dev); memrid = PCIR_BAR(IXL_MSIX_BAR); if (pf->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, memrid, pf->msix_mem); if (pf->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), pf->pci_mem); return; } void ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types) { /* Display supported media types */ if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) || phy_types & (I40E_CAP_PHY_TYPE_XFI) || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) || phy_types & (I40E_CAP_PHY_TYPE_XLAUI) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL); if 
(phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_SFI)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL); } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ int ixl_setup_interface(device_t dev, struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct ifnet *ifp; struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; struct i40e_aq_get_phy_abilities_resp abilities; enum i40e_status_code aq_error = 0; INIT_DEBUGOUT("ixl_setup_interface: begin"); ifp = vsi->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_init = ixl_init; ifp->if_softc = vsi; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixl_ioctl; #if __FreeBSD_version >= 1100036 if_setgetcounterfn(ifp, ixl_get_counter); #endif ifp->if_transmit = ixl_mq_start; ifp->if_qflush = ixl_qflush; ifp->if_snd.ifq_maxlen = que->num_desc - 2; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; /* Set TSO limits */ ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN); ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS; ifp->if_hw_tsomaxsegsize = PAGE_SIZE; /* * Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; ifp->if_capabilities |= IFCAP_TSO; ifp->if_capabilities |= IFCAP_JUMBO_MTU; ifp->if_capabilities |= IFCAP_LRO; /* VLAN capabilties */ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; ifp->if_capenable = ifp->if_capabilities; /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the ixl driver you can ** enable this and get full hardware tag filtering. 
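** (If wanted, the capability can usually be toggled at runtime with
** something like "ifconfig ixl0 vlanhwfilter" / "-vlanhwfilter"; the
** exact ifconfig flag name is not taken from this file and is only an
** illustrative assumption.)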
*/ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change, ixl_media_status); aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities, NULL); /* May need delay to detect fiber correctly */ if (aq_error == I40E_ERR_UNKNOWN_PHY) { i40e_msec_delay(200); aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities, NULL); } if (aq_error) { if (aq_error == I40E_ERR_UNKNOWN_PHY) device_printf(dev, "Unknown PHY type detected!\n"); else device_printf(dev, "Error getting supported media types, err %d," " AQ error %d\n", aq_error, hw->aq.asq_last_status); return (0); } pf->supported_speeds = abilities.link_speed; ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds); ixl_add_ifmedia(vsi, hw->phy.phy_types); /* Use autoselect media by default */ ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO); ether_ifattach(ifp, hw->mac.addr); return (0); } /* ** Run when the Admin Queue gets a link state change interrupt. */ void ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; /* Request link status from adapter */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); /* Print out message if an unqualified module is found */ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP))) device_printf(dev, "Link failed because " "an unqualified module was detected!\n"); /* Update OS link info */ ixl_update_link_status(pf); } /********************************************************************* * * Get Firmware Switch configuration * - this will need to be more robust when more complex * switch configurations are enabled. 
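 * With the single PF VSI used here only element[0] of the returned
 * configuration is consumed (see the "Simplified due to a single VSI"
 * note in the function body).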
* **********************************************************************/ int ixl_switch_config(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = vsi->dev; struct i40e_aqc_get_switch_config_resp *sw_config; u8 aq_buf[I40E_AQ_LARGE_BUF]; int ret; u16 next = 0; memset(&aq_buf, 0, sizeof(aq_buf)); sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; ret = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (ret) { device_printf(dev, "aq_get_switch_config() failed, error %d," " aq_error %d\n", ret, pf->hw.aq.asq_last_status); return (ret); } if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) { device_printf(dev, "Switch config: header reported: %d in structure, %d total\n", sw_config->header.num_reported, sw_config->header.num_total); for (int i = 0; i < sw_config->header.num_reported; i++) { device_printf(dev, "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i, sw_config->element[i].element_type, sw_config->element[i].seid, sw_config->element[i].uplink_seid, sw_config->element[i].downlink_seid); } } /* Simplified due to a single VSI */ vsi->uplink_seid = sw_config->element[0].uplink_seid; vsi->downlink_seid = sw_config->element[0].downlink_seid; vsi->seid = sw_config->element[0].seid; return (ret); } /********************************************************************* * * Initialize the VSI: this handles contexts, which means things * like the number of descriptors, buffer size, * plus we init the rings thru this function. * **********************************************************************/ int ixl_initialize_vsi(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; struct i40e_hw *hw = vsi->hw; struct i40e_vsi_context ctxt; int tc_queues; int err = 0; memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = vsi->seid; if (pf->veb_seid != 0) ctxt.uplink_seid = pf->veb_seid; ctxt.pf_num = hw->pf_id; err = i40e_aq_get_vsi_params(hw, &ctxt, NULL); if (err) { device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d" " aq_error %d\n", err, hw->aq.asq_last_status); return (err); } ixl_dbg(pf, IXL_DBG_SWITCH_INFO, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, " "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, " "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid, ctxt.uplink_seid, ctxt.vsi_number, ctxt.vsis_allocated, ctxt.vsis_unallocated, ctxt.flags, ctxt.pf_num, ctxt.vf_num, ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits); /* ** Set the queue and traffic class bits ** - when multiple traffic classes are supported ** this will need to be more robust. */ ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG; /* In contig mode, que_mapping[0] is first queue index used by this VSI */ ctxt.info.queue_mapping[0] = 0; /* * This VSI will only use traffic class 0; start traffic class 0's * queue allocation at queue 0, and assign it 2^tc_queues queues (though * the driver may not use all of them). 
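 * For example (illustrative): with pf->qtag.num_allocated == 8,
 * bsrl(8) == 3, so tc_mapping[0] below asks for 2^3 == 8 queues for
 * TC 0, starting at queue offset 0.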
*/ tc_queues = bsrl(pf->qtag.num_allocated); ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) | ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) & I40E_AQ_VSI_TC_QUE_NUMBER_MASK); /* Set VLAN receive stripping mode */ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID; ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL; if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING) ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; else ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING; #ifdef IXL_IW /* Set TCP Enable for iWARP capable VSI */ if (ixl_enable_iwarp && pf->iw_enabled) { ctxt.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; } #endif /* Save VSI number and info for use later */ vsi->vsi_num = ctxt.vsi_number; bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info)); /* Reset VSI statistics */ ixl_vsi_reset_stats(vsi); vsi->hw_filters_add = 0; vsi->hw_filters_del = 0; ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF); err = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (err) { device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d," " aq_error %d\n", err, hw->aq.asq_last_status); return (err); } for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; struct i40e_hmc_obj_txq tctx; struct i40e_hmc_obj_rxq rctx; u32 txctl; u16 size; /* Setup the HMC TX Context */ size = que->num_desc * sizeof(struct i40e_tx_desc); memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq)); tctx.new_context = 1; tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS); tctx.qlen = que->num_desc; tctx.fc_ena = 0; tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */ /* Enable HEAD writeback */ tctx.head_wb_ena = 1; tctx.head_wb_addr = txr->dma.pa + (que->num_desc * sizeof(struct i40e_tx_desc)); tctx.rdylist_act = 0; err = i40e_clear_lan_tx_queue_context(hw, i); if (err) { device_printf(dev, "Unable to clear TX context\n"); break; } err = i40e_set_lan_tx_queue_context(hw, i, &tctx); if (err) { device_printf(dev, "Unable to set TX context\n"); break; } /* Associate the ring with this PF */ txctl = I40E_QTX_CTL_PF_QUEUE; txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(i), txctl); ixl_flush(hw); /* Do ring (re)init */ ixl_init_tx_ring(que); /* Next setup the HMC RX Context */ if (vsi->max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len; /* Set up an RX context for the HMC */ memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq)); rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT; /* ignore header split for now */ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; rctx.rxmax = (vsi->max_frame_size < max_rxmax) ? 
vsi->max_frame_size : max_rxmax; rctx.dtype = 0; rctx.dsize = 1; /* do 32byte descriptors */ rctx.hsplit_0 = 0; /* no HDR split initially */ rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS); rctx.qlen = que->num_desc; rctx.tphrdesc_ena = 1; rctx.tphwdesc_ena = 1; rctx.tphdata_ena = 0; rctx.tphhead_ena = 0; rctx.lrxqthresh = 2; rctx.crcstrip = 1; rctx.l2tsel = 1; rctx.showiv = 1; rctx.fc_ena = 0; rctx.prefena = 1; err = i40e_clear_lan_rx_queue_context(hw, i); if (err) { device_printf(dev, "Unable to clear RX context %d\n", i); break; } err = i40e_set_lan_rx_queue_context(hw, i, &rctx); if (err) { device_printf(dev, "Unable to set RX context %d\n", i); break; } err = ixl_init_rx_ring(que); if (err) { device_printf(dev, "Fail in init_rx_ring %d\n", i); break; } #ifdef DEV_NETMAP /* preserve queue */ if (vsi->ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(vsi->ifp); - struct netmap_kring *kring = &na->rx_rings[i]; + struct netmap_kring *kring = na->rx_rings[i]; int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); wr32(vsi->hw, I40E_QRX_TAIL(que->me), t); } else #endif /* DEV_NETMAP */ wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1); } return (err); } /********************************************************************* * * Free all VSI structs. * **********************************************************************/ void ixl_free_vsi(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct ixl_queue *que = vsi->queues; /* Free station queues */ if (!vsi->queues) goto free_filters; for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; if (!mtx_initialized(&txr->mtx)) /* uninitialized */ continue; IXL_TX_LOCK(txr); ixl_free_que_tx(que); if (txr->base) i40e_free_dma_mem(&pf->hw, &txr->dma); IXL_TX_UNLOCK(txr); IXL_TX_LOCK_DESTROY(txr); if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ continue; IXL_RX_LOCK(rxr); ixl_free_que_rx(que); if (rxr->base) i40e_free_dma_mem(&pf->hw, &rxr->dma); IXL_RX_UNLOCK(rxr); IXL_RX_LOCK_DESTROY(rxr); } free(vsi->queues, M_DEVBUF); free_filters: /* Free VSI filter list */ ixl_free_mac_filters(vsi); } void ixl_free_mac_filters(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; while (!SLIST_EMPTY(&vsi->ftl)) { f = SLIST_FIRST(&vsi->ftl); SLIST_REMOVE_HEAD(&vsi->ftl, next); free(f, M_DEVBUF); } } /* * Fill out fields in queue struct and setup tx/rx memory and structs */ static int ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; int error = 0; int rsize, tsize; que->num_desc = pf->ringsz; que->me = index; que->vsi = vsi; txr->que = que; txr->tail = I40E_QTX_TAIL(que->me); /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), que->me); mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); /* Create the TX descriptor ring */ tsize = roundup2((que->num_desc * sizeof(struct i40e_tx_desc)) + sizeof(u32), DBA_ALIGN); if (i40e_allocate_dma_mem(hw, &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto fail; } txr->base = (struct i40e_tx_desc *)txr->dma.va; bzero((void *)txr->base, tsize); /* Now allocate transmit soft structs for the ring */ if (ixl_allocate_tx_data(que)) { device_printf(dev, "Critical Failure setting up TX structures\n"); error = ENOMEM; 
goto fail; } /* Allocate a buf ring */ txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF, M_NOWAIT, &txr->mtx); if (txr->br == NULL) { device_printf(dev, "Critical Failure setting up TX buf ring\n"); error = ENOMEM; goto fail; } rsize = roundup2(que->num_desc * sizeof(union i40e_rx_desc), DBA_ALIGN); rxr->que = que; rxr->tail = I40E_QRX_TAIL(que->me); /* Initialize the RX side lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), que->me); mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); if (i40e_allocate_dma_mem(hw, &rxr->dma, i40e_mem_reserved, rsize, 4096)) { device_printf(dev, "Unable to allocate RX Descriptor memory\n"); error = ENOMEM; goto fail; } rxr->base = (union i40e_rx_desc *)rxr->dma.va; bzero((void *)rxr->base, rsize); /* Allocate receive soft structs for the ring*/ if (ixl_allocate_rx_data(que)) { device_printf(dev, "Critical Failure setting up receive structs\n"); error = ENOMEM; goto fail; } return (0); fail: if (rxr->base) i40e_free_dma_mem(&pf->hw, &rxr->dma); if (mtx_initialized(&rxr->mtx)) mtx_destroy(&rxr->mtx); if (txr->br) { buf_ring_free(txr->br, M_DEVBUF); txr->br = NULL; } if (txr->base) i40e_free_dma_mem(&pf->hw, &txr->dma); if (mtx_initialized(&txr->mtx)) mtx_destroy(&txr->mtx); return (error); } /********************************************************************* * * Allocate memory for the VSI (virtual station interface) and their * associated queues, rings and the descriptors associated with each, * called only once at attach. * **********************************************************************/ int ixl_setup_stations(struct ixl_pf *pf) { device_t dev = pf->dev; struct ixl_vsi *vsi; struct ixl_queue *que; int error = 0; vsi = &pf->vsi; vsi->back = (void *)pf; vsi->hw = &pf->hw; vsi->id = 0; vsi->num_vlans = 0; vsi->back = pf; /* Get memory for the station queues */ if (!(vsi->queues = (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); error = ENOMEM; return (error); } /* Then setup each queue */ for (int i = 0; i < vsi->num_queues; i++) { que = &vsi->queues[i]; error = ixl_setup_queue(que, pf, i); if (error) return (error); } return (0); } /* ** Provide a update to the queue RX ** interrupt moderation value. 
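** When dynamic ITR is enabled the chosen preset is blended with the
** previous value, new = (10 * preset * old) / (9 * preset + old), so the
** interval ramps toward the preset instead of jumping straight to it.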
*/ void ixl_set_queue_rx_itr(struct ixl_queue *que) { struct ixl_vsi *vsi = que->vsi; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; struct rx_ring *rxr = &que->rxr; u16 rx_itr; u16 rx_latency = 0; int rx_bytes; /* Idle, do nothing */ if (rxr->bytes == 0) return; if (pf->dynamic_rx_itr) { rx_bytes = rxr->bytes/rxr->itr; rx_itr = rxr->itr; /* Adjust latency range */ switch (rxr->latency) { case IXL_LOW_LATENCY: if (rx_bytes > 10) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (rx_bytes > 20) { rx_latency = IXL_BULK_LATENCY; rx_itr = IXL_ITR_8K; } else if (rx_bytes <= 10) { rx_latency = IXL_LOW_LATENCY; rx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (rx_bytes <= 20) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; } rxr->latency = rx_latency; if (rx_itr != rxr->itr) { /* do an exponential smoothing */ rx_itr = (10 * rx_itr * rxr->itr) / ((9 * rx_itr) + rxr->itr); rxr->itr = min(rx_itr, IXL_MAX_ITR); wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->me), rxr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) vsi->rx_itr_setting = pf->rx_itr; /* Update the hardware if needed */ if (rxr->itr != vsi->rx_itr_setting) { rxr->itr = vsi->rx_itr_setting; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->me), rxr->itr); } } rxr->bytes = 0; rxr->packets = 0; return; } /* ** Provide a update to the queue TX ** interrupt moderation value. */ void ixl_set_queue_tx_itr(struct ixl_queue *que) { struct ixl_vsi *vsi = que->vsi; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; u16 tx_latency = 0; int tx_bytes; /* Idle, do nothing */ if (txr->bytes == 0) return; if (pf->dynamic_tx_itr) { tx_bytes = txr->bytes/txr->itr; tx_itr = txr->itr; switch (txr->latency) { case IXL_LOW_LATENCY: if (tx_bytes > 10) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (tx_bytes > 20) { tx_latency = IXL_BULK_LATENCY; tx_itr = IXL_ITR_8K; } else if (tx_bytes <= 10) { tx_latency = IXL_LOW_LATENCY; tx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (tx_bytes <= 20) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; } txr->latency = tx_latency; if (tx_itr != txr->itr) { /* do an exponential smoothing */ tx_itr = (10 * tx_itr * txr->itr) / ((9 * tx_itr) + txr->itr); txr->itr = min(tx_itr, IXL_MAX_ITR); wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, que->me), txr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) vsi->tx_itr_setting = pf->tx_itr; /* Update the hardware if needed */ if (txr->itr != vsi->tx_itr_setting) { txr->itr = vsi->tx_itr_setting; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, que->me), txr->itr); } } txr->bytes = 0; txr->packets = 0; return; } void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name) { struct sysctl_oid *tree; struct sysctl_oid_list *child; struct sysctl_oid_list *vsi_list; tree = device_get_sysctl_tree(pf->dev); child = SYSCTL_CHILDREN(tree); vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name, CTLFLAG_RD, NULL, "VSI Number"); vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats); } #ifdef IXL_DEBUG /** * ixl_sysctl_qtx_tail_handler * Retrieves I40E_QTX_TAIL value from hardware * for a sysctl. 
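 * Illustrative usage (assuming the first ixl device): with IXL_DEBUG
 * compiled in this appears as a per-queue node, e.g.
 *   sysctl dev.ixl.0.pf.que0.qtx_tail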
*/ static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS) { struct ixl_queue *que; int error; u32 val; que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; val = rd32(que->vsi->hw, que->txr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } /** * ixl_sysctl_qrx_tail_handler * Retrieves I40E_QRX_TAIL value from hardware * for a sysctl. */ static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS) { struct ixl_queue *que; int error; u32 val; que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; val = rd32(que->vsi->hw, que->rxr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } #endif /* * Used to set the Tx ITR value for all of the PF LAN VSI's queues. * Writes to the ITR registers immediately. */ static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int error = 0; int requested_tx_itr; requested_tx_itr = pf->tx_itr; error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (pf->dynamic_tx_itr) { device_printf(dev, "Cannot set TX itr value while dynamic TX itr is enabled\n"); return (EINVAL); } if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid TX itr value; value must be between 0 and %d\n", IXL_MAX_ITR); return (EINVAL); } pf->tx_itr = requested_tx_itr; ixl_configure_tx_itr(pf); return (error); } /* * Used to set the Rx ITR value for all of the PF LAN VSI's queues. * Writes to the ITR registers immediately. */ static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int error = 0; int requested_rx_itr; requested_rx_itr = pf->rx_itr; error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (pf->dynamic_rx_itr) { device_printf(dev, "Cannot set RX itr value while dynamic RX itr is enabled\n"); return (EINVAL); } if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid RX itr value; value must be between 0 and %d\n", IXL_MAX_ITR); return (EINVAL); } pf->rx_itr = requested_rx_itr; ixl_configure_rx_itr(pf); return (error); } void ixl_add_hw_stats(struct ixl_pf *pf) { device_t dev = pf->dev; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *queues = vsi->queues; struct i40e_hw_port_stats *pf_stats = &pf->stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct sysctl_oid_list *vsi_list; struct sysctl_oid *queue_node; struct sysctl_oid_list *queue_list; struct tx_ring *txr; struct rx_ring *rxr; char queue_namebuf[QUEUE_NAME_LEN]; /* Driver statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &pf->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq", CTLFLAG_RD, &pf->admin_irq, "Admin Queue IRQ Handled"); ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf"); vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node); /* Queue statistics */ for (int q = 0; q < vsi->num_queues; q++) { snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #"); queue_list = SYSCTL_CHILDREN(queue_node); txr = &(queues[q].txr); rxr = &(queues[q].rxr); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, 
"mbuf_defrag_failed", CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), "m_defrag() failed"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", CTLFLAG_RD, &(queues[q].irqs), "irqs on this queue"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &(queues[q].tso), "TSO"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed", CTLFLAG_RD, &(queues[q].tx_dmamap_failed), "Driver tx dma failure in xmit"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small", CTLFLAG_RD, &(queues[q].mss_too_small), "TSO sends with an MSS less than 64"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &(txr->no_desc), "Queue No Descriptor Available"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &(txr->total_packets), "Queue Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &(txr->tx_bytes), "Queue Bytes Transmitted"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &(rxr->rx_packets), "Queue Packets Received"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &(rxr->rx_bytes), "Queue Bytes Received"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err", CTLFLAG_RD, &(rxr->desc_errs), "Queue Rx Descriptor Errors"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr", CTLFLAG_RD, &(rxr->itr), 0, "Queue Rx ITR Interval"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr", CTLFLAG_RD, &(txr->itr), 0, "Queue Tx ITR Interval"); #ifdef IXL_DEBUG SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done", CTLFLAG_RD, &(rxr->not_done), "Queue Rx Descriptors not Done"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh", CTLFLAG_RD, &(rxr->next_refresh), 0, "Queue Rx Descriptors not Done"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check", CTLFLAG_RD, &(rxr->next_check), 0, "Queue Rx Descriptors not Done"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail", CTLTYPE_UINT | CTLFLAG_RD, &queues[q], sizeof(struct ixl_queue), ixl_sysctl_qtx_tail_handler, "IU", "Queue Transmit Descriptor Tail"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail", CTLTYPE_UINT | CTLFLAG_RD, &queues[q], sizeof(struct ixl_queue), ixl_sysctl_qrx_tail_handler, "IU", "Queue Receive Descriptor Tail"); #endif } /* MAC stats */ ixl_add_sysctls_mac_stats(ctx, child, pf_stats); } void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_eth_stats *eth_stats) { struct ixl_sysctl_info ctls[] = { {ð_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, {ð_stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received"}, {ð_stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received"}, {ð_stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received"}, {ð_stats->rx_discards, "rx_discards", "Discarded RX packets"}, {ð_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"}, {ð_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"}, {ð_stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted"}, {ð_stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted"}, // end {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } } void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_hw_port_stats *stats) { struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", 
CTLFLAG_RD, NULL, "Mac Statistics"); struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); struct i40e_eth_stats *eth_stats = &stats->eth; ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); struct ixl_sysctl_info ctls[] = { {&stats->crc_errors, "crc_errors", "CRC Errors"}, {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"}, /* Packet Reception Stats */ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, {&stats->checksum_error, "checksum_errors", "Checksum Errors"}, /* Packet Transmission Stats */ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, /* Flow control */ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, /* End */ {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } } void ixl_set_rss_key(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; enum i40e_status_code status; #ifdef RSS u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; #else u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687, 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1, 0x0, 0x0, 0x0}; #endif #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #endif /* Fill out hash function seed */ if (hw->mac.type == I40E_MAC_X722) { struct i40e_aqc_get_set_rss_key_data key_data; bcopy(rss_seed, key_data.standard_rss_key, 40); status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data); if (status) device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]); } } /* * Configure enabled PCTYPES for RSS. 
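 * Each enabled packet classifier type (PCTYPE) sets one bit in a 64-bit
 * HENA mask, which is merged with the current value and written across
 * the two 32-bit I40E_PFQF_HENA registers.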
*/ void ixl_set_rss_pctypes(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u64 set_hena = 0, hena; #ifdef RSS u32 rss_hash_config; rss_hash_config = rss_gethashconfig(); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); #else if (hw->mac.type == I40E_MAC_X722) set_hena = IXL_DEFAULT_RSS_HENA_X722; else set_hena = IXL_DEFAULT_RSS_HENA_XL710; #endif hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); hena |= set_hena; i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); } void ixl_set_rss_hlut(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct ixl_vsi *vsi = &pf->vsi; int i, que_id; int lut_entry_width; u32 lut = 0; enum i40e_status_code status; if (hw->mac.type == I40E_MAC_X722) lut_entry_width = 7; else lut_entry_width = pf->hw.func_caps.rss_table_entry_width; /* Populate the LUT with max no. of queues in round robin fashion */ u8 hlut_buf[512]; for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) { #ifdef RSS /* * Fetch the RSS bucket id for the given indirection entry. * Cap it at the number of configured buckets (which is * num_queues.) */ que_id = rss_get_indirection_to_bucket(i); que_id = que_id % vsi->num_queues; #else que_id = i % vsi->num_queues; #endif lut = (que_id & ((0x1 << lut_entry_width) - 1)); hlut_buf[i] = lut; } if (hw->mac.type == I40E_MAC_X722) { status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf)); if (status) device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++) wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]); ixl_flush(hw); } } /* ** Setup the PF's RSS parameters. */ void ixl_config_rss(struct ixl_pf *pf) { ixl_set_rss_key(pf); ixl_set_rss_pctypes(pf); ixl_set_rss_hlut(pf); } /* ** This routine is run via an vlan config EVENT, ** it enables us to use the HW Filter table since ** we can get the vlan id. This just creates the ** entry in the soft version of the VFTA, init will ** repopulate the real table. */ void ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = ifp->if_softc; struct i40e_hw *hw = vsi->hw; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IXL_PF_LOCK(pf); ++vsi->num_vlans; ixl_add_filter(vsi, hw->mac.addr, vtag); IXL_PF_UNLOCK(pf); } /* ** This routine is run via an vlan ** unconfig EVENT, remove our entry ** in the soft vfta. 
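** (The matching MAC/VLAN entry is also removed from the hardware
** filter table via ixl_del_filter().)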
*/ void ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = ifp->if_softc; struct i40e_hw *hw = vsi->hw; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; IXL_PF_LOCK(pf); --vsi->num_vlans; ixl_del_filter(vsi, hw->mac.addr, vtag); IXL_PF_UNLOCK(pf); } /* ** This routine updates vlan filters, called by init ** it scans the filter table and then updates the hw ** after a soft reset. */ void ixl_setup_vlan_filters(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; int cnt = 0, flags; if (vsi->num_vlans == 0) return; /* ** Scan the filter list for vlan entries, ** mark them for addition and then call ** for the AQ update. */ SLIST_FOREACH(f, &vsi->ftl, next) { if (f->flags & IXL_FILTER_VLAN) { f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); cnt++; } } if (cnt == 0) { printf("setup vlan: no filters found!\n"); return; } flags = IXL_FILTER_VLAN; flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); ixl_add_hw_filters(vsi, flags, cnt); return; } /* ** Initialize filter list and add filters that the hardware ** needs to know about. ** ** Requires VSI's filter list & seid to be set before calling. */ void ixl_init_filters(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; /* Add broadcast address */ ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY); /* * Prevent Tx flow control frames from being sent out by * non-firmware transmitters. * This affects every VSI in the PF. */ if (pf->enable_tx_fc_filter) i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); } /* ** This routine adds mulicast filters */ void ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) { struct ixl_mac_filter *f; /* Does one already exist */ f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (f != NULL) return; f = ixl_get_filter(vsi); if (f == NULL) { printf("WARNING: no filter available!!\n"); return; } bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->vlan = IXL_VLAN_ANY; f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); return; } void ixl_reconfigure_filters(struct ixl_vsi *vsi) { ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs); } /* ** This routine adds macvlan filters */ void ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f, *tmp; struct ixl_pf *pf; device_t dev; DEBUGOUT("ixl_add_filter: begin"); pf = vsi->back; dev = pf->dev; /* Does one already exist */ f = ixl_find_filter(vsi, macaddr, vlan); if (f != NULL) return; /* ** Is this the first vlan being registered, if so we ** need to remove the ANY filter that indicates we are ** not in a vlan, and replace that with a 0 filter. 
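** For example (illustrative): when VLAN 100 is the first VLAN configured,
** the (MAC, IXL_VLAN_ANY) filter is swapped for a (MAC, 0) untagged
** filter and the (MAC, 100) filter is then added as usual.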
*/ if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (tmp != NULL) { ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); ixl_add_filter(vsi, macaddr, 0); } } f = ixl_get_filter(vsi); if (f == NULL) { device_printf(dev, "WARNING: no filter available!!\n"); return; } bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->vlan = vlan; f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); if (f->vlan != IXL_VLAN_ANY) f->flags |= IXL_FILTER_VLAN; else vsi->num_macs++; ixl_add_hw_filters(vsi, f->flags, 1); return; } void ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; f = ixl_find_filter(vsi, macaddr, vlan); if (f == NULL) return; f->flags |= IXL_FILTER_DEL; ixl_del_hw_filters(vsi, 1); vsi->num_macs--; /* Check if this is the last vlan removal */ if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) { /* Switch back to a non-vlan filter */ ixl_del_filter(vsi, macaddr, 0); ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); } return; } /* ** Find the filter with both matching mac addr and vlan id */ struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; bool match = FALSE; SLIST_FOREACH(f, &vsi->ftl, next) { if (!cmp_etheraddr(f->macaddr, macaddr)) continue; if (f->vlan == vlan) { match = TRUE; break; } } if (!match) f = NULL; return (f); } /* ** This routine takes additions to the vsi filter ** table and creates an Admin Queue call to create ** the filters in the hardware. */ void ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) { struct i40e_aqc_add_macvlan_element_data *a, *b; struct ixl_mac_filter *f; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; int err, j = 0; pf = vsi->back; dev = pf->dev; hw = &pf->hw; IXL_PF_LOCK_ASSERT(pf); a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (a == NULL) { device_printf(dev, "add_hw_filters failed to get memory\n"); return; } /* ** Scan the filter list, each time we find one ** we add it to the admin queue array and turn off ** the add bit. */ SLIST_FOREACH(f, &vsi->ftl, next) { if (f->flags == flags) { b = &a[j]; // a pox on fvl long names :) bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); if (f->vlan == IXL_VLAN_ANY) { b->vlan_tag = 0; b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; } else { b->vlan_tag = f->vlan; b->flags = 0; } b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; f->flags &= ~IXL_FILTER_ADD; j++; } if (j == cnt) break; } if (j > 0) { err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); if (err) device_printf(dev, "aq_add_macvlan err %d, " "aq_error %d\n", err, hw->aq.asq_last_status); else vsi->hw_filters_add += j; } free(a, M_DEVBUF); return; } /* ** This routine takes removals in the vsi filter ** table and creates an Admin Queue call to delete ** the filters in the hardware. 
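** Entries flagged IXL_FILTER_DEL are batched into a single
** i40e_aq_remove_macvlan() call; on a partial failure the per-entry
** error codes are used to count how many filters were actually removed.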
*/ void ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) { struct i40e_aqc_remove_macvlan_element_data *d, *e; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; struct ixl_mac_filter *f, *f_temp; int err, j = 0; DEBUGOUT("ixl_del_hw_filters: begin\n"); pf = vsi->back; hw = &pf->hw; dev = pf->dev; d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { printf("del hw filter failed to get memory\n"); return; } SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { if (f->flags & IXL_FILTER_DEL) { e = &d[j]; // a pox on fvl long names :) bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan); e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; /* delete entry from vsi list */ SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); free(f, M_DEVBUF); j++; } if (j == cnt) break; } if (j > 0) { err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) { int sc = 0; for (int i = 0; i < j; i++) sc += (!d[i].error_code); vsi->hw_filters_del += sc; device_printf(dev, "Failed to remove %d/%d filters, aq error %d\n", j - sc, j, hw->aq.asq_last_status); } else vsi->hw_filters_del += j; } free(d, M_DEVBUF); DEBUGOUT("ixl_del_hw_filters: end\n"); return; } int ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Enabling PF TX ring %4d / VSI TX ring %4d...\n", pf_qidx, vsi_qidx); i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE); reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); reg |= I40E_QTX_ENA_QENA_REQ_MASK | I40E_QTX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QTX_ENA(pf_qidx), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); if (reg & I40E_QTX_ENA_QENA_STAT_MASK) break; i40e_msec_delay(10); } if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "TX queue %d still disabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Enabling PF RX ring %4d / VSI RX ring %4d...\n", pf_qidx, vsi_qidx); reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); reg |= I40E_QRX_ENA_QENA_REQ_MASK | I40E_QRX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QRX_ENA(pf_qidx), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); if (reg & I40E_QRX_ENA_QENA_STAT_MASK) break; i40e_msec_delay(10); } if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "RX queue %d still disabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { int error = 0; error = ixl_enable_tx_ring(pf, qtag, vsi_qidx); /* Called function already prints error message */ if (error) return (error); error = ixl_enable_rx_ring(pf, qtag, vsi_qidx); return (error); } /* For PF VSI only */ int ixl_enable_rings(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; int error = 0; for (int i = 0; i < vsi->num_queues; i++) { error = ixl_enable_ring(pf, &pf->qtag, i); if (error) return (error); } return (error); } int ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; 
u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE); i40e_usec_delay(500); reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QTX_ENA(pf_qidx), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "TX queue %d still enabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_qidx), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "RX queue %d still enabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { int error = 0; error = ixl_disable_tx_ring(pf, qtag, vsi_qidx); /* Called function already prints error message */ if (error) return (error); error = ixl_disable_rx_ring(pf, qtag, vsi_qidx); return (error); } /* For PF VSI only */ int ixl_disable_rings(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; int error = 0; for (int i = 0; i < vsi->num_queues; i++) { error = ixl_disable_ring(pf, &pf->qtag, i); if (error) return (error); } return (error); } /** * ixl_handle_mdd_event * * Called from interrupt handler to identify possibly malicious vfs * (But also detects events from the PF, as well) **/ void ixl_handle_mdd_event(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; bool mdd_detected = false; bool pf_mdd_detected = false; u32 reg; /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; device_printf(dev, "Malicious Driver Detection event %d" " on TX queue %d, pf number %d\n", event, queue, pf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; device_printf(dev, "Malicious Driver Detection event %d" " on RX queue %d, pf number %d\n", event, queue, pf_num); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } if (mdd_detected) { reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); device_printf(dev, "MDD TX event is for this function!"); pf_mdd_detected = true; } reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); device_printf(dev, "MDD RX event is for this function!"); pf_mdd_detected = true; } } /* re-enable mdd interrupt cause */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg 
|= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); ixl_flush(hw); } void ixl_enable_intr(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; if (pf->msix > 1) { for (int i = 0; i < vsi->num_queues; i++, que++) ixl_enable_queue(hw, que->me); } else ixl_enable_intr0(hw); } void ixl_disable_rings_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; for (int i = 0; i < vsi->num_queues; i++, que++) ixl_disable_queue(hw, que->me); } void ixl_enable_intr0(struct i40e_hw *hw) { u32 reg; /* Use IXL_ITR_NONE so ITR isn't updated here */ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, reg); } void ixl_disable_intr0(struct i40e_hw *hw) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTL0, reg); ixl_flush(hw); } void ixl_enable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } void ixl_disable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } void ixl_update_stats_counters(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_vf *vf; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw_port_stats *osd = &pf->stats_offsets; /* Update hw stats */ ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), pf->stat_offsets_loaded, &osd->crc_errors, &nsd->crc_errors); ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), pf->stat_offsets_loaded, &osd->illegal_bytes, &nsd->illegal_bytes); ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_bytes, &nsd->eth.rx_bytes); ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_bytes, &nsd->eth.tx_bytes); ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_unicast, &nsd->eth.rx_unicast); ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), I40E_GLPRT_UPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_unicast, &nsd->eth.tx_unicast); ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), I40E_GLPRT_MPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_multicast, &nsd->eth.rx_multicast); ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), I40E_GLPRT_MPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_multicast, &nsd->eth.tx_multicast); ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), I40E_GLPRT_BPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_broadcast, &nsd->eth.rx_broadcast); ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), I40E_GLPRT_BPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_broadcast, &nsd->eth.tx_broadcast); ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), pf->stat_offsets_loaded, &osd->tx_dropped_link_down, &nsd->tx_dropped_link_down); ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), pf->stat_offsets_loaded, &osd->mac_local_faults, &nsd->mac_local_faults); ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), pf->stat_offsets_loaded, 
&osd->mac_remote_faults, &nsd->mac_remote_faults); ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), pf->stat_offsets_loaded, &osd->rx_length_errors, &nsd->rx_length_errors); /* Flow control (LFC) stats */ ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_rx, &nsd->link_xon_rx); ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_rx, &nsd->link_xoff_rx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); /* Packet size stats rx */ ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), I40E_GLPRT_PRC64L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_64, &nsd->rx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), I40E_GLPRT_PRC127L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_127, &nsd->rx_size_127); ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), I40E_GLPRT_PRC255L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_255, &nsd->rx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), I40E_GLPRT_PRC511L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_511, &nsd->rx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), I40E_GLPRT_PRC1023L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1023, &nsd->rx_size_1023); ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), I40E_GLPRT_PRC1522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1522, &nsd->rx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), I40E_GLPRT_PRC9522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_big, &nsd->rx_size_big); /* Packet size stats tx */ ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), I40E_GLPRT_PTC64L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_64, &nsd->tx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), I40E_GLPRT_PTC127L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_127, &nsd->tx_size_127); ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), I40E_GLPRT_PTC255L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_255, &nsd->tx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), I40E_GLPRT_PTC511L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_511, &nsd->tx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), I40E_GLPRT_PTC1023L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1023, &nsd->tx_size_1023); ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), I40E_GLPRT_PTC1522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1522, &nsd->tx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), I40E_GLPRT_PTC9522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_big, &nsd->tx_size_big); ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), pf->stat_offsets_loaded, &osd->rx_undersize, &nsd->rx_undersize); ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), pf->stat_offsets_loaded, &osd->rx_fragments, &nsd->rx_fragments); ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), pf->stat_offsets_loaded, &osd->rx_oversize, &nsd->rx_oversize); ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); pf->stat_offsets_loaded = true; /* End hw stats */ /* Update vsi stats */ ixl_update_vsi_stats(vsi); for (int i = 0; i < pf->num_vfs; i++) { vf = &pf->vfs[i]; if (vf->vf_flags & VF_FLAG_ENABLED) ixl_update_eth_stats(&pf->vfs[i].vsi); } } int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf) { struct i40e_hw *hw = 
&pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; bool is_up = false; int error = 0; is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING); /* Teardown */ if (is_up) ixl_stop(pf); error = i40e_shutdown_lan_hmc(hw); if (error) device_printf(dev, "Shutdown LAN HMC failed with code %d\n", error); ixl_disable_intr0(hw); ixl_teardown_adminq_msix(pf); error = i40e_shutdown_adminq(hw); if (error) device_printf(dev, "Shutdown Admin queue failed with code %d\n", error); /* Setup */ error = i40e_init_adminq(hw); if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "Unable to initialize Admin Queue, error %d\n", error); } error = ixl_setup_adminq_msix(pf); if (error) { device_printf(dev, "ixl_setup_adminq_msix error: %d\n", error); } ixl_configure_intr0_msix(pf); ixl_enable_intr0(hw); error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (error) { device_printf(dev, "init_lan_hmc failed: %d\n", error); } error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (error) { device_printf(dev, "configure_lan_hmc failed: %d\n", error); } if (is_up) ixl_init(pf); return (0); } void ixl_handle_empr_reset(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int count = 0; u32 reg; /* Typically finishes within 3-4 seconds */ while (count++ < 100) { reg = rd32(hw, I40E_GLGEN_RSTAT) & I40E_GLGEN_RSTAT_DEVSTATE_MASK; if (reg) i40e_msec_delay(100); else break; } ixl_dbg(pf, IXL_DBG_INFO, "EMPR reset wait count: %d\n", count); device_printf(dev, "Rebuilding driver state...\n"); ixl_rebuild_hw_structs_after_reset(pf); device_printf(dev, "Rebuilding driver state done.\n"); atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); } /* ** Tasklet handler for MSIX Adminq interrupts ** - do outside interrupt since it might sleep */ void ixl_do_adminq(void *context, int pending) { struct ixl_pf *pf = context; struct i40e_hw *hw = &pf->hw; struct i40e_arq_event_info event; i40e_status ret; device_t dev = pf->dev; u32 loop = 0; u16 opcode, result; if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { /* Flag cleared at end of this function */ ixl_handle_empr_reset(pf); return; } /* Admin Queue handling */ event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT | M_ZERO); if (!event.msg_buf) { device_printf(dev, "%s: Unable to allocate memory for Admin" " Queue event!\n", __func__); return; } IXL_PF_LOCK(pf); /* clean and process any events */ do { ret = i40e_clean_arq_element(hw, &event, &result); if (ret) break; opcode = LE16_TO_CPU(event.desc.opcode); ixl_dbg(pf, IXL_DBG_AQ, "Admin Queue event: %#06x\n", opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: ixl_link_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_pf: #ifdef PCI_IOV ixl_handle_vf_msg(pf, &event); #endif break; case i40e_aqc_opc_event_lan_overflow: default: break; } } while (result && (loop++ < IXL_ADM_LIMIT)); free(event.msg_buf, M_DEVBUF); /* * If there are still messages to process, reschedule ourselves. * Otherwise, re-enable our interrupt. */ if (result > 0) taskqueue_enqueue(pf->tq, &pf->adminq); else ixl_enable_intr0(hw); IXL_PF_UNLOCK(pf); } /** * Update VSI-specific ethernet statistics counters. 
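 * Reads are offset-corrected by ixl_stat_update32()/ixl_stat_update48():
 * the first values read are saved and subtracted from later readings, so
 * counters effectively start from zero when the driver loads.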
**/ void ixl_update_eth_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *es; struct i40e_eth_stats *oes; struct i40e_hw_port_stats *nsd; u16 stat_idx = vsi->info.stat_counter_idx; es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; nsd = &pf->stats; /* Gather up the stats that the hw collects */ ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), vsi->stat_offsets_loaded, &oes->rx_discards, &es->rx_discards); ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), I40E_GLV_UPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), I40E_GLV_MPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), I40E_GLV_BPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), I40E_GLV_GOTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), I40E_GLV_UPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), I40E_GLV_MPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); vsi->stat_offsets_loaded = true; } void ixl_update_vsi_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf; struct ifnet *ifp; struct i40e_eth_stats *es; u64 tx_discards; struct i40e_hw_port_stats *nsd; pf = vsi->back; ifp = vsi->ifp; es = &vsi->eth_stats; nsd = &pf->stats; ixl_update_eth_stats(vsi); tx_discards = es->tx_discards + nsd->tx_dropped_link_down; for (int i = 0; i < vsi->num_queues; i++) tx_discards += vsi->queues[i].txr.br->br_drops; /* Update ifnet stats */ IXL_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast + es->rx_broadcast); IXL_SET_OPACKETS(vsi, es->tx_unicast + es->tx_multicast + es->tx_broadcast); IXL_SET_IBYTES(vsi, es->rx_bytes); IXL_SET_OBYTES(vsi, es->tx_bytes); IXL_SET_IMCASTS(vsi, es->rx_multicast); IXL_SET_OMCASTS(vsi, es->tx_multicast); IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments + nsd->rx_jabber); IXL_SET_OERRORS(vsi, es->tx_errors); IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); IXL_SET_OQDROPS(vsi, tx_discards); IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); IXL_SET_COLLISIONS(vsi, 0); } /** * Reset all of the stats for the given pf **/ void ixl_pf_reset_stats(struct ixl_pf *pf) { bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); pf->stat_offsets_loaded = false; } /** * Resets all stats of the given vsi **/ void ixl_vsi_reset_stats(struct ixl_vsi *vsi) { bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); vsi->stat_offsets_loaded = false; } /** * Read and update a 48 bit stat from the hw * * Since the device stats are not reset at PFReset, they likely will not * be zeroed when the driver starts. 
We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. **/ void ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) new_data = rd64(hw, loreg); #else /* * Use two rd32's instead of one rd64; FreeBSD versions before * 10 don't support 64-bit bus reads/writes. */ new_data = rd32(hw, loreg); new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; #endif if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = new_data - *offset; else *stat = (new_data + ((u64)1 << 48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } /** * Read and update a 32 bit stat from the hw **/ void ixl_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat) { u32 new_data; new_data = rd32(hw, reg); if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = (u32)(new_data - *offset); else *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); } void ixl_add_device_sysctls(struct ixl_pf *pf) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); struct sysctl_oid *debug_node; struct sysctl_oid_list *debug_list; struct sysctl_oid *fec_node; struct sysctl_oid_list *fec_list; /* Set up sysctls */ SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_current_speed, "A", "Current Port Speed"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_show_fw, "A", "Firmware version"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_unallocated_queues, "I", "Queues not allocated to a PF or VF"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_pf_tx_itr, "I", "Immediately set TX ITR value for all queues"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_pf_rx_itr, "I", "Immediately set RX ITR value for all queues"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); /* Add FEC sysctls for 25G adapters */ /* * XXX: These settings can be changed, but that isn't supported, * so these are read-only for now. 
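* As a usage illustration only (the dev.ixl.<unit> prefix is the
* conventional FreeBSD per-device sysctl path and is assumed here, not
* something defined in this file), the FEC nodes registered below could
* be queried from userland with e.g.:
*   sysctl dev.ixl.0.fec.fc_ability
*   sysctl dev.ixl.0.fec.auto_fec_enabled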
*/ if (hw->device_id == I40E_DEV_ID_25G_B || hw->device_id == I40E_DEV_ID_25G_SFP28) { fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls"); fec_list = SYSCTL_CHILDREN(fec_node); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes"); } /* Add sysctls meant to print debug information, but don't list them * in "sysctl -a" output. */ debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls"); debug_list = SYSCTL_CHILDREN(debug_node); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "shared_debug_mask", CTLFLAG_RW, &pf->hw.debug_mask, 0, "Shared code debug message level"); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "core_debug_mask", CTLFLAG_RW, &pf->dbg_mask, 0, "Non-shared code debug message level"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD, pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR, pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); if (pf->has_i2c) { SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus"); } #ifdef PCI_IOV SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl, 0, "PF/VF Virtual Channel debug level"); #endif } /* * Primarily for finding out how many queues can be assigned to VFs, * at runtime.
*/ static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int queues; IXL_PF_LOCK(pf); queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); IXL_PF_UNLOCK(pf); return sysctl_handle_int(oidp, NULL, queues, req); } /* ** Set flow control using sysctl: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int requested_fc, error = 0; enum i40e_status_code aq_error = 0; u8 fc_aq_err = 0; /* Get request */ requested_fc = pf->fc; error = sysctl_handle_int(oidp, &requested_fc, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (requested_fc < 0 || requested_fc > 3) { device_printf(dev, "Invalid fc mode; valid modes are 0 through 3\n"); return (EINVAL); } /* Set fc ability for port */ hw->fc.requested_mode = requested_fc; aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE); if (aq_error) { device_printf(dev, "%s: Error setting new fc mode %d; fc_err %#x\n", __func__, aq_error, fc_aq_err); return (EIO); } pf->fc = requested_fc; /* Get new link state */ i40e_msec_delay(250); hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); return (0); } char * ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed) { int index; char *speeds[] = { "Unknown", "100 Mbps", "1 Gbps", "10 Gbps", "40 Gbps", "20 Gbps", "25 Gbps", }; switch (link_speed) { case I40E_LINK_SPEED_100MB: index = 1; break; case I40E_LINK_SPEED_1GB: index = 2; break; case I40E_LINK_SPEED_10GB: index = 3; break; case I40E_LINK_SPEED_40GB: index = 4; break; case I40E_LINK_SPEED_20GB: index = 5; break; case I40E_LINK_SPEED_25GB: index = 6; break; case I40E_LINK_SPEED_UNKNOWN: default: index = 0; break; } return speeds[index]; } int ixl_current_speed(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; int error = 0; ixl_update_link_status(pf); error = sysctl_handle_string(oidp, ixl_aq_speed_to_str(hw->phy.link_info.link_speed), 8, req); return (error); } static u8 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) { static u16 speedmap[6] = { (I40E_LINK_SPEED_100MB | (0x1 << 8)), (I40E_LINK_SPEED_1GB | (0x2 << 8)), (I40E_LINK_SPEED_10GB | (0x4 << 8)), (I40E_LINK_SPEED_20GB | (0x8 << 8)), (I40E_LINK_SPEED_25GB | (0x10 << 8)), (I40E_LINK_SPEED_40GB | (0x20 << 8)) }; u8 retval = 0; for (int i = 0; i < 6; i++) { if (to_aq) retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; else retval |= (speeds & speedmap[i]) ? 
(speedmap[i] >> 8) : 0; } return (retval); } int ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; enum i40e_status_code aq_error = 0; /* Get current capability information */ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (aq_error) { device_printf(dev, "%s: Error getting phy capabilities %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EIO); } /* Prepare new config */ bzero(&config, sizeof(config)); config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; /* Do aq command & restart link */ aq_error = i40e_aq_set_phy_config(hw, &config, NULL); if (aq_error) { device_printf(dev, "%s: Error setting new phy config %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EIO); } return (0); } /* ** Control link advertise speed: ** Flags: ** 0x1 - advertise 100 Mb ** 0x2 - advertise 1G ** 0x4 - advertise 10G ** 0x8 - advertise 20G ** 0x10 - advertise 25G ** 0x20 - advertise 40G ** ** Set to 0 to disable link */ int ixl_set_advertise(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u8 converted_speeds; int requested_ls = 0; int error = 0; /* Read in new mode */ requested_ls = pf->advertised_speed; error = sysctl_handle_int(oidp, &requested_ls, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Check if changing speeds is supported */ switch (hw->device_id) { case I40E_DEV_ID_25G_B: case I40E_DEV_ID_25G_SFP28: device_printf(dev, "Changing advertised speeds not supported" " on this device.\n"); return (EINVAL); } if (requested_ls < 0 || requested_ls > 0xff) { device_printf(dev, "Invalid advertised speed; valid range is 0x0 to 0xFF\n"); return (EINVAL); } /* Check for valid value */ converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { device_printf(dev, "Invalid advertised speed; " "valid flags are: 0x%02x\n", ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); return (EINVAL); } error = ixl_set_advertised_speeds(pf, requested_ls); if (error) return (error); pf->advertised_speed = requested_ls; ixl_update_link_status(pf); return (0); } /* * Input: bitmap of enum i40e_aq_link_speed */ static u64 ixl_max_aq_speed_to_value(u8 link_speeds) { if (link_speeds & I40E_LINK_SPEED_40GB) return IF_Gbps(40); if (link_speeds & I40E_LINK_SPEED_25GB) return IF_Gbps(25); if (link_speeds & I40E_LINK_SPEED_20GB) return IF_Gbps(20); if (link_speeds & I40E_LINK_SPEED_10GB) return IF_Gbps(10); if (link_speeds & I40E_LINK_SPEED_1GB) return IF_Gbps(1); if (link_speeds & I40E_LINK_SPEED_100MB) return IF_Mbps(100); else /* Minimum supported link speed */ return IF_Mbps(100); } /* ** Get the width and transaction speed of ** the bus this adapter is plugged into.
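** As a rough worked example of the sufficiency check done below
** (assuming the shared-code enums encode the raw rate and lane count,
** e.g. i40e_bus_speed_8000 == 8000 and i40e_bus_width_pcie_x4 == 4):
** a two-port adapter whose ports can run at 40G needs 2 * 40000, which
** exceeds 8000 * 4, so the "bandwidth may be insufficient" warning
** would be printed when the device sits in a PCIe gen3 x4 slot.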
*/ void ixl_get_bus_info(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u16 link; u32 offset, num_ports; u64 max_speed; /* Some devices don't use PCIE */ if (hw->mac.type == I40E_MAC_X722) return; /* Read PCI Express Capabilities Link Status Register */ pci_find_cap(dev, PCIY_EXPRESS, &offset); link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); /* Fill out hw struct with PCIE info */ i40e_set_pci_config_data(hw, link); /* Use info to print out bandwidth messages */ device_printf(dev,"PCI Express Bus: Speed %s %s\n", ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : ("Unknown")); /* * If adapter is in slot with maximum supported speed, * no warning message needs to be printed out. */ if (hw->bus.speed >= i40e_bus_speed_8000 && hw->bus.width >= i40e_bus_width_pcie_x8) return; num_ports = bitcount32(hw->func_caps.valid_functions); max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { device_printf(dev, "PCI-Express bandwidth available" " for this device may be insufficient for" " optimal performance.\n"); device_printf(dev, "Please move the device to a different" " PCI-e link with more lanes and/or higher" " transfer rate.\n"); } } static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; struct sbuf *sbuf; sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); ixl_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); sbuf_delete(sbuf); return 0; } void ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) { if ((nvma->command == I40E_NVM_READ) && ((nvma->config & 0xFF) == 0xF) && (((nvma->config & 0xF00) >> 8) == 0xF) && (nvma->offset == 0) && (nvma->data_size == 1)) { // device_printf(dev, "- Get Driver Status Command\n"); } else if (nvma->command == I40E_NVM_READ) { } else { switch (nvma->command) { case 0xB: device_printf(dev, "- command: I40E_NVM_READ\n"); break; case 0xC: device_printf(dev, "- command: I40E_NVM_WRITE\n"); break; default: device_printf(dev, "- command: unknown 0x%08x\n", nvma->command); break; } device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF); device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8); device_printf(dev, "- offset : 0x%08x\n", nvma->offset); device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size); } } int ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) { struct i40e_hw *hw = &pf->hw; struct i40e_nvm_access *nvma; device_t dev = pf->dev; enum i40e_status_code status = 0; int perrno; DEBUGFUNC("ixl_handle_nvmupd_cmd"); /* Sanity checks */ if (ifd->ifd_len < sizeof(struct i40e_nvm_access) || ifd->ifd_data == NULL) { device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__); device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n", __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access)); device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data); return (EINVAL); } nvma = (struct i40e_nvm_access *)ifd->ifd_data; if (pf->dbg_mask & IXL_DBG_NVMUPD) ixl_print_nvm_cmd(dev, nvma); if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { int count = 0; 
while (count++ < 100) { i40e_msec_delay(100); if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) break; } } if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) { IXL_PF_LOCK(pf); status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); IXL_PF_UNLOCK(pf); } else { perrno = -EBUSY; } if (status) device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", i40e_stat_str(hw, status), perrno); /* * -EPERM is actually ERESTART, which the kernel interprets as it needing * to run this ioctl again. So use -EACCES for -EPERM instead. */ if (perrno == -EPERM) return (-EACCES); else return (perrno); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ void ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct ixl_vsi *vsi = ifp->if_softc; struct ixl_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; INIT_DEBUGOUT("ixl_media_status: begin"); IXL_PF_LOCK(pf); hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); ixl_update_link_status(pf); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!pf->link_up) { IXL_PF_UNLOCK(pf); return; } ifmr->ifm_status |= IFM_ACTIVE; /* Hardware always does full-duplex */ ifmr->ifm_active |= IFM_FDX; switch (hw->phy.link_info.phy_type) { /* 100 M */ case I40E_PHY_TYPE_100BASE_TX: ifmr->ifm_active |= IFM_100_TX; break; /* 1 G */ case I40E_PHY_TYPE_1000BASE_T: ifmr->ifm_active |= IFM_1000_T; break; case I40E_PHY_TYPE_1000BASE_SX: ifmr->ifm_active |= IFM_1000_SX; break; case I40E_PHY_TYPE_1000BASE_LX: ifmr->ifm_active |= IFM_1000_LX; break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: ifmr->ifm_active |= IFM_OTHER; break; /* 10 G */ case I40E_PHY_TYPE_10GBASE_SFPP_CU: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_SR: ifmr->ifm_active |= IFM_10G_SR; break; case I40E_PHY_TYPE_10GBASE_LR: ifmr->ifm_active |= IFM_10G_LR; break; case I40E_PHY_TYPE_10GBASE_T: ifmr->ifm_active |= IFM_10G_T; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_10GBASE_AOC: ifmr->ifm_active |= IFM_OTHER; break; /* 25 G */ case I40E_PHY_TYPE_25GBASE_KR: ifmr->ifm_active |= IFM_25G_KR; break; case I40E_PHY_TYPE_25GBASE_CR: ifmr->ifm_active |= IFM_25G_CR; break; case I40E_PHY_TYPE_25GBASE_SR: ifmr->ifm_active |= IFM_25G_SR; break; case I40E_PHY_TYPE_25GBASE_LR: ifmr->ifm_active |= IFM_UNKNOWN; break; /* 40 G */ case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: ifmr->ifm_active |= IFM_40G_CR4; break; case I40E_PHY_TYPE_40GBASE_SR4: ifmr->ifm_active |= IFM_40G_SR4; break; case I40E_PHY_TYPE_40GBASE_LR4: ifmr->ifm_active |= IFM_40G_LR4; break; case I40E_PHY_TYPE_XLAUI: ifmr->ifm_active |= IFM_OTHER; break; case I40E_PHY_TYPE_1000BASE_KX: ifmr->ifm_active |= IFM_1000_KX; break; case I40E_PHY_TYPE_SGMII: ifmr->ifm_active |= IFM_1000_SGMII; break; /* ERJ: What's the difference between these? 
*/ case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ifmr->ifm_active |= IFM_10G_CR1; break; case I40E_PHY_TYPE_10GBASE_KX4: ifmr->ifm_active |= IFM_10G_KX4; break; case I40E_PHY_TYPE_10GBASE_KR: ifmr->ifm_active |= IFM_10G_KR; break; case I40E_PHY_TYPE_SFI: ifmr->ifm_active |= IFM_10G_SFI; break; /* Our single 20G media type */ case I40E_PHY_TYPE_20GBASE_KR2: ifmr->ifm_active |= IFM_20G_KR2; break; case I40E_PHY_TYPE_40GBASE_KR4: ifmr->ifm_active |= IFM_40G_KR4; break; case I40E_PHY_TYPE_XLPPI: case I40E_PHY_TYPE_40GBASE_AOC: ifmr->ifm_active |= IFM_40G_XLPPI; break; /* Unknown to driver */ default: ifmr->ifm_active |= IFM_UNKNOWN; break; } /* Report flow control status as well */ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; IXL_PF_UNLOCK(pf); } void ixl_init(void *arg) { struct ixl_pf *pf = arg; IXL_PF_LOCK(pf); ixl_init_locked(pf); IXL_PF_UNLOCK(pf); } /* * NOTE: Fortville does not support forcing media speeds. Instead, * use the set_advertise sysctl to set the speeds Fortville * will advertise or be allowed to operate at. */ int ixl_media_change(struct ifnet * ifp) { struct ixl_vsi *vsi = ifp->if_softc; struct ifmedia *ifm = &vsi->media; INIT_DEBUGOUT("ixl_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n"); return (ENODEV); } /********************************************************************* * Ioctl entry point * * ixl_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ int ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct ixl_vsi *vsi = ifp->if_softc; struct ixl_pf *pf = vsi->back; struct ifreq *ifr = (struct ifreq *)data; struct ifdrv *ifd = (struct ifdrv *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; bool avoid_reset = FALSE; #endif int error = 0; switch (command) { case SIOCSIFADDR: IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)"); #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif #if defined(INET) || defined(INET6) /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. 
*/ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ixl_init(pf); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; #endif case SIOCSIFMTU: IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); if (ifr->ifr_mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { error = EINVAL; } else { IXL_PF_LOCK(pf); ifp->if_mtu = ifr->ifr_mtu; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixl_init_locked(pf); IXL_PF_UNLOCK(pf); } break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); IXL_PF_LOCK(pf); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ pf->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { ixl_set_promisc(vsi); } } else { IXL_PF_UNLOCK(pf); ixl_init(pf); IXL_PF_LOCK(pf); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixl_stop_locked(pf); } } pf->if_flags = ifp->if_flags; IXL_PF_UNLOCK(pf); break; case SIOCSDRVSPEC: case SIOCGDRVSPEC: IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific " "Info)\n"); /* NVM update command */ if (ifd->ifd_cmd == I40E_NVM_ACCESS) error = ixl_handle_nvmupd_cmd(pf, ifd); else error = EINVAL; break; case SIOCADDMULTI: IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); ixl_disable_rings_intr(vsi); ixl_add_multi(vsi); ixl_enable_intr(vsi); IXL_PF_UNLOCK(pf); } break; case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); ixl_disable_rings_intr(vsi); ixl_del_multi(vsi); ixl_enable_intr(vsi); IXL_PF_UNLOCK(pf); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &vsi->media, command); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); ixl_cap_txcsum_tso(vsi, ifp, mask); if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWFILTER) ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); ixl_init_locked(pf); IXL_PF_UNLOCK(pf); } VLAN_CAPABILITIES(ifp); break; } #if __FreeBSD_version >= 1003000 case SIOCGI2C: { struct ifi2creq i2c; int i; IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)"); if (!pf->has_i2c) return (ENOTTY); error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (error != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { error = EINVAL; break; } if (i2c.len > sizeof(i2c.data)) { error = EINVAL; break; } for (i = 0; i < i2c.len; i++) if (ixl_read_i2c_byte(pf, i2c.offset + i, i2c.dev_addr, &i2c.data[i])) return (EIO); error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } #endif default: IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command); error = ether_ioctl(ifp, command, data); break; } return (error); } int ixl_find_i2c_interface(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; bool i2c_en, port_matched; u32 reg; for (int i = 0; i < 4; i++) { reg = rd32(hw, 
I40E_GLGEN_MDIO_I2C_SEL(i)); i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) & BIT(hw->port); if (i2c_en && port_matched) return (i); } return (-1); } static char * ixl_phy_type_string(u32 bit_pos, bool ext) { static char * phy_types_str[32] = { "SGMII", "1000BASE-KX", "10GBASE-KX4", "10GBASE-KR", "40GBASE-KR4", "XAUI", "XFI", "SFI", "XLAUI", "XLPPI", "40GBASE-CR4", "10GBASE-CR1", "Reserved (12)", "Reserved (13)", "Reserved (14)", "Reserved (15)", "Reserved (16)", "100BASE-TX", "1000BASE-T", "10GBASE-T", "10GBASE-SR", "10GBASE-LR", "10GBASE-SFP+Cu", "10GBASE-CR1", "40GBASE-CR4", "40GBASE-SR4", "40GBASE-LR4", "1000BASE-SX", "1000BASE-LX", "1000BASE-T Optical", "20GBASE-KR2", "Reserved (31)" }; static char * ext_phy_types_str[4] = { "25GBASE-KR", "25GBASE-CR", "25GBASE-SR", "25GBASE-LR" }; if (ext && bit_pos > 3) return "Invalid_Ext"; if (bit_pos > 31) return "Invalid"; return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; } int ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct i40e_aq_desc desc; enum i40e_status_code status; struct i40e_aqc_get_link_status *aq_link_status = (struct i40e_aqc_get_link_status *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (status) { device_printf(dev, "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); return (0); } static char * ixl_phy_type_string_ls(u8 val) { if (val >= 0x1F) return ixl_phy_type_string(val - 0x1F, true); else return ixl_phy_type_string(val, false); } static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; struct sbuf *buf; int error = 0; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } struct i40e_aqc_get_link_status link_status; error = ixl_aq_get_link_status(pf, &link_status); if (error) { sbuf_delete(buf); return (error); } /* TODO: Add 25G types */ sbuf_printf(buf, "\n" "PHY Type : 0x%02x<%s>\n" "Speed : 0x%02x\n" "Link info: 0x%02x\n" "AN info : 0x%02x\n" "Ext info : 0x%02x\n" "Loopback : 0x%02x\n" "Max Frame: %d\n" "Config : 0x%02x\n" "Power : 0x%02x", link_status.phy_type, ixl_phy_type_string_ls(link_status.phy_type), link_status.link_speed, link_status.link_info, link_status.an_info, link_status.ext_info, link_status.loopback, link_status.max_frame_size, link_status.config, link_status.power_desc); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; enum i40e_status_code status; struct i40e_aq_get_phy_abilities_resp abilities; struct sbuf *buf; int error = 0; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, 
&abilities, NULL); if (status) { device_printf(dev, "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return (EIO); } sbuf_printf(buf, "\n" "PHY Type : %08x", abilities.phy_type); if (abilities.phy_type != 0) { sbuf_printf(buf, "<"); for (int i = 0; i < 32; i++) if ((1 << i) & abilities.phy_type) sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); sbuf_printf(buf, ">\n"); } sbuf_printf(buf, "PHY Ext : %02x", abilities.phy_type_ext); if (abilities.phy_type_ext != 0) { sbuf_printf(buf, "<"); for (int i = 0; i < 4; i++) if ((1 << i) & abilities.phy_type_ext) sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true)); sbuf_printf(buf, ">"); } sbuf_printf(buf, "\n"); sbuf_printf(buf, "Speed : %02x\n" "Abilities: %02x\n" "EEE cap : %04x\n" "EEER reg : %08x\n" "D3 Lpan : %02x\n" "ID : %02x %02x %02x %02x\n" "ModType : %02x %02x %02x\n" "ModType E: %01x\n" "FEC Cfg : %02x\n" "Ext CC : %02x", abilities.link_speed, abilities.abilities, abilities.eee_capability, abilities.eeer_val, abilities.d3_lpan, abilities.phy_id[0], abilities.phy_id[1], abilities.phy_id[2], abilities.phy_id[3], abilities.module_type[0], abilities.module_type[1], abilities.module_type[2], abilities.phy_type_ext >> 5, abilities.phy_type_ext & 0x1F, abilities.ext_comp_code); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct ixl_vsi *vsi = &pf->vsi; struct ixl_mac_filter *f; char *buf, *buf_i; int error = 0; int ftl_len = 0; int ftl_counter = 0; int buf_len = 0; int entry_len = 42; SLIST_FOREACH(f, &vsi->ftl, next) { ftl_len++; } if (ftl_len < 1) { sysctl_handle_string(oidp, "(none)", 6, req); return (0); } buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2; buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT); sprintf(buf_i++, "\n"); SLIST_FOREACH(f, &vsi->ftl, next) { sprintf(buf_i, MAC_FORMAT ", vlan %4d, flags %#06x", MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); buf_i += entry_len; /* don't print '\n' for last entry */ if (++ftl_counter != ftl_len) { sprintf(buf_i, "\n"); buf_i++; } } error = sysctl_handle_string(oidp, buf, strlen(buf), req); if (error) printf("sysctl error: %d\n", error); free(buf, M_DEVBUF); return error; } #define IXL_SW_RES_SIZE 0x14 int ixl_res_alloc_cmp(const void *a, const void *b) { const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; return ((int)one->resource_type - (int)two->resource_type); } /* * Longest string length: 25 */ char * ixl_switch_res_type_string(u8 type) { static char * ixl_switch_res_type_strings[0x14] = { "VEB", "VSI", "Perfect Match MAC address", "S-tag", "(Reserved)", "Multicast hash entry", "Unicast hash entry", "VLAN", "VSI List entry", "(Reserved)", "VLAN Statistic Pool", "Mirror Rule", "Queue Set", "Inner VLAN Forward filter", "(Reserved)", "Inner MAC", "IP", "GRE/VN1 Key", "VN2 Key", "Tunneling Port" }; if (type < 0x14) return ixl_switch_res_type_strings[type]; else return "(Reserved)"; } static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; enum i40e_status_code status; int error = 0; u8 num_entries; struct 
i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(resp, sizeof(resp)); status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, resp, IXL_SW_RES_SIZE, NULL); if (status) { device_printf(dev, "%s: get_switch_resource_alloc() error %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return (error); } /* Sort entries by type for display */ qsort(resp, num_entries, sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), &ixl_res_alloc_cmp); sbuf_cat(buf, "\n"); sbuf_printf(buf, "# of entries: %d\n", num_entries); sbuf_printf(buf, " Type | Guaranteed | Total | Used | Un-allocated\n" " | (this) | (all) | (this) | (all) \n"); for (int i = 0; i < num_entries; i++) { sbuf_printf(buf, "%25s | %10d %5d %6d %12d", ixl_switch_res_type_string(resp[i].resource_type), resp[i].guaranteed, resp[i].total, resp[i].used, resp[i].total_unalloced); if (i < num_entries - 1) sbuf_cat(buf, "\n"); } error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } /* ** Caller must init and delete sbuf; this function will clear and ** finish it for caller. ** ** XXX: Cannot use the SEID for this, since there is no longer a ** fixed mapping between SEID and element type. */ char * ixl_switch_element_string(struct sbuf *s, struct i40e_aqc_switch_config_element_resp *element) { sbuf_clear(s); switch (element->element_type) { case I40E_AQ_SW_ELEM_TYPE_MAC: sbuf_printf(s, "MAC %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_PF: sbuf_printf(s, "PF %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_VF: sbuf_printf(s, "VF %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_EMP: sbuf_cat(s, "EMP"); break; case I40E_AQ_SW_ELEM_TYPE_BMC: sbuf_cat(s, "BMC"); break; case I40E_AQ_SW_ELEM_TYPE_PV: sbuf_cat(s, "PV"); break; case I40E_AQ_SW_ELEM_TYPE_VEB: sbuf_cat(s, "VEB"); break; case I40E_AQ_SW_ELEM_TYPE_PA: sbuf_cat(s, "PA"); break; case I40E_AQ_SW_ELEM_TYPE_VSI: sbuf_printf(s, "VSI %3d", element->element_info); break; default: sbuf_cat(s, "?"); break; } sbuf_finish(s); return sbuf_data(s); } static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; struct sbuf *nmbuf; enum i40e_status_code status; int error = 0; u16 next = 0; u8 aq_buf[I40E_AQ_LARGE_BUF]; struct i40e_aqc_get_switch_config_resp *sw_config; sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } status = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (status) { device_printf(dev, "%s: aq_get_switch_config() error %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return error; } if (next) device_printf(dev, "%s: TODO: get more config with SEID %d\n", __func__, next); nmbuf = sbuf_new_auto(); if (!nmbuf) { device_printf(dev, "Could not allocate sbuf for name output.\n"); sbuf_delete(buf); return (ENOMEM); } sbuf_cat(buf, "\n"); /* Assuming <= 255 elements in switch */ sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); sbuf_printf(buf, "total # 
of elements: %d\n", sw_config->header.num_total); /* Exclude: ** Revision -- all elements are revision 1 for now */ sbuf_printf(buf, "SEID ( Name ) | Uplink | Downlink | Conn Type\n" " | | | (uplink)\n"); for (int i = 0; i < sw_config->header.num_reported; i++) { // "%4d (%8s) | %8s %8s %#8x", sbuf_printf(buf, "%4d", sw_config->element[i].seid); sbuf_cat(buf, " "); sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, &sw_config->element[i])); sbuf_cat(buf, " | "); sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type); if (i < sw_config->header.num_reported - 1) sbuf_cat(buf, "\n"); } sbuf_delete(nmbuf); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; u32 reg; struct i40e_aqc_get_set_rss_key_data key_data; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } sbuf_cat(buf, "\n"); if (hw->mac.type == I40E_MAC_X722) { bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key)); status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); if (status) device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, ""); } else { for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); sbuf_printf(buf, "%4D", (u_char *)®, ""); } } error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; u8 hlut[512]; u32 reg; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } sbuf_cat(buf, "\n"); if (hw->mac.type == I40E_MAC_X722) { bzero(hlut, sizeof(hlut)); status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); if (status) device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_printf(buf, "%512D", (u_char *)hlut, ""); } else { for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { reg = rd32(hw, I40E_PFQF_HLUT(i)); sbuf_printf(buf, "%4D", (u_char *)®, ""); } } error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; u64 hena; hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); return sysctl_handle_long(oidp, NULL, hena, req); } /* * Sysctl to disable firmware's link management * * 1 - Disable link management on this port * 0 - Re-enable link management * * On normal NVMs, firmware manages link by default. 
*/ static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int requested_mode = -1; enum i40e_status_code status = 0; int error = 0; /* Read in new mode */ error = sysctl_handle_int(oidp, &requested_mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Check for sane value */ if (requested_mode < 0 || requested_mode > 1) { device_printf(dev, "Valid modes are 0 or 1\n"); return (EINVAL); } /* Set new mode */ status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); if (status) { device_printf(dev, "%s: Error setting new phy debug mode %s," " aq error: %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } return (0); } /* * Sysctl to read a byte from I2C bus. * * Input: 32-bit value: * bits 0-7: device address (0xA0 or 0xA2) * bits 8-15: offset (0-255) * bits 16-31: unused * Output: 8-bit value read */ static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int input = -1, error = 0; device_printf(dev, "%s: start\n", __func__); u8 dev_addr, offset, output; /* Read in I2C read parameters */ error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Validate device address */ dev_addr = input & 0xFF; if (dev_addr != 0xA0 && dev_addr != 0xA2) { return (EINVAL); } offset = (input >> 8) & 0xFF; error = ixl_read_i2c_byte(pf, offset, dev_addr, &output); if (error) return (error); device_printf(dev, "%02X\n", output); return (0); } /* * Sysctl to write a byte to the I2C bus. * * Input: 32-bit value: * bits 0-7: device address (0xA0 or 0xA2) * bits 8-15: offset (0-255) * bits 16-23: value to write * bits 24-31: unused * Output: 8-bit value written */ static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int input = -1, error = 0; u8 dev_addr, offset, value; /* Read in I2C write parameters */ error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Validate device address */ dev_addr = input & 0xFF; if (dev_addr != 0xA0 && dev_addr != 0xA2) { return (EINVAL); } offset = (input >> 8) & 0xFF; value = (input >> 16) & 0xFF; error = ixl_write_i2c_byte(pf, offset, dev_addr, value); if (error) return (error); device_printf(dev, "%02X written\n", value); return (0); } static int ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, u8 bit_pos, int *is_set) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; enum i40e_status_code status; status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, abilities, NULL); if (status) { device_printf(dev, "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } *is_set = !!(abilities->phy_type_ext & bit_pos); return (0); } static int ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, u8 bit_pos, int set) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct i40e_aq_set_phy_config config; enum i40e_status_code status; /* Set new PHY config */ memset(&config, 0, sizeof(config)); config.fec_config = abilities->phy_type_ext & ~(bit_pos); if (set) config.fec_config |= bit_pos; if (config.fec_config != abilities->phy_type_ext) { config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 
config.phy_type = abilities->phy_type; config.phy_type_ext = abilities->phy_type_ext; config.link_speed = abilities->link_speed; config.eee_capability = abilities->eee_capability; config.eeer = abilities->eeer_val; config.low_power_ctrl = abilities->d3_lpan; status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { device_printf(dev, "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } } return (0); } static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); } static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); } static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); } static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); } static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); } Index: head/sys/dev/ixl/ixl_txrx.c =================================================================== --- head/sys/dev/ixl/ixl_txrx.c (revision 332422) +++ head/sys/dev/ixl/ixl_txrx.c (revision 332423) @@ -1,1839 +1,1839 @@ /****************************************************************************** Copyright (c) 2013-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ /* ** IXL driver TX/RX Routines: ** This was separated to allow usage by ** both the PF and VF drivers. */ #ifndef IXL_STANDALONE_BUILD #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #endif #include "ixl.h" #ifdef RSS #include <net/rss_config.h> #endif /* Local Prototypes */ static void ixl_rx_checksum(struct mbuf *, u32, u32, u8); static void ixl_refresh_mbufs(struct ixl_queue *, int); static int ixl_xmit(struct ixl_queue *, struct mbuf **); static int ixl_tx_setup_offload(struct ixl_queue *, struct mbuf *, u32 *, u32 *); static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *); static inline void ixl_rx_discard(struct rx_ring *, int); static inline void ixl_rx_input(struct rx_ring *, struct ifnet *, struct mbuf *, u8); static inline bool ixl_tso_detect_sparse(struct mbuf *mp); static inline u32 ixl_get_tx_head(struct ixl_queue *que); #ifdef DEV_NETMAP #include <dev/netmap/if_ixl_netmap.h> int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1; #endif /* DEV_NETMAP */ /* * @key key is saved into this parameter */ void ixl_get_default_rss_key(u32 *key) { MPASS(key != NULL); u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687, 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1, 0x0, 0x0, 0x0}; bcopy(rss_seed, key, IXL_RSS_KEY_SIZE); } /* ** Multiqueue Transmit driver */ int ixl_mq_start(struct ifnet *ifp, struct mbuf *m) { struct ixl_vsi *vsi = ifp->if_softc; struct ixl_queue *que; struct tx_ring *txr; int err, i; #ifdef RSS u32 bucket_id; #endif /* ** Which queue to use: ** ** When doing RSS, map it to the same outbound ** queue as the incoming flow would be mapped to. ** If everything is setup correctly, it should be ** the same bucket that the current CPU we're on is.
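** As a small worked example of the fallback mapping below: with eight
** TX queues, a packet carrying flowid 43 lands on queue 43 % 8 == 3,
** while a packet with no flow hash simply uses curcpu % 8.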
*/ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { #ifdef RSS if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m), &bucket_id) == 0) { i = bucket_id % vsi->num_queues; } else #endif i = m->m_pkthdr.flowid % vsi->num_queues; } else i = curcpu % vsi->num_queues; que = &vsi->queues[i]; txr = &que->txr; err = drbr_enqueue(ifp, txr->br, m); if (err) return (err); if (IXL_TX_TRYLOCK(txr)) { ixl_mq_start_locked(ifp, txr); IXL_TX_UNLOCK(txr); } else taskqueue_enqueue(que->tq, &que->tx_task); return (0); } int ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) { struct ixl_queue *que = txr->que; struct ixl_vsi *vsi = que->vsi; struct mbuf *next; int err = 0; if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || vsi->link_active == 0) return (ENETDOWN); /* Process the transmit queue */ while ((next = drbr_peek(ifp, txr->br)) != NULL) { if ((err = ixl_xmit(que, &next)) != 0) { if (next == NULL) drbr_advance(ifp, txr->br); else drbr_putback(ifp, txr->br, next); break; } drbr_advance(ifp, txr->br); /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, next); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } if (txr->avail < IXL_TX_CLEANUP_THRESHOLD) ixl_txeof(que); return (err); } /* * Called from a taskqueue to drain queued transmit packets. */ void ixl_deferred_mq_start(void *arg, int pending) { struct ixl_queue *que = arg; struct tx_ring *txr = &que->txr; struct ixl_vsi *vsi = que->vsi; struct ifnet *ifp = vsi->ifp; IXL_TX_LOCK(txr); if (!drbr_empty(ifp, txr->br)) ixl_mq_start_locked(ifp, txr); IXL_TX_UNLOCK(txr); } /* ** Flush all queue ring buffers */ void ixl_qflush(struct ifnet *ifp) { struct ixl_vsi *vsi = ifp->if_softc; for (int i = 0; i < vsi->num_queues; i++) { struct ixl_queue *que = &vsi->queues[i]; struct tx_ring *txr = &que->txr; struct mbuf *m; IXL_TX_LOCK(txr); while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) m_freem(m); IXL_TX_UNLOCK(txr); } if_qflush(ifp); } /* ** Find mbuf chains passed to the driver ** that are 'sparse', using more than 8 ** mbufs to deliver an mss-size chunk of data */ static inline bool ixl_tso_detect_sparse(struct mbuf *mp) { struct mbuf *m; int num, mss; num = 0; mss = mp->m_pkthdr.tso_segsz; /* Exclude first mbuf; assume it contains all headers */ for (m = mp->m_next; m != NULL; m = m->m_next) { if (m == NULL) break; num++; mss -= m->m_len % mp->m_pkthdr.tso_segsz; if (mss < 1) { if (num > IXL_SPARSE_CHAIN) return (true); num = (mss == 0) ? 0 : 1; mss += mp->m_pkthdr.tso_segsz; } } return (false); } /********************************************************************* * * This routine maps the mbufs to tx descriptors, allowing the * TX engine to transmit the packets. 
* - return 0 on success, positive on failure * **********************************************************************/ #define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) static int ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; struct ixl_tx_buf *buf; struct i40e_tx_desc *txd = NULL; struct mbuf *m_head, *m; int i, j, error, nsegs; int first, last = 0; u16 vtag = 0; u32 cmd, off; bus_dmamap_t map; bus_dma_tag_t tag; bus_dma_segment_t segs[IXL_MAX_TSO_SEGS]; cmd = off = 0; m_head = *m_headp; /* * Important to capture the first descriptor * used because it will contain the index of * the one we tell the hardware to report back */ first = txr->next_avail; buf = &txr->buffers[first]; map = buf->map; tag = txr->tx_tag; if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { /* Use larger mapping for TSO */ tag = txr->tso_tag; if (ixl_tso_detect_sparse(m_head)) { m = m_defrag(m_head, M_NOWAIT); if (m == NULL) { m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; } } /* * Map the packet for DMA. */ error = bus_dmamap_load_mbuf_sg(tag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { struct mbuf *m; m = m_defrag(*m_headp, M_NOWAIT); if (m == NULL) { que->mbuf_defrag_failed++; m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; /* Try it again */ error = bus_dmamap_load_mbuf_sg(tag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { que->tx_dmamap_failed++; m_freem(*m_headp); *m_headp = NULL; return (error); } } else if (error != 0) { que->tx_dmamap_failed++; m_freem(*m_headp); *m_headp = NULL; return (error); } /* Make certain there are enough descriptors */ if (nsegs > txr->avail - 2) { txr->no_desc++; error = ENOBUFS; goto xmit_fail; } m_head = *m_headp; /* Set up the TSO/CSUM offload */ if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { error = ixl_tx_setup_offload(que, m_head, &cmd, &off); if (error) goto xmit_fail; } cmd |= I40E_TX_DESC_CMD_ICRC; /* Grab the VLAN tag */ if (m_head->m_flags & M_VLANTAG) { cmd |= I40E_TX_DESC_CMD_IL2TAG1; vtag = htole16(m_head->m_pkthdr.ether_vtag); } i = txr->next_avail; for (j = 0; j < nsegs; j++) { bus_size_t seglen; buf = &txr->buffers[i]; buf->tag = tag; /* Keep track of the type tag */ txd = &txr->base[i]; seglen = segs[j].ds_len; txd->buffer_addr = htole64(segs[j].ds_addr); txd->cmd_type_offset_bsz = htole64(I40E_TX_DESC_DTYPE_DATA | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT) | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT) | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT)); last = i; /* descriptor that will get completion IRQ */ if (++i == que->num_desc) i = 0; buf->m_head = NULL; buf->eop_index = -1; } /* Set the last descriptor for report */ txd->cmd_type_offset_bsz |= htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT)); txr->avail -= nsegs; txr->next_avail = i; buf->m_head = m_head; /* Swap the dma map between the first and last descriptor */ txr->buffers[first].map = buf->map; buf->map = map; bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE); /* Set the index of the descriptor that will be marked done */ buf = &txr->buffers[first]; buf->eop_index = last; bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * Advance the Transmit Descriptor Tail (Tdt), this tells the * hardware that this frame is available to transmit. 
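 * (Note that 'i' has already been advanced past the last descriptor
 * filled in above, so the tail write below points the hardware at the
 * first free descriptor slot.)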
*/ ++txr->total_packets; wr32(hw, txr->tail, i); /* Mark outstanding work */ atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG); return (0); xmit_fail: bus_dmamap_unload(tag, buf->map); return (error); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. This is * called only once at attach, setup is done every reset. * **********************************************************************/ int ixl_allocate_tx_data(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; struct ixl_vsi *vsi = que->vsi; device_t dev = vsi->dev; struct ixl_tx_buf *buf; int error = 0; /* * Setup DMA descriptor areas. */ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IXL_TSO_SIZE, /* maxsize */ IXL_MAX_TX_SEGS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->tx_tag))) { device_printf(dev,"Unable to allocate TX DMA tag\n"); goto fail; } /* Make a special tag for TSO */ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IXL_TSO_SIZE, /* maxsize */ IXL_MAX_TSO_SEGS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->tso_tag))) { device_printf(dev,"Unable to allocate TX TSO DMA tag\n"); goto fail; } if (!(txr->buffers = (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) * que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); error = ENOMEM; goto fail; } /* Create the descriptor buffer default dma maps */ buf = txr->buffers; for (int i = 0; i < que->num_desc; i++, buf++) { buf->tag = txr->tx_tag; error = bus_dmamap_create(buf->tag, 0, &buf->map); if (error != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } } fail: return (error); } /********************************************************************* * * (Re)Initialize a queue transmit ring. * - called by init, it clears the descriptor ring, * and frees any stale mbufs * **********************************************************************/ void ixl_init_tx_ring(struct ixl_queue *que) { #ifdef DEV_NETMAP struct netmap_adapter *na = NA(que->vsi->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ struct tx_ring *txr = &que->txr; struct ixl_tx_buf *buf; /* Clear the old ring contents */ IXL_TX_LOCK(txr); #ifdef DEV_NETMAP /* * (under lock): if in netmap mode, do some consistency * checks and set slot to entry 0 of the netmap ring. */ slot = netmap_reset(na, NR_TX, que->me, 0); #endif /* DEV_NETMAP */ bzero((void *)txr->base, (sizeof(struct i40e_tx_desc)) * que->num_desc); /* Reset indices */ txr->next_avail = 0; txr->next_to_clean = 0; /* Reset watchdog status */ txr->watchdog_timer = 0; #ifdef IXL_FDIR /* Initialize flow director */ txr->atr_rate = ixl_atr_rate; txr->atr_count = 0; #endif /* Free any existing tx mbufs. 
*/ buf = txr->buffers; for (int i = 0; i < que->num_desc; i++, buf++) { if (buf->m_head != NULL) { bus_dmamap_sync(buf->tag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(buf->tag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; } #ifdef DEV_NETMAP /* * In netmap mode, set the map for the packet buffer. * NOTE: Some drivers (not this one) also need to set * the physical buffer address in the NIC ring. * netmap_idx_n2k() maps a nic index, i, into the corresponding * netmap slot index, si */ if (slot) { - int si = netmap_idx_n2k(&na->tx_rings[que->me], i); + int si = netmap_idx_n2k(na->tx_rings[que->me], i); netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si)); } #endif /* DEV_NETMAP */ /* Clear the EOP index */ buf->eop_index = -1; } /* Set number of descriptors available */ txr->avail = que->num_desc; bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); IXL_TX_UNLOCK(txr); } /********************************************************************* * * Free transmit ring related data structures. * **********************************************************************/ void ixl_free_que_tx(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; struct ixl_tx_buf *buf; INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me); for (int i = 0; i < que->num_desc; i++) { buf = &txr->buffers[i]; if (buf->m_head != NULL) { bus_dmamap_sync(buf->tag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(buf->tag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; if (buf->map != NULL) { bus_dmamap_destroy(buf->tag, buf->map); buf->map = NULL; } } else if (buf->map != NULL) { bus_dmamap_unload(buf->tag, buf->map); bus_dmamap_destroy(buf->tag, buf->map); buf->map = NULL; } } if (txr->br != NULL) buf_ring_free(txr->br, M_DEVBUF); if (txr->buffers != NULL) { free(txr->buffers, M_DEVBUF); txr->buffers = NULL; } if (txr->tx_tag != NULL) { bus_dma_tag_destroy(txr->tx_tag); txr->tx_tag = NULL; } if (txr->tso_tag != NULL) { bus_dma_tag_destroy(txr->tso_tag); txr->tso_tag = NULL; } INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me); return; } /********************************************************************* * * Setup descriptor for hw offloads * **********************************************************************/ static int ixl_tx_setup_offload(struct ixl_queue *que, struct mbuf *mp, u32 *cmd, u32 *off) { struct ether_vlan_header *eh; #ifdef INET struct ip *ip = NULL; #endif struct tcphdr *th = NULL; #ifdef INET6 struct ip6_hdr *ip6; #endif int elen, ip_hlen = 0, tcp_hlen; u16 etype; u8 ipproto = 0; bool tso = FALSE; /* Set up the TSO context descriptor if required */ if (mp->m_pkthdr.csum_flags & CSUM_TSO) { tso = ixl_tso_setup(que, mp); if (tso) ++que->tso; else return (ENXIO); } /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. 
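The comment above describes the first step of the offload setup: find the EtherType and the length of the L2 header, skipping one 802.1Q tag if present. Outside the kernel the same parse can be written against a raw frame buffer; names here are illustrative and, as in the driver, only a single VLAN tag is handled:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohs() */

#define ETH_HDR_LEN	14
#define VLAN_TAG_LEN	4

/* Return the EtherType and store the L3 offset in *l3_off. */
static uint16_t
parse_ether_header(const uint8_t *frame, int *l3_off)
{
	uint16_t etype;

	memcpy(&etype, frame + 12, sizeof(etype));	/* outer type/TPID */
	etype = ntohs(etype);
	if (etype == 0x8100) {				/* 802.1Q tagged */
		memcpy(&etype, frame + 16, sizeof(etype));
		etype = ntohs(etype);
		*l3_off = ETH_HDR_LEN + VLAN_TAG_LEN;
	} else {
		*l3_off = ETH_HDR_LEN;
	}
	return (etype);
}

This is also why the code below shifts elen right by one and ip_hlen right by two before filling the offset field: the MACLEN and IPLEN fields appear to be counted in 16-bit and 32-bit words respectively.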
*/ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); elen = ETHER_HDR_LEN; } switch (etype) { #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + elen); ip_hlen = ip->ip_hl << 2; ipproto = ip->ip_p; th = (struct tcphdr *)((caddr_t)ip + ip_hlen); /* The IP checksum must be recalculated with TSO */ if (tso) *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; else *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; break; #endif #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + elen); ip_hlen = sizeof(struct ip6_hdr); ipproto = ip6->ip6_nxt; th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); *cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; break; #endif default: break; } *off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; *off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; switch (ipproto) { case IPPROTO_TCP: tcp_hlen = th->th_off << 2; if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) { *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; *off |= (tcp_hlen >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } #ifdef IXL_FDIR ixl_atr(que, th, etype); #endif break; case IPPROTO_UDP: if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) { *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; *off |= (sizeof(struct udphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } break; case IPPROTO_SCTP: if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) { *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; *off |= (sizeof(struct sctphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } /* Fall Thru */ default: break; } return (0); } /********************************************************************** * * Setup context for hardware segmentation offload (TSO) * **********************************************************************/ static bool ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp) { struct tx_ring *txr = &que->txr; struct i40e_tx_context_desc *TXD; struct ixl_tx_buf *buf; u32 cmd, mss, type, tsolen; u16 etype; int idx, elen, ip_hlen, tcp_hlen; struct ether_vlan_header *eh; #ifdef INET struct ip *ip; #endif #ifdef INET6 struct ip6_hdr *ip6; #endif #if defined(INET6) || defined(INET) struct tcphdr *th; #endif u64 type_cmd_tso_mss; /* * Determine where frame payload starts. * Jump over vlan headers if already present */ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = eh->evl_proto; } else { elen = ETHER_HDR_LEN; etype = eh->evl_encap_proto; } switch (ntohs(etype)) { #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + elen); if (ip6->ip6_nxt != IPPROTO_TCP) return (ENXIO); ip_hlen = sizeof(struct ip6_hdr); th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); tcp_hlen = th->th_off << 2; /* * The corresponding flag is set by the stack in the IPv4 * TSO case, but not in IPv6 (at least in FreeBSD 10.2). * So, set it here because the rest of the flow requires it. 
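For TSO the stack's checksum cannot be used as-is: the driver seeds th_sum with a pseudo-header sum that deliberately omits the length (in6_cksum_pseudo() above, in_pseudo() in the IPv4 case below), so the hardware can add the per-segment length while it segments. The underlying arithmetic is the usual 16-bit ones'-complement sum; a standalone sketch over a raw byte buffer, with a made-up address pair as the example:

#include <stdint.h>
#include <stddef.h>

/* Ones'-complement sum of big-endian 16-bit words over a buffer. */
static uint16_t
ocsum16(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len == 1)
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)sum);
}

/*
 * Pseudo-header seed for 10.0.0.1 -> 10.0.0.2, protocol TCP (6):
 * source, destination, zero byte and protocol, with no length word.
 */
static uint16_t
example_tso_seed(void)
{
	const uint8_t ph[10] = { 10, 0, 0, 1, 10, 0, 0, 2, 0, 6 };

	return (ocsum16(ph, sizeof(ph), 0));
}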
*/ mp->m_pkthdr.csum_flags |= CSUM_TCP_IPV6; break; #endif #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + elen); if (ip->ip_p != IPPROTO_TCP) return (ENXIO); ip->ip_sum = 0; ip_hlen = ip->ip_hl << 2; th = (struct tcphdr *)((caddr_t)ip + ip_hlen); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); tcp_hlen = th->th_off << 2; break; #endif default: printf("%s: CSUM_TSO but no supported IP version (0x%04x)", __func__, ntohs(etype)); return FALSE; } /* Ensure we have at least the IP+TCP header in the first mbuf. */ if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr)) return FALSE; idx = txr->next_avail; buf = &txr->buffers[idx]; TXD = (struct i40e_tx_context_desc *) &txr->base[idx]; tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen); type = I40E_TX_DESC_DTYPE_CONTEXT; cmd = I40E_TX_CTX_DESC_TSO; /* TSO MSS must not be less than 64 */ if (mp->m_pkthdr.tso_segsz < IXL_MIN_TSO_MSS) { que->mss_too_small++; mp->m_pkthdr.tso_segsz = IXL_MIN_TSO_MSS; } mss = mp->m_pkthdr.tso_segsz; type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) | ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT); TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss); TXD->tunneling_params = htole32(0); buf->m_head = NULL; buf->eop_index = -1; if (++idx == que->num_desc) idx = 0; txr->avail--; txr->next_avail = idx; return TRUE; } /* ** ixl_get_tx_head - Retrieve the value from the ** location the HW records its HEAD index */ static inline u32 ixl_get_tx_head(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; void *head = &txr->base[que->num_desc]; return LE32_TO_CPU(*(volatile __le32 *)head); } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * **********************************************************************/ bool ixl_txeof(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; u32 first, last, head, done, processed; struct ixl_tx_buf *buf; struct i40e_tx_desc *tx_desc, *eop_desc; mtx_assert(&txr->mtx, MA_OWNED); #ifdef DEV_NETMAP // XXX todo: implement moderation if (netmap_tx_irq(que->vsi->ifp, que->me)) return FALSE; #endif /* DEF_NETMAP */ /* These are not the descriptors you seek, move along :) */ if (txr->avail == que->num_desc) { atomic_store_rel_32(&txr->watchdog_timer, 0); return FALSE; } processed = 0; first = txr->next_to_clean; buf = &txr->buffers[first]; tx_desc = (struct i40e_tx_desc *)&txr->base[first]; last = buf->eop_index; if (last == -1) return FALSE; eop_desc = (struct i40e_tx_desc *)&txr->base[last]; /* Get the Head WB value */ head = ixl_get_tx_head(que); /* ** Get the index of the first descriptor ** BEYOND the EOP and call that 'done'. ** I do this so the comparison in the ** inner while loop below can be simple */ if (++last == que->num_desc) last = 0; done = last; bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_POSTREAD); /* ** The HEAD index of the ring is written in a ** defined location, this rather than a done bit ** is what is used to keep track of what must be ** 'cleaned'. 
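With head write-back the cleanup loop below does not consult per-descriptor done bits at all; everything between the software clean index and the head the hardware reports is finished. Reduced to arithmetic (illustrative names):

#include <stdint.h>

/* Descriptors the hardware has completed, accounting for wrap-around. */
static uint32_t
tx_descs_done(uint32_t next_to_clean, uint32_t head, uint32_t ring_size)
{
	return ((head + ring_size - next_to_clean) % ring_size);
}

/* e.g. ring_size 1024, next_to_clean 1000, head 8 -> 32 descriptors */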
*/ while (first != head) { /* We clean the range of the packet */ while (first != done) { ++txr->avail; ++processed; if (buf->m_head) { txr->bytes += /* for ITR adjustment */ buf->m_head->m_pkthdr.len; txr->tx_bytes += /* for TX stats */ buf->m_head->m_pkthdr.len; bus_dmamap_sync(buf->tag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(buf->tag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; } buf->eop_index = -1; if (++first == que->num_desc) first = 0; buf = &txr->buffers[first]; tx_desc = &txr->base[first]; } ++txr->packets; /* See if there is more work now */ last = buf->eop_index; if (last != -1) { eop_desc = &txr->base[last]; /* Get next done point */ if (++last == que->num_desc) last = 0; done = last; } else break; } bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); txr->next_to_clean = first; /* * If there are no pending descriptors, clear the timeout. */ if (txr->avail == que->num_desc) { atomic_store_rel_32(&txr->watchdog_timer, 0); return FALSE; } return TRUE; } /********************************************************************* * * Refresh mbuf buffers for RX descriptor rings * - now keeps its own state so discards due to resource * exhaustion are unnecessary, if an mbuf cannot be obtained * it just returns, keeping its placeholder, thus it can simply * be recalled to try again. * **********************************************************************/ static void ixl_refresh_mbufs(struct ixl_queue *que, int limit) { struct ixl_vsi *vsi = que->vsi; struct rx_ring *rxr = &que->rxr; bus_dma_segment_t hseg[1]; bus_dma_segment_t pseg[1]; struct ixl_rx_buf *buf; struct mbuf *mh, *mp; int i, j, nsegs, error; bool refreshed = FALSE; i = j = rxr->next_refresh; /* Control the loop with one beyond */ if (++j == que->num_desc) j = 0; while (j != limit) { buf = &rxr->buffers[i]; if (rxr->hdr_split == FALSE) goto no_split; if (buf->m_head == NULL) { mh = m_gethdr(M_NOWAIT, MT_DATA); if (mh == NULL) goto update; } else mh = buf->m_head; mh->m_pkthdr.len = mh->m_len = MHLEN; mh->m_len = MHLEN; mh->m_flags |= M_PKTHDR; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->htag, buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: hdr dmamap load" " failure - %d\n", error); m_free(mh); buf->m_head = NULL; goto update; } buf->m_head = mh; bus_dmamap_sync(rxr->htag, buf->hmap, BUS_DMASYNC_PREREAD); rxr->base[i].read.hdr_addr = htole64(hseg[0].ds_addr); no_split: if (buf->m_pack == NULL) { mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxr->mbuf_sz); if (mp == NULL) goto update; } else mp = buf->m_pack; mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->ptag, buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: payload dmamap load" " failure - %d\n", error); m_free(mp); buf->m_pack = NULL; goto update; } buf->m_pack = mp; bus_dmamap_sync(rxr->ptag, buf->pmap, BUS_DMASYNC_PREREAD); rxr->base[i].read.pkt_addr = htole64(pseg[0].ds_addr); /* Used only when doing header split */ rxr->base[i].read.hdr_addr = 0; refreshed = TRUE; /* Next is precalculated */ i = j; rxr->next_refresh = i; if (++j == que->num_desc) j = 0; } update: if (refreshed) /* Update hardware tail index */ wr32(vsi->hw, rxr->tail, rxr->next_refresh); return; } /********************************************************************* * * Allocate memory for rx_buffer structures. 
Since we use one * rx_buffer per descriptor, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've defined. * **********************************************************************/ int ixl_allocate_rx_data(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; struct ixl_vsi *vsi = que->vsi; device_t dev = vsi->dev; struct ixl_rx_buf *buf; int i, bsize, error; bsize = sizeof(struct ixl_rx_buf) * que->num_desc; if (!(rxr->buffers = (struct ixl_rx_buf *) malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); error = ENOMEM; return (error); } if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MSIZE, /* maxsize */ 1, /* nsegments */ MSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->htag))) { device_printf(dev, "Unable to create RX DMA htag\n"); return (error); } if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM16BYTES, /* maxsize */ 1, /* nsegments */ MJUM16BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->ptag))) { device_printf(dev, "Unable to create RX DMA ptag\n"); return (error); } for (i = 0; i < que->num_desc; i++) { buf = &rxr->buffers[i]; error = bus_dmamap_create(rxr->htag, BUS_DMA_NOWAIT, &buf->hmap); if (error) { device_printf(dev, "Unable to create RX head map\n"); break; } error = bus_dmamap_create(rxr->ptag, BUS_DMA_NOWAIT, &buf->pmap); if (error) { device_printf(dev, "Unable to create RX pkt map\n"); break; } } return (error); } /********************************************************************* * * (Re)Initialize the queue receive ring and its buffers. 
* **********************************************************************/ int ixl_init_rx_ring(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; struct ixl_vsi *vsi = que->vsi; #if defined(INET6) || defined(INET) struct ifnet *ifp = vsi->ifp; struct lro_ctrl *lro = &rxr->lro; #endif struct ixl_rx_buf *buf; bus_dma_segment_t pseg[1], hseg[1]; int rsize, nsegs, error = 0; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(que->vsi->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ IXL_RX_LOCK(rxr); #ifdef DEV_NETMAP /* same as in ixl_init_tx_ring() */ slot = netmap_reset(na, NR_RX, que->me, 0); #endif /* DEV_NETMAP */ /* Clear the ring contents */ rsize = roundup2(que->num_desc * sizeof(union i40e_rx_desc), DBA_ALIGN); bzero((void *)rxr->base, rsize); /* Cleanup any existing buffers */ for (int i = 0; i < que->num_desc; i++) { buf = &rxr->buffers[i]; if (buf->m_head != NULL) { bus_dmamap_sync(rxr->htag, buf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, buf->hmap); buf->m_head->m_flags |= M_PKTHDR; m_freem(buf->m_head); } if (buf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, buf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, buf->pmap); buf->m_pack->m_flags |= M_PKTHDR; m_freem(buf->m_pack); } buf->m_head = NULL; buf->m_pack = NULL; } /* header split is off */ rxr->hdr_split = FALSE; /* Now replenish the mbufs */ for (int j = 0; j != que->num_desc; ++j) { struct mbuf *mh, *mp; buf = &rxr->buffers[j]; #ifdef DEV_NETMAP /* * In netmap mode, fill the map and set the buffer * address in the NIC ring, considering the offset * between the netmap and NIC rings (see comment in * ixgbe_setup_transmit_ring() ). No need to allocate * an mbuf, so end the block with a continue; */ if (slot) { - int sj = netmap_idx_n2k(&na->rx_rings[que->me], j); + int sj = netmap_idx_n2k(na->rx_rings[que->me], j); uint64_t paddr; void *addr; addr = PNMB(na, slot + sj, &paddr); netmap_load_map(na, rxr->dma.tag, buf->pmap, addr); /* Update descriptor and the cached value */ rxr->base[j].read.pkt_addr = htole64(paddr); rxr->base[j].read.hdr_addr = 0; continue; } #endif /* DEV_NETMAP */ /* ** Don't allocate mbufs if not ** doing header split, its wasteful */ if (rxr->hdr_split == FALSE) goto skip_head; /* First the header */ buf->m_head = m_gethdr(M_NOWAIT, MT_DATA); if (buf->m_head == NULL) { error = ENOBUFS; goto fail; } m_adj(buf->m_head, ETHER_ALIGN); mh = buf->m_head; mh->m_len = mh->m_pkthdr.len = MHLEN; mh->m_flags |= M_PKTHDR; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->htag, buf->hmap, buf->m_head, hseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) /* Nothing elegant to do here */ goto fail; bus_dmamap_sync(rxr->htag, buf->hmap, BUS_DMASYNC_PREREAD); /* Update descriptor */ rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr); skip_head: /* Now the payload cluster */ buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxr->mbuf_sz); if (buf->m_pack == NULL) { error = ENOBUFS; goto fail; } mp = buf->m_pack; mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->ptag, buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) goto fail; bus_dmamap_sync(rxr->ptag, buf->pmap, BUS_DMASYNC_PREREAD); /* Update descriptor */ rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr); rxr->base[j].read.hdr_addr = 0; } /* Setup our descriptor indices */ rxr->next_check = 0; rxr->next_refresh = 0; rxr->lro_enabled = FALSE; rxr->split = 0; rxr->bytes = 0; rxr->discard = FALSE; wr32(vsi->hw, rxr->tail, que->num_desc - 1); 
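/*
 * The tail write just above hands the freshly replenished ring to the
 * hardware: with next_check and next_refresh both reset to 0, leaving
 * the tail register at the last initialized descriptor (num_desc - 1)
 * in effect marks every slot as available for receive.  With
 * num_desc = 1024, for example, the tail is programmed to 1023 and
 * software starts reaping completions at index 0.
 */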
ixl_flush(vsi->hw); #if defined(INET6) || defined(INET) /* ** Now set up the LRO interface: */ if (ifp->if_capenable & IFCAP_LRO) { int err = tcp_lro_init(lro); if (err) { if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me); goto fail; } INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me); rxr->lro_enabled = TRUE; lro->ifp = vsi->ifp; } #endif bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); fail: IXL_RX_UNLOCK(rxr); return (error); } /********************************************************************* * * Free station receive ring data structures * **********************************************************************/ void ixl_free_que_rx(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; struct ixl_rx_buf *buf; INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me); /* Cleanup any existing buffers */ if (rxr->buffers != NULL) { for (int i = 0; i < que->num_desc; i++) { buf = &rxr->buffers[i]; if (buf->m_head != NULL) { bus_dmamap_sync(rxr->htag, buf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, buf->hmap); buf->m_head->m_flags |= M_PKTHDR; m_freem(buf->m_head); } if (buf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, buf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, buf->pmap); buf->m_pack->m_flags |= M_PKTHDR; m_freem(buf->m_pack); } buf->m_head = NULL; buf->m_pack = NULL; if (buf->hmap != NULL) { bus_dmamap_destroy(rxr->htag, buf->hmap); buf->hmap = NULL; } if (buf->pmap != NULL) { bus_dmamap_destroy(rxr->ptag, buf->pmap); buf->pmap = NULL; } } if (rxr->buffers != NULL) { free(rxr->buffers, M_DEVBUF); rxr->buffers = NULL; } } if (rxr->htag != NULL) { bus_dma_tag_destroy(rxr->htag); rxr->htag = NULL; } if (rxr->ptag != NULL) { bus_dma_tag_destroy(rxr->ptag); rxr->ptag = NULL; } INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me); return; } static inline void ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) { #if defined(INET6) || defined(INET) /* * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet * should be computed by hardware. Also it should not have VLAN tag in * ethernet header. */ if (rxr->lro_enabled && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { /* * Send to the stack if: ** - LRO not enabled, or ** - no LRO resources, or ** - lro enqueue fails */ if (rxr->lro.lro_cnt != 0) if (tcp_lro_rx(&rxr->lro, m, 0) == 0) return; } #endif (*ifp->if_input)(ifp, m); } static inline void ixl_rx_discard(struct rx_ring *rxr, int i) { struct ixl_rx_buf *rbuf; rbuf = &rxr->buffers[i]; if (rbuf->fmp != NULL) {/* Partial chain ? */ rbuf->fmp->m_flags |= M_PKTHDR; m_freem(rbuf->fmp); rbuf->fmp = NULL; } /* ** With advanced descriptors the writeback ** clobbers the buffer addrs, so its easier ** to just free the existing mbufs and take ** the normal refresh path to get new buffers ** and mapping. */ if (rbuf->m_head) { m_free(rbuf->m_head); rbuf->m_head = NULL; } if (rbuf->m_pack) { m_free(rbuf->m_pack); rbuf->m_pack = NULL; } return; } #ifdef RSS /* ** i40e_ptype_to_hash: parse the packet type ** to determine the appropriate hash. 
*/ static inline int ixl_ptype_to_hash(u8 ptype) { struct i40e_rx_ptype_decoded decoded; decoded = decode_rx_desc_ptype(ptype); if (!decoded.known) return M_HASHTYPE_OPAQUE_HASH; if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2) return M_HASHTYPE_OPAQUE_HASH; /* Note: anything that gets to this point is IP */ if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) { switch (decoded.inner_prot) { case I40E_RX_PTYPE_INNER_PROT_TCP: return M_HASHTYPE_RSS_TCP_IPV6; case I40E_RX_PTYPE_INNER_PROT_UDP: return M_HASHTYPE_RSS_UDP_IPV6; default: return M_HASHTYPE_RSS_IPV6; } } if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) { switch (decoded.inner_prot) { case I40E_RX_PTYPE_INNER_PROT_TCP: return M_HASHTYPE_RSS_TCP_IPV4; case I40E_RX_PTYPE_INNER_PROT_UDP: return M_HASHTYPE_RSS_UDP_IPV4; default: return M_HASHTYPE_RSS_IPV4; } } /* We should never get here!! */ return M_HASHTYPE_OPAQUE_HASH; } #endif /* RSS */ /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. * * Return TRUE for more work, FALSE for all clean. *********************************************************************/ bool ixl_rxeof(struct ixl_queue *que, int count) { struct ixl_vsi *vsi = que->vsi; struct rx_ring *rxr = &que->rxr; struct ifnet *ifp = vsi->ifp; #if defined(INET6) || defined(INET) struct lro_ctrl *lro = &rxr->lro; #endif int i, nextp, processed = 0; union i40e_rx_desc *cur; struct ixl_rx_buf *rbuf, *nbuf; IXL_RX_LOCK(rxr); #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, que->me, &count)) { IXL_RX_UNLOCK(rxr); return (FALSE); } #endif /* DEV_NETMAP */ for (i = rxr->next_check; count != 0;) { struct mbuf *sendmp, *mh, *mp; u32 status, error; u16 hlen, plen, vtag; u64 qword; u8 ptype; bool eop; /* Sync the ring. */ bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cur = &rxr->base[i]; qword = le64toh(cur->wb.qword1.status_error_len); status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT; ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) { ++rxr->not_done; break; } if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; count--; sendmp = NULL; nbuf = NULL; cur->wb.qword1.status_error_len = 0; rbuf = &rxr->buffers[i]; mh = rbuf->m_head; mp = rbuf->m_pack; eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)); if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1); else vtag = 0; /* Remove device access to the rx buffers. */ if (rbuf->m_head != NULL) { bus_dmamap_sync(rxr->htag, rbuf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, rbuf->hmap); } if (rbuf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, rbuf->pmap); } /* ** Make sure bad packets are discarded, ** note that only EOP descriptor has valid ** error results. 
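The status, error, length and packet-type fields read in the loop above all live in one 64-bit write-back word and come out with plain mask-and-shift. A standalone sketch; the shift and width values are stand-ins for the I40E_RXD_QW1_* constants, not authoritative:

#include <stdint.h>

#define QW1_STATUS_SHIFT	0	/* illustrative values */
#define QW1_STATUS_MASK		(0x7ffffULL << QW1_STATUS_SHIFT)
#define QW1_PTYPE_SHIFT		30
#define QW1_PTYPE_MASK		(0xffULL << QW1_PTYPE_SHIFT)
#define QW1_PLEN_SHIFT		38
#define QW1_PLEN_MASK		(0x3fffULL << QW1_PLEN_SHIFT)

struct rx_wb_fields {
	uint32_t	status;
	uint16_t	plen;
	uint8_t		ptype;
};

static struct rx_wb_fields
rx_unpack_qword1(uint64_t qw)
{
	struct rx_wb_fields f;

	f.status = (uint32_t)((qw & QW1_STATUS_MASK) >> QW1_STATUS_SHIFT);
	f.ptype = (uint8_t)((qw & QW1_PTYPE_MASK) >> QW1_PTYPE_SHIFT);
	f.plen = (uint16_t)((qw & QW1_PLEN_MASK) >> QW1_PLEN_SHIFT);
	return (f);
}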
*/ if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { rxr->desc_errs++; ixl_rx_discard(rxr, i); goto next_desc; } /* Prefetch the next buffer */ if (!eop) { nextp = i + 1; if (nextp == que->num_desc) nextp = 0; nbuf = &rxr->buffers[nextp]; prefetch(nbuf); } /* ** The header mbuf is ONLY used when header ** split is enabled, otherwise we get normal ** behavior, ie, both header and payload ** are DMA'd into the payload buffer. ** ** Rather than using the fmp/lmp global pointers ** we now keep the head of a packet chain in the ** buffer struct and pass this along from one ** descriptor to the next, until we get EOP. */ if (rxr->hdr_split && (rbuf->fmp == NULL)) { if (hlen > IXL_RX_HDR) hlen = IXL_RX_HDR; mh->m_len = hlen; mh->m_flags |= M_PKTHDR; mh->m_next = NULL; mh->m_pkthdr.len = mh->m_len; /* Null buf pointer so it is refreshed */ rbuf->m_head = NULL; /* ** Check the payload length, this ** could be zero if its a small ** packet. */ if (plen > 0) { mp->m_len = plen; mp->m_next = NULL; mp->m_flags &= ~M_PKTHDR; mh->m_next = mp; mh->m_pkthdr.len += mp->m_len; /* Null buf pointer so it is refreshed */ rbuf->m_pack = NULL; rxr->split++; } /* ** Now create the forward ** chain so when complete ** we wont have to. */ if (eop == 0) { /* stash the chain head */ nbuf->fmp = mh; /* Make forward chain */ if (plen) mp->m_next = nbuf->m_pack; else mh->m_next = nbuf->m_pack; } else { /* Singlet, prepare to send */ sendmp = mh; if (vtag) { sendmp->m_pkthdr.ether_vtag = vtag; sendmp->m_flags |= M_VLANTAG; } } } else { /* ** Either no header split, or a ** secondary piece of a fragmented ** split packet. */ mp->m_len = plen; /* ** See if there is a stored head ** that determines what we are */ sendmp = rbuf->fmp; rbuf->m_pack = rbuf->fmp = NULL; if (sendmp != NULL) /* secondary frag */ sendmp->m_pkthdr.len += mp->m_len; else { /* first desc of a non-ps chain */ sendmp = mp; sendmp->m_flags |= M_PKTHDR; sendmp->m_pkthdr.len = mp->m_len; } /* Pass the head pointer on */ if (eop == 0) { nbuf->fmp = sendmp; sendmp = NULL; mp->m_next = nbuf->m_pack; } } ++processed; /* Sending this frame? */ if (eop) { sendmp->m_pkthdr.rcvif = ifp; /* gather stats */ rxr->rx_packets++; rxr->rx_bytes += sendmp->m_pkthdr.len; /* capture data for dynamic ITR adjustment */ rxr->packets++; rxr->bytes += sendmp->m_pkthdr.len; /* Set VLAN tag (field only valid in eop desc) */ if (vtag) { sendmp->m_pkthdr.ether_vtag = vtag; sendmp->m_flags |= M_VLANTAG; } if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) ixl_rx_checksum(sendmp, status, error, ptype); #ifdef RSS sendmp->m_pkthdr.flowid = le32toh(cur->wb.qword0.hi_dword.rss); M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype)); #else sendmp->m_pkthdr.flowid = que->msix; M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); #endif } next_desc: bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Advance our pointers to the next descriptor. 
*/ if (++i == que->num_desc) i = 0; /* Now send to the stack or do LRO */ if (sendmp != NULL) { rxr->next_check = i; IXL_RX_UNLOCK(rxr); ixl_rx_input(rxr, ifp, sendmp, ptype); IXL_RX_LOCK(rxr); i = rxr->next_check; } /* Every 8 descriptors we go to refresh mbufs */ if (processed == 8) { ixl_refresh_mbufs(que, i); processed = 0; } } /* Refresh any remaining buf structs */ if (ixl_rx_unrefreshed(que)) ixl_refresh_mbufs(que, i); rxr->next_check = i; IXL_RX_UNLOCK(rxr); #if defined(INET6) || defined(INET) /* * Flush any outstanding LRO work */ #if __FreeBSD_version >= 1100105 tcp_lro_flush_all(lro); #else struct lro_entry *queued; while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } #endif #endif /* defined(INET6) || defined(INET) */ return (FALSE); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. * *********************************************************************/ static void ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype) { struct i40e_rx_ptype_decoded decoded; decoded = decode_rx_desc_ptype(ptype); /* Errors? */ if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) { mp->m_pkthdr.csum_flags = 0; return; } /* IPv6 with extension headers likely have bad csum */ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) if (status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) { mp->m_pkthdr.csum_flags = 0; return; } /* IP Checksum Good */ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) { mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data |= htons(0xffff); } return; } #if __FreeBSD_version >= 1100000 uint64_t ixl_get_counter(if_t ifp, ift_counter cnt) { struct ixl_vsi *vsi; vsi = if_getsoftc(ifp); switch (cnt) { case IFCOUNTER_IPACKETS: return (vsi->ipackets); case IFCOUNTER_IERRORS: return (vsi->ierrors); case IFCOUNTER_OPACKETS: return (vsi->opackets); case IFCOUNTER_OERRORS: return (vsi->oerrors); case IFCOUNTER_COLLISIONS: /* Collisions are by standard impossible in 40G/10G Ethernet */ return (0); case IFCOUNTER_IBYTES: return (vsi->ibytes); case IFCOUNTER_OBYTES: return (vsi->obytes); case IFCOUNTER_IMCASTS: return (vsi->imcasts); case IFCOUNTER_OMCASTS: return (vsi->omcasts); case IFCOUNTER_IQDROPS: return (vsi->iqdrops); case IFCOUNTER_OQDROPS: return (vsi->oqdrops); case IFCOUNTER_NOPROTO: return (vsi->noproto); default: return (if_get_counter_default(ifp, cnt)); } } #endif Index: head/sys/dev/netmap/if_ptnet.c =================================================================== --- head/sys/dev/netmap/if_ptnet.c (revision 332422) +++ head/sys/dev/netmap/if_ptnet.c (revision 332423) @@ -1,2324 +1,2326 @@ /*- * Copyright (c) 2016, Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* Driver for ptnet paravirtualized network device. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #ifndef INET #error "INET not defined, cannot support offloadings" #endif #if __FreeBSD_version >= 1100000 static uint64_t ptnet_get_counter(if_t, ift_counter); #else typedef struct ifnet *if_t; #define if_getsoftc(_ifp) (_ifp)->if_softc #endif //#define PTNETMAP_STATS //#define DEBUG #ifdef DEBUG #define DBG(x) x #else /* !DEBUG */ #define DBG(x) #endif /* !DEBUG */ extern int ptnet_vnet_hdr; /* Tunable parameter */ struct ptnet_softc; struct ptnet_queue_stats { uint64_t packets; /* if_[io]packets */ uint64_t bytes; /* if_[io]bytes */ uint64_t errors; /* if_[io]errors */ uint64_t iqdrops; /* if_iqdrops */ uint64_t mcasts; /* if_[io]mcasts */ #ifdef PTNETMAP_STATS uint64_t intrs; uint64_t kicks; #endif /* PTNETMAP_STATS */ }; struct ptnet_queue { struct ptnet_softc *sc; struct resource *irq; void *cookie; int kring_id; struct ptnet_csb_gh *ptgh; struct ptnet_csb_hg *pthg; unsigned int kick; struct mtx lock; struct buf_ring *bufring; /* for TX queues */ struct ptnet_queue_stats stats; #ifdef PTNETMAP_STATS struct ptnet_queue_stats last_stats; #endif /* PTNETMAP_STATS */ struct taskqueue *taskq; struct task task; char lock_name[16]; }; #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) struct ptnet_softc { device_t dev; if_t ifp; struct ifmedia media; struct mtx lock; char lock_name[16]; char hwaddr[ETHER_ADDR_LEN]; /* Mirror of PTFEAT register. */ uint32_t ptfeatures; unsigned int vnet_hdr_len; /* PCI BARs support. 
*/ struct resource *iomem; struct resource *msix_mem; unsigned int num_rings; unsigned int num_tx_rings; struct ptnet_queue *queues; struct ptnet_queue *rxqueues; struct ptnet_csb_gh *csb_gh; struct ptnet_csb_hg *csb_hg; unsigned int min_tx_space; struct netmap_pt_guest_adapter *ptna; struct callout tick; #ifdef PTNETMAP_STATS struct timeval last_ts; #endif /* PTNETMAP_STATS */ }; #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) static int ptnet_probe(device_t); static int ptnet_attach(device_t); static int ptnet_detach(device_t); static int ptnet_suspend(device_t); static int ptnet_resume(device_t); static int ptnet_shutdown(device_t); static void ptnet_init(void *opaque); static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); static int ptnet_init_locked(struct ptnet_softc *sc); static int ptnet_stop(struct ptnet_softc *sc); static int ptnet_transmit(if_t ifp, struct mbuf *m); static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_qflush(if_t ifp); static void ptnet_tx_task(void *context, int pending); static int ptnet_media_change(if_t ifp); static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); #ifdef PTNETMAP_STATS static void ptnet_tick(void *opaque); #endif static int ptnet_irqs_init(struct ptnet_softc *sc); static void ptnet_irqs_fini(struct ptnet_softc *sc); static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd); -static int ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, - unsigned *txd, unsigned *rxr, unsigned *rxd); +static int ptnet_nm_config(struct netmap_adapter *na, + struct nm_config_info *info); static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); static int ptnet_nm_register(struct netmap_adapter *na, int onoff); static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); static void ptnet_nm_intr(struct netmap_adapter *na, int onoff); static void ptnet_tx_intr(void *opaque); static void ptnet_rx_intr(void *opaque); static unsigned ptnet_rx_discard(struct netmap_kring *kring, unsigned int head); static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_rx_task(void *context, int pending); #ifdef DEVICE_POLLING static poll_handler_t ptnet_poll; #endif static device_method_t ptnet_methods[] = { DEVMETHOD(device_probe, ptnet_probe), DEVMETHOD(device_attach, ptnet_attach), DEVMETHOD(device_detach, ptnet_detach), DEVMETHOD(device_suspend, ptnet_suspend), DEVMETHOD(device_resume, ptnet_resume), DEVMETHOD(device_shutdown, ptnet_shutdown), DEVMETHOD_END }; static driver_t ptnet_driver = { "ptnet", ptnet_methods, sizeof(struct ptnet_softc) }; /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. 
*/ static devclass_t ptnet_devclass; DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass, NULL, NULL, SI_ORDER_MIDDLE + 2); static int ptnet_probe(device_t dev) { if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID || pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) { return (ENXIO); } device_set_desc(dev, "ptnet network adapter"); return (BUS_PROBE_DEFAULT); } static inline void ptnet_kick(struct ptnet_queue *pq) { #ifdef PTNETMAP_STATS pq->stats.kicks ++; #endif /* PTNETMAP_STATS */ bus_write_4(pq->sc->iomem, pq->kick, 0); } #define PTNET_BUF_RING_SIZE 4096 #define PTNET_RX_BUDGET 512 #define PTNET_RX_BATCH 1 #define PTNET_TX_BUDGET 512 #define PTNET_TX_BATCH 64 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf) #define PTNET_MAX_PKT_SIZE 65536 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP) #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\ CSUM_SCTP_IPV6) #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\ PTNET_CSUM_OFFLOAD_IPV6) static int ptnet_attach(device_t dev) { uint32_t ptfeatures = 0; unsigned int num_rx_rings, num_tx_rings; struct netmap_adapter na_arg; unsigned int nifp_offset; struct ptnet_softc *sc; if_t ifp; uint32_t macreg; int err, rid; int i; sc = device_get_softc(dev); sc->dev = dev; /* Setup PCI resources. */ pci_enable_busmaster(dev); rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->iomem == NULL) { device_printf(dev, "Failed to map I/O BAR\n"); return (ENXIO); } /* Negotiate features with the hypervisor. */ if (ptnet_vnet_hdr) { ptfeatures |= PTNETMAP_F_VNET_HDR; } bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ sc->ptfeatures = ptfeatures; num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); sc->num_rings = num_tx_rings + num_rx_rings; sc->num_tx_rings = num_tx_rings; if (sc->num_rings * sizeof(struct ptnet_csb_gh) > PAGE_SIZE) { device_printf(dev, "CSB cannot handle that many rings (%u)\n", sc->num_rings); err = ENOMEM; goto err_path; } /* Allocate CSB and carry out CSB allocation protocol. */ sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); if (sc->csb_gh == NULL) { device_printf(dev, "Failed to allocate CSB\n"); err = ENOMEM; goto err_path; } sc->csb_hg = (struct ptnet_csb_hg *)(((char *)sc->csb_gh) + PAGE_SIZE); { /* * We use uint64_t rather than vm_paddr_t since we * need 64 bit addresses even on 32 bit platforms. */ uint64_t paddr = vtophys(sc->csb_gh); /* CSB allocation protocol: write to BAH first, then * to BAL (for both GH and HG sections). */ bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, (paddr >> 32) & 0xffffffff); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, paddr & 0xffffffff); paddr = vtophys(sc->csb_hg); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, (paddr >> 32) & 0xffffffff); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, paddr & 0xffffffff); } /* Allocate and initialize per-queue data structures. 
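The CSB registration above has to pass a 64-bit physical address through 32-bit I/O registers, and the driver's comment pins down the order: high half (BAH) first, low half (BAL) second. Factored out, with a hypothetical register-write callback standing in for bus_write_4():

#include <stdint.h>

typedef void (*write32_fn)(uint32_t reg, uint32_t val);

static void
write_paddr64(write32_fn wr, uint32_t reg_hi, uint32_t reg_lo, uint64_t paddr)
{
	wr(reg_hi, (uint32_t)(paddr >> 32));		/* BAH first */
	wr(reg_lo, (uint32_t)(paddr & 0xffffffffU));	/* then BAL */
}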
*/ sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->queues == NULL) { err = ENOMEM; goto err_path; } sc->rxqueues = sc->queues + num_tx_rings; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; pq->sc = sc; pq->kring_id = i; pq->kick = PTNET_IO_KICK_BASE + 4 * i; pq->ptgh = sc->csb_gh + i; pq->pthg = sc->csb_hg + i; snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", device_get_nameunit(dev), i); mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); if (i >= num_tx_rings) { /* RX queue: fix kring_id. */ pq->kring_id -= num_tx_rings; } else { /* TX queue: allocate buf_ring. */ pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, M_DEVBUF, M_NOWAIT, &pq->lock); if (pq->bufring == NULL) { err = ENOMEM; goto err_path; } } } sc->min_tx_space = 64; /* Safe initial value. */ err = ptnet_irqs_init(sc); if (err) { goto err_path; } /* Setup Ethernet interface. */ sc->ifp = ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "Failed to allocate ifnet\n"); err = ENOMEM; goto err_path; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = IF_Gbps(10); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; ifp->if_init = ptnet_init; ifp->if_ioctl = ptnet_ioctl; #if __FreeBSD_version >= 1100000 ifp->if_get_counter = ptnet_get_counter; #endif ifp->if_transmit = ptnet_transmit; ifp->if_qflush = ptnet_qflush; ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, ptnet_media_status); ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); sc->hwaddr[0] = (macreg >> 8) & 0xff; sc->hwaddr[1] = macreg & 0xff; macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); sc->hwaddr[2] = (macreg >> 24) & 0xff; sc->hwaddr[3] = (macreg >> 16) & 0xff; sc->hwaddr[4] = (macreg >> 8) & 0xff; sc->hwaddr[5] = macreg & 0xff; ether_ifattach(ifp, sc->hwaddr); ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { /* Similarly to what the vtnet driver does, we can emulate * VLAN offloadings by inserting and removing the 802.1Q * header during transmit and receive. We are then able * to do checksum offloading of VLAN frames. */ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWTAGGING; } ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING /* Don't enable polling by default. */ ifp->if_capabilities |= IFCAP_POLLING; #endif snprintf(sc->lock_name, sizeof(sc->lock_name), "%s", device_get_nameunit(dev)); mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); callout_init_mtx(&sc->tick, &sc->lock, 0); /* Prepare a netmap_adapter struct instance to do netmap_attach(). 
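The station address above is assembled from two 32-bit registers: the high register carries the first two bytes in its low 16 bits, the low register the remaining four. As a small standalone helper (illustrative name), mirroring the unpacking shown below:

#include <stdint.h>

static void
mac_from_regs(uint32_t mac_hi, uint32_t mac_lo, uint8_t addr[6])
{
	addr[0] = (mac_hi >> 8) & 0xff;
	addr[1] = mac_hi & 0xff;
	addr[2] = (mac_lo >> 24) & 0xff;
	addr[3] = (mac_lo >> 16) & 0xff;
	addr[4] = (mac_lo >> 8) & 0xff;
	addr[5] = mac_lo & 0xff;
}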
*/ nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); memset(&na_arg, 0, sizeof(na_arg)); na_arg.ifp = ifp; na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); na_arg.num_tx_rings = num_tx_rings; na_arg.num_rx_rings = num_rx_rings; na_arg.nm_config = ptnet_nm_config; na_arg.nm_krings_create = ptnet_nm_krings_create; na_arg.nm_krings_delete = ptnet_nm_krings_delete; na_arg.nm_dtor = ptnet_nm_dtor; na_arg.nm_intr = ptnet_nm_intr; na_arg.nm_register = ptnet_nm_register; na_arg.nm_txsync = ptnet_nm_txsync; na_arg.nm_rxsync = ptnet_nm_rxsync; netmap_pt_guest_attach(&na_arg, nifp_offset, bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); /* Now a netmap adapter for this ifp has been allocated, and it * can be accessed through NA(ifp). We also have to initialize the CSB * pointer. */ sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); /* If virtio-net header was negotiated, set the virt_hdr_len field in * the netmap adapter, to inform users that this netmap adapter requires * the application to deal with the headers. */ ptnet_update_vnet_hdr(sc); device_printf(dev, "%s() completed\n", __func__); return (0); err_path: ptnet_detach(dev); return err; } static int ptnet_detach(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); int i; #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) { ether_poll_deregister(sc->ifp); } #endif callout_drain(&sc->tick); if (sc->queues) { /* Drain taskqueues before calling if_detach. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } } if (sc->ifp) { ether_ifdetach(sc->ifp); /* Uninitialize netmap adapters for this device. */ netmap_detach(sc->ifp); ifmedia_removeall(&sc->media); if_free(sc->ifp); sc->ifp = NULL; } ptnet_irqs_fini(sc); if (sc->csb_gh) { bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF); sc->csb_gh = NULL; sc->csb_hg = NULL; } if (sc->queues) { for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (mtx_initialized(&pq->lock)) { mtx_destroy(&pq->lock); } if (pq->bufring != NULL) { buf_ring_free(pq->bufring, M_DEVBUF); } } free(sc->queues, M_DEVBUF); sc->queues = NULL; } if (sc->iomem) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); sc->iomem = NULL; } mtx_destroy(&sc->lock); device_printf(dev, "%s() completed\n", __func__); return (0); } static int ptnet_suspend(device_t dev) { struct ptnet_softc *sc; sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_resume(device_t dev) { struct ptnet_softc *sc; sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_shutdown(device_t dev) { /* * Suspend already does all of what we need to * do here; we just never expect to be resumed. 
*/ return (ptnet_suspend(dev)); } static int ptnet_irqs_init(struct ptnet_softc *sc) { int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); int nvecs = sc->num_rings; device_t dev = sc->dev; int err = ENOSPC; int cpu_cur; int i; if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { device_printf(dev, "Could not find MSI-X capability\n"); return (ENXIO); } sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->msix_mem == NULL) { device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); return (ENXIO); } if (pci_msix_count(dev) < nvecs) { device_printf(dev, "Not enough MSI-X vectors\n"); goto err_path; } err = pci_alloc_msix(dev, &nvecs); if (err) { device_printf(dev, "Failed to allocate MSI-X vectors\n"); goto err_path; } for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; rid = i + 1; pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (pq->irq == NULL) { device_printf(dev, "Failed to allocate interrupt " "for queue #%d\n", i); err = ENOSPC; goto err_path; } } cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; void (*handler)(void *) = ptnet_tx_intr; if (i >= sc->num_tx_rings) { handler = ptnet_rx_intr; } err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL /* intr_filter */, handler, pq, &pq->cookie); if (err) { device_printf(dev, "Failed to register intr handler " "for queue #%d\n", i); goto err_path; } bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); #if 0 bus_bind_intr(sc->dev, pq->irq, cpu_cur); #endif cpu_cur = CPU_NEXT(cpu_cur); } device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; static void (*handler)(void *context, int pending); handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task; TASK_INIT(&pq->task, 0, handler, pq); pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, taskqueue_thread_enqueue, &pq->taskq); taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", device_get_nameunit(sc->dev), cpu_cur); cpu_cur = CPU_NEXT(cpu_cur); } return 0; err_path: ptnet_irqs_fini(sc); return err; } static void ptnet_irqs_fini(struct ptnet_softc *sc) { device_t dev = sc->dev; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_free(pq->taskq); pq->taskq = NULL; } if (pq->cookie) { bus_teardown_intr(dev, pq->irq, pq->cookie); pq->cookie = NULL; } if (pq->irq) { bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); pq->irq = NULL; } } if (sc->msix_mem) { pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), sc->msix_mem); sc->msix_mem = NULL; } } static void ptnet_init(void *opaque) { struct ptnet_softc *sc = opaque; PTNET_CORE_LOCK(sc); ptnet_init_locked(sc); PTNET_CORE_UNLOCK(sc); } static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct ptnet_softc *sc = if_getsoftc(ifp); device_t dev = sc->dev; struct ifreq *ifr = (struct ifreq *)data; int mask, err = 0; switch (cmd) { case SIOCSIFFLAGS: device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags); PTNET_CORE_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* Network stack wants the iff to be up. */ err = ptnet_init_locked(sc); } else { /* Network stack wants the iff to be down. */ err = ptnet_stop(sc); } /* We don't need to do nothing to support IFF_PROMISC, * since that is managed by the backend port. 
*/ PTNET_CORE_UNLOCK(sc); break; case SIOCSIFCAP: device_printf(dev, "SIOCSIFCAP %x %x\n", ifr->ifr_reqcap, ifp->if_capenable); mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { struct ptnet_queue *pq; int i; if (ifr->ifr_reqcap & IFCAP_POLLING) { err = ether_poll_register(ptnet_poll, ifp); if (err) { break; } /* Stop queues and sync with taskqueues. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; /* Make sure the worker sees the * IFF_DRV_RUNNING down. */ PTNET_Q_LOCK(pq); pq->ptgh->guest_need_kick = 0; PTNET_Q_UNLOCK(pq); /* Wait for rescheduling to finish. */ if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } ifp->if_drv_flags |= IFF_DRV_RUNNING; } else { err = ether_poll_deregister(ifp); for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; PTNET_Q_LOCK(pq); pq->ptgh->guest_need_kick = 1; PTNET_Q_UNLOCK(pq); } } } #endif /* DEVICE_POLLING */ ifp->if_capenable = ifr->ifr_reqcap; break; case SIOCSIFMTU: /* We support any reasonable MTU. */ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { err = EINVAL; } else { PTNET_CORE_LOCK(sc); ifp->if_mtu = ifr->ifr_mtu; PTNET_CORE_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); break; default: err = ether_ioctl(ifp, cmd, data); break; } return err; } static int ptnet_init_locked(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; unsigned int nm_buf_size; int ret; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { return 0; /* nothing to do */ } device_printf(sc->dev, "%s\n", __func__); /* Translate offload capabilities according to if_capenable. */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= PTNET_CSUM_OFFLOAD; if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_IP_TSO; if (ifp->if_capenable & IFCAP_TSO6) ifp->if_hwassist |= CSUM_IP6_TSO; /* * Prepare the interface for netmap mode access. */ netmap_update_config(na_dr); ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_finalize() failed\n"); return ret; } if (sc->ptna->backend_regifs == 0) { ret = ptnet_nm_krings_create(na_nm); if (ret) { device_printf(sc->dev, "ptnet_nm_krings_create() " "failed\n"); goto err_mem_finalize; } ret = netmap_mem_rings_create(na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_rings_create() " "failed\n"); goto err_rings_create; } ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); if (ret) { device_printf(sc->dev, "netmap_mem_get_lut() " "failed\n"); goto err_get_lut; } } ret = ptnet_nm_register(na_dr, 1 /* on */); if (ret) { goto err_register; } nm_buf_size = NETMAP_BUF_SIZE(na_dr); KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, sc->min_tx_space); #ifdef PTNETMAP_STATS callout_reset(&sc->tick, hz, ptnet_tick, sc); #endif ifp->if_drv_flags |= IFF_DRV_RUNNING; return 0; err_register: memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); err_get_lut: netmap_mem_rings_delete(na_dr); err_rings_create: ptnet_nm_krings_delete(na_nm); err_mem_finalize: netmap_mem_deref(na_dr->nm_mem, na_dr); return ret; } /* To be called under core lock. 
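min_tx_space, computed above, is the number of free netmap slots the driver wants to see before queuing another frame: a maximum-size packet split across nm_buf_size-sized slots, plus a little slack (the + 2 presumably covers the division remainder and the virtio-net header). As arithmetic:

#include <stdint.h>

static unsigned int
min_tx_slots(unsigned int max_pkt_size, unsigned int nm_buf_size)
{
	return (max_pkt_size / nm_buf_size + 2);
}

/* e.g. 65536-byte maximum packet, 2048-byte netmap buffers -> 34 slots */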
*/ static int ptnet_stop(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; int i; device_printf(sc->dev, "%s\n", __func__); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { return 0; /* nothing to do */ } /* Clear the driver-ready flag, and synchronize with all the queues, * so that after this loop we are sure nobody is working anymore with * the device. This scheme is taken from the vtnet driver. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; callout_stop(&sc->tick); for (i = 0; i < sc->num_rings; i++) { PTNET_Q_LOCK(sc->queues + i); PTNET_Q_UNLOCK(sc->queues + i); } ptnet_nm_register(na_dr, 0 /* off */); if (sc->ptna->backend_regifs == 0) { netmap_mem_rings_delete(na_dr); ptnet_nm_krings_delete(na_nm); } netmap_mem_deref(na_dr->nm_mem, na_dr); return 0; } static void ptnet_qflush(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); int i; /* Flush all the bufrings and do the interface flush. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct mbuf *m; PTNET_Q_LOCK(pq); if (pq->bufring) { while ((m = buf_ring_dequeue_sc(pq->bufring))) { m_freem(m); } } PTNET_Q_UNLOCK(pq); } if_qflush(ifp); } static int ptnet_media_change(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ifmedia *ifm = &sc->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { return EINVAL; } return 0; } #if __FreeBSD_version >= 1100000 static uint64_t ptnet_get_counter(if_t ifp, ift_counter cnt) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue_stats stats[2]; int i; /* Accumulate statistics over the queues. */ memset(stats, 0, sizeof(stats)); for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; int idx = (i < sc->num_tx_rings) ? 0 : 1; stats[idx].packets += pq->stats.packets; stats[idx].bytes += pq->stats.bytes; stats[idx].errors += pq->stats.errors; stats[idx].iqdrops += pq->stats.iqdrops; stats[idx].mcasts += pq->stats.mcasts; } switch (cnt) { case IFCOUNTER_IPACKETS: return (stats[1].packets); case IFCOUNTER_IQDROPS: return (stats[1].iqdrops); case IFCOUNTER_IERRORS: return (stats[1].errors); case IFCOUNTER_OPACKETS: return (stats[0].packets); case IFCOUNTER_OBYTES: return (stats[0].bytes); case IFCOUNTER_OMCASTS: return (stats[0].mcasts); default: return (if_get_counter_default(ifp, cnt)); } } #endif #ifdef PTNETMAP_STATS /* Called under core lock. */ static void ptnet_tick(void *opaque) { struct ptnet_softc *sc = opaque; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct ptnet_queue_stats cur = pq->stats; struct timeval now; unsigned int delta; microtime(&now); delta = now.tv_usec - sc->last_ts.tv_usec + (now.tv_sec - sc->last_ts.tv_sec) * 1000000; delta /= 1000; /* in milliseconds */ if (delta == 0) continue; device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " "intr %lu\n", i, delta, (cur.packets - pq->last_stats.packets), (cur.kicks - pq->last_stats.kicks), (cur.intrs - pq->last_stats.intrs)); pq->last_stats = cur; } microtime(&sc->last_ts); callout_schedule(&sc->tick, hz); } #endif /* PTNETMAP_STATS */ static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) { /* We are always active, as the backend netmap port is * always open in netmap mode. 
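ptnet_get_counter() above folds the per-queue counters into two buckets, transmit and receive, keyed on whether the queue index is below num_tx_rings. The core of that accumulation, with illustrative types:

#include <stdint.h>
#include <stddef.h>

struct q_stats {
	uint64_t packets;
	uint64_t bytes;
};

/* out[0] collects TX queues (index < num_tx), out[1] the RX queues. */
static void
accumulate_stats(const struct q_stats *qs, size_t nqueues, size_t num_tx,
    struct q_stats out[2])
{
	out[0] = (struct q_stats){ 0, 0 };
	out[1] = (struct q_stats){ 0, 0 };
	for (size_t i = 0; i < nqueues; i++) {
		size_t idx = (i < num_tx) ? 0 : 1;

		out[idx].packets += qs[i].packets;
		out[idx].bytes += qs[i].bytes;
	}
}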
*/ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; } static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd) { struct ptnet_softc *sc = if_getsoftc(ifp); /* * Write a command and read back error status, * with zero meaning success. */ bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); return bus_read_4(sc->iomem, PTNET_IO_PTCTL); } static int -ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd, - unsigned *rxr, unsigned *rxd) +ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info) { struct ptnet_softc *sc = if_getsoftc(na->ifp); - *txr = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); - *rxr = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); - *txd = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); - *rxd = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); + info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); + info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); + info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); + info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); + info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); - device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u\n", - *txr, *rxr, *txd, *rxd); + device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n", + info->num_tx_rings, info->num_rx_rings, + info->num_tx_descs, info->num_rx_descs, + info->rx_buf_maxsize); return 0; } static void ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) { int i; /* Sync krings from the host, reading from * CSB. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_csb_gh *ptgh = sc->queues[i].ptgh; struct ptnet_csb_hg *pthg = sc->queues[i].pthg; struct netmap_kring *kring; if (i < na->num_tx_rings) { - kring = na->tx_rings + i; + kring = na->tx_rings[i]; } else { - kring = na->rx_rings + i - na->num_tx_rings; + kring = na->rx_rings[i - na->num_tx_rings]; } kring->rhead = kring->ring->head = ptgh->head; kring->rcur = kring->ring->cur = ptgh->cur; kring->nr_hwcur = pthg->hwcur; kring->nr_hwtail = kring->rtail = kring->ring->tail = pthg->hwtail; ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, pthg->hwcur, ptgh->head, ptgh->cur, pthg->hwtail); ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", t, i, kring->nr_hwcur, kring->rhead, kring->rcur, kring->ring->head, kring->ring->cur, kring->nr_hwtail, kring->rtail, kring->ring->tail); } } static void ptnet_update_vnet_hdr(struct ptnet_softc *sc) { unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; } static int ptnet_nm_register(struct netmap_adapter *na, int onoff) { /* device-specific */ if_t ifp = na->ifp; struct ptnet_softc *sc = if_getsoftc(ifp); int native = (na == &sc->ptna->hwup.up); struct ptnet_queue *pq; enum txrx t; int ret = 0; int i; if (!onoff) { sc->ptna->backend_regifs--; } /* If this is the last netmap client, guest interrupt enable flags may * be in arbitrary state. Since these flags are going to be used also * by the netdevice driver, we have to make sure to start with * notifications enabled. Also, schedule NAPI to flush pending packets * in the RX rings, since we will not receive further interrupts * until these will be processed. 
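 *
 * For reference, the guest-to-host half of this notification-suppression
 * protocol is the pattern used by ptnet_ring_update() further below:
 * after publishing the new head/cur to the CSB, the guest only writes
 * the doorbell register when the host has asked for it:
 *
 *     ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead);
 *     if (NM_ACCESS_ONCE(pthg->host_need_kick)) {
 *             ptgh->sync_flags = sync_flags;
 *             ptnet_kick(pq);
 *     }
 *
 * Symmetrically, guest_need_kick tells the host whether to raise an
 * interrupt towards the guest, which is why it is turned back on here
 * when the last netmap client detaches.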
*/ if (native && !onoff && na->active_fds == 0) { D("Exit netmap mode, re-enable interrupts"); for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->ptgh->guest_need_kick = 1; } } if (onoff) { if (sc->ptna->backend_regifs == 0) { /* Initialize notification enable fields in the CSB. */ for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->pthg->host_need_kick = 1; pq->ptgh->guest_need_kick = (!(ifp->if_capenable & IFCAP_POLLING) && i >= sc->num_tx_rings); } /* Set the virtio-net header length. */ ptnet_update_vnet_hdr(sc); /* Make sure the host adapter passed through is ready * for txsync/rxsync. */ ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_CREATE); if (ret) { return ret; } } /* Sync from CSB must be done after REGIF PTCTL. Skip this * step only if this is a netmap client and it is not the * first one. */ if ((!native && sc->ptna->backend_regifs == 0) || (native && na->active_fds == 0)) { ptnet_sync_from_csb(sc, na); } /* If not native, don't call nm_set_native_flags, since we don't want * to replace if_transmit method, nor set NAF_NETMAP_ON */ if (native) { for_rx_tx(t) { for (i = 0; i <= nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; if (nm_kring_pending_on(kring)) { kring->nr_mode = NKR_NETMAP_ON; } } } nm_set_native_flags(na); } } else { if (native) { nm_clear_native_flags(na); for_rx_tx(t) { for (i = 0; i <= nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; if (nm_kring_pending_off(kring)) { kring->nr_mode = NKR_NETMAP_OFF; } } } } /* Sync from CSB must be done before UNREGIF PTCTL, on the last * netmap client. */ if (native && na->active_fds == 0) { ptnet_sync_from_csb(sc, na); } if (sc->ptna->backend_regifs == 0) { ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_DELETE); } } if (onoff) { sc->ptna->backend_regifs++; } return ret; } static int ptnet_nm_txsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->queues + kring->ring_id; bool notify; notify = netmap_pt_guest_txsync(pq->ptgh, pq->pthg, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; bool notify; notify = netmap_pt_guest_rxsync(pq->ptgh, pq->pthg, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static void ptnet_nm_intr(struct netmap_adapter *na, int onoff) { struct ptnet_softc *sc = if_getsoftc(na->ifp); int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; pq->ptgh->guest_need_kick = onoff; } } static void ptnet_tx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) { return; } /* Schedule the tasqueue to flush process transmissions requests. * However, vtnet, if_em and if_igb just call ptnet_transmit() here, * at least when using MSI-X interrupts. The if_em driver, instead * schedule taskqueue when using legacy interrupts. 
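 *
 * (The deferral below is also what ptnet_tx_task() ends up doing; the
 * direct-call alternative mentioned in this comment would simply be
 *
 *     ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
 *
 * invoked straight from the interrupt handler, which is workable here
 * because that function only try-locks the queue and falls back to
 * rescheduling the taskqueue on contention.)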
*/ taskqueue_enqueue(pq->taskq, &pq->task); } static void ptnet_rx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; unsigned int unused; DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { return; } /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, * receive-side processing is executed directly in the interrupt * service routine. Alternatively, we may schedule the taskqueue. */ ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } /* The following offloadings-related functions are taken from the vtnet * driver, but the same functionality is required for the ptnet driver. * As a temporary solution, I copied this code from vtnet and I started * to generalize it (taking away driver-specific statistic accounting), * making as little modifications as possible. * In the future we need to share these functions between vtnet and ptnet. */ static int ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start) { struct ether_vlan_header *evh; int offset; evh = mtod(m, struct ether_vlan_header *); if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { /* BMV: We should handle nested VLAN tags too. */ *etype = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else { *etype = ntohs(evh->evl_encap_proto); offset = sizeof(struct ether_header); } switch (*etype) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip, iphdr; if (__predict_false(m->m_len < offset + sizeof(struct ip))) { m_copydata(m, offset, sizeof(struct ip), (caddr_t) &iphdr); ip = &iphdr; } else ip = (struct ip *)(m->m_data + offset); *proto = ip->ip_p; *start = offset + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: *proto = -1; *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); /* Assert the network stack sent us a valid packet. */ KASSERT(*start > offset, ("%s: mbuf %p start %d offset %d proto %d", __func__, m, *start, offset, *proto)); break; #endif default: /* Here we should increment the tx_csum_bad_ethtype counter. */ return (EINVAL); } return (0); } static int ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type, int offset, bool allow_ecn, struct virtio_net_hdr *hdr) { static struct timeval lastecn; static int curecn; struct tcphdr *tcp, tcphdr; if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); tcp = &tcphdr; } else tcp = (struct tcphdr *)(m->m_data + offset); hdr->hdr_len = offset + (tcp->th_off << 2); hdr->gso_size = m->m_pkthdr.tso_segsz; hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6; if (tcp->th_flags & TH_CWR) { /* * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, * ECN support is not on a per-interface basis, but globally via * the net.inet.tcp.ecn.enable sysctl knob. The default is off. */ if (!allow_ecn) { if (ppsratecheck(&lastecn, &curecn, 1)) if_printf(ifp, "TSO with ECN not negotiated with host\n"); return (ENOTSUP); } hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; } /* Here we should increment tx_tso counter. 
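 *
 * (Worked example of the header filled in above for an untagged IPv4
 * TCP segment with 20-byte IP and TCP headers, illustrative values only:
 *     csum_start = 14 + 20 = 34    Ethernet + IP header
 *     hdr_len    = 34 + 20 = 54    up to and including the TCP header
 *     gso_size   = m->m_pkthdr.tso_segsz
 *     gso_type   = VIRTIO_NET_HDR_GSO_TCPV4
 * csum_start and csum_offset themselves are set by the caller,
 * ptnet_tx_offload() below.)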
*/ return (0); } static struct mbuf * ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn, struct virtio_net_hdr *hdr) { int flags, etype, csum_start, proto, error; flags = m->m_pkthdr.csum_flags; error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start); if (error) goto drop; if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) || (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) { /* * We could compare the IP protocol vs the CSUM_ flag too, * but that really should not be necessary. */ hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->csum_start = csum_start; hdr->csum_offset = m->m_pkthdr.csum_data; /* Here we should increment the tx_csum counter. */ } if (flags & CSUM_TSO) { if (__predict_false(proto != IPPROTO_TCP)) { /* Likely failed to correctly parse the mbuf. * Here we should increment the tx_tso_not_tcp * counter. */ goto drop; } KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, ("%s: mbuf %p TSO without checksum offload %#x", __func__, m, flags)); error = ptnet_tx_offload_tso(ifp, m, etype, csum_start, allow_ecn, hdr); if (error) goto drop; } return (m); drop: m_freem(m); return (NULL); } static void ptnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evh; evh = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } /* * Use the checksum offset in the VirtIO header to set the * correct CSUM_* flags. */ static int ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { #if defined(INET) || defined(INET6) int offset = hdr->csum_start + hdr->csum_offset; #endif /* Only do a basic sanity check on the offset. */ switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: if (__predict_false(offset < ip_start + sizeof(struct ip))) return (1); break; #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) return (1); break; #endif default: /* Here we should increment the rx_csum_bad_ethtype counter. */ return (1); } /* * Use the offset to determine the appropriate CSUM_* flags. This is * a bit dirty, but we can get by with it since the checksum offsets * happen to be different. We assume the host host does not do IPv4 * header checksum offloading. */ switch (hdr->csum_offset) { case offsetof(struct udphdr, uh_sum): case offsetof(struct tcphdr, th_sum): m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case offsetof(struct sctphdr, checksum): m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: /* Here we should increment the rx_csum_bad_offset counter. */ return (1); } return (0); } static int ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { int offset, proto; switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip; if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) return (1); ip = (struct ip *)(m->m_data + ip_start); proto = ip->ip_p; offset = ip_start + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(m->m_len < ip_start + sizeof(struct ip6_hdr))) return (1); offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); if (__predict_false(offset < 0)) return (1); break; #endif default: /* Here we should increment the rx_csum_bad_ethtype counter. 
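 *
 * (The offset trick used by ptnet_rx_csum_by_offset() above works
 * because the three checksum fields live at distinct offsets within
 * their respective L4 headers:
 *     offsetof(struct udphdr, uh_sum)    == 6
 *     offsetof(struct tcphdr, th_sum)    == 16
 *     offsetof(struct sctphdr, checksum) == 8
 * so csum_offset alone is enough to tell UDP, TCP and SCTP apart.)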
*/ return (1); } switch (proto) { case IPPROTO_TCP: if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_UDP: if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_SCTP: if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: /* * For the remaining protocols, FreeBSD does not support * checksum offloading, so the checksum will be recomputed. */ #if 0 if_printf(ifp, "cksum offload of unsupported " "protocol eth_type=%#x proto=%d csum_start=%d " "csum_offset=%d\n", __func__, eth_type, proto, hdr->csum_start, hdr->csum_offset); #endif break; } return (0); } /* * Set the appropriate CSUM_* flags. Unfortunately, the information * provided is not directly useful to us. The VirtIO header gives the * offset of the checksum, which is all Linux needs, but this is not * how FreeBSD does things. We are forced to peek inside the packet * a bit. * * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD * could accept the offsets and let the stack figure it out. */ static int ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr) { struct ether_header *eh; struct ether_vlan_header *evh; uint16_t eth_type; int offset, error; eh = mtod(m, struct ether_header *); eth_type = ntohs(eh->ether_type); if (eth_type == ETHERTYPE_VLAN) { /* BMV: We should handle nested VLAN tags too. */ evh = mtod(m, struct ether_vlan_header *); eth_type = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else offset = sizeof(struct ether_header); if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr); else error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr); return (error); } /* End of offloading-related functions to be shared with vtnet. */ static inline void ptnet_sync_tail(struct ptnet_csb_hg *pthg, struct netmap_kring *kring) { struct netmap_ring *ring = kring->ring; /* Update hwcur and hwtail as known by the host. */ ptnetmap_guest_read_kring_csb(pthg, kring); /* nm_sync_finalize */ ring->tail = kring->rtail = kring->nr_hwtail; } static void ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, unsigned int head, unsigned int sync_flags) { struct netmap_ring *ring = kring->ring; struct ptnet_csb_gh *ptgh = pq->ptgh; struct ptnet_csb_hg *pthg = pq->pthg; /* Some packets have been pushed to the netmap ring. We have * to tell the host to process the new packets, updating cur * and head in the CSB. */ ring->head = ring->cur = head; /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ kring->rcur = kring->rhead = head; ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead); /* Kick the host if needed. */ if (NM_ACCESS_ONCE(pthg->host_need_kick)) { ptgh->sync_flags = sync_flags; ptnet_kick(pq); } } #define PTNET_TX_NOSPACE(_h, _k, _min) \ ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ (_k)->rtail - (_h)) < (_min) /* This function may be called by the network stack, or by * by the taskqueue thread. 
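 *
 * (Worked example of the PTNET_TX_NOSPACE() test defined above: with
 * nkr_num_slots == 256, head == 250 and rtail == 10 the ring still has
 * 256 + 10 - 250 == 16 free slots; with a min_tx_space of, say, 34 that
 * counts as "no space", so the transmit loop below first re-reads
 * hwtail from the CSB via ptnet_sync_tail() before giving up and
 * re-enabling notifications.)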
*/ static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct netmap_adapter *na = &sc->ptna->dr.up; if_t ifp = sc->ifp; unsigned int batch_count = 0; struct ptnet_csb_gh *ptgh; struct ptnet_csb_hg *pthg; struct netmap_kring *kring; struct netmap_ring *ring; struct netmap_slot *slot; unsigned int count = 0; unsigned int minspace; unsigned int head; unsigned int lim; struct mbuf *mhead; struct mbuf *mf; int nmbuf_bytes; uint8_t *nmbuf; if (!PTNET_Q_TRYLOCK(pq)) { /* We failed to acquire the lock, schedule the taskqueue. */ RD(1, "Deferring TX work"); if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } return 0; } if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { PTNET_Q_UNLOCK(pq); RD(1, "Interface is down"); return ENETDOWN; } ptgh = pq->ptgh; pthg = pq->pthg; - kring = na->tx_rings + pq->kring_id; + kring = na->tx_rings[pq->kring_id]; ring = kring->ring; lim = kring->nkr_num_slots - 1; head = ring->head; minspace = sc->min_tx_space; while (count < budget) { if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* We ran out of slot, let's see if the host has * freed up some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(pthg, kring); if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* Still no slots available. Reactivate the * interrupts so that we can be notified * when some free slots are made available by * the host. */ ptgh->guest_need_kick = 1; /* Double-check. */ ptnet_sync_tail(pthg, kring); if (likely(PTNET_TX_NOSPACE(head, kring, minspace))) { break; } RD(1, "Found more slots by doublecheck"); /* More slots were freed before reactivating * the interrupts. */ ptgh->guest_need_kick = 0; } } mhead = drbr_peek(ifp, pq->bufring); if (!mhead) { break; } /* Initialize transmission state variables. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; /* If needed, prepare the virtio-net header at the beginning * of the first slot. */ if (have_vnet_hdr) { struct virtio_net_hdr *vh = (struct virtio_net_hdr *)nmbuf; /* For performance, we could replace this memset() with * two 8-bytes-wide writes. */ memset(nmbuf, 0, PTNET_HDR_SIZE); if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { mhead = ptnet_tx_offload(ifp, mhead, false, vh); if (unlikely(!mhead)) { /* Packet dropped because errors * occurred while preparing the vnet * header. Let's go ahead with the next * packet. */ pq->stats.errors ++; drbr_advance(ifp, pq->bufring); continue; } } ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x " "csum_start %u csum_ofs %u hdr_len = %u " "gso_size %u gso_type %x", __func__, mhead->m_pkthdr.csum_flags, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_bytes += PTNET_HDR_SIZE; } for (mf = mhead; mf; mf = mf->m_next) { uint8_t *mdata = mf->m_data; int mlen = mf->m_len; for (;;) { int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; if (mlen < copy) { copy = mlen; } memcpy(nmbuf, mdata, copy); mdata += copy; mlen -= copy; nmbuf += copy; nmbuf_bytes += copy; if (!mlen) { break; } slot->len = nmbuf_bytes; slot->flags = NS_MOREFRAG; head = nm_next(head, lim); KASSERT(head != ring->tail, ("Unexpectedly run out of TX space")); slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; } } /* Complete last slot and update head. */ slot->len = nmbuf_bytes; slot->flags = 0; head = nm_next(head, lim); /* Consume the packet just processed. 
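 *
 * (Worked example of the copy loop above, assuming 2048-byte netmap
 * buffers and a 12-byte virtio-net header: a 3000-byte frame occupies
 * two slots, the first carrying 12 + 2036 = 2048 bytes with
 * NS_MOREFRAG set, the second carrying the remaining 964 bytes with
 * flags == 0.)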
*/ drbr_advance(ifp, pq->bufring); /* Copy the packet to listeners. */ ETHER_BPF_MTAP(ifp, mhead); pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; if (mhead->m_flags & M_MCAST) { pq->stats.mcasts ++; } m_freem(mhead); count ++; if (++batch_count == PTNET_TX_BATCH) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); batch_count = 0; } } if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); } if (count >= budget && may_resched) { DBG(RD(1, "out of budget: resched, %d mbufs pending\n", drbr_inuse(ifp, pq->bufring))); taskqueue_enqueue(pq->taskq, &pq->task); } PTNET_Q_UNLOCK(pq); return count; } static int ptnet_transmit(if_t ifp, struct mbuf *m) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue *pq; unsigned int queue_idx; int err; DBG(device_printf(sc->dev, "transmit %p\n", m)); /* Insert 802.1Q header if needed. */ if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if (m == NULL) { return ENOBUFS; } m->m_flags &= ~M_VLANTAG; } /* Get the flow-id if available. */ queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? m->m_pkthdr.flowid : curcpu; if (unlikely(queue_idx >= sc->num_tx_rings)) { queue_idx %= sc->num_tx_rings; } pq = sc->queues + queue_idx; err = drbr_enqueue(ifp, pq->bufring, m); if (err) { /* ENOBUFS when the bufring is full */ RD(1, "%s: drbr_enqueue() failed %d\n", __func__, err); pq->stats.errors ++; return err; } if (ifp->if_capenable & IFCAP_POLLING) { /* If polling is on, the transmit queues will be * drained by the poller. */ return 0; } err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); return (err < 0) ? err : 0; } static unsigned int ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) { struct netmap_ring *ring = kring->ring; struct netmap_slot *slot = ring->slot + head; for (;;) { head = nm_next(head, kring->nkr_num_slots - 1); if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { break; } slot = ring->slot + head; } return head; } static inline struct mbuf * ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) { uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; do { unsigned int copy; if (mtail->m_len == MCLBYTES) { struct mbuf *mf; mf = m_getcl(M_NOWAIT, MT_DATA, 0); if (unlikely(!mf)) { return NULL; } mtail->m_next = mf; mtail = mf; mdata = mtod(mtail, uint8_t *); mtail->m_len = 0; } copy = MCLBYTES - mtail->m_len; if (nmbuf_len < copy) { copy = nmbuf_len; } memcpy(mdata, nmbuf, copy); nmbuf += copy; nmbuf_len -= copy; mdata += copy; mtail->m_len += copy; } while (nmbuf_len); return mtail; } static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct ptnet_csb_gh *ptgh = pq->ptgh; struct ptnet_csb_hg *pthg = pq->pthg; struct netmap_adapter *na = &sc->ptna->dr.up; - struct netmap_kring *kring = na->rx_rings + pq->kring_id; + struct netmap_kring *kring = na->rx_rings[pq->kring_id]; struct netmap_ring *ring = kring->ring; unsigned int const lim = kring->nkr_num_slots - 1; unsigned int batch_count = 0; if_t ifp = sc->ifp; unsigned int count = 0; uint32_t head; PTNET_Q_LOCK(pq); if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { goto unlock; } kring->nr_kflags &= ~NKR_PENDINTR; head = ring->head; while (count < budget) { uint32_t prev_head = head; struct mbuf *mhead, *mtail; struct virtio_net_hdr *vh; struct netmap_slot *slot; unsigned int nmbuf_len; uint8_t *nmbuf; int deliver = 1; /* the mbuf to the network stack. 
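 *
 * (Queue selection in ptnet_transmit() above: the mbuf flow id is used
 * when present, otherwise the current CPU id, and out-of-range values
 * are reduced modulo num_tx_rings; e.g. a flow id of 7 on an adapter
 * with 4 TX rings maps to queue 3.)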
*/ host_sync: if (head == ring->tail) { /* We ran out of slot, let's see if the host has * added some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(pthg, kring); if (head == ring->tail) { /* Still no slots available. Reactivate * interrupts as they were disabled by the * host thread right before issuing the * last interrupt. */ ptgh->guest_need_kick = 1; /* Double-check. */ ptnet_sync_tail(pthg, kring); if (likely(head == ring->tail)) { break; } ptgh->guest_need_kick = 0; } } /* Initialize ring state variables, possibly grabbing the * virtio-net header. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; vh = (struct virtio_net_hdr *)nmbuf; if (have_vnet_hdr) { if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { /* There is no good reason why host should * put the header in multiple netmap slots. * If this is the case, discard. */ RD(1, "Fragmented vnet-hdr: dropping"); head = ptnet_rx_discard(kring, head); pq->stats.iqdrops ++; deliver = 0; goto skip; } ND(1, "%s: vnet hdr: flags %x csum_start %u " "csum_ofs %u hdr_len = %u gso_size %u " "gso_type %x", __func__, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_len -= PTNET_HDR_SIZE; } /* Allocate the head of a new mbuf chain. * We use m_getcl() to allocate an mbuf with standard cluster * size (MCLBYTES). In the future we could use m_getjcl() * to choose different sizes. */ mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (unlikely(mhead == NULL)) { device_printf(sc->dev, "%s: failed to allocate mbuf " "head\n", __func__); pq->stats.errors ++; break; } /* Initialize the mbuf state variables. */ mhead->m_pkthdr.len = nmbuf_len; mtail->m_len = 0; /* Scan all the netmap slots containing the current packet. */ for (;;) { DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag " "len %u, flags %u\n", __func__, head, ring->tail, slot->len, slot->flags)); mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len); if (unlikely(!mtail)) { /* Ouch. We ran out of memory while processing * a packet. We have to restore the previous * head position, free the mbuf chain, and * schedule the taskqueue to give the packet * another chance. */ device_printf(sc->dev, "%s: failed to allocate" " mbuf frag, reset head %u --> %u\n", __func__, head, prev_head); head = prev_head; m_freem(mhead); pq->stats.errors ++; if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } goto escape; } /* We have to increment head irrespective of the * NS_MOREFRAG being set or not. */ head = nm_next(head, lim); if (!(slot->flags & NS_MOREFRAG)) { break; } if (unlikely(head == ring->tail)) { /* The very last slot prepared by the host has * the NS_MOREFRAG set. Drop it and continue * the outer cycle (to do the double-check). */ RD(1, "Incomplete packet: dropping"); m_freem(mhead); pq->stats.iqdrops ++; goto host_sync; } slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; mhead->m_pkthdr.len += nmbuf_len; } mhead->m_pkthdr.rcvif = ifp; mhead->m_pkthdr.csum_flags = 0; /* Store the queue idx in the packet header. */ mhead->m_pkthdr.flowid = pq->kring_id; M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE); if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { struct ether_header *eh; eh = mtod(mhead, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ptnet_vlan_tag_remove(mhead); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. 
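 *
 * (ETHER_VLAN_ENCAP_LEN is the 4-byte 802.1Q tag, so for an IPv4/TCP
 * frame csum_start drops from 18 + 20 = 38 back to 14 + 20 = 34 once
 * the tag has been stripped by ptnet_vlan_tag_remove() above.)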
*/ if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) vh->csum_start -= ETHER_VLAN_ENCAP_LEN; } } if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID))) { if (unlikely(ptnet_rx_csum(mhead, vh))) { m_freem(mhead); RD(1, "Csum offload error: dropping"); pq->stats.iqdrops ++; deliver = 0; } } skip: count ++; if (++batch_count >= PTNET_RX_BATCH) { /* Some packets have been (or will be) pushed to the network * stack. We need to update the CSB to tell the host about * the new ring->cur and ring->head (RX buffer refill). */ ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); batch_count = 0; } if (likely(deliver)) { pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; PTNET_Q_UNLOCK(pq); (*ifp->if_input)(ifp, mhead); PTNET_Q_LOCK(pq); /* The ring->head index (and related indices) are * updated under pq lock by ptnet_ring_update(). * Since we dropped the lock to call if_input(), we * must reload ring->head and restart processing the * ring from there. */ head = ring->head; if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { /* The interface has gone down while we didn't * have the lock. Stop any processing and exit. */ goto unlock; } } } escape: if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); } if (count >= budget && may_resched) { /* If we ran out of budget or the double-check found new * slots to process, schedule the taskqueue. */ DBG(RD(1, "out of budget: resched h %u t %u\n", head, ring->tail)); taskqueue_enqueue(pq->taskq, &pq->task); } unlock: PTNET_Q_UNLOCK(pq); return count; } static void ptnet_rx_task(void *context, int pending) { struct ptnet_queue *pq = context; DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } static void ptnet_tx_task(void *context, int pending) { struct ptnet_queue *pq = context; DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); } #ifdef DEVICE_POLLING /* We don't need to handle differently POLL_AND_CHECK_STATUS and * POLL_ONLY, since we don't have an Interrupt Status Register. */ static int ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget) { struct ptnet_softc *sc = if_getsoftc(ifp); unsigned int queue_budget; unsigned int count = 0; bool borrow = false; int i; KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet")); queue_budget = MAX(budget / sc->num_rings, 1); RD(1, "Per-queue budget is %d", queue_budget); while (budget) { unsigned int rcnt = 0; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (borrow) { queue_budget = MIN(queue_budget, budget); if (queue_budget == 0) { break; } } if (i < sc->num_tx_rings) { rcnt += ptnet_drain_transmit_queue(pq, queue_budget, false); } else { rcnt += ptnet_rx_eof(pq, queue_budget, false); } } if (!rcnt) { /* A scan of the queues gave no result, we can * stop here. */ break; } if (rcnt > budget) { /* This may happen when initial budget < sc->num_rings, * since one packet budget is given to each queue * anyway. Just pretend we didn't eat "so much". */ rcnt = budget; } count += rcnt; budget -= rcnt; borrow = true; } return count; } #endif /* DEVICE_POLLING */ Index: head/sys/dev/netmap/if_re_netmap.h =================================================================== --- head/sys/dev/netmap/if_re_netmap.h (revision 332422) +++ head/sys/dev/netmap/if_re_netmap.h (revision 332423) @@ -1,375 +1,375 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2011-2014 Luigi Rizzo. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * netmap support for: re * * For more details on netmap support please see ixgbe_netmap.h */ #include #include #include #include /* vtophys ? */ #include /* * Register/unregister. We are already under netmap lock. */ static int re_netmap_reg(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; struct rl_softc *adapter = ifp->if_softc; RL_LOCK(adapter); re_stop(adapter); /* also clears IFF_DRV_RUNNING */ if (onoff) { nm_set_native_flags(na); } else { nm_clear_native_flags(na); } re_init_locked(adapter); /* also enables intr */ RL_UNLOCK(adapter); return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1); } /* * Reconcile kernel and user view of the transmit ring. */ static int re_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int nm_i; /* index into the netmap ring */ u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; /* device-specific */ struct rl_softc *sc = ifp->if_softc; struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc; bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); // XXX extra postwrite ? /* * First part: process new packets to send. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* we have new packets to send */ nic_i = sc->rl_ldata.rl_tx_prodidx; // XXX or netmap_idx_k2n(kring, nm_i); for (n = 0; nm_i != head; n++) { struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); /* device-specific */ struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[nic_i]; int cmd = slot->len | RL_TDESC_CMD_EOF | RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF ; NM_CHECK_ADDR_LEN(na, addr, len); if (nic_i == lim) /* mark end of ring */ cmd |= RL_TDESC_CMD_EOR; if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); netmap_reload_map(na, sc->rl_ldata.rl_tx_mtag, txd[nic_i].tx_dmamap, addr); } slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); /* Fill the slot in the NIC ring. 
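 *
 * (Example of the command word built above: a full 1514-byte frame
 * sitting in the last slot of the ring is posted as
 *     cmd = 1514 | RL_TDESC_CMD_SOF | RL_TDESC_CMD_EOF |
 *           RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOR;
 * every netmap slot carries a complete frame in this driver, hence SOF
 * and EOF are always set together.)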
*/ desc->rl_cmdstat = htole32(cmd); /* make sure changes to the buffer are synced */ bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd[nic_i].tx_dmamap, BUS_DMASYNC_PREWRITE); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } sc->rl_ldata.rl_tx_prodidx = nic_i; kring->nr_hwcur = head; /* synchronize the NIC ring */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); /* start ? */ CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); } /* * Second part: reclaim buffers for completed transmissions. */ if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) { nic_i = sc->rl_ldata.rl_tx_considx; for (n = 0; nic_i != sc->rl_ldata.rl_tx_prodidx; n++, nic_i = RL_TX_DESC_NXT(sc, nic_i)) { uint32_t cmdstat = le32toh(sc->rl_ldata.rl_tx_list[nic_i].rl_cmdstat); if (cmdstat & RL_TDESC_STAT_OWN) break; } if (n > 0) { sc->rl_ldata.rl_tx_considx = nic_i; sc->rl_ldata.rl_tx_free += n; kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim); } } return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int re_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int nm_i; /* index into the netmap ring */ u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; /* device-specific */ struct rl_softc *sc = ifp->if_softc; struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc; if (head > lim) return netmap_ring_reinit(kring); bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * First part: import newly received packets. * * This device uses all the buffers in the ring, so we need * another termination condition in addition to RL_RDESC_STAT_OWN * cleared (all buffers could have it cleared). The easiest one * is to stop right before nm_hwcur. */ if (netmap_no_pendintr || force_update) { uint32_t stop_i = nm_prev(kring->nr_hwcur, lim); nic_i = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */ nm_i = netmap_idx_n2k(kring, nic_i); while (nm_i != stop_i) { struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[nic_i]; uint32_t rxstat = le32toh(cur_rx->rl_cmdstat); uint32_t total_len; if ((rxstat & RL_RDESC_STAT_OWN) != 0) break; total_len = rxstat & sc->rl_rxlenmask; /* XXX subtract crc */ total_len = (total_len < 4) ? 0 : total_len - 4; ring->slot[nm_i].len = total_len; ring->slot[nm_i].flags = 0; /* sync was in re_newbuf() */ bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd[nic_i].rx_dmamap, BUS_DMASYNC_POSTREAD); // if_inc_counter(sc->rl_ifp, IFCOUNTER_IPACKETS, 1); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } sc->rl_ldata.rl_rx_prodidx = nic_i; kring->nr_hwtail = nm_i; kring->nr_kflags &= ~NKR_PENDINTR; } /* * Second part: skip past packets that userspace has released. 
*/ nm_i = kring->nr_hwcur; if (nm_i != head) { nic_i = netmap_idx_k2n(kring, nm_i); for (n = 0; nm_i != head; n++) { struct netmap_slot *slot = &ring->slot[nm_i]; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[nic_i]; int cmd = NETMAP_BUF_SIZE(na) | RL_RDESC_CMD_OWN; if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ goto ring_reset; if (nic_i == lim) /* mark end of ring */ cmd |= RL_RDESC_CMD_EOR; if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); netmap_reload_map(na, sc->rl_ldata.rl_rx_mtag, rxd[nic_i].rx_dmamap, addr); slot->flags &= ~NS_BUF_CHANGED; } desc->rl_cmdstat = htole32(cmd); bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd[nic_i].rx_dmamap, BUS_DMASYNC_PREREAD); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } kring->nr_hwcur = head; bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } return 0; ring_reset: return netmap_ring_reinit(kring); } /* * Additional routines to init the tx and rx rings. * In other drivers we do that inline in the main code. */ static void re_netmap_tx_init(struct rl_softc *sc) { struct rl_txdesc *txd; struct rl_desc *desc; int i, n; struct netmap_adapter *na = NA(sc->rl_ifp); struct netmap_slot *slot; slot = netmap_reset(na, NR_TX, 0, 0); /* slot is NULL if we are not in native netmap mode */ if (!slot) return; /* in netmap mode, overwrite addresses and maps */ txd = sc->rl_ldata.rl_tx_desc; desc = sc->rl_ldata.rl_tx_list; n = sc->rl_ldata.rl_tx_desc_cnt; /* l points in the netmap ring, i points in the NIC ring */ for (i = 0; i < n; i++) { uint64_t paddr; - int l = netmap_idx_n2k(&na->tx_rings[0], i); + int l = netmap_idx_n2k(na->tx_rings[0], i); void *addr = PNMB(na, slot + l, &paddr); desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); netmap_load_map(na, sc->rl_ldata.rl_tx_mtag, txd[i].tx_dmamap, addr); } } static void re_netmap_rx_init(struct rl_softc *sc) { struct netmap_adapter *na = NA(sc->rl_ifp); struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0); struct rl_desc *desc = sc->rl_ldata.rl_rx_list; uint32_t cmdstat; uint32_t nic_i, max_avail; uint32_t const n = sc->rl_ldata.rl_rx_desc_cnt; if (!slot) return; /* * Do not release the slots owned by userspace, * and also keep one empty. 
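 *
 * (Worked example for the loop below: with 256 RX descriptors and no
 * slots currently held by userspace, max_avail = 256 - 1 - 0 = 255,
 * so descriptors 0..254 are handed to the NIC with RL_RDESC_CMD_OWN
 * set while the last one is left un-owned, keeping the ring from ever
 * appearing completely full.)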
*/ - max_avail = n - 1 - nm_kr_rxspace(&na->rx_rings[0]); + max_avail = n - 1 - nm_kr_rxspace(na->rx_rings[0]); for (nic_i = 0; nic_i < n; nic_i++) { void *addr; uint64_t paddr; - uint32_t nm_i = netmap_idx_n2k(&na->rx_rings[0], nic_i); + uint32_t nm_i = netmap_idx_n2k(na->rx_rings[0], nic_i); addr = PNMB(na, slot + nm_i, &paddr); netmap_reload_map(na, sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[nic_i].rx_dmamap, addr); bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[nic_i].rx_dmamap, BUS_DMASYNC_PREREAD); desc[nic_i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); desc[nic_i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); cmdstat = NETMAP_BUF_SIZE(na); if (nic_i == n - 1) /* mark the end of ring */ cmdstat |= RL_RDESC_CMD_EOR; if (nic_i < max_avail) cmdstat |= RL_RDESC_CMD_OWN; desc[nic_i].rl_cmdstat = htole32(cmdstat); } } static void re_netmap_attach(struct rl_softc *sc) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = sc->rl_ifp; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt; na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt; na.nm_txsync = re_netmap_txsync; na.nm_rxsync = re_netmap_rxsync; na.nm_register = re_netmap_reg; na.num_tx_rings = na.num_rx_rings = 1; netmap_attach(&na); } /* end of file */ Index: head/sys/dev/netmap/if_vtnet_netmap.h =================================================================== --- head/sys/dev/netmap/if_vtnet_netmap.h (revision 332422) +++ head/sys/dev/netmap/if_vtnet_netmap.h (revision 332423) @@ -1,456 +1,432 @@ /* * Copyright (C) 2014 Vincenzo Maffione, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ */ #include #include #include #include /* vtophys ? */ #include #define SOFTC_T vtnet_softc /* Free all the unused buffer in all the RX virtqueues. * This function is called when entering and exiting netmap mode. 
* - buffers queued by the virtio driver return skbuf/mbuf pointer * and need to be freed; * - buffers queued by netmap return the txq/rxq, and do not need work */ static void vtnet_netmap_free_bufs(struct SOFTC_T* sc) { int i, nmb = 0, n = 0, last; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i]; struct virtqueue *vq; struct mbuf *m; struct vtnet_txq *txq = &sc->vtnet_txqs[i]; struct vtnet_tx_header *txhdr; last = 0; vq = rxq->vtnrx_vq; while ((m = virtqueue_drain(vq, &last)) != NULL) { n++; if (m != (void *)rxq) m_freem(m); else nmb++; } last = 0; vq = txq->vtntx_vq; while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { n++; if (txhdr != (void *)txq) { m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } else nmb++; } } D("freed %d mbufs, %d netmap bufs on %d queues", n - nmb, nmb, i); } /* Register and unregister. */ static int vtnet_netmap_reg(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; struct SOFTC_T *sc = ifp->if_softc; VTNET_CORE_LOCK(sc); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* enable or disable flags and callbacks in na and ifp */ if (onoff) { nm_set_native_flags(na); } else { nm_clear_native_flags(na); } /* drain queues so netmap and native drivers * do not interfere with each other */ vtnet_netmap_free_bufs(sc); vtnet_init_locked(sc); /* also enable intr */ VTNET_CORE_UNLOCK(sc); return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1); } /* Reconcile kernel and user view of the transmit ring. */ static int vtnet_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int ring_nr = kring->ring_id; u_int nm_i; /* index into the netmap ring */ u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; /* device-specific */ struct SOFTC_T *sc = ifp->if_softc; struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr]; struct virtqueue *vq = txq->vtntx_vq; int interrupts = !(kring->nr_kflags & NKR_NOINTR); /* * First part: process new packets to send. */ rmb(); nm_i = kring->nr_hwcur; if (nm_i != head) { /* we have new packets to send */ struct sglist *sg = txq->vtntx_sg; nic_i = netmap_idx_k2n(kring, nm_i); for (n = 0; nm_i != head; n++) { /* we use an empty header here */ static struct virtio_net_hdr_mrg_rxbuf hdr; struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int err; NM_CHECK_ADDR_LEN(na, addr, len); slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); /* Initialize the scatterlist, expose it to the hypervisor, * and kick the hypervisor (if necessary). */ sglist_reset(sg); // cheap // if vtnet_hdr_size > 0 ... err = sglist_append(sg, &hdr, sc->vtnet_hdr_size); // XXX later, support multi segment err = sglist_append_phys(sg, paddr, len); /* use na as the cookie */ err = virtqueue_enqueue(vq, txq, sg, sg->sg_nseg, 0); if (unlikely(err < 0)) { D("virtqueue_enqueue failed"); break; } nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } /* Update hwcur depending on where we stopped. */ kring->nr_hwcur = nm_i; /* note we migth break early */ /* No more free TX slots? Ask the hypervisor for notifications, * possibly only when a considerable amount of work has been * done. */ ND(3,"sent %d packets, hwcur %d", n, nm_i); virtqueue_disable_intr(vq); virtqueue_notify(vq); } else { if (ring->head != ring->tail) ND(5, "pure notify ? 
head %d tail %d nused %d %d", ring->head, ring->tail, virtqueue_nused(vq), (virtqueue_dump(vq), 1)); virtqueue_notify(vq); if (interrupts) { virtqueue_enable_intr(vq); // like postpone with 0 } } /* Free used slots. We only consider our own used buffers, recognized * by the token we passed to virtqueue_add_outbuf. */ n = 0; for (;;) { struct vtnet_tx_header *txhdr = virtqueue_dequeue(vq, NULL); if (txhdr == NULL) break; if (likely(txhdr == (void *)txq)) { n++; if (virtqueue_nused(vq) < 32) { // XXX slow release break; } } else { /* leftover from previous transmission */ m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } } if (n) { kring->nr_hwtail += n; if (kring->nr_hwtail > lim) kring->nr_hwtail -= lim + 1; } if (nm_i != kring->nr_hwtail /* && vtnet_txq_below_threshold(txq) == 0*/) { ND(3, "disable intr, hwcur %d", nm_i); virtqueue_disable_intr(vq); } else if (interrupts) { ND(3, "enable intr, hwcur %d", nm_i); virtqueue_postpone_intr(vq, VQ_POSTPONE_SHORT); } return 0; } static int vtnet_refill_rxq(struct netmap_kring *kring, u_int nm_i, u_int head) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int ring_nr = kring->ring_id; u_int const lim = kring->nkr_num_slots - 1; u_int n; /* device-specific */ struct SOFTC_T *sc = ifp->if_softc; struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr]; struct virtqueue *vq = rxq->vtnrx_vq; /* use a local sglist, default might be short */ struct sglist_seg ss[2]; struct sglist sg = { ss, 0, 0, 2 }; for (n = 0; nm_i != head; n++) { static struct virtio_net_hdr_mrg_rxbuf hdr; struct netmap_slot *slot = &ring->slot[nm_i]; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int err = 0; if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */ if (netmap_ring_reinit(kring)) return -1; } slot->flags &= ~NS_BUF_CHANGED; sglist_reset(&sg); // cheap err = sglist_append(&sg, &hdr, sc->vtnet_hdr_size); err = sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na)); /* writable for the host */ err = virtqueue_enqueue(vq, rxq, &sg, 0, sg.sg_nseg); if (err < 0) { D("virtqueue_enqueue failed"); break; } nm_i = nm_next(nm_i, lim); } return nm_i; } /* Reconcile kernel and user view of the receive ring. */ static int vtnet_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int ring_nr = kring->ring_id; u_int nm_i; /* index into the netmap ring */ // u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; int interrupts = !(kring->nr_kflags & NKR_NOINTR); /* device-specific */ struct SOFTC_T *sc = ifp->if_softc; struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr]; struct virtqueue *vq = rxq->vtnrx_vq; /* XXX netif_carrier_ok ? */ if (head > lim) return netmap_ring_reinit(kring); rmb(); /* * First part: import newly received packets. * Only accept our * own buffers (matching the token). We should only get * matching buffers, because of vtnet_netmap_free_rx_unused_bufs() * and vtnet_netmap_init_buffers(). 
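 *
 * (The "token" is simply the cookie that vtnet_refill_rxq() above
 * passes to virtqueue_enqueue(), namely the rxq pointer itself, and
 * that virtqueue_dequeue() returns on completion, so comparing it
 * against rxq is enough to recognize buffers posted by the netmap
 * path.)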
*/ if (netmap_no_pendintr || force_update) { struct netmap_adapter *token; nm_i = kring->nr_hwtail; n = 0; for (;;) { int len; token = virtqueue_dequeue(vq, &len); if (token == NULL) break; if (likely(token == (void *)rxq)) { ring->slot[nm_i].len = len; ring->slot[nm_i].flags = 0; nm_i = nm_next(nm_i, lim); n++; } else { D("This should not happen"); } } kring->nr_hwtail = nm_i; kring->nr_kflags &= ~NKR_PENDINTR; } ND("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur, kring->nr_hwcur, kring->nr_hwtail); /* * Second part: skip past packets that userspace has released. */ nm_i = kring->nr_hwcur; /* netmap ring index */ if (nm_i != head) { int err = vtnet_refill_rxq(kring, nm_i, head); if (err < 0) return 1; kring->nr_hwcur = err; virtqueue_notify(vq); /* After draining the queue may need an intr from the hypervisor */ if (interrupts) { vtnet_rxq_enable_intr(rxq); } } ND("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur, ring->tail, kring->nr_hwcur, kring->nr_hwtail); return 0; } /* Enable/disable interrupts on all virtqueues. */ static void vtnet_netmap_intr(struct netmap_adapter *na, int onoff) { struct SOFTC_T *sc = na->ifp->if_softc; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i]; struct vtnet_txq *txq = &sc->vtnet_txqs[i]; struct virtqueue *txvq = txq->vtntx_vq; if (onoff) { vtnet_rxq_enable_intr(rxq); virtqueue_enable_intr(txvq); } else { vtnet_rxq_disable_intr(rxq); virtqueue_disable_intr(txvq); } } } /* Make RX virtqueues buffers pointing to netmap buffers. */ static int vtnet_netmap_init_rx_buffers(struct SOFTC_T *sc) { struct ifnet *ifp = sc->vtnet_ifp; struct netmap_adapter* na = NA(ifp); unsigned int r; if (!nm_native_on(na)) return 0; for (r = 0; r < na->num_rx_rings; r++) { - struct netmap_kring *kring = &na->rx_rings[r]; + struct netmap_kring *kring = na->rx_rings[r]; struct vtnet_rxq *rxq = &sc->vtnet_rxqs[r]; struct virtqueue *vq = rxq->vtnrx_vq; struct netmap_slot* slot; int err = 0; slot = netmap_reset(na, NR_RX, r, 0); if (!slot) { D("strange, null netmap ring %d", r); return 0; } /* Add up to na>-num_rx_desc-1 buffers to this RX virtqueue. * It's important to leave one virtqueue slot free, otherwise * we can run into ring->cur/ring->tail wraparounds. */ err = vtnet_refill_rxq(kring, 0, na->num_rx_desc-1); if (err < 0) return 0; virtqueue_notify(vq); } return 1; } -/* Update the virtio-net device configurations. Number of queues can - * change dinamically, by 'ethtool --set-channels $IFNAME combined $N'. - * This is actually the only way virtio-net can currently enable - * the multiqueue mode. 
- * XXX note that we seem to lose packets if the netmap ring has more - * slots than the queue - */ -static int -vtnet_netmap_config(struct netmap_adapter *na, u_int *txr, u_int *txd, - u_int *rxr, u_int *rxd) -{ - struct ifnet *ifp = na->ifp; - struct SOFTC_T *sc = ifp->if_softc; - - *txr = *rxr = sc->vtnet_max_vq_pairs; - *rxd = 512; // sc->vtnet_rx_nmbufs; - *txd = *rxd; // XXX - D("vtnet config txq=%d, txd=%d rxq=%d, rxd=%d", - *txr, *txd, *rxr, *rxd); - - return 0; -} - static void vtnet_netmap_attach(struct SOFTC_T *sc) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = sc->vtnet_ifp; na.num_tx_desc = 1024;// sc->vtnet_rx_nmbufs; na.num_rx_desc = 1024; // sc->vtnet_rx_nmbufs; na.nm_register = vtnet_netmap_reg; na.nm_txsync = vtnet_netmap_txsync; na.nm_rxsync = vtnet_netmap_rxsync; - na.nm_config = vtnet_netmap_config; na.nm_intr = vtnet_netmap_intr; na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs; D("max rings %d", sc->vtnet_max_vq_pairs); netmap_attach(&na); D("virtio attached txq=%d, txd=%d rxq=%d, rxd=%d", na.num_tx_rings, na.num_tx_desc, na.num_tx_rings, na.num_rx_desc); } /* end of file */ Index: head/sys/dev/netmap/netmap.c =================================================================== --- head/sys/dev/netmap/netmap.c (revision 332422) +++ head/sys/dev/netmap/netmap.c (revision 332423) @@ -1,3519 +1,3906 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2011-2014 Matteo Landi * Copyright (C) 2011-2016 Luigi Rizzo * Copyright (C) 2011-2016 Giuseppe Lettieri * Copyright (C) 2011-2016 Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * This module supports memory mapped access to network devices, * see netmap(4). * * The module uses a large, memory pool allocated by the kernel * and accessible as mmapped memory by multiple userspace threads/processes. * The memory pool contains packet buffers and "netmap rings", * i.e. user-accessible copies of the interface's queues. * * Access to the network card works like this: * 1. a process/thread issues one or more open() on /dev/netmap, to create * select()able file descriptor on which events are reported. * 2. on each descriptor, the process issues an ioctl() to identify * the interface that should report events to the file descriptor. * 3. 
on each descriptor, the process issues an mmap() request to * map the shared memory region within the process' address space. * The list of interesting queues is indicated by a location in * the shared memory region. * 4. using the functions in the netmap(4) userspace API, a process * can look up the occupation state of a queue, access memory buffers, * and retrieve received packets or enqueue packets to transmit. * 5. using some ioctl()s the process can synchronize the userspace view * of the queue with the actual status in the kernel. This includes both * receiving the notification of new packets, and transmitting new * packets on the output interface. * 6. select() or poll() can be used to wait for events on individual * transmit or receive queues (or all queues for a given interface). * SYNCHRONIZATION (USER) The netmap rings and data structures may be shared among multiple user threads or even independent processes. Any synchronization among those threads/processes is delegated to the threads themselves. Only one thread at a time can be in a system call on the same netmap ring. The OS does not enforce this and only guarantees against system crashes in case of invalid usage. LOCKING (INTERNAL) Within the kernel, access to the netmap rings is protected as follows: - a spinlock on each ring, to handle producer/consumer races on RX rings attached to the host stack (against multiple host threads writing from the host stack to the same ring), and on 'destination' rings attached to a VALE switch (i.e. RX rings in VALE ports, and TX rings in NIC/host ports) protecting multiple active senders for the same destination) - an atomic variable to guarantee that there is at most one instance of *_*xsync() on the ring at any time. For rings connected to user file descriptors, an atomic_test_and_set() protects this, and the lock on the ring is not actually used. For NIC RX rings connected to a VALE switch, an atomic_test_and_set() is also used to prevent multiple executions (the driver might indeed already guarantee this). For NIC TX rings connected to a VALE switch, the lock arbitrates access to the queue (both when allocating buffers and when pushing them out). - *xsync() should be protected against initializations of the card. On FreeBSD most devices have the reset routine protected by a RING lock (ixgbe, igb, em) or core lock (re). lem is missing the RING protection on rx_reset(), this should be added. On linux there is an external lock on the tx path, which probably also arbitrates access to the reset routine. XXX to be revised - a per-interface core_lock protecting access from the host stack while interfaces may be detached from netmap mode. XXX there should be no need for this lock if we detach the interfaces only while they are down. --- VALE SWITCH --- NMG_LOCK() serializes all modifications to switches and ports. A switch cannot be deleted until all ports are gone. For each switch, an SX lock (RWlock on linux) protects deletion of ports. When configuring or deleting a new port, the lock is acquired in exclusive mode (after holding NMG_LOCK). When forwarding, the lock is acquired in shared mode (without NMG_LOCK). The lock is held throughout the entire forwarding cycle, during which the thread may incur in a page fault. Hence it is important that sleepable shared locks are used. 
On the rx ring, the per-port lock is grabbed initially to reserve a number of slot in the ring, then the lock is released, packets are copied from source to destination, and then the lock is acquired again and the receive ring is updated. (A similar thing is done on the tx ring for NIC and host stack ports attached to the switch) */ /* --- internals ---- * * Roadmap to the code that implements the above. * * > 1. a process/thread issues one or more open() on /dev/netmap, to create * > select()able file descriptor on which events are reported. * * Internally, we allocate a netmap_priv_d structure, that will be * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d * structure for each open(). * * os-specific: * FreeBSD: see netmap_open() (netmap_freebsd.c) * linux: see linux_netmap_open() (netmap_linux.c) * * > 2. on each descriptor, the process issues an ioctl() to identify * > the interface that should report events to the file descriptor. * * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0. * Most important things happen in netmap_get_na() and * netmap_do_regif(), called from there. Additional details can be * found in the comments above those functions. * * In all cases, this action creates/takes-a-reference-to a * netmap_*_adapter describing the port, and allocates a netmap_if * and all necessary netmap rings, filling them with netmap buffers. * * In this phase, the sync callbacks for each ring are set (these are used * in steps 5 and 6 below). The callbacks depend on the type of adapter. * The adapter creation/initialization code puts them in the * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they * are copied from there to the netmap_kring's during netmap_do_regif(), by * the nm_krings_create() callback. All the nm_krings_create callbacks * actually call netmap_krings_create() to perform this and the other * common stuff. netmap_krings_create() also takes care of the host rings, * if needed, by setting their sync callbacks appropriately. * * Additional actions depend on the kind of netmap_adapter that has been * registered: * * - netmap_hw_adapter: [netmap.c] * This is a system netdev/ifp with native netmap support. * The ifp is detached from the host stack by redirecting: * - transmissions (from the network stack) to netmap_transmit() * - receive notifications to the nm_notify() callback for * this adapter. The callback is normally netmap_notify(), unless * the ifp is attached to a bridge using bwrap, in which case it * is netmap_bwrap_intr_notify(). * * - netmap_generic_adapter: [netmap_generic.c] * A system netdev/ifp without native netmap support. * * (the decision about native/non native support is taken in * netmap_get_hw_na(), called by netmap_get_na()) * * - netmap_vp_adapter [netmap_vale.c] * Returned by netmap_get_bdg_na(). * This is a persistent or ephemeral VALE port. Ephemeral ports * are created on the fly if they don't already exist, and are * always attached to a bridge. * Persistent VALE ports must must be created separately, and i * then attached like normal NICs. The NIOCREGIF we are examining * will find them only if they had previosly been created and * attached (see VALE_CTL below). * * - netmap_pipe_adapter [netmap_pipe.c] * Returned by netmap_get_pipe_na(). * Both pipe ends are created, if they didn't already exist. * * - netmap_monitor_adapter [netmap_monitor.c] * Returned by netmap_get_monitor_na(). * If successful, the nm_sync callbacks of the monitored adapter * will be intercepted by the returned monitor. 
* * - netmap_bwrap_adapter [netmap_vale.c] * Cannot be obtained in this way, see VALE_CTL below * * * os-specific: * linux: we first go through linux_netmap_ioctl() to * adapt the FreeBSD interface to the linux one. * * * > 3. on each descriptor, the process issues an mmap() request to * > map the shared memory region within the process' address space. * > The list of interesting queues is indicated by a location in * > the shared memory region. * * os-specific: * FreeBSD: netmap_mmap_single (netmap_freebsd.c). * linux: linux_netmap_mmap (netmap_linux.c). * * > 4. using the functions in the netmap(4) userspace API, a process * > can look up the occupation state of a queue, access memory buffers, * > and retrieve received packets or enqueue packets to transmit. * * these actions do not involve the kernel. * * > 5. using some ioctl()s the process can synchronize the userspace view * > of the queue with the actual status in the kernel. This includes both * > receiving the notification of new packets, and transmitting new * > packets on the output interface. * * These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC * cases. They invoke the nm_sync callbacks on the netmap_kring * structures, as initialized in step 2 and maybe later modified * by a monitor. Monitors, however, will always call the original * callback before doing anything else. * * * > 6. select() or poll() can be used to wait for events on individual * > transmit or receive queues (or all queues for a given interface). * * Implemented in netmap_poll(). This will call the same nm_sync() * callbacks as in step 5 above. * * os-specific: * linux: we first go through linux_netmap_poll() to adapt * the FreeBSD interface to the linux one. * * * ---- VALE_CTL ----- * * VALE switches are controlled by issuing a NIOCREGIF with a non-null * nr_cmd in the nmreq structure. These subcommands are handled by * netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created * and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF * subcommands, respectively. * * Any network interface known to the system (including a persistent VALE * port) can be attached to a VALE switch by issuing the - * NETMAP_BDG_ATTACH subcommand. After the attachment, persistent VALE ports + * NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports * look exactly like ephemeral VALE ports (as created in step 2 above). The * attachment of other interfaces, instead, requires the creation of a * netmap_bwrap_adapter. Moreover, the attached interface must be put in * netmap mode. This may require the creation of a netmap_generic_adapter if * we have no native support for the interface, or if generic adapters have * been forced by sysctl. * * Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(), * called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach() * callback. In the case of the bwrap, the callback creates the * netmap_bwrap_adapter. The initialization of the bwrap is then * completed by calling netmap_do_regif() on it, in the nm_bdg_ctl() * callback (netmap_bwrap_bdg_ctl in netmap_vale.c). * A generic adapter for the wrapped ifp will be created if needed, when * netmap_get_bdg_na() calls netmap_get_hw_na(). 
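As an illustration of the VALE_CTL path described above, attaching a physical port to a switch through the legacy nmreq interface could look like the sketch below. The switch name "vale0" and port "em0" are placeholders; the NETMAP_REQ_VALE_ATTACH request introduced by this change carries the same information in an nmreq_header instead.

#include <sys/ioctl.h>
#include <net/netmap.h>
#include <string.h>

static int
vale_attach_sketch(int fd)	/* fd: an already open /dev/netmap descriptor */
{
	struct nmreq nmr;

	memset(&nmr, 0, sizeof(nmr));
	nmr.nr_version = NETMAP_API;
	strlcpy(nmr.nr_name, "vale0:em0", sizeof(nmr.nr_name));
	nmr.nr_cmd = NETMAP_BDG_ATTACH;		/* dispatched to netmap_bdg_ctl() */
	return ioctl(fd, NIOCREGIF, &nmr);	/* 0 on success, -1/errno otherwise */
}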
* * * ---- DATAPATHS ----- * * -= SYSTEM DEVICE WITH NATIVE SUPPORT =- * * na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach() * * - tx from netmap userspace: * concurrently: * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == DEVICE_netmap_txsync() * 2) device interrupt handler * na->nm_notify() == netmap_notify() * - rx from netmap userspace: * concurrently: * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == DEVICE_netmap_rxsync() * 2) device interrupt handler * na->nm_notify() == netmap_notify() * - rx from host stack * concurrently: * 1) host stack * netmap_transmit() * na->nm_notify == netmap_notify() * 2) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_rxsync_from_host * netmap_rxsync_from_host(na, NULL, NULL) * - tx to host stack * ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_txsync_to_host * netmap_txsync_to_host(na) * nm_os_send_up() * FreeBSD: na->if_input() == ether_input() * linux: netif_rx() with NM_MAGIC_PRIORITY_RX * * * -= SYSTEM DEVICE WITH GENERIC SUPPORT =- * * na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach() * * - tx from netmap userspace: * concurrently: * 1) ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == generic_netmap_txsync() * nm_os_generic_xmit_frame() * linux: dev_queue_xmit() with NM_MAGIC_PRIORITY_TX * ifp->ndo_start_xmit == generic_ndo_start_xmit() * gna->save_start_xmit == orig. dev. start_xmit * FreeBSD: na->if_transmit() == orig. dev if_transmit * 2) generic_mbuf_destructor() * na->nm_notify() == netmap_notify() * - rx from netmap userspace: * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == generic_netmap_rxsync() * mbq_safe_dequeue() * 2) device driver * generic_rx_handler() * mbq_safe_enqueue() * na->nm_notify() == netmap_notify() * - rx from host stack * FreeBSD: same as native * Linux: same as native except: * 1) host stack * dev_queue_xmit() without NM_MAGIC_PRIORITY_TX * ifp->ndo_start_xmit == generic_ndo_start_xmit() * netmap_transmit() * na->nm_notify() == netmap_notify() * - tx to host stack (same as native): * * * -= VALE =- * * INCOMING: * * - VALE ports: * ioctl(NIOCTXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_vp_txsync() * * - system device with native support: * from cable: * interrupt * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring) * kring->nm_sync() == DEVICE_netmap_rxsync() * netmap_vp_txsync() * kring->nm_sync() == DEVICE_netmap_rxsync() * from host stack: * netmap_transmit() * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring) * kring->nm_sync() == netmap_rxsync_from_host() * netmap_vp_txsync() * * - system device with generic support: * from device driver: * generic_rx_handler() * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring) * kring->nm_sync() == generic_netmap_rxsync() * netmap_vp_txsync() * kring->nm_sync() == generic_netmap_rxsync() * from host stack: * netmap_transmit() * na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring) * kring->nm_sync() == netmap_rxsync_from_host() * netmap_vp_txsync() * * (all cases) --> nm_bdg_flush() * dest_na->nm_notify() == (see below) * * OUTGOING: * * - VALE ports: * concurrently: * 1) ioctl(NIOCRXSYNC)/netmap_poll() in process context * kring->nm_sync() == netmap_vp_rxsync() * 2) from nm_bdg_flush() * na->nm_notify() == netmap_notify() * * - system device with native support: * to cable: * 
na->nm_notify() == netmap_bwrap_notify() * netmap_vp_rxsync() * kring->nm_sync() == DEVICE_netmap_txsync() * netmap_vp_rxsync() * to host stack: * netmap_vp_rxsync() * kring->nm_sync() == netmap_txsync_to_host * netmap_vp_rxsync_locked() * * - system device with generic adapter: * to device driver: * na->nm_notify() == netmap_bwrap_notify() * netmap_vp_rxsync() * kring->nm_sync() == generic_netmap_txsync() * netmap_vp_rxsync() * to host stack: * netmap_vp_rxsync() * kring->nm_sync() == netmap_txsync_to_host * netmap_vp_rxsync() * */ /* * OS-specific code that is used only within this file. * Other OS-specific code that must be accessed by drivers * is present in netmap_kern.h */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include /* cdevsw struct, UID, GID */ #include /* FIONBIO */ #include #include /* struct socket */ #include #include #include #include /* sockaddrs */ #include #include #include #include #include #include #include /* BIOCIMMEDIATE */ #include /* bus_dmamap_* */ #include #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined (_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include /* user-controlled variables */ int netmap_verbose; static int netmap_no_timestamp; /* don't timestamp on rxsync */ int netmap_no_pendintr = 1; int netmap_txsync_retry = 2; static int netmap_fwd = 0; /* force transparent forwarding */ /* * netmap_admode selects the netmap mode to use. * Invalid values are reset to NETMAP_ADMODE_BEST */ enum { NETMAP_ADMODE_BEST = 0, /* use native, fallback to generic */ NETMAP_ADMODE_NATIVE, /* either native or none */ NETMAP_ADMODE_GENERIC, /* force generic */ NETMAP_ADMODE_LAST }; static int netmap_admode = NETMAP_ADMODE_BEST; /* netmap_generic_mit controls mitigation of RX notifications for * the generic netmap adapter. The value is a time interval in * nanoseconds. */ int netmap_generic_mit = 100*1000; /* We use by default netmap-aware qdiscs with generic netmap adapters, * even if there can be a little performance hit with hardware NICs. * However, using the qdisc is the safer approach, for two reasons: * 1) it prevents non-fifo qdiscs to break the TX notification * scheme, which is based on mbuf destructors when txqdisc is * not used. * 2) it makes it possible to transmit over software devices that * change skb->dev, like bridge, veth, ... * * Anyway users looking for the best performance should * use native adapters. */ #ifdef linux int netmap_generic_txqdisc = 1; #endif /* Default number of slots and queues for generic adapters. */ int netmap_generic_ringsize = 1024; int netmap_generic_rings = 1; /* Non-zero if ptnet devices are allowed to use virtio-net headers. 
*/ int ptnet_vnet_hdr = 1; /* 0 if ptnetmap should not use worker threads for TX processing */ int ptnetmap_tx_workers = 1; /* * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated * in some other operating systems */ SYSBEGIN(main_init); SYSCTL_DECL(_dev_netmap); SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args"); SYSCTL_INT(_dev_netmap, OID_AUTO, verbose, CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode"); SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp, CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp"); SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets."); SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW, &netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush."); SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0, "Force NR_FORWARD mode"); SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0, "Adapter mode. 0 selects the best option available," "1 forces native adapter, 2 forces emulated adapter"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit, 0, "RX notification interval in nanoseconds"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW, &netmap_generic_ringsize, 0, "Number of per-ring slots for emulated netmap mode"); SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW, &netmap_generic_rings, 0, "Number of TX/RX queues for emulated netmap adapters"); #ifdef linux SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW, &netmap_generic_txqdisc, 0, "Use qdisc for generic adapters"); #endif SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr, 0, "Allow ptnet devices to use virtio-net headers"); SYSCTL_INT(_dev_netmap, OID_AUTO, ptnetmap_tx_workers, CTLFLAG_RW, &ptnetmap_tx_workers, 0, "Use worker threads for pnetmap TX processing"); SYSEND; NMG_LOCK_T netmap_global_lock; /* * mark the ring as stopped, and run through the locks * to make sure other users get to see it. * stopped must be either NR_KR_STOPPED (for unbounded stop) * of NR_KR_LOCKED (brief stop for mutual exclusion purposes) */ static void netmap_disable_ring(struct netmap_kring *kr, int stopped) { nm_kr_stop(kr, stopped); // XXX check if nm_kr_stop is sufficient mtx_lock(&kr->q_lock); mtx_unlock(&kr->q_lock); nm_kr_put(kr); } /* stop or enable a single ring */ void netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped) { if (stopped) - netmap_disable_ring(NMR(na, t) + ring_id, stopped); + netmap_disable_ring(NMR(na, t)[ring_id], stopped); else - NMR(na, t)[ring_id].nkr_stopped = 0; + NMR(na, t)[ring_id]->nkr_stopped = 0; } /* stop or enable all the rings of na */ void netmap_set_all_rings(struct netmap_adapter *na, int stopped) { int i; enum txrx t; if (!nm_netmap_on(na)) return; for_rx_tx(t) { for (i = 0; i < netmap_real_rings(na, t); i++) { netmap_set_ring(na, i, t, stopped); } } } /* * Convenience function used in drivers. Waits for current txsync()s/rxsync()s * to finish and prevents any new one from starting. Call this before turning * netmap mode off, or before removing the hardware rings (e.g., on module * onload). */ void netmap_disable_all_rings(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { netmap_set_all_rings(NA(ifp), NM_KR_STOPPED); } } /* * Convenience function used in drivers. Re-enables rxsync and txsync on the * adapter's rings In linux drivers, this should be placed near each * napi_enable(). 
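A driver that has to reset its hardware rings would typically bracket the reset with the two helpers above. A minimal hypothetical sketch (the foo_ names are placeholders, not an existing driver):

struct foo_softc {
	struct ifnet *ifp;
	/* ... hypothetical driver state ... */
};

static void
foo_reinit_sketch(struct foo_softc *sc)
{
	netmap_disable_all_rings(sc->ifp);	/* wait for pending *_sync() and block new ones */
	/* ... stop and re-program the hardware rings here ... */
	netmap_enable_all_rings(sc->ifp);	/* resume netmap operation */
}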
*/ void netmap_enable_all_rings(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { netmap_set_all_rings(NA(ifp), 0 /* enabled */); } } void netmap_make_zombie(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { struct netmap_adapter *na = NA(ifp); netmap_set_all_rings(na, NM_KR_LOCKED); na->na_flags |= NAF_ZOMBIE; netmap_set_all_rings(na, 0); } } void netmap_undo_zombie(struct ifnet *ifp) { if (NM_NA_VALID(ifp)) { struct netmap_adapter *na = NA(ifp); if (na->na_flags & NAF_ZOMBIE) { netmap_set_all_rings(na, NM_KR_LOCKED); na->na_flags &= ~NAF_ZOMBIE; netmap_set_all_rings(na, 0); } } } /* * generic bound_checking function */ u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg) { u_int oldv = *v; const char *op = NULL; if (dflt < lo) dflt = lo; if (dflt > hi) dflt = hi; if (oldv < lo) { *v = dflt; op = "Bump"; } else if (oldv > hi) { *v = hi; op = "Clamp"; } if (op && msg) nm_prinf("%s %s to %d (was %d)\n", op, msg, *v, oldv); return *v; } /* * packet-dump function, user-supplied or static buffer. * The destination buffer must be at least 30+4*len */ const char * nm_dump_buf(char *p, int len, int lim, char *dst) { static char _dst[8192]; int i, j, i0; static char hex[] ="0123456789abcdef"; char *o; /* output position */ #define P_HI(x) hex[((x) & 0xf0)>>4] #define P_LO(x) hex[((x) & 0xf)] #define P_C(x) ((x) >= 0x20 && (x) <= 0x7e ? (x) : '.') if (!dst) dst = _dst; if (lim <= 0 || lim > len) lim = len; o = dst; sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim); o += strlen(o); /* hexdump routine */ for (i = 0; i < lim; ) { sprintf(o, "%5d: ", i); o += strlen(o); memset(o, ' ', 48); i0 = i; for (j=0; j < 16 && i < lim; i++, j++) { o[j*3] = P_HI(p[i]); o[j*3+1] = P_LO(p[i]); } i = i0; for (j=0; j < 16 && i < lim; i++, j++) o[j + 48] = P_C(p[i]); o[j+48] = '\n'; o += j+49; } *o = '\0'; #undef P_HI #undef P_LO #undef P_C return dst; } /* * Fetch configuration from the device, to cope with dynamic * reconfigurations after loading the module. 
*/ /* call with NMG_LOCK held */ int netmap_update_config(struct netmap_adapter *na) { - u_int txr, txd, rxr, rxd; + struct nm_config_info info; - txr = txd = rxr = rxd = 0; + bzero(&info, sizeof(info)); if (na->nm_config == NULL || - na->nm_config(na, &txr, &txd, &rxr, &rxd)) - { + na->nm_config(na, &info)) { /* take whatever we had at init time */ - txr = na->num_tx_rings; - txd = na->num_tx_desc; - rxr = na->num_rx_rings; - rxd = na->num_rx_desc; + info.num_tx_rings = na->num_tx_rings; + info.num_tx_descs = na->num_tx_desc; + info.num_rx_rings = na->num_rx_rings; + info.num_rx_descs = na->num_rx_desc; + info.rx_buf_maxsize = na->rx_buf_maxsize; } - if (na->num_tx_rings == txr && na->num_tx_desc == txd && - na->num_rx_rings == rxr && na->num_rx_desc == rxd) + if (na->num_tx_rings == info.num_tx_rings && + na->num_tx_desc == info.num_tx_descs && + na->num_rx_rings == info.num_rx_rings && + na->num_rx_desc == info.num_rx_descs && + na->rx_buf_maxsize == info.rx_buf_maxsize) return 0; /* nothing changed */ - if (netmap_verbose || na->active_fds > 0) { - D("stored config %s: txring %d x %d, rxring %d x %d", - na->name, - na->num_tx_rings, na->num_tx_desc, - na->num_rx_rings, na->num_rx_desc); - D("new config %s: txring %d x %d, rxring %d x %d", - na->name, txr, txd, rxr, rxd); - } if (na->active_fds == 0) { - D("configuration changed (but fine)"); - na->num_tx_rings = txr; - na->num_tx_desc = txd; - na->num_rx_rings = rxr; - na->num_rx_desc = rxd; + D("configuration changed for %s: txring %d x %d, " + "rxring %d x %d, rxbufsz %d", + na->name, na->num_tx_rings, na->num_tx_desc, + na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize); + na->num_tx_rings = info.num_tx_rings; + na->num_tx_desc = info.num_tx_descs; + na->num_rx_rings = info.num_rx_rings; + na->num_rx_desc = info.num_rx_descs; + na->rx_buf_maxsize = info.rx_buf_maxsize; return 0; } - D("configuration changed while active, this is bad..."); + D("WARNING: configuration changed for %s while active: " + "txring %d x %d, rxring %d x %d, rxbufsz %d", + na->name, info.num_tx_rings, info.num_tx_descs, + info.num_rx_rings, info.num_rx_descs, + info.rx_buf_maxsize); return 1; } /* nm_sync callbacks for the host rings */ static int netmap_txsync_to_host(struct netmap_kring *kring, int flags); static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags); /* create the krings array and initialize the fields common to all adapters. * The array layout is this: * * +----------+ * na->tx_rings ----->| | \ * | | } na->num_tx_ring * | | / * +----------+ * | | host tx kring * na->rx_rings ----> +----------+ * | | \ * | | } na->num_rx_rings * | | / * +----------+ * | | host rx kring * +----------+ * na->tailroom ----->| | \ * | | } tailroom bytes * | | / * +----------+ * * Note: for compatibility, host krings are created even when not needed. * The tailroom space is currently used by vale ports for allocating leases. 
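After this change a driver's nm_config callback fills a struct nm_config_info rather than four scalar pointers. A minimal sketch for a hypothetical driver follows; the numeric values are placeholders, and in practice a driver would read them back from its softc before storing the callback in na->nm_config ahead of netmap_attach().

static int
foo_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	/* In a real driver these come from the device/softc, not constants. */
	info->num_tx_rings = 4;
	info->num_rx_rings = 4;
	info->num_tx_descs = 1024;
	info->num_rx_descs = 1024;
	info->rx_buf_maxsize = 2048;	/* largest payload one RX descriptor can hold */
	return 0;			/* non-zero would make netmap fall back to init-time values */
}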
*/ /* call with NMG_LOCK held */ int netmap_krings_create(struct netmap_adapter *na, u_int tailroom) { u_int i, len, ndesc; struct netmap_kring *kring; u_int n[NR_TXRX]; enum txrx t; if (na->tx_rings != NULL) { D("warning: krings were already created"); return 0; } /* account for the (possibly fake) host rings */ n[NR_TX] = na->num_tx_rings + 1; n[NR_RX] = na->num_rx_rings + 1; - len = (n[NR_TX] + n[NR_RX]) * sizeof(struct netmap_kring) + tailroom; + len = (n[NR_TX] + n[NR_RX]) * + (sizeof(struct netmap_kring) + sizeof(struct netmap_kring *)) + + tailroom; na->tx_rings = nm_os_malloc((size_t)len); if (na->tx_rings == NULL) { D("Cannot allocate krings"); return ENOMEM; } na->rx_rings = na->tx_rings + n[NR_TX]; + na->tailroom = na->rx_rings + n[NR_RX]; + + /* link the krings in the krings array */ + kring = (struct netmap_kring *)((char *)na->tailroom + tailroom); + for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) { + na->tx_rings[i] = kring; + kring++; + } /* * All fields in krings are 0 except the one initialized below. * but better be explicit on important kring fields. */ for_rx_tx(t) { ndesc = nma_get_ndesc(na, t); for (i = 0; i < n[t]; i++) { - kring = &NMR(na, t)[i]; + kring = NMR(na, t)[i]; bzero(kring, sizeof(*kring)); kring->na = na; + kring->notify_na = na; kring->ring_id = i; kring->tx = t; kring->nkr_num_slots = ndesc; kring->nr_mode = NKR_NETMAP_OFF; kring->nr_pending_mode = NKR_NETMAP_OFF; if (i < nma_get_nrings(na, t)) { kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync); } else { + if (!(na->na_flags & NAF_HOST_RINGS)) + kring->nr_kflags |= NKR_FAKERING; kring->nm_sync = (t == NR_TX ? netmap_txsync_to_host: netmap_rxsync_from_host); } kring->nm_notify = na->nm_notify; kring->rhead = kring->rcur = kring->nr_hwcur = 0; /* * IMPORTANT: Always keep one slot empty. */ kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0); snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name, nm_txrx2str(t), i); ND("ktx %s h %d c %d t %d", kring->name, kring->rhead, kring->rcur, kring->rtail); mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF); nm_os_selinfo_init(&kring->si); } nm_os_selinfo_init(&na->si[t]); } - na->tailroom = na->rx_rings + n[NR_RX]; return 0; } /* undo the actions performed by netmap_krings_create */ /* call with NMG_LOCK held */ void netmap_krings_delete(struct netmap_adapter *na) { - struct netmap_kring *kring = na->tx_rings; + struct netmap_kring **kring = na->tx_rings; enum txrx t; if (na->tx_rings == NULL) { D("warning: krings were already deleted"); return; } for_rx_tx(t) nm_os_selinfo_uninit(&na->si[t]); /* we rely on the krings layout described above */ for ( ; kring != na->tailroom; kring++) { - mtx_destroy(&kring->q_lock); - nm_os_selinfo_uninit(&kring->si); + mtx_destroy(&(*kring)->q_lock); + nm_os_selinfo_uninit(&(*kring)->si); } nm_os_free(na->tx_rings); na->tx_rings = na->rx_rings = na->tailroom = NULL; } /* * Destructor for NIC ports. They also have an mbuf queue * on the rings connected to the host so we need to purge * them first. 
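Since the kring arrays now hold pointers rather than embedded structures, every lookup gains one dereference. A small sketch of the access pattern assumed by the new code (the example_ name is illustrative only):

static inline struct netmap_kring *
example_get_kring(struct netmap_adapter *na, enum txrx t, u_int i)
{
	/* before this change: &NMR(na, t)[i], the address of the i-th struct */
	return NMR(na, t)[i];	/* now: the i-th pointer stored in the array */
}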
*/ /* call with NMG_LOCK held */ void netmap_hw_krings_delete(struct netmap_adapter *na) { - struct mbq *q = &na->rx_rings[na->num_rx_rings].rx_queue; + struct mbq *q = &na->rx_rings[na->num_rx_rings]->rx_queue; ND("destroy sw mbq with len %d", mbq_len(q)); mbq_purge(q); mbq_safe_fini(q); netmap_krings_delete(na); } static void netmap_mem_drop(struct netmap_adapter *na) { int last = netmap_mem_deref(na->nm_mem, na); /* if the native allocator had been overrided on regif, * restore it now and drop the temporary one */ if (last && na->nm_mem_prev) { netmap_mem_put(na->nm_mem); na->nm_mem = na->nm_mem_prev; na->nm_mem_prev = NULL; } } /* * Undo everything that was done in netmap_do_regif(). In particular, * call nm_register(ifp,0) to stop netmap mode on the interface and * revert to normal operation. */ /* call with NMG_LOCK held */ static void netmap_unset_ringid(struct netmap_priv_d *); static void netmap_krings_put(struct netmap_priv_d *); void netmap_do_unregif(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; NMG_LOCK_ASSERT(); na->active_fds--; /* unset nr_pending_mode and possibly release exclusive mode */ netmap_krings_put(priv); #ifdef WITH_MONITOR /* XXX check whether we have to do something with monitor * when rings change nr_mode. */ if (na->active_fds <= 0) { /* walk through all the rings and tell any monitor * that the port is going to exit netmap mode */ netmap_monitor_stop(na); } #endif if (na->active_fds <= 0 || nm_kring_pending(priv)) { na->nm_register(na, 0); } /* delete rings and buffers that are no longer needed */ netmap_mem_rings_delete(na); if (na->active_fds <= 0) { /* last instance */ /* * (TO CHECK) We enter here * when the last reference to this file descriptor goes * away. This means we cannot have any pending poll() * or interrupt routine operating on the structure. * XXX The file may be closed in a thread while * another thread is using it. * Linux keeps the file opened until the last reference * by any outstanding ioctl/poll or mmap is gone. * FreeBSD does not track mmap()s (but we do) and * wakes up any sleeping poll(). Need to check what * happens if the close() occurs while a concurrent * syscall is running. */ if (netmap_verbose) D("deleting last instance for %s", na->name); if (nm_netmap_on(na)) { D("BUG: netmap on while going to delete the krings"); } na->nm_krings_delete(na); } /* possibily decrement counter of tx_si/rx_si users */ netmap_unset_ringid(priv); /* delete the nifp */ netmap_mem_if_delete(na, priv->np_nifp); /* drop the allocator */ netmap_mem_drop(na); /* mark the priv as unregistered */ priv->np_na = NULL; priv->np_nifp = NULL; } /* call with NMG_LOCK held */ static __inline int nm_si_user(struct netmap_priv_d *priv, enum txrx t) { return (priv->np_na != NULL && (priv->np_qlast[t] - priv->np_qfirst[t] > 1)); } struct netmap_priv_d* netmap_priv_new(void) { struct netmap_priv_d *priv; priv = nm_os_malloc(sizeof(struct netmap_priv_d)); if (priv == NULL) return NULL; priv->np_refs = 1; nm_os_get_module(); return priv; } /* * Destructor of the netmap_priv_d, called when the fd is closed * Action: undo all the things done by NIOCREGIF, * On FreeBSD we need to track whether there are active mmap()s, * and we use np_active_mmaps for that. On linux, the field is always 0. * Return: 1 if we can free priv, 0 otherwise. 
* */ /* call with NMG_LOCK held */ void netmap_priv_delete(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; /* number of active references to this fd */ if (--priv->np_refs > 0) { return; } nm_os_put_module(); if (na) { netmap_do_unregif(priv); } netmap_unget_na(na, priv->np_ifp); bzero(priv, sizeof(*priv)); /* for safety */ nm_os_free(priv); } /* call with NMG_LOCK *not* held */ void netmap_dtor(void *data) { struct netmap_priv_d *priv = data; NMG_LOCK(); netmap_priv_delete(priv); NMG_UNLOCK(); } /* * Handlers for synchronization of the rings from/to the host stack. * These are associated to a network interface and are just another * ring pair managed by userspace. * * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD * flags): * * - Before releasing buffers on hw RX rings, the application can mark * them with the NS_FORWARD flag. During the next RXSYNC or poll(), they * will be forwarded to the host stack, similarly to what happened if * the application moved them to the host TX ring. * * - Before releasing buffers on the host RX ring, the application can * mark them with the NS_FORWARD flag. During the next RXSYNC or poll(), * they will be forwarded to the hw TX rings, saving the application * from doing the same task in user-space. * * Transparent fowarding can be enabled per-ring, by setting the NR_FORWARD * flag, or globally with the netmap_fwd sysctl. * * The transfer NIC --> host is relatively easy, just encapsulate * into mbufs and we are done. The host --> NIC side is slightly * harder because there might not be room in the tx ring so it * might take a while before releasing the buffer. */ /* * Pass a whole queue of mbufs to the host stack as coming from 'dst' * We do not need to lock because the queue is private. * After this call the queue is empty. */ static void netmap_send_up(struct ifnet *dst, struct mbq *q) { struct mbuf *m; struct mbuf *head = NULL, *prev = NULL; /* Send packets up, outside the lock; head/prev machinery * is only useful for Windows. */ while ((m = mbq_dequeue(q)) != NULL) { if (netmap_verbose & NM_VERB_HOST) D("sending up pkt %p size %d", m, MBUF_LEN(m)); prev = nm_os_send_up(dst, m, prev); if (head == NULL) head = prev; } if (head) nm_os_send_up(dst, NULL, head); mbq_fini(q); } /* * Scan the buffers from hwcur to ring->head, and put a copy of those * marked NS_FORWARD (or all of them if forced) into a queue of mbufs. * Drop remaining packets in the unlikely event * of an mbuf shortage. */ static void netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force) { u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; u_int n; struct netmap_adapter *na = kring->na; for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) { struct mbuf *m; struct netmap_slot *slot = &kring->ring->slot[n]; if ((slot->flags & NS_FORWARD) == 0 && !force) continue; if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) { RD(5, "bad pkt at %d len %d", n, slot->len); continue; } slot->flags &= ~NS_FORWARD; // XXX needed ? 
/* XXX TODO: adapt to the case of a multisegment packet */ m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL); if (m == NULL) break; mbq_enqueue(q, m); } } static inline int _nm_may_forward(struct netmap_kring *kring) { return ((netmap_fwd || kring->ring->flags & NR_FORWARD) && kring->na->na_flags & NAF_HOST_RINGS && kring->tx == NR_RX); } static inline int nm_may_forward_up(struct netmap_kring *kring) { return _nm_may_forward(kring) && kring->ring_id != kring->na->num_rx_rings; } static inline int nm_may_forward_down(struct netmap_kring *kring, int sync_flags) { return _nm_may_forward(kring) && (sync_flags & NAF_CAN_FORWARD_DOWN) && kring->ring_id == kring->na->num_rx_rings; } /* * Send to the NIC rings packets marked NS_FORWARD between * kring->nr_hwcur and kring->rhead. * Called under kring->rx_queue.lock on the sw rx ring. * * It can only be called if the user opened all the TX hw rings, * see NAF_CAN_FORWARD_DOWN flag. * We can touch the TX netmap rings (slots, head and cur) since * we are in poll/ioctl system call context, and the application * is not supposed to touch the ring (using a different thread) * during the execution of the system call. */ static u_int netmap_sw_to_nic(struct netmap_adapter *na) { - struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings]; + struct netmap_kring *kring = na->rx_rings[na->num_rx_rings]; struct netmap_slot *rxslot = kring->ring->slot; u_int i, rxcur = kring->nr_hwcur; u_int const head = kring->rhead; u_int const src_lim = kring->nkr_num_slots - 1; u_int sent = 0; /* scan rings to find space, then fill as much as possible */ for (i = 0; i < na->num_tx_rings; i++) { - struct netmap_kring *kdst = &na->tx_rings[i]; + struct netmap_kring *kdst = na->tx_rings[i]; struct netmap_ring *rdst = kdst->ring; u_int const dst_lim = kdst->nkr_num_slots - 1; /* XXX do we trust ring or kring->rcur,rtail ? */ for (; rxcur != head && !nm_ring_empty(rdst); rxcur = nm_next(rxcur, src_lim) ) { struct netmap_slot *src, *dst, tmp; u_int dst_head = rdst->head; src = &rxslot[rxcur]; if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd) continue; sent++; dst = &rdst->slot[dst_head]; tmp = *src; src->buf_idx = dst->buf_idx; src->flags = NS_BUF_CHANGED; dst->buf_idx = tmp.buf_idx; dst->len = tmp.len; dst->flags = NS_BUF_CHANGED; rdst->head = rdst->cur = nm_next(dst_head, dst_lim); } /* if (sent) XXX txsync ? it would be just an optimization */ } return sent; } /* * netmap_txsync_to_host() passes packets up. We are called from a * system call in user process context, and the only contention * can be among multiple user threads erroneously calling * this routine concurrently. */ static int netmap_txsync_to_host(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; struct mbq q; /* Take packets from hwcur to head and pass them up. * Force hwcur = head since netmap_grab_packets() stops at head */ mbq_init(&q); netmap_grab_packets(kring, &q, 1 /* force */); ND("have %d pkts in queue", mbq_len(&q)); kring->nr_hwcur = head; kring->nr_hwtail = head + lim; if (kring->nr_hwtail > lim) kring->nr_hwtail -= lim + 1; netmap_send_up(na->ifp, &q); return 0; } /* * rxsync backend for packets coming from the host stack. * They have been put in kring->rx_queue by netmap_transmit(). 
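From userspace, transparent forwarding only requires tagging a slot before releasing it. A sketch, assuming the ring was opened with NR_FORWARD (or dev.netmap.fwd is set) and that netmap_user.h is included:

static void
forward_slot_up_sketch(struct netmap_ring *rxring)
{
	struct netmap_slot *slot = &rxring->slot[rxring->cur];

	slot->flags |= NS_FORWARD;	/* ask the kernel to pass this buffer to the host stack */
	rxring->head = rxring->cur = nm_ring_next(rxring, rxring->cur);
	/* the copy into an mbuf happens in netmap_grab_packets() on the next rxsync */
}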
* We protect access to the kring using kring->rx_queue.lock * * also moves to the nic hw rings any packet the user has marked * for transparent-mode forwarding, then sets the NR_FORWARD * flag in the kring to let the caller push them out */ static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; u_int nm_i, n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int ret = 0; struct mbq *q = &kring->rx_queue, fq; mbq_init(&fq); /* fq holds packets to be freed */ mbq_lock(q); /* First part: import newly received packets */ n = mbq_len(q); if (n) { /* grab packets from the queue */ struct mbuf *m; uint32_t stop_i; nm_i = kring->nr_hwtail; stop_i = nm_prev(kring->nr_hwcur, lim); while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) { int len = MBUF_LEN(m); struct netmap_slot *slot = &ring->slot[nm_i]; m_copydata(m, 0, len, NMB(na, slot)); ND("nm %d len %d", nm_i, len); if (netmap_verbose) D("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL)); slot->len = len; slot->flags = 0; nm_i = nm_next(nm_i, lim); mbq_enqueue(&fq, m); } kring->nr_hwtail = nm_i; } /* * Second part: skip past packets that userspace has released. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* something was released */ if (nm_may_forward_down(kring, flags)) { ret = netmap_sw_to_nic(na); if (ret > 0) { kring->nr_kflags |= NR_FORWARD; ret = 0; } } kring->nr_hwcur = head; } mbq_unlock(q); mbq_purge(&fq); mbq_fini(&fq); return ret; } /* Get a netmap adapter for the port. * * If it is possible to satisfy the request, return 0 * with *na containing the netmap adapter found. * Otherwise return an error code, with *na containing NULL. * * When the port is attached to a bridge, we always return * EBUSY. * Otherwise, if the port is already bound to a file descriptor, * then we unconditionally return the existing adapter into *na. * In all the other cases, we return (into *na) either native, * generic or NULL, according to the following table: * * native_support * active_fds dev.netmap.admode YES NO * ------------------------------------------------------- * >0 * NA(ifp) NA(ifp) * * 0 NETMAP_ADMODE_BEST NATIVE GENERIC * 0 NETMAP_ADMODE_NATIVE NATIVE NULL * 0 NETMAP_ADMODE_GENERIC GENERIC GENERIC * */ static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */ int netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na) { /* generic support */ int i = netmap_admode; /* Take a snapshot. */ struct netmap_adapter *prev_na; int error = 0; *na = NULL; /* default */ /* reset in case of invalid value */ if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST) i = netmap_admode = NETMAP_ADMODE_BEST; if (NM_NA_VALID(ifp)) { prev_na = NA(ifp); /* If an adapter already exists, return it if * there are active file descriptors or if * netmap is not forced to use generic * adapters. */ if (NETMAP_OWNED_BY_ANY(prev_na) || i != NETMAP_ADMODE_GENERIC || prev_na->na_flags & NAF_FORCE_NATIVE #ifdef WITH_PIPES /* ugly, but we cannot allow an adapter switch * if some pipe is referring to this one */ || prev_na->na_next_pipe > 0 #endif ) { *na = prev_na; goto assign_mem; } } /* If there isn't native support and netmap is not allowed * to use generic adapters, we cannot satisfy the request. */ if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE) return EOPNOTSUPP; /* Otherwise, create a generic adapter and return it, * saving the previously used netmap adapter, if any. 
* * Note that here 'prev_na', if not NULL, MUST be a * native adapter, and CANNOT be a generic one. This is * true because generic adapters are created on demand, and * destroyed when not used anymore. Therefore, if the adapter * currently attached to an interface 'ifp' is generic, it * must be that * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))). * Consequently, if NA(ifp) is generic, we will enter one of * the branches above. This ensures that we never override * a generic adapter with another generic adapter. */ error = generic_netmap_attach(ifp); if (error) return error; *na = NA(ifp); assign_mem: if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) && (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) { (*na)->nm_mem_prev = (*na)->nm_mem; (*na)->nm_mem = netmap_mem_get(nmd); } return 0; } /* * MUST BE CALLED UNDER NMG_LOCK() * * Get a refcounted reference to a netmap adapter attached - * to the interface specified by nmr. + * to the interface specified by req. * This is always called in the execution of an ioctl(). * * Return ENXIO if the interface specified by the request does * not exist, ENOTSUP if netmap is not supported by the interface, * EBUSY if the interface is already attached to a bridge, * EINVAL if parameters are invalid, ENOMEM if needed resources * could not be allocated. * If successful, hold a reference to the netmap adapter. * - * If the interface specified by nmr is a system one, also keep + * If the interface specified by req is a system one, also keep * a reference to it and return a valid *ifp. */ int -netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, - struct ifnet **ifp, struct netmap_mem_d *nmd, int create) +netmap_get_na(struct nmreq_header *hdr, + struct netmap_adapter **na, struct ifnet **ifp, + struct netmap_mem_d *nmd, int create) { + struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body; int error = 0; struct netmap_adapter *ret = NULL; int nmd_ref = 0; *na = NULL; /* default return value */ *ifp = NULL; + if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { + return EINVAL; + } + + if (req->nr_mode == NR_REG_PIPE_MASTER || + req->nr_mode == NR_REG_PIPE_SLAVE) { + /* Do not accept deprecated pipe modes. */ + D("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax"); + return EINVAL; + } + NMG_LOCK_ASSERT(); /* if the request contain a memid, try to find the * corresponding memory region */ - if (nmd == NULL && nmr->nr_arg2) { - nmd = netmap_mem_find(nmr->nr_arg2); + if (nmd == NULL && req->nr_mem_id) { + nmd = netmap_mem_find(req->nr_mem_id); if (nmd == NULL) return EINVAL; /* keep the rereference */ nmd_ref = 1; } /* We cascade through all possible types of netmap adapter. 
* All netmap_get_*_na() functions return an error and an na, * with the following combinations: * * error na * 0 NULL type doesn't match * !0 NULL type matches, but na creation/lookup failed * 0 !NULL type matches and na created/found * !0 !NULL impossible */ /* try to see if this is a ptnetmap port */ - error = netmap_get_pt_host_na(nmr, na, nmd, create); + error = netmap_get_pt_host_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; /* try to see if this is a monitor port */ - error = netmap_get_monitor_na(nmr, na, nmd, create); + error = netmap_get_monitor_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; /* try to see if this is a pipe port */ - error = netmap_get_pipe_na(nmr, na, nmd, create); + error = netmap_get_pipe_na(hdr, na, nmd, create); if (error || *na != NULL) goto out; /* try to see if this is a bridge port */ - error = netmap_get_bdg_na(nmr, na, nmd, create); + error = netmap_get_bdg_na(hdr, na, nmd, create); if (error) goto out; if (*na != NULL) /* valid match in netmap_get_bdg_na() */ goto out; /* * This must be a hardware na, lookup the name in the system. * Note that by hardware we actually mean "it shows up in ifconfig". * This may still be a tap, a veth/epair, or even a * persistent VALE port. */ - *ifp = ifunit_ref(nmr->nr_name); + *ifp = ifunit_ref(hdr->nr_name); if (*ifp == NULL) { error = ENXIO; goto out; } error = netmap_get_hw_na(*ifp, nmd, &ret); if (error) goto out; *na = ret; netmap_adapter_get(ret); out: if (error) { if (ret) netmap_adapter_put(ret); if (*ifp) { if_rele(*ifp); *ifp = NULL; } } if (nmd_ref) netmap_mem_put(nmd); return error; } /* undo netmap_get_na() */ void netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp) { if (ifp) if_rele(ifp); if (na) netmap_adapter_put(na); } #define NM_FAIL_ON(t) do { \ if (unlikely(t)) { \ RD(5, "%s: fail '" #t "' " \ "h %d c %d t %d " \ "rh %d rc %d rt %d " \ "hc %d ht %d", \ kring->name, \ head, cur, ring->tail, \ kring->rhead, kring->rcur, kring->rtail, \ kring->nr_hwcur, kring->nr_hwtail); \ return kring->nkr_num_slots; \ } \ } while (0) /* * validate parameters on entry for *_txsync() * Returns ring->cur if ok, or something >= kring->nkr_num_slots * in case of error. * * rhead, rcur and rtail=hwtail are stored from previous round. * hwcur is the next packet to send to the ring. * * We want * hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail * * hwcur, rhead, rtail and hwtail are reliable */ u_int nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring) { u_int head = ring->head; /* read only once */ u_int cur = ring->cur; /* read only once */ u_int n = kring->nkr_num_slots; ND(5, "%s kcur %d ktail %d head %d cur %d tail %d", kring->name, kring->nr_hwcur, kring->nr_hwtail, ring->head, ring->cur, ring->tail); #if 1 /* kernel sanity checks; but we can trust the kring. */ NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n || kring->rtail >= n || kring->nr_hwtail >= n); #endif /* kernel sanity checks */ /* * user sanity checks. We only use head, * A, B, ... are possible positions for head: * * 0 A rhead B rtail C n-1 * 0 D rtail E rhead F n-1 * * B, F, D are valid. A, C, E are wrong */ if (kring->rtail >= kring->rhead) { /* want rhead <= head <= rtail */ NM_FAIL_ON(head < kring->rhead || head > kring->rtail); /* and also head <= cur <= rtail */ NM_FAIL_ON(cur < head || cur > kring->rtail); } else { /* here rtail < rhead */ /* we need head outside rtail .. 
rhead */ NM_FAIL_ON(head > kring->rtail && head < kring->rhead); /* two cases now: head <= rtail or head >= rhead */ if (head <= kring->rtail) { /* want head <= cur <= rtail */ NM_FAIL_ON(cur < head || cur > kring->rtail); } else { /* head >= rhead */ /* cur must be outside rtail..head */ NM_FAIL_ON(cur > kring->rtail && cur < head); } } if (ring->tail != kring->rtail) { RD(5, "%s tail overwritten was %d need %d", kring->name, ring->tail, kring->rtail); ring->tail = kring->rtail; } kring->rhead = head; kring->rcur = cur; return head; } /* * validate parameters on entry for *_rxsync() * Returns ring->head if ok, kring->nkr_num_slots on error. * * For a valid configuration, * hwcur <= head <= cur <= tail <= hwtail * * We only consider head and cur. * hwcur and hwtail are reliable. * */ u_int nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring) { uint32_t const n = kring->nkr_num_slots; uint32_t head, cur; ND(5,"%s kc %d kt %d h %d c %d t %d", kring->name, kring->nr_hwcur, kring->nr_hwtail, ring->head, ring->cur, ring->tail); /* * Before storing the new values, we should check they do not * move backwards. However: * - head is not an issue because the previous value is hwcur; * - cur could in principle go back, however it does not matter * because we are processing a brand new rxsync() */ cur = kring->rcur = ring->cur; /* read only once */ head = kring->rhead = ring->head; /* read only once */ #if 1 /* kernel sanity checks */ NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n); #endif /* kernel sanity checks */ /* user sanity checks */ if (kring->nr_hwtail >= kring->nr_hwcur) { /* want hwcur <= rhead <= hwtail */ NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail); /* and also rhead <= rcur <= hwtail */ NM_FAIL_ON(cur < head || cur > kring->nr_hwtail); } else { /* we need rhead outside hwtail..hwcur */ NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail); /* two cases now: head <= hwtail or head >= hwcur */ if (head <= kring->nr_hwtail) { /* want head <= cur <= hwtail */ NM_FAIL_ON(cur < head || cur > kring->nr_hwtail); } else { /* cur must be outside hwtail..head */ NM_FAIL_ON(cur < head && cur > kring->nr_hwtail); } } if (ring->tail != kring->rtail) { RD(5, "%s tail overwritten was %d need %d", kring->name, ring->tail, kring->rtail); ring->tail = kring->rtail; } return head; } /* * Error routine called when txsync/rxsync detects an error. * Can't do much more than resetting head =cur = hwcur, tail = hwtail * Return 1 on reinit. * * This routine is only called by the upper half of the kernel. * It only reads hwcur (which is changed only by the upper half, too) * and hwtail (which may be changed by the lower half, but only on * a tx ring and only to increase it, so any error will be recovered * on the next call). For the above, we don't strictly need to call * it under lock. 
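A worked numeric example of the prologue checks may help; the values below are assumed, not taken from a real run.

/*
 * Example with nkr_num_slots = 8, rhead = 2, rtail = 6 (no wraparound):
 *   head = 4, cur = 5  -> accepted (2 <= 4 <= 6 and 4 <= 5 <= 6)
 *   head = 7           -> rejected, head > rtail
 *   head = 4, cur = 1  -> rejected, cur < head
 * A rejected call returns nkr_num_slots, and the caller is expected to
 * recover through netmap_ring_reinit().
 */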
*/ int netmap_ring_reinit(struct netmap_kring *kring) { struct netmap_ring *ring = kring->ring; u_int i, lim = kring->nkr_num_slots - 1; int errors = 0; // XXX KASSERT nm_kr_tryget RD(10, "called for %s", kring->name); // XXX probably wrong to trust userspace kring->rhead = ring->head; kring->rcur = ring->cur; kring->rtail = ring->tail; if (ring->cur > lim) errors++; if (ring->head > lim) errors++; if (ring->tail > lim) errors++; for (i = 0; i <= lim; i++) { u_int idx = ring->slot[i].buf_idx; u_int len = ring->slot[i].len; if (idx < 2 || idx >= kring->na->na_lut.objtotal) { RD(5, "bad index at slot %d idx %d len %d ", i, idx, len); ring->slot[i].buf_idx = 0; ring->slot[i].len = 0; } else if (len > NETMAP_BUF_SIZE(kring->na)) { ring->slot[i].len = 0; RD(5, "bad len at slot %d idx %d len %d", i, idx, len); } } if (errors) { RD(10, "total %d errors", errors); RD(10, "%s reinit, cur %d -> %d tail %d -> %d", kring->name, ring->cur, kring->nr_hwcur, ring->tail, kring->nr_hwtail); ring->head = kring->rhead = kring->nr_hwcur; ring->cur = kring->rcur = kring->nr_hwcur; ring->tail = kring->rtail = kring->nr_hwtail; } return (errors ? 1 : 0); } /* interpret the ringid and flags fields of an nmreq, by translating them * into a pair of intervals of ring indices: * * [priv->np_txqfirst, priv->np_txqlast) and * [priv->np_rxqfirst, priv->np_rxqlast) * */ int -netmap_interp_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags) +netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, + uint16_t nr_ringid, uint64_t nr_flags) { struct netmap_adapter *na = priv->np_na; - u_int j, i = ringid & NETMAP_RING_MASK; - u_int reg = flags & NR_REG_MASK; int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY }; enum txrx t; + u_int j; - if (reg == NR_REG_DEFAULT) { - /* convert from old ringid to flags */ - if (ringid & NETMAP_SW_RING) { - reg = NR_REG_SW; - } else if (ringid & NETMAP_HW_RING) { - reg = NR_REG_ONE_NIC; - } else { - reg = NR_REG_ALL_NIC; - } - D("deprecated API, old ringid 0x%x -> ringid %x reg %d", ringid, i, reg); - } - - if ((flags & NR_PTNETMAP_HOST) && ((reg != NR_REG_ALL_NIC && - reg != NR_REG_PIPE_MASTER && reg != NR_REG_PIPE_SLAVE) || - flags & (NR_RX_RINGS_ONLY|NR_TX_RINGS_ONLY))) { + if ((nr_flags & NR_PTNETMAP_HOST) && ((nr_mode != NR_REG_ALL_NIC) || + nr_flags & (NR_RX_RINGS_ONLY|NR_TX_RINGS_ONLY))) { D("Error: only NR_REG_ALL_NIC supported with netmap passthrough"); return EINVAL; } for_rx_tx(t) { - if (flags & excluded_direction[t]) { + if (nr_flags & excluded_direction[t]) { priv->np_qfirst[t] = priv->np_qlast[t] = 0; continue; } - switch (reg) { + switch (nr_mode) { case NR_REG_ALL_NIC: - case NR_REG_PIPE_MASTER: - case NR_REG_PIPE_SLAVE: priv->np_qfirst[t] = 0; priv->np_qlast[t] = nma_get_nrings(na, t); ND("ALL/PIPE: %s %d %d", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; case NR_REG_SW: case NR_REG_NIC_SW: if (!(na->na_flags & NAF_HOST_RINGS)) { D("host rings not supported"); return EINVAL; } - priv->np_qfirst[t] = (reg == NR_REG_SW ? + priv->np_qfirst[t] = (nr_mode == NR_REG_SW ? nma_get_nrings(na, t) : 0); priv->np_qlast[t] = nma_get_nrings(na, t) + 1; - ND("%s: %s %d %d", reg == NR_REG_SW ? "SW" : "NIC+SW", + ND("%s: %s %d %d", nr_mode == NR_REG_SW ? 
"SW" : "NIC+SW", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; case NR_REG_ONE_NIC: - if (i >= na->num_tx_rings && i >= na->num_rx_rings) { - D("invalid ring id %d", i); + if (nr_ringid >= na->num_tx_rings && + nr_ringid >= na->num_rx_rings) { + D("invalid ring id %d", nr_ringid); return EINVAL; } /* if not enough rings, use the first one */ - j = i; + j = nr_ringid; if (j >= nma_get_nrings(na, t)) j = 0; priv->np_qfirst[t] = j; priv->np_qlast[t] = j + 1; ND("ONE_NIC: %s %d %d", nm_txrx2str(t), priv->np_qfirst[t], priv->np_qlast[t]); break; default: - D("invalid regif type %d", reg); + D("invalid regif type %d", nr_mode); return EINVAL; } } - priv->np_flags = (flags & ~NR_REG_MASK) | reg; + priv->np_flags = nr_flags | nr_mode; // TODO /* Allow transparent forwarding mode in the host --> nic * direction only if all the TX hw rings have been opened. */ if (priv->np_qfirst[NR_TX] == 0 && priv->np_qlast[NR_TX] >= na->num_tx_rings) { priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN; } if (netmap_verbose) { D("%s: tx [%d,%d) rx [%d,%d) id %d", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], priv->np_qfirst[NR_RX], priv->np_qlast[NR_RX], - i); + nr_ringid); } return 0; } /* * Set the ring ID. For devices with a single queue, a request * for all rings is the same as a single ring. */ static int -netmap_set_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags) +netmap_set_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, + uint16_t nr_ringid, uint64_t nr_flags) { struct netmap_adapter *na = priv->np_na; int error; enum txrx t; - error = netmap_interp_ringid(priv, ringid, flags); + error = netmap_interp_ringid(priv, nr_mode, nr_ringid, nr_flags); if (error) { return error; } - priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1; + priv->np_txpoll = (nr_flags & NR_NO_TX_POLL) ? 0 : 1; /* optimization: count the users registered for more than * one ring, which are the ones sleeping on the global queue. * The default netmap_notify() callback will then * avoid signaling the global queue if nobody is using it */ for_rx_tx(t) { if (nm_si_user(priv, t)) na->si_users[t]++; } return 0; } static void netmap_unset_ringid(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; enum txrx t; for_rx_tx(t) { if (nm_si_user(priv, t)) na->si_users[t]--; priv->np_qfirst[t] = priv->np_qlast[t] = 0; } priv->np_flags = 0; priv->np_txpoll = 0; } /* Set the nr_pending_mode for the requested rings. * If requested, also try to get exclusive access to the rings, provided * the rings we want to bind are not exclusively owned by a previous bind. 
*/ static int netmap_krings_get(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; u_int i; struct netmap_kring *kring; int excl = (priv->np_flags & NR_EXCLUSIVE); enum txrx t; if (netmap_verbose) D("%s: grabbing tx [%d, %d) rx [%d, %d)", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], priv->np_qfirst[NR_RX], priv->np_qlast[NR_RX]); /* first round: check that all the requested rings * are neither alread exclusively owned, nor we * want exclusive ownership when they are already in use */ for_rx_tx(t) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { - kring = &NMR(na, t)[i]; + kring = NMR(na, t)[i]; if ((kring->nr_kflags & NKR_EXCLUSIVE) || (kring->users && excl)) { ND("ring %s busy", kring->name); return EBUSY; } } } /* second round: increment usage count (possibly marking them * as exclusive) and set the nr_pending_mode */ for_rx_tx(t) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { - kring = &NMR(na, t)[i]; + kring = NMR(na, t)[i]; kring->users++; if (excl) kring->nr_kflags |= NKR_EXCLUSIVE; kring->nr_pending_mode = NKR_NETMAP_ON; } } return 0; } /* Undo netmap_krings_get(). This is done by clearing the exclusive mode * if was asked on regif, and unset the nr_pending_mode if we are the * last users of the involved rings. */ static void netmap_krings_put(struct netmap_priv_d *priv) { struct netmap_adapter *na = priv->np_na; u_int i; struct netmap_kring *kring; int excl = (priv->np_flags & NR_EXCLUSIVE); enum txrx t; ND("%s: releasing tx [%d, %d) rx [%d, %d)", na->name, priv->np_qfirst[NR_TX], priv->np_qlast[NR_TX], priv->np_qfirst[NR_RX], priv->np_qlast[MR_RX]); - for_rx_tx(t) { for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { - kring = &NMR(na, t)[i]; + kring = NMR(na, t)[i]; if (excl) kring->nr_kflags &= ~NKR_EXCLUSIVE; kring->users--; if (kring->users == 0) kring->nr_pending_mode = NKR_NETMAP_OFF; } } } +static int +nm_priv_rx_enabled(struct netmap_priv_d *priv) +{ + return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]); +} + /* * possibly move the interface to netmap-mode. * If success it returns a pointer to netmap_if, otherwise NULL. * This must be called with NMG_LOCK held. * * The following na callbacks are called in the process: * * na->nm_config() [by netmap_update_config] * (get current number and size of rings) * * We have a generic one for linux (netmap_linux_config). * The bwrap has to override this, since it has to forward * the request to the wrapped adapter (netmap_bwrap_config). * * * na->nm_krings_create() * (create and init the krings array) * * One of the following: * * * netmap_hw_krings_create, (hw ports) * creates the standard layout for the krings * and adds the mbq (used for the host rings). * * * netmap_vp_krings_create (VALE ports) * add leases and scratchpads * * * netmap_pipe_krings_create (pipes) * create the krings and rings of both ends and * cross-link them * * * netmap_monitor_krings_create (monitors) * avoid allocating the mbq * * * netmap_bwrap_krings_create (bwraps) * create both the brap krings array, * the krings array of the wrapped adapter, and * (if needed) the fake array for the host adapter * * na->nm_register(, 1) * (put the adapter in netmap mode) * * This may be one of the following: * * * netmap_hw_reg (hw ports) * checks that the ifp is still there, then calls * the hardware specific callback; * * * netmap_vp_reg (VALE ports) * If the port is connected to a bridge, * set the NAF_NETMAP_ON flag under the * bridge write lock. 
* * * netmap_pipe_reg (pipes) * inform the other pipe end that it is no * longer responsible for the lifetime of this * pipe end * * * netmap_monitor_reg (monitors) * intercept the sync callbacks of the monitored * rings * * * netmap_bwrap_reg (bwraps) * cross-link the bwrap and hwna rings, * forward the request to the hwna, override * the hwna notify callback (to get the frames * coming from outside go through the bridge). * * */ int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na, - uint16_t ringid, uint32_t flags) + uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags) { struct netmap_if *nifp = NULL; int error; NMG_LOCK_ASSERT(); - /* ring configuration may have changed, fetch from the card */ - netmap_update_config(na); priv->np_na = na; /* store the reference */ - error = netmap_set_ringid(priv, ringid, flags); + error = netmap_set_ringid(priv, nr_mode, nr_ringid, nr_flags); if (error) goto err; error = netmap_mem_finalize(na->nm_mem, na); if (error) goto err; if (na->active_fds == 0) { + + /* cache the allocator info in the na */ + error = netmap_mem_get_lut(na->nm_mem, &na->na_lut); + if (error) + goto err_drop_mem; + ND("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal, + na->na_lut.objsize); + + /* ring configuration may have changed, fetch from the card */ + netmap_update_config(na); + /* * If this is the first registration of the adapter, * perform sanity checks and create the in-kernel view * of the netmap rings (the netmap krings). */ - if (na->ifp) { + if (na->ifp && nm_priv_rx_enabled(priv)) { /* This netmap adapter is attached to an ifnet. */ unsigned nbs = netmap_mem_bufsize(na->nm_mem); unsigned mtu = nm_os_ifnet_mtu(na->ifp); - /* The maximum amount of bytes that a single - * receive or transmit NIC descriptor can hold. */ - unsigned hw_max_slot_len = 4096; - if (mtu <= hw_max_slot_len) { + ND("mtu %d rx_buf_maxsize %d netmap_buf_size %d", + mtu, na->rx_buf_maxsize, nbs); + + if (mtu <= na->rx_buf_maxsize) { /* The MTU fits a single NIC slot. We only * Need to check that netmap buffers are * large enough to hold an MTU. NS_MOREFRAG * cannot be used in this case. */ if (nbs < mtu) { nm_prerr("error: netmap buf size (%u) " - "< device MTU (%u)", nbs, mtu); + "< device MTU (%u)\n", nbs, mtu); error = EINVAL; goto err_drop_mem; } } else { /* More NIC slots may be needed to receive * or transmit a single packet. Check that * the adapter supports NS_MOREFRAG and that * netmap buffers are large enough to hold * the maximum per-slot size. 
*/ if (!(na->na_flags & NAF_MOREFRAG)) { nm_prerr("error: large MTU (%d) needed " "but %s does not support " - "NS_MOREFRAG", mtu, + "NS_MOREFRAG\n", mtu, na->ifp->if_xname); error = EINVAL; goto err_drop_mem; - } else if (nbs < hw_max_slot_len) { + } else if (nbs < na->rx_buf_maxsize) { nm_prerr("error: using NS_MOREFRAG on " "%s requires netmap buf size " - ">= %u", na->ifp->if_xname, - hw_max_slot_len); + ">= %u\n", na->ifp->if_xname, + na->rx_buf_maxsize); error = EINVAL; goto err_drop_mem; } else { nm_prinf("info: netmap application on " "%s needs to support " "NS_MOREFRAG " - "(MTU=%u,netmap_buf_size=%u)", + "(MTU=%u,netmap_buf_size=%u)\n", na->ifp->if_xname, mtu, nbs); } } } /* * Depending on the adapter, this may also create * the netmap rings themselves */ error = na->nm_krings_create(na); if (error) - goto err_drop_mem; + goto err_put_lut; } /* now the krings must exist and we can check whether some * previous bind has exclusive ownership on them, and set * nr_pending_mode */ error = netmap_krings_get(priv); if (error) goto err_del_krings; /* create all needed missing netmap rings */ error = netmap_mem_rings_create(na); if (error) goto err_rel_excl; /* in all cases, create a new netmap if */ nifp = netmap_mem_if_new(na, priv); if (nifp == NULL) { error = ENOMEM; goto err_del_rings; } - if (na->active_fds == 0) { - /* cache the allocator info in the na */ - error = netmap_mem_get_lut(na->nm_mem, &na->na_lut); - if (error) - goto err_del_if; - ND("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal, - na->na_lut.objsize); - } - if (nm_kring_pending(priv)) { /* Some kring is switching mode, tell the adapter to * react on this. */ error = na->nm_register(na, 1); if (error) - goto err_put_lut; + goto err_del_if; } /* Commit the reference. */ na->active_fds++; /* * advertise that the interface is ready by setting np_nifp. * The barrier is needed because readers (poll, *SYNC and mmap) * check for priv->np_nifp != NULL without locking */ mb(); /* make sure previous writes are visible to all CPUs */ priv->np_nifp = nifp; return 0; -err_put_lut: - if (na->active_fds == 0) - memset(&na->na_lut, 0, sizeof(na->na_lut)); err_del_if: netmap_mem_if_delete(na, nifp); err_del_rings: netmap_mem_rings_delete(na); err_rel_excl: netmap_krings_put(priv); err_del_krings: if (na->active_fds == 0) na->nm_krings_delete(na); +err_put_lut: + if (na->active_fds == 0) + memset(&na->na_lut, 0, sizeof(na->na_lut)); err_drop_mem: netmap_mem_drop(na); err: priv->np_na = NULL; return error; } /* * update kring and ring at the end of rxsync/txsync. */ static inline void nm_sync_finalize(struct netmap_kring *kring) { /* * Update ring tail to what the kernel knows * After txsync: head/rhead/hwcur might be behind cur/rcur * if no carrier. */ kring->ring->tail = kring->rtail = kring->nr_hwtail; ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d", kring->name, kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail); } /* set ring timestamp */ static inline void ring_timestamp_set(struct netmap_ring *ring) { if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) { microtime(&ring->ts); } } +static int nmreq_copyin(struct nmreq_header *, int); +static int nmreq_copyout(struct nmreq_header *, int); +static int nmreq_checkoptions(struct nmreq_header *); /* * ioctl(2) support for the "netmap" device. 
* * Following a list of accepted commands: - * - NIOCGINFO + * - NIOCCTRL device control API + * - NIOCTXSYNC sync TX rings + * - NIOCRXSYNC sync RX rings * - SIOCGIFADDR just for convenience - * - NIOCREGIF - * - NIOCTXSYNC - * - NIOCRXSYNC + * - NIOCGINFO deprecated (legacy API) + * - NIOCREGIF deprecated (legacy API) * * Return 0 on success, errno otherwise. */ int -netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread *td) +netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, + struct thread *td, int nr_body_is_user) { struct mbq q; /* packets from RX hw queues to host stack */ - struct nmreq *nmr = (struct nmreq *) data; struct netmap_adapter *na = NULL; struct netmap_mem_d *nmd = NULL; struct ifnet *ifp = NULL; int error = 0; u_int i, qfirst, qlast; struct netmap_if *nifp; - struct netmap_kring *krings; + struct netmap_kring **krings; int sync_flags; enum txrx t; - if (cmd == NIOCGINFO || cmd == NIOCREGIF) { - /* truncate name */ - nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0'; - if (nmr->nr_version != NETMAP_API) { - D("API mismatch for %s got %d need %d", - nmr->nr_name, - nmr->nr_version, NETMAP_API); - nmr->nr_version = NETMAP_API; + switch (cmd) { + case NIOCCTRL: { + struct nmreq_header *hdr = (struct nmreq_header *)data; + + if (hdr->nr_version != NETMAP_API) { + D("API mismatch for reqtype %d: got %d need %d", + hdr->nr_version, + hdr->nr_version, NETMAP_API); + hdr->nr_version = NETMAP_API; } - if (nmr->nr_version < NETMAP_MIN_API || - nmr->nr_version > NETMAP_MAX_API) { + if (hdr->nr_version < NETMAP_MIN_API || + hdr->nr_version > NETMAP_MAX_API) { return EINVAL; } - } - switch (cmd) { - case NIOCGINFO: /* return capabilities etc */ - if (nmr->nr_cmd == NETMAP_BDG_LIST) { - error = netmap_bdg_ctl(nmr, NULL); - break; + /* Make a kernel-space copy of the user-space nr_body. + * For convenince, the nr_body pointer and the pointers + * in the options list will be replaced with their + * kernel-space counterparts. The original pointers are + * saved internally and later restored by nmreq_copyout + */ + error = nmreq_copyin(hdr, nr_body_is_user); + if (error) { + return error; } - NMG_LOCK(); - do { - /* memsize is always valid */ - u_int memflags; - uint64_t memsize; + /* Sanitize hdr->nr_name. */ + hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0'; - if (nmr->nr_name[0] != '\0') { + switch (hdr->nr_reqtype) { + case NETMAP_REQ_REGISTER: { + struct nmreq_register *req = + (struct nmreq_register *)hdr->nr_body; + /* Protect access to priv from concurrent requests. 
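Seen from userspace, the new control path is a single NIOCCTRL ioctl carrying a struct nmreq_header whose nr_body points to the request-specific structure. A minimal NETMAP_REQ_REGISTER call might look as follows, assuming the structure layouts and constants that this API exports through <net/netmap.h> (error paths trimmed to the essentials):

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <net/netmap.h>

/* Bind all hardware rings of 'ifname'; returns the netmap fd or -1. */
static int
netmap_open_port(const char *ifname)
{
	struct nmreq_header hdr;
	struct nmreq_register reg;
	int fd;

	fd = open("/dev/netmap", O_RDWR);
	if (fd < 0)
		return (-1);

	memset(&hdr, 0, sizeof(hdr));
	memset(&reg, 0, sizeof(reg));
	hdr.nr_version = NETMAP_API;		/* version-checked by the kernel */
	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
	strncpy(hdr.nr_name, ifname, sizeof(hdr.nr_name) - 1);
	hdr.nr_body = (uintptr_t)&reg;		/* copied in by nmreq_copyin() */
	reg.nr_mode = NR_REG_ALL_NIC;		/* bind every hardware ring */

	if (ioctl(fd, NIOCCTRL, &hdr) < 0) {
		close(fd);
		return (-1);
	}
	/* reg.nr_memsize and reg.nr_offset now describe the shared region
	 * to mmap(); ring geometry is in reg.nr_*_rings / reg.nr_*_slots. */
	return (fd);
}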
*/ + NMG_LOCK(); + do { + u_int memflags; +#ifdef WITH_EXTMEM + struct nmreq_option *opt; +#endif /* WITH_EXTMEM */ - /* get a refcount */ - error = netmap_get_na(nmr, &na, &ifp, NULL, 1 /* create */); + if (priv->np_nifp != NULL) { /* thread already registered */ + error = EBUSY; + break; + } + +#ifdef WITH_EXTMEM + opt = nmreq_findoption((struct nmreq_option *)hdr->nr_options, + NETMAP_REQ_OPT_EXTMEM); + if (opt != NULL) { + struct nmreq_opt_extmem *e = + (struct nmreq_opt_extmem *)opt; + + error = nmreq_checkduplicate(opt); + if (error) { + opt->nro_status = error; + break; + } + nmd = netmap_mem_ext_create(e->nro_usrptr, + &e->nro_info, &error); + opt->nro_status = error; + if (nmd == NULL) + break; + } +#endif /* WITH_EXTMEM */ + + if (nmd == NULL && req->nr_mem_id) { + /* find the allocator and get a reference */ + nmd = netmap_mem_find(req->nr_mem_id); + if (nmd == NULL) { + error = EINVAL; + break; + } + } + /* find the interface and a reference */ + error = netmap_get_na(hdr, &na, &ifp, nmd, + 1 /* create */); /* keep reference */ + if (error) + break; + if (NETMAP_OWNED_BY_KERN(na)) { + error = EBUSY; + break; + } + + if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) { + error = EIO; + break; + } + + error = netmap_do_regif(priv, na, req->nr_mode, + req->nr_ringid, req->nr_flags); + if (error) { /* reg. failed, release priv and ref */ + break; + } + nifp = priv->np_nifp; + priv->np_td = td; /* for debugging purposes */ + + /* return the offset of the netmap_if object */ + req->nr_rx_rings = na->num_rx_rings; + req->nr_tx_rings = na->num_tx_rings; + req->nr_rx_slots = na->num_rx_desc; + req->nr_tx_slots = na->num_tx_desc; + error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags, + &req->nr_mem_id); if (error) { - na = NULL; - ifp = NULL; + netmap_do_unregif(priv); break; } - nmd = na->nm_mem; /* get memory allocator */ - } else { - nmd = netmap_mem_find(nmr->nr_arg2 ? nmr->nr_arg2 : 1); - if (nmd == NULL) { - error = EINVAL; + if (memflags & NETMAP_MEM_PRIVATE) { + *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM; + } + for_rx_tx(t) { + priv->np_si[t] = nm_si_user(priv, t) ? 
+ &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si; + } + + if (req->nr_extra_bufs) { + if (netmap_verbose) + D("requested %d extra buffers", + req->nr_extra_bufs); + req->nr_extra_bufs = netmap_extra_alloc(na, + &nifp->ni_bufs_head, req->nr_extra_bufs); + if (netmap_verbose) + D("got %d extra buffers", req->nr_extra_bufs); + } + req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp); + + error = nmreq_checkoptions(hdr); + if (error) { + netmap_do_unregif(priv); break; } + + /* store ifp reference so that priv destructor may release it */ + priv->np_ifp = ifp; + } while (0); + if (error) { + netmap_unget_na(na, ifp); } + /* release the reference from netmap_mem_find() or + * netmap_mem_ext_create() + */ + if (nmd) + netmap_mem_put(nmd); + NMG_UNLOCK(); + break; + } - error = netmap_mem_get_info(nmd, &memsize, &memflags, - &nmr->nr_arg2); - if (error) - break; - nmr->nr_memsize = (uint32_t)memsize; - if (na == NULL) /* only memory info */ - break; - nmr->nr_offset = 0; - nmr->nr_rx_slots = nmr->nr_tx_slots = 0; - netmap_update_config(na); - nmr->nr_rx_rings = na->num_rx_rings; - nmr->nr_tx_rings = na->num_tx_rings; - nmr->nr_rx_slots = na->num_rx_desc; - nmr->nr_tx_slots = na->num_tx_desc; - } while (0); - netmap_unget_na(na, ifp); - NMG_UNLOCK(); - break; + case NETMAP_REQ_PORT_INFO_GET: { + struct nmreq_port_info_get *req = + (struct nmreq_port_info_get *)hdr->nr_body; - case NIOCREGIF: - /* - * If nmr->nr_cmd is not zero, this NIOCREGIF is not really - * a regif operation, but a different one, specified by the - * value of nmr->nr_cmd. - */ - i = nmr->nr_cmd; - if (i == NETMAP_BDG_ATTACH || i == NETMAP_BDG_DETACH - || i == NETMAP_BDG_VNET_HDR - || i == NETMAP_BDG_NEWIF - || i == NETMAP_BDG_DELIF - || i == NETMAP_BDG_POLLING_ON - || i == NETMAP_BDG_POLLING_OFF) { - /* possibly attach/detach NIC and VALE switch */ - error = netmap_bdg_ctl(nmr, NULL); + NMG_LOCK(); + do { + u_int memflags; + + if (hdr->nr_name[0] != '\0') { + /* Build a nmreq_register out of the nmreq_port_info_get, + * so that we can call netmap_get_na(). */ + struct nmreq_register regreq; + bzero(®req, sizeof(regreq)); + regreq.nr_tx_slots = req->nr_tx_slots; + regreq.nr_rx_slots = req->nr_rx_slots; + regreq.nr_tx_rings = req->nr_tx_rings; + regreq.nr_rx_rings = req->nr_rx_rings; + regreq.nr_mem_id = req->nr_mem_id; + + /* get a refcount */ + hdr->nr_reqtype = NETMAP_REQ_REGISTER; + hdr->nr_body = (uint64_t)®req; + error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */); + hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */ + hdr->nr_body = (uint64_t)req; /* reset nr_body */ + if (error) { + na = NULL; + ifp = NULL; + break; + } + nmd = na->nm_mem; /* get memory allocator */ + } else { + nmd = netmap_mem_find(req->nr_mem_id ? 
req->nr_mem_id : 1); + if (nmd == NULL) { + error = EINVAL; + break; + } + } + + error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags, + &req->nr_mem_id); + if (error) + break; + if (na == NULL) /* only memory info */ + break; + req->nr_offset = 0; + req->nr_rx_slots = req->nr_tx_slots = 0; + netmap_update_config(na); + req->nr_rx_rings = na->num_rx_rings; + req->nr_tx_rings = na->num_tx_rings; + req->nr_rx_slots = na->num_rx_desc; + req->nr_tx_slots = na->num_tx_desc; + } while (0); + netmap_unget_na(na, ifp); + NMG_UNLOCK(); break; - } else if (i == NETMAP_PT_HOST_CREATE || i == NETMAP_PT_HOST_DELETE) { - /* forward the command to the ptnetmap subsystem */ - error = ptnetmap_ctl(nmr, priv->np_na); + } +#ifdef WITH_VALE + case NETMAP_REQ_VALE_ATTACH: { + error = nm_bdg_ctl_attach(hdr, NULL /* userspace request */); break; - } else if (i == NETMAP_VNET_HDR_GET) { - /* get vnet-header length for this netmap port */ + } + + case NETMAP_REQ_VALE_DETACH: { + error = nm_bdg_ctl_detach(hdr, NULL /* userspace request */); + break; + } + + case NETMAP_REQ_VALE_LIST: { + error = netmap_bdg_list(hdr); + break; + } + + case NETMAP_REQ_PORT_HDR_SET: { + struct nmreq_port_hdr *req = + (struct nmreq_port_hdr *)hdr->nr_body; + /* Build a nmreq_register out of the nmreq_port_hdr, + * so that we can call netmap_get_bdg_na(). */ + struct nmreq_register regreq; + bzero(®req, sizeof(regreq)); + /* For now we only support virtio-net headers, and only for + * VALE ports, but this may change in future. Valid lengths + * for the virtio-net header are 0 (no header), 10 and 12. */ + if (req->nr_hdr_len != 0 && + req->nr_hdr_len != sizeof(struct nm_vnet_hdr) && + req->nr_hdr_len != 12) { + error = EINVAL; + break; + } + NMG_LOCK(); + hdr->nr_reqtype = NETMAP_REQ_REGISTER; + hdr->nr_body = (uint64_t)®req; + error = netmap_get_bdg_na(hdr, &na, NULL, 0); + hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET; + hdr->nr_body = (uint64_t)req; + if (na && !error) { + struct netmap_vp_adapter *vpna = + (struct netmap_vp_adapter *)na; + na->virt_hdr_len = req->nr_hdr_len; + if (na->virt_hdr_len) { + vpna->mfs = NETMAP_BUF_SIZE(na); + } + D("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na); + netmap_adapter_put(na); + } else if (!na) { + error = ENXIO; + } + NMG_UNLOCK(); + break; + } + + case NETMAP_REQ_PORT_HDR_GET: { + /* Get vnet-header length for this netmap port */ + struct nmreq_port_hdr *req = + (struct nmreq_port_hdr *)hdr->nr_body; + /* Build a nmreq_register out of the nmreq_port_hdr, + * so that we can call netmap_get_bdg_na(). 
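NETMAP_REQ_PORT_INFO_GET, handled above, answers the same geometry and memory questions that the deprecated NIOCGINFO used to answer, without binding the descriptor to the port. A userspace query sketch, under the same <net/netmap.h> assumptions as the register example:

#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <net/netmap.h>

/* Print ring geometry and memory size for 'ifname' without registering;
 * 'fd' is an already open /dev/netmap descriptor. */
static int
netmap_query_port(int fd, const char *ifname)
{
	struct nmreq_header hdr;
	struct nmreq_port_info_get info;

	memset(&hdr, 0, sizeof(hdr));
	memset(&info, 0, sizeof(info));
	hdr.nr_version = NETMAP_API;
	hdr.nr_reqtype = NETMAP_REQ_PORT_INFO_GET;
	strncpy(hdr.nr_name, ifname, sizeof(hdr.nr_name) - 1);
	hdr.nr_body = (uintptr_t)&info;

	if (ioctl(fd, NIOCCTRL, &hdr) < 0)
		return (-1);

	printf("%s: %u/%u tx/rx rings, %u/%u tx/rx slots, memsize %ju\n",
	    ifname, info.nr_tx_rings, info.nr_rx_rings,
	    info.nr_tx_slots, info.nr_rx_slots,
	    (uintmax_t)info.nr_memsize);
	return (0);
}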
*/ + struct nmreq_register regreq; struct ifnet *ifp; + bzero(®req, sizeof(regreq)); NMG_LOCK(); - error = netmap_get_na(nmr, &na, &ifp, NULL, 0); + hdr->nr_reqtype = NETMAP_REQ_REGISTER; + hdr->nr_body = (uint64_t)®req; + error = netmap_get_na(hdr, &na, &ifp, NULL, 0); + hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET; + hdr->nr_body = (uint64_t)req; if (na && !error) { - nmr->nr_arg1 = na->virt_hdr_len; + req->nr_hdr_len = na->virt_hdr_len; } netmap_unget_na(na, ifp); NMG_UNLOCK(); break; - } else if (i == NETMAP_POOLS_INFO_GET) { - /* get information from the memory allocator */ + } + + case NETMAP_REQ_VALE_NEWIF: { + error = nm_vi_create(hdr); + break; + } + + case NETMAP_REQ_VALE_DELIF: { + error = nm_vi_destroy(hdr->nr_name); + break; + } + + case NETMAP_REQ_VALE_POLLING_ENABLE: + case NETMAP_REQ_VALE_POLLING_DISABLE: { + error = nm_bdg_polling(hdr); + break; + } +#endif /* WITH_VALE */ + case NETMAP_REQ_POOLS_INFO_GET: { + struct nmreq_pools_info *req = + (struct nmreq_pools_info *)hdr->nr_body; + /* Get information from the memory allocator. This + * netmap device must already be bound to a port. + * Note that hdr->nr_name is ignored. */ NMG_LOCK(); if (priv->np_na && priv->np_na->nm_mem) { struct netmap_mem_d *nmd = priv->np_na->nm_mem; - error = netmap_mem_pools_info_get(nmr, nmd); + error = netmap_mem_pools_info_get(req, nmd); } else { error = EINVAL; } NMG_UNLOCK(); break; - } else if (i == NETMAP_POOLS_CREATE) { - nmd = netmap_mem_ext_create(nmr, &error); - if (nmd == NULL) - break; - /* reset the fields used by POOLS_CREATE to - * avoid confusing the rest of the code - */ - nmr->nr_cmd = 0; - nmr->nr_arg1 = 0; - nmr->nr_arg2 = 0; - nmr->nr_arg3 = 0; - } else if (i != 0) { - D("nr_cmd must be 0 not %d", i); + } + + default: { error = EINVAL; break; } - - /* protect access to priv from concurrent NIOCREGIF */ - NMG_LOCK(); - do { - u_int memflags; - uint64_t memsize; - - if (priv->np_nifp != NULL) { /* thread already registered */ - error = EBUSY; - break; - } - - if (nmr->nr_arg2) { - /* find the allocator and get a reference */ - nmd = netmap_mem_find(nmr->nr_arg2); - if (nmd == NULL) { - error = EINVAL; - break; - } - } - /* find the interface and a reference */ - error = netmap_get_na(nmr, &na, &ifp, nmd, - 1 /* create */); /* keep reference */ - if (error) - break; - if (NETMAP_OWNED_BY_KERN(na)) { - error = EBUSY; - break; - } - - if (na->virt_hdr_len && !(nmr->nr_flags & NR_ACCEPT_VNET_HDR)) { - error = EIO; - break; - } - - error = netmap_do_regif(priv, na, nmr->nr_ringid, nmr->nr_flags); - if (error) { /* reg. failed, release priv and ref */ - break; - } - nifp = priv->np_nifp; - priv->np_td = td; // XXX kqueue, debugging only - - /* return the offset of the netmap_if object */ - nmr->nr_rx_rings = na->num_rx_rings; - nmr->nr_tx_rings = na->num_tx_rings; - nmr->nr_rx_slots = na->num_rx_desc; - nmr->nr_tx_slots = na->num_tx_desc; - error = netmap_mem_get_info(na->nm_mem, &memsize, &memflags, - &nmr->nr_arg2); - if (error) { - netmap_do_unregif(priv); - break; - } - nmr->nr_memsize = (uint32_t)memsize; - if (memflags & NETMAP_MEM_PRIVATE) { - *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM; - } - for_rx_tx(t) { - priv->np_si[t] = nm_si_user(priv, t) ? 
- &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]].si; - } - - if (nmr->nr_arg3) { - if (netmap_verbose) - D("requested %d extra buffers", nmr->nr_arg3); - nmr->nr_arg3 = netmap_extra_alloc(na, - &nifp->ni_bufs_head, nmr->nr_arg3); - if (netmap_verbose) - D("got %d extra buffers", nmr->nr_arg3); - } - nmr->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp); - - /* store ifp reference so that priv destructor may release it */ - priv->np_ifp = ifp; - } while (0); - if (error) { - netmap_unget_na(na, ifp); } - /* release the reference from netmap_mem_find() or - * netmap_mem_ext_create() - */ - if (nmd) - netmap_mem_put(nmd); - NMG_UNLOCK(); + /* Write back request body to userspace and reset the + * user-space pointer. */ + error = nmreq_copyout(hdr, error); break; + } case NIOCTXSYNC: - case NIOCRXSYNC: + case NIOCRXSYNC: { nifp = priv->np_nifp; if (nifp == NULL) { error = ENXIO; break; } mb(); /* make sure following reads are not from cache */ na = priv->np_na; /* we have a reference */ if (na == NULL) { D("Internal error: nifp != NULL && na == NULL"); error = ENXIO; break; } mbq_init(&q); t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX); krings = NMR(na, t); qfirst = priv->np_qfirst[t]; qlast = priv->np_qlast[t]; sync_flags = priv->np_sync_flags; for (i = qfirst; i < qlast; i++) { - struct netmap_kring *kring = krings + i; + struct netmap_kring *kring = krings[i]; struct netmap_ring *ring = kring->ring; if (unlikely(nm_kr_tryget(kring, 1, &error))) { error = (error ? EIO : 0); continue; } if (cmd == NIOCTXSYNC) { if (netmap_verbose & NM_VERB_TXSYNC) D("pre txsync ring %d cur %d hwcur %d", i, ring->cur, kring->nr_hwcur); if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) { nm_sync_finalize(kring); } if (netmap_verbose & NM_VERB_TXSYNC) D("post txsync ring %d cur %d hwcur %d", i, ring->cur, kring->nr_hwcur); } else { if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); } if (nm_may_forward_up(kring)) { /* transparent forwarding, see netmap_poll() */ netmap_grab_packets(kring, &q, netmap_fwd); } if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) { nm_sync_finalize(kring); } ring_timestamp_set(ring); } nm_kr_put(kring); } if (mbq_peek(&q)) { netmap_send_up(na->ifp, &q); } break; + } -#ifdef WITH_VALE - case NIOCCONFIG: - error = netmap_bdg_config(nmr); + default: { + return netmap_ioctl_legacy(priv, cmd, data, td); break; -#endif -#ifdef __FreeBSD__ - case FIONBIO: - case FIOASYNC: - ND("FIONBIO/FIOASYNC are no-ops"); - break; + } + } - case BIOCIMMEDIATE: - case BIOCGHDRCMPLT: - case BIOCSHDRCMPLT: - case BIOCSSEESENT: - D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT"); + return (error); +} + +size_t +nmreq_size_by_type(uint16_t nr_reqtype) +{ + switch (nr_reqtype) { + case NETMAP_REQ_REGISTER: + return sizeof(struct nmreq_register); + case NETMAP_REQ_PORT_INFO_GET: + return sizeof(struct nmreq_port_info_get); + case NETMAP_REQ_VALE_ATTACH: + return sizeof(struct nmreq_vale_attach); + case NETMAP_REQ_VALE_DETACH: + return sizeof(struct nmreq_vale_detach); + case NETMAP_REQ_VALE_LIST: + return sizeof(struct nmreq_vale_list); + case NETMAP_REQ_PORT_HDR_SET: + case NETMAP_REQ_PORT_HDR_GET: + return sizeof(struct nmreq_port_hdr); + case NETMAP_REQ_VALE_NEWIF: + return sizeof(struct nmreq_vale_newif); + case NETMAP_REQ_VALE_DELIF: + return 0; + case NETMAP_REQ_VALE_POLLING_ENABLE: + case NETMAP_REQ_VALE_POLLING_DISABLE: + return 
sizeof(struct nmreq_vale_polling); + case NETMAP_REQ_POOLS_INFO_GET: + return sizeof(struct nmreq_pools_info); + } + return 0; +} + +static size_t +nmreq_opt_size_by_type(uint16_t nro_reqtype) +{ + size_t rv = sizeof(struct nmreq_option); +#ifdef NETMAP_REQ_OPT_DEBUG + if (nro_reqtype & NETMAP_REQ_OPT_DEBUG) + return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG); +#endif /* NETMAP_REQ_OPT_DEBUG */ + switch (nro_reqtype) { +#ifdef WITH_EXTMEM + case NETMAP_REQ_OPT_EXTMEM: + rv = sizeof(struct nmreq_opt_extmem); break; +#endif /* WITH_EXTMEM */ + } + /* subtract the common header */ + return rv - sizeof(struct nmreq_option); +} - default: /* allow device-specific ioctls */ - { - struct ifnet *ifp = ifunit_ref(nmr->nr_name); - if (ifp == NULL) { - error = ENXIO; - } else { - struct socket so; +int +nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user) +{ + size_t rqsz, optsz, bufsz; + int error; + char *ker = NULL, *p; + struct nmreq_option **next, *src; + struct nmreq_option buf; + uint64_t *ptrs; - bzero(&so, sizeof(so)); - so.so_vnet = ifp->if_vnet; - // so->so_proto not null. - error = ifioctl(&so, cmd, data, td); - if_rele(ifp); + if (hdr->nr_reserved) + return EINVAL; + + if (!nr_body_is_user) + return 0; + + hdr->nr_reserved = nr_body_is_user; + + /* compute the total size of the buffer */ + rqsz = nmreq_size_by_type(hdr->nr_reqtype); + if (rqsz > NETMAP_REQ_MAXSIZE) { + error = EMSGSIZE; + goto out_err; + } + if ((rqsz && hdr->nr_body == (uint64_t)NULL) || + (!rqsz && hdr->nr_body != (uint64_t)NULL)) { + /* Request body expected, but not found; or + * request body found but unexpected. */ + error = EINVAL; + goto out_err; + } + + bufsz = 2 * sizeof(void *) + rqsz; + optsz = 0; + for (src = (struct nmreq_option *)hdr->nr_options; src; + src = (struct nmreq_option *)buf.nro_next) + { + error = copyin(src, &buf, sizeof(*src)); + if (error) + goto out_err; + optsz += sizeof(*src); + optsz += nmreq_opt_size_by_type(buf.nro_reqtype); + if (rqsz + optsz > NETMAP_REQ_MAXSIZE) { + error = EMSGSIZE; + goto out_err; } - break; - } + bufsz += optsz + sizeof(void *); + } -#else /* linux */ - default: - error = EOPNOTSUPP; -#endif /* linux */ + ker = nm_os_malloc(bufsz); + if (ker == NULL) { + error = ENOMEM; + goto out_err; } + p = ker; - return (error); + /* make a copy of the user pointers */ + ptrs = (uint64_t*)p; + *ptrs++ = hdr->nr_body; + *ptrs++ = hdr->nr_options; + p = (char *)ptrs; + + /* copy the body */ + error = copyin((void *)hdr->nr_body, p, rqsz); + if (error) + goto out_restore; + /* overwrite the user pointer with the in-kernel one */ + hdr->nr_body = (uint64_t)p; + p += rqsz; + + /* copy the options */ + next = (struct nmreq_option **)&hdr->nr_options; + src = *next; + while (src) { + struct nmreq_option *opt; + + /* copy the option header */ + ptrs = (uint64_t *)p; + opt = (struct nmreq_option *)(ptrs + 1); + error = copyin(src, opt, sizeof(*src)); + if (error) + goto out_restore; + /* make a copy of the user next pointer */ + *ptrs = opt->nro_next; + /* overwrite the user pointer with the in-kernel one */ + *next = opt; + + /* initialize the option as not supported. + * Recognized options will update this field. 
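nmreq_copyin() walks the user-supplied option list through nro_next, copies each option into kernel space, and pre-marks it EOPNOTSUPP so that options nobody handled are reported back by nmreq_checkoptions(). On the application side an option is simply linked into the header before the ioctl. A sketch for the external-memory option handled in the REGISTER case above; it assumes the nmreq_opt_extmem layout (an embedded nro_opt header, nro_usrptr, and an nr_memsize field inside nro_info) matches the <net/netmap.h> definitions:

#include <string.h>
#include <stdint.h>
#include <net/netmap.h>

/* Ask the kernel to build the netmap allocator inside a memory region
 * supplied by the application (requires a kernel with WITH_EXTMEM). */
static void
attach_extmem_option(struct nmreq_header *hdr, struct nmreq_opt_extmem *ext,
    void *region, size_t len)
{
	memset(ext, 0, sizeof(*ext));
	ext->nro_opt.nro_reqtype = NETMAP_REQ_OPT_EXTMEM;
	ext->nro_usrptr = (uintptr_t)region;
	ext->nro_info.nr_memsize = len;

	/* Link in front of any options already present. */
	ext->nro_opt.nro_next = hdr->nr_options;
	hdr->nr_options = (uintptr_t)ext;
	/* After NIOCCTRL, ext->nro_opt.nro_status is 0 if the option was
	 * accepted, or the errno reported by the handler. */
}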
+ */ + opt->nro_status = EOPNOTSUPP; + + p = (char *)(opt + 1); + + /* copy the option body */ + optsz = nmreq_opt_size_by_type(opt->nro_reqtype); + if (optsz) { + /* the option body follows the option header */ + error = copyin(src + 1, p, optsz); + if (error) + goto out_restore; + p += optsz; + } + + /* move to next option */ + next = (struct nmreq_option **)&opt->nro_next; + src = *next; + } + return 0; + +out_restore: + ptrs = (uint64_t *)ker; + hdr->nr_body = *ptrs++; + hdr->nr_options = *ptrs++; + hdr->nr_reserved = 0; + nm_os_free(ker); +out_err: + return error; } +static int +nmreq_copyout(struct nmreq_header *hdr, int rerror) +{ + struct nmreq_option *src, *dst; + void *ker = (void *)hdr->nr_body, *bufstart; + uint64_t *ptrs; + size_t bodysz; + int error; + if (!hdr->nr_reserved) + return rerror; + + /* restore the user pointers in the header */ + ptrs = (uint64_t *)ker - 2; + bufstart = ptrs; + hdr->nr_body = *ptrs++; + src = (struct nmreq_option *)hdr->nr_options; + hdr->nr_options = *ptrs; + + if (!rerror) { + /* copy the body */ + bodysz = nmreq_size_by_type(hdr->nr_reqtype); + error = copyout(ker, (void *)hdr->nr_body, bodysz); + if (error) { + rerror = error; + goto out; + } + } + + /* copy the options */ + dst = (struct nmreq_option *)hdr->nr_options; + while (src) { + size_t optsz; + uint64_t next; + + /* restore the user pointer */ + next = src->nro_next; + ptrs = (uint64_t *)src - 1; + src->nro_next = *ptrs; + + /* always copy the option header */ + error = copyout(src, dst, sizeof(*src)); + if (error) { + rerror = error; + goto out; + } + + /* copy the option body only if there was no error */ + if (!rerror && !src->nro_status) { + optsz = nmreq_opt_size_by_type(src->nro_reqtype); + if (optsz) { + error = copyout(src + 1, dst + 1, optsz); + if (error) { + rerror = error; + goto out; + } + } + } + src = (struct nmreq_option *)next; + dst = (struct nmreq_option *)*ptrs; + } + + +out: + hdr->nr_reserved = 0; + nm_os_free(bufstart); + return rerror; +} + +struct nmreq_option * +nmreq_findoption(struct nmreq_option *opt, uint16_t reqtype) +{ + for ( ; opt; opt = (struct nmreq_option *)opt->nro_next) + if (opt->nro_reqtype == reqtype) + return opt; + return NULL; +} + +int +nmreq_checkduplicate(struct nmreq_option *opt) { + uint16_t type = opt->nro_reqtype; + int dup = 0; + + while ((opt = nmreq_findoption((struct nmreq_option *)opt->nro_next, + type))) { + dup++; + opt->nro_status = EINVAL; + } + return (dup ? EINVAL : 0); +} + +static int +nmreq_checkoptions(struct nmreq_header *hdr) +{ + struct nmreq_option *opt; + /* return error if there is still any option + * marked as not supported + */ + + for (opt = (struct nmreq_option *)hdr->nr_options; opt; + opt = (struct nmreq_option *)opt->nro_next) + if (opt->nro_status == EOPNOTSUPP) + return EOPNOTSUPP; + + return 0; +} + /* * select(2) and poll(2) handlers for the "netmap" device. * * Can be called for one or more queues. * Return true the event mask corresponding to ready events. * If there are no ready events, do a selrecord on either individual * selinfo or on the global one. * Device-dependent parts (locking and sync of tx/rx rings) * are done through callbacks. * * On linux, arguments are really pwait, the poll table, and 'td' is struct file * * The first one is remapped to pwait as selrecord() uses the name as an * hidden argument. 
*/ int netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr) { struct netmap_adapter *na; struct netmap_kring *kring; struct netmap_ring *ring; u_int i, check_all_tx, check_all_rx, want[NR_TXRX], revents = 0; #define want_tx want[NR_TX] #define want_rx want[NR_RX] struct mbq q; /* packets from RX hw queues to host stack */ /* * In order to avoid nested locks, we need to "double check" * txsync and rxsync if we decide to do a selrecord(). * retry_tx (and retry_rx, later) prevent looping forever. */ int retry_tx = 1, retry_rx = 1; /* Transparent mode: send_down is 1 if we have found some * packets to forward (host RX ring --> NIC) during the rx * scan and we have not sent them down to the NIC yet. * Transparent mode requires to bind all rings to a single * file descriptor. */ int send_down = 0; int sync_flags = priv->np_sync_flags; mbq_init(&q); if (priv->np_nifp == NULL) { D("No if registered"); return POLLERR; } mb(); /* make sure following reads are not from cache */ na = priv->np_na; if (!nm_netmap_on(na)) return POLLERR; if (netmap_verbose & 0x8000) D("device %s events 0x%x", na->name, events); want_tx = events & (POLLOUT | POLLWRNORM); want_rx = events & (POLLIN | POLLRDNORM); /* * check_all_{tx|rx} are set if the card has more than one queue AND * the file descriptor is bound to all of them. If so, we sleep on * the "global" selinfo, otherwise we sleep on individual selinfo * (FreeBSD only allows two selinfo's per file descriptor). * The interrupt routine in the driver wake one or the other * (or both) depending on which clients are active. * * rxsync() is only called if we run out of buffers on a POLLIN. * txsync() is called if we run out of buffers on POLLOUT, or * there are pending packets to send. The latter can be disabled * passing NETMAP_NO_TX_POLL in the NIOCREG call. */ check_all_tx = nm_si_user(priv, NR_TX); check_all_rx = nm_si_user(priv, NR_RX); #ifdef __FreeBSD__ /* * We start with a lock free round which is cheap if we have * slots available. If this fails, then lock and call the sync * routines. We can't do this on Linux, as the contract says * that we must call nm_os_selrecord() unconditionally. */ if (want_tx) { enum txrx t = NR_TX; for (i = priv->np_qfirst[t]; want[t] && i < priv->np_qlast[t]; i++) { - kring = &NMR(na, t)[i]; + kring = NMR(na, t)[i]; /* XXX compare ring->cur and kring->tail */ if (!nm_ring_empty(kring->ring)) { revents |= want[t]; want[t] = 0; /* also breaks the loop */ } } } if (want_rx) { enum txrx t = NR_RX; want_rx = 0; /* look for a reason to run the handlers */ for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) { - kring = &NMR(na, t)[i]; + kring = NMR(na, t)[i]; if (kring->ring->cur == kring->ring->tail /* try fetch new buffers */ || kring->rhead != kring->ring->head /* release buffers */) { want_rx = 1; } } if (!want_rx) revents |= events & (POLLIN | POLLRDNORM); /* we have data */ } #endif #ifdef linux /* The selrecord must be unconditional on linux. */ nm_os_selrecord(sr, check_all_tx ? - &na->si[NR_TX] : &na->tx_rings[priv->np_qfirst[NR_TX]].si); + &na->si[NR_TX] : &na->tx_rings[priv->np_qfirst[NR_TX]]->si); nm_os_selrecord(sr, check_all_rx ? - &na->si[NR_RX] : &na->rx_rings[priv->np_qfirst[NR_RX]].si); + &na->si[NR_RX] : &na->rx_rings[priv->np_qfirst[NR_RX]]->si); #endif /* linux */ /* * If we want to push packets out (priv->np_txpoll) or * want_tx is still set, we must issue txsync calls * (on all rings, to avoid that the tx rings stall). * Fortunately, normal tx mode has np_txpoll set. 
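On the user side, the txsync pass described here is driven either from poll()/select(), which issues txsync on POLLOUT or when packets are pending (unless NETMAP_NO_TX_POLL was requested at bind time), or by an explicit NIOCTXSYNC. A minimal transmit sketch, assuming nifp was obtained by mmap()ing the region advertised at register time and resolving it with NETMAP_IF(), and that <net/netmap_user.h> provides the usual ring helpers:

#include <sys/ioctl.h>
#include <string.h>
#include <net/netmap_user.h>

/* Queue one frame on TX ring 'r' of 'nifp' and flush it synchronously. */
static int
tx_one(int fd, struct netmap_if *nifp, unsigned r, const void *frame,
    unsigned len)
{
	struct netmap_ring *ring = NETMAP_TXRING(nifp, r);
	struct netmap_slot *slot;

	if (nm_ring_space(ring) == 0)	/* no free slot available */
		return (-1);
	if (len > ring->nr_buf_size)	/* would need NS_MOREFRAG */
		return (-1);

	slot = &ring->slot[ring->head];
	memcpy(NETMAP_BUF(ring, slot->buf_idx), frame, len);
	slot->len = len;
	slot->flags = 0;
	ring->head = ring->cur = nm_ring_next(ring, ring->head);

	return (ioctl(fd, NIOCTXSYNC, NULL));
}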
*/ if (priv->np_txpoll || want_tx) { /* * The first round checks if anyone is ready, if not * do a selrecord and another round to handle races. * want_tx goes to 0 if any space is found, and is * used to skip rings with no pending transmissions. */ flush_tx: for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) { int found = 0; - kring = &na->tx_rings[i]; + kring = na->tx_rings[i]; ring = kring->ring; /* * Don't try to txsync this TX ring if we already found some * space in some of the TX rings (want_tx == 0) and there are no * TX slots in this ring that need to be flushed to the NIC - * (cur == hwcur). + * (head == hwcur). */ - if (!send_down && !want_tx && ring->cur == kring->nr_hwcur) + if (!send_down && !want_tx && ring->head == kring->nr_hwcur) continue; if (nm_kr_tryget(kring, 1, &revents)) continue; if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); revents |= POLLERR; } else { if (kring->nm_sync(kring, sync_flags)) revents |= POLLERR; else nm_sync_finalize(kring); } /* * If we found new slots, notify potential * listeners on the same ring. * Since we just did a txsync, look at the copies * of cur,tail in the kring. */ found = kring->rcur != kring->rtail; nm_kr_put(kring); if (found) { /* notify other listeners */ revents |= want_tx; want_tx = 0; #ifndef linux kring->nm_notify(kring, 0); #endif /* linux */ } } /* if there were any packet to forward we must have handled them by now */ send_down = 0; if (want_tx && retry_tx && sr) { #ifndef linux nm_os_selrecord(sr, check_all_tx ? - &na->si[NR_TX] : &na->tx_rings[priv->np_qfirst[NR_TX]].si); + &na->si[NR_TX] : &na->tx_rings[priv->np_qfirst[NR_TX]]->si); #endif /* !linux */ retry_tx = 0; goto flush_tx; } } /* * If want_rx is still set scan receive rings. * Do it on all rings because otherwise we starve. */ if (want_rx) { /* two rounds here for race avoidance */ do_retry_rx: for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) { int found = 0; - kring = &na->rx_rings[i]; + kring = na->rx_rings[i]; ring = kring->ring; if (unlikely(nm_kr_tryget(kring, 1, &revents))) continue; if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) { netmap_ring_reinit(kring); revents |= POLLERR; } /* now we can use kring->rcur, rtail */ /* * transparent mode support: collect packets from * hw rxring(s) that have been released by the user */ if (nm_may_forward_up(kring)) { netmap_grab_packets(kring, &q, netmap_fwd); } /* Clear the NR_FORWARD flag anyway, it may be set by * the nm_sync() below only on for the host RX ring (see * netmap_rxsync_from_host()). */ kring->nr_kflags &= ~NR_FORWARD; if (kring->nm_sync(kring, sync_flags)) revents |= POLLERR; else nm_sync_finalize(kring); send_down |= (kring->nr_kflags & NR_FORWARD); ring_timestamp_set(ring); found = kring->rcur != kring->rtail; nm_kr_put(kring); if (found) { revents |= want_rx; retry_rx = 0; #ifndef linux kring->nm_notify(kring, 0); #endif /* linux */ } } #ifndef linux if (retry_rx && sr) { nm_os_selrecord(sr, check_all_rx ? - &na->si[NR_RX] : &na->rx_rings[priv->np_qfirst[NR_RX]].si); + &na->si[NR_RX] : &na->rx_rings[priv->np_qfirst[NR_RX]]->si); } #endif /* !linux */ if (send_down || retry_rx) { retry_rx = 0; if (send_down) goto flush_tx; /* and retry_rx */ else goto do_retry_rx; } } /* * Transparent mode: released bufs (i.e. between kring->nr_hwcur and * ring->head) marked with NS_FORWARD on hw rx rings are passed up * to the host stack. 
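The receive path is the mirror image: sleep in poll() with POLLIN, then consume every slot between head and tail on each bound RX ring and publish the new head/cur so the next rxsync can reuse the buffers. A sketch under the same assumptions as the transmit example:

#include <poll.h>
#include <net/netmap_user.h>

/* Drain all RX rings in [first_ring, last_ring] once per POLLIN wakeup. */
static void
rx_loop(int fd, struct netmap_if *nifp, unsigned first_ring,
    unsigned last_ring)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	for (;;) {
		unsigned r;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		for (r = first_ring; r <= last_ring; r++) {
			struct netmap_ring *ring = NETMAP_RXRING(nifp, r);

			while (!nm_ring_empty(ring)) {
				struct netmap_slot *slot = &ring->slot[ring->head];
				char *payload = NETMAP_BUF(ring, slot->buf_idx);

				/* ... process slot->len bytes at 'payload' ... */
				(void)payload;
				ring->head = ring->cur =
				    nm_ring_next(ring, ring->head);
			}
		}
	}
}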
*/ if (mbq_peek(&q)) { netmap_send_up(na->ifp, &q); } return (revents); #undef want_tx #undef want_rx } int nma_intr_enable(struct netmap_adapter *na, int onoff) { bool changed = false; enum txrx t; int i; for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; int on = !(kring->nr_kflags & NKR_NOINTR); if (!!onoff != !!on) { changed = true; } if (onoff) { kring->nr_kflags &= ~NKR_NOINTR; } else { kring->nr_kflags |= NKR_NOINTR; } } } if (!changed) { return 0; /* nothing to do */ } if (!na->nm_intr) { D("Cannot %s interrupts for %s", onoff ? "enable" : "disable", na->name); return -1; } na->nm_intr(na, onoff); return 0; } /*-------------------- driver support routines -------------------*/ /* default notify callback */ static int netmap_notify(struct netmap_kring *kring, int flags) { - struct netmap_adapter *na = kring->na; + struct netmap_adapter *na = kring->notify_na; enum txrx t = kring->tx; nm_os_selwakeup(&kring->si); /* optimization: avoid a wake up on the global * queue if nobody has registered for more * than one ring */ if (na->si_users[t] > 0) nm_os_selwakeup(&na->si[t]); return NM_IRQ_COMPLETED; } /* called by all routines that create netmap_adapters. * provide some defaults and get a reference to the * memory allocator */ int netmap_attach_common(struct netmap_adapter *na) { if (na->num_tx_rings == 0 || na->num_rx_rings == 0) { D("%s: invalid rings tx %d rx %d", na->name, na->num_tx_rings, na->num_rx_rings); return EINVAL; } + if (!na->rx_buf_maxsize) { + /* Set a conservative default (larger is safer). */ + na->rx_buf_maxsize = PAGE_SIZE; + } + #ifdef __FreeBSD__ if (na->na_flags & NAF_HOST_RINGS && na->ifp) { na->if_input = na->ifp->if_input; /* for netmap_send_up */ } na->pdev = na; /* make sure netmap_mem_map() is called */ #endif /* __FreeBSD__ */ if (na->nm_krings_create == NULL) { /* we assume that we have been called by a driver, * since other port types all provide their own * nm_krings_create */ na->nm_krings_create = netmap_hw_krings_create; na->nm_krings_delete = netmap_hw_krings_delete; } if (na->nm_notify == NULL) na->nm_notify = netmap_notify; na->active_fds = 0; if (na->nm_mem == NULL) { /* use the global allocator */ na->nm_mem = netmap_mem_get(&nm_mem); } #ifdef WITH_VALE if (na->nm_bdg_attach == NULL) /* no special nm_bdg_attach callback. On VALE * attach, we need to interpose a bwrap */ na->nm_bdg_attach = netmap_bwrap_attach; #endif return 0; } /* Wrapper for the register callback provided netmap-enabled * hardware drivers. * nm_iszombie(na) means that the driver module has been * unloaded, so we cannot call into it. * nm_os_ifnet_lock() must guarantee mutual exclusion with * module unloading. */ static int netmap_hw_reg(struct netmap_adapter *na, int onoff) { struct netmap_hw_adapter *hwna = (struct netmap_hw_adapter*)na; int error = 0; nm_os_ifnet_lock(); if (nm_iszombie(na)) { if (onoff) { error = ENXIO; } else if (na != NULL) { na->na_flags &= ~NAF_NETMAP_ON; } goto out; } error = hwna->nm_hw_register(na, onoff); out: nm_os_ifnet_unlock(); return error; } static void netmap_hw_dtor(struct netmap_adapter *na) { if (nm_iszombie(na) || na->ifp == NULL) return; WNA(na->ifp) = NULL; } /* * Allocate a netmap_adapter object, and initialize it from the * 'arg' passed by the driver on attach. * We allocate a block of memory of 'size' bytes, which has room * for struct netmap_adapter plus additional room private to * the caller. 
* Return 0 on success, ENOMEM otherwise. */ int netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg) { struct netmap_hw_adapter *hwna = NULL; struct ifnet *ifp = NULL; if (size < sizeof(struct netmap_hw_adapter)) { D("Invalid netmap adapter size %d", (int)size); return EINVAL; } if (arg == NULL || arg->ifp == NULL) goto fail; ifp = arg->ifp; if (NA(ifp) && !NM_NA_VALID(ifp)) { /* If NA(ifp) is not null but there is no valid netmap * adapter it means that someone else is using the same * pointer (e.g. ax25_ptr on linux). This happens for * instance when also PF_RING is in use. */ D("Error: netmap adapter hook is busy"); return EBUSY; } hwna = nm_os_malloc(size); if (hwna == NULL) goto fail; hwna->up = *arg; hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE; strncpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name)); if (override_reg) { hwna->nm_hw_register = hwna->up.nm_register; hwna->up.nm_register = netmap_hw_reg; } if (netmap_attach_common(&hwna->up)) { nm_os_free(hwna); goto fail; } netmap_adapter_get(&hwna->up); NM_ATTACH_NA(ifp, &hwna->up); #ifdef linux if (ifp->netdev_ops) { /* prepare a clone of the netdev ops */ #ifndef NETMAP_LINUX_HAVE_NETDEV_OPS hwna->nm_ndo.ndo_start_xmit = ifp->netdev_ops; #else hwna->nm_ndo = *ifp->netdev_ops; #endif /* NETMAP_LINUX_HAVE_NETDEV_OPS */ } hwna->nm_ndo.ndo_start_xmit = linux_netmap_start_xmit; hwna->nm_ndo.ndo_change_mtu = linux_netmap_change_mtu; if (ifp->ethtool_ops) { hwna->nm_eto = *ifp->ethtool_ops; } hwna->nm_eto.set_ringparam = linux_netmap_set_ringparam; #ifdef NETMAP_LINUX_HAVE_SET_CHANNELS hwna->nm_eto.set_channels = linux_netmap_set_channels; #endif /* NETMAP_LINUX_HAVE_SET_CHANNELS */ if (arg->nm_config == NULL) { hwna->up.nm_config = netmap_linux_config; } #endif /* linux */ if (arg->nm_dtor == NULL) { hwna->up.nm_dtor = netmap_hw_dtor; } if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n", hwna->up.num_tx_rings, hwna->up.num_tx_desc, hwna->up.num_rx_rings, hwna->up.num_rx_desc); return 0; fail: D("fail, arg %p ifp %p na %p", arg, ifp, hwna); return (hwna ? EINVAL : ENOMEM); } int netmap_attach(struct netmap_adapter *arg) { return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter), 1 /* override nm_reg */); } void NM_DBG(netmap_adapter_get)(struct netmap_adapter *na) { if (!na) { return; } refcount_acquire(&na->na_refcount); } /* returns 1 iff the netmap_adapter is destroyed */ int NM_DBG(netmap_adapter_put)(struct netmap_adapter *na) { if (!na) return 1; if (!refcount_release(&na->na_refcount)) return 0; if (na->nm_dtor) na->nm_dtor(na); if (na->tx_rings) { /* XXX should not happen */ D("freeing leftover tx_rings"); na->nm_krings_delete(na); } netmap_pipe_dealloc(na); if (na->nm_mem) netmap_mem_put(na->nm_mem); bzero(na, sizeof(*na)); nm_os_free(na); return 1; } /* nm_krings_create callback for all hardware native adapters */ int netmap_hw_krings_create(struct netmap_adapter *na) { int ret = netmap_krings_create(na, 0); if (ret == 0) { /* initialize the mbq for the sw rx ring */ - mbq_safe_init(&na->rx_rings[na->num_rx_rings].rx_queue); + mbq_safe_init(&na->rx_rings[na->num_rx_rings]->rx_queue); ND("initialized sw rx queue %d", na->num_rx_rings); } return ret; } /* * Called on module unload by the netmap-enabled drivers */ void netmap_detach(struct ifnet *ifp) { struct netmap_adapter *na = NA(ifp); if (!na) return; NMG_LOCK(); netmap_set_all_rings(na, NM_KR_LOCKED); /* * if the netmap adapter is not native, somebody * changed it, so we can not release it here. 
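For a NIC driver, all of the machinery above is reached by filling a struct netmap_adapter with the ring geometry, the register/txsync/rxsync callbacks and, after this change, rx_buf_maxsize, and handing it to netmap_attach(). An attach-time outline; the foo_* names are placeholders and the callback bodies are the driver's own, so this is a shape sketch rather than working driver code:

#include <sys/param.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/* Callbacks normally provided by the driver (bodies omitted). */
static int foo_netmap_txsync(struct netmap_kring *kring, int flags);
static int foo_netmap_rxsync(struct netmap_kring *kring, int flags);
static int foo_netmap_reg(struct netmap_adapter *na, int onoff);

static void
foo_netmap_attach(struct ifnet *ifp, u_int nqueues, u_int ntxd, u_int nrxd)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = ifp;
	na.num_tx_rings = na.num_rx_rings = nqueues;
	na.num_tx_desc = ntxd;
	na.num_rx_desc = nrxd;
	na.rx_buf_maxsize = 2048;	/* largest payload one RX descriptor
					 * can hold; PAGE_SIZE if left at 0 */
	na.nm_txsync = foo_netmap_txsync;
	na.nm_rxsync = foo_netmap_rxsync;
	na.nm_register = foo_netmap_reg;
	netmap_attach(&na);		/* copies 'na', adds NAF_HOST_RINGS */
}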
* The NAF_ZOMBIE flag will notify the new owner that * the driver is gone. */ if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) { na->na_flags |= NAF_ZOMBIE; } /* give active users a chance to notice that NAF_ZOMBIE has been * turned on, so that they can stop and return an error to userspace. * Note that this becomes a NOP if there are no active users and, * therefore, the put() above has deleted the na, since now NA(ifp) is * NULL. */ netmap_enable_all_rings(ifp); NMG_UNLOCK(); } /* * Intercept packets from the network stack and pass them * to netmap as incoming packets on the 'software' ring. * * We only store packets in a bounded mbq and then copy them * in the relevant rxsync routine. * * We rely on the OS to make sure that the ifp and na do not go * away (typically the caller checks for IFF_DRV_RUNNING or the like). * In nm_register() or whenever there is a reinitialization, * we make sure to make the mode change visible here. */ int netmap_transmit(struct ifnet *ifp, struct mbuf *m) { struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring, *tx_kring; u_int len = MBUF_LEN(m); u_int error = ENOBUFS; unsigned int txr; struct mbq *q; int busy; - kring = &na->rx_rings[na->num_rx_rings]; + kring = na->rx_rings[na->num_rx_rings]; // XXX [Linux] we do not need this lock // if we follow the down/configure/up protocol -gl // mtx_lock(&na->core_lock); if (!nm_netmap_on(na)) { D("%s not in netmap mode anymore", na->name); error = ENXIO; goto done; } txr = MBUF_TXQ(m); if (txr >= na->num_tx_rings) { txr %= na->num_tx_rings; } - tx_kring = &NMR(na, NR_TX)[txr]; + tx_kring = NMR(na, NR_TX)[txr]; if (tx_kring->nr_mode == NKR_NETMAP_OFF) { return MBUF_TRANSMIT(na, ifp, m); } q = &kring->rx_queue; // XXX reconsider long packets if we handle fragments if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */ D("%s from_host, drop packet size %d > %d", na->name, len, NETMAP_BUF_SIZE(na)); goto done; } if (nm_os_mbuf_has_offld(m)) { RD(1, "%s drop mbuf that needs offloadings", na->name); goto done; } /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic() * and maybe other instances of netmap_transmit (the latter * not possible on Linux). * We enqueue the mbuf only if we are sure there is going to be * enough room in the host RX ring, otherwise we drop it. */ mbq_lock(q); busy = kring->nr_hwtail - kring->nr_hwcur; if (busy < 0) busy += kring->nkr_num_slots; if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) { RD(2, "%s full hwcur %d hwtail %d qlen %d", na->name, kring->nr_hwcur, kring->nr_hwtail, mbq_len(q)); } else { mbq_enqueue(q, m); ND(2, "%s %d bufs in queue", na->name, mbq_len(q)); /* notify outside the lock */ m = NULL; error = 0; } mbq_unlock(q); done: if (m) m_freem(m); /* unconditionally wake up listeners */ kring->nm_notify(kring, 0); /* this is normally netmap_notify(), but for nics * connected to a bridge it is netmap_bwrap_intr_notify(), * that possibly forwards the frames through the switch */ return (error); } /* * netmap_reset() is called by the driver routines when reinitializing * a ring. The driver is in charge of locking to protect the kring. * If native netmap mode is not set just return NULL. * If native netmap mode is set, in particular, we have to set nr_mode to * NKR_NETMAP_ON. 
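netmap_transmit() only enqueues the mbuf when the host RX kring, plus whatever already sits in its mbq, still leaves at least one free slot; the occupancy is the hwtail-hwcur distance taken modulo the ring size. That computation in isolation, with a worked number (the helper name is illustrative):

/* Number of slots currently occupied in a circular ring of 'num_slots'
 * entries, given the producer (hwtail) and consumer (hwcur) indices;
 * mirrors the 'busy' computation in netmap_transmit(). */
static inline unsigned
ring_occupancy(int hwtail, int hwcur, int num_slots)
{
	int busy = hwtail - hwcur;

	if (busy < 0)
		busy += num_slots;
	return ((unsigned)busy);
}

/* Example: num_slots = 1024, hwcur = 1000, hwtail = 8
 * -> busy = 8 - 1000 + 1024 = 32 slots in use. */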
*/ struct netmap_slot * netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n, u_int new_cur) { struct netmap_kring *kring; int new_hwofs, lim; if (!nm_native_on(na)) { ND("interface not in native netmap mode"); return NULL; /* nothing to reinitialize */ } /* XXX note- in the new scheme, we are not guaranteed to be * under lock (e.g. when called on a device reset). * In this case, we should set a flag and do not trust too * much the values. In practice: TODO * - set a RESET flag somewhere in the kring * - do the processing in a conservative way * - let the *sync() fixup at the end. */ if (tx == NR_TX) { if (n >= na->num_tx_rings) return NULL; - kring = na->tx_rings + n; + kring = na->tx_rings[n]; if (kring->nr_pending_mode == NKR_NETMAP_OFF) { kring->nr_mode = NKR_NETMAP_OFF; return NULL; } // XXX check whether we should use hwcur or rcur new_hwofs = kring->nr_hwcur - new_cur; } else { if (n >= na->num_rx_rings) return NULL; - kring = na->rx_rings + n; + kring = na->rx_rings[n]; if (kring->nr_pending_mode == NKR_NETMAP_OFF) { kring->nr_mode = NKR_NETMAP_OFF; return NULL; } new_hwofs = kring->nr_hwtail - new_cur; } lim = kring->nkr_num_slots - 1; if (new_hwofs > lim) new_hwofs -= lim + 1; /* Always set the new offset value and realign the ring. */ if (netmap_verbose) D("%s %s%d hwofs %d -> %d, hwtail %d -> %d", na->name, tx == NR_TX ? "TX" : "RX", n, kring->nkr_hwofs, new_hwofs, kring->nr_hwtail, tx == NR_TX ? lim : kring->nr_hwtail); kring->nkr_hwofs = new_hwofs; if (tx == NR_TX) { kring->nr_hwtail = kring->nr_hwcur + lim; if (kring->nr_hwtail > lim) kring->nr_hwtail -= lim + 1; } /* * Wakeup on the individual and global selwait * We do the wakeup here, but the ring is not yet reconfigured. * However, we are under lock so there are no races. */ kring->nr_mode = NKR_NETMAP_ON; kring->nm_notify(kring, 0); return kring->ring->slot; } /* * Dispatch rx/tx interrupts to the netmap rings. * * "work_done" is non-null on the RX path, NULL for the TX path. * We rely on the OS to make sure that there is only one active * instance per queue, and that there is appropriate locking. * * The 'notify' routine depends on what the ring is attached to. * - for a netmap file descriptor, do a selwakeup on the individual * waitqueue, plus one on the global one if needed * (see netmap_notify) * - for a nic connected to a switch, call the proper forwarding routine * (see netmap_bwrap_intr_notify) */ int netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done) { struct netmap_kring *kring; enum txrx t = (work_done ? NR_RX : NR_TX); q &= NETMAP_RING_MASK; if (netmap_verbose) { RD(5, "received %s queue %d", work_done ? "RX" : "TX" , q); } if (q >= nma_get_nrings(na, t)) return NM_IRQ_PASS; // not a physical queue - kring = NMR(na, t) + q; + kring = NMR(na, t)[q]; if (kring->nr_mode == NKR_NETMAP_OFF) { return NM_IRQ_PASS; } if (t == NR_RX) { kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ? *work_done = 1; /* do not fire napi again */ } return kring->nm_notify(kring, 0); } /* * Default functions to handle rx/tx interrupts from a physical device. * "work_done" is non-null on the RX path, NULL for the TX path. * * If the card is not in netmap mode, simply return NM_IRQ_PASS, * so that the caller proceeds with regular processing. * Otherwise call netmap_common_irq(). * * If the card is connected to a netmap file descriptor, * do a selwakeup on the individual queue, plus one on the global one * if needed (multiqueue card _and_ there are multiqueue listeners), * and return NR_IRQ_COMPLETED. 
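A hardware driver calls these helpers from its per-queue interrupt handlers and skips its normal mbuf processing whenever netmap owns the ring. A typical RX-side fragment; foo_queue_rx_intr() and foo_rxeof() are placeholder driver names:

#include <sys/param.h>
#include <net/if.h>
#include <net/if_var.h>
#include <dev/netmap/netmap_kern.h>

static void
foo_queue_rx_intr(struct ifnet *ifp, u_int queue_id)
{
	u_int work_done = 0;

	if (netmap_rx_irq(ifp, queue_id, &work_done) != NM_IRQ_PASS) {
		/* Ring is in netmap mode: netmap_common_irq() has already
		 * notified the listeners, nothing else to do here. */
		return;
	}
	/* ... regular mbuf-based receive path, e.g. foo_rxeof(ifp, queue_id) ... */
}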
* * Finally, if called on rx from an interface connected to a switch, * calls the proper forwarding routine. */ int netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done) { struct netmap_adapter *na = NA(ifp); /* * XXX emulated netmap mode sets NAF_SKIP_INTR so * we still use the regular driver even though the previous * check fails. It is unclear whether we should use * nm_native_on() here. */ if (!nm_netmap_on(na)) return NM_IRQ_PASS; if (na->na_flags & NAF_SKIP_INTR) { ND("use regular interrupt"); return NM_IRQ_PASS; } return netmap_common_irq(na, q, work_done); } /* * Module loader and unloader * * netmap_init() creates the /dev/netmap device and initializes * all global variables. Returns 0 on success, errno on failure * (but there is no chance) * * netmap_fini() destroys everything. */ static struct cdev *netmap_dev; /* /dev/netmap character device. */ extern struct cdevsw netmap_cdevsw; void netmap_fini(void) { if (netmap_dev) destroy_dev(netmap_dev); /* we assume that there are no longer netmap users */ nm_os_ifnet_fini(); netmap_uninit_bridges(); netmap_mem_fini(); NMG_LOCK_DESTROY(); nm_prinf("netmap: unloaded module.\n"); } int netmap_init(void) { int error; NMG_LOCK_INIT(); error = netmap_mem_init(); if (error != 0) goto fail; /* * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls * when the module is compiled in. * XXX could use make_dev_credv() to get error number */ netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, "netmap"); if (!netmap_dev) goto fail; error = netmap_init_bridges(); if (error) goto fail; #ifdef __FreeBSD__ nm_os_vi_init_index(); #endif error = nm_os_ifnet_init(); if (error) goto fail; nm_prinf("netmap: loaded module\n"); return (0); fail: netmap_fini(); return (EINVAL); /* may be incorrect */ } Index: head/sys/dev/netmap/netmap_freebsd.c =================================================================== --- head/sys/dev/netmap/netmap_freebsd.c (revision 332422) +++ head/sys/dev/netmap/netmap_freebsd.c (revision 332423) @@ -1,1511 +1,1615 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* $FreeBSD$ */ #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include /* POLLIN, POLLOUT */ #include /* types used in module initialization */ #include /* DEV_MODULE_ORDERED */ #include #include /* kern_ioctl() */ #include #include /* vtophys */ #include /* vtophys */ #include #include #include #include #include #include #include /* sockaddrs */ #include #include /* kthread_add() */ #include /* PROC_LOCK() */ #include /* RFNOWAIT */ #include /* sched_bind() */ #include /* mp_maxid */ #include #include #include /* IFT_ETHER */ #include /* ether_ifdetach */ #include /* LLADDR */ #include /* bus_dmamap_* */ #include /* in6_cksum_pseudo() */ #include /* in_pseudo(), in_cksum_hdr() */ #include #include #include #include /* ======================== FREEBSD-SPECIFIC ROUTINES ================== */ void nm_os_selinfo_init(NM_SELINFO_T *si) { struct mtx *m = &si->m; mtx_init(m, "nm_kn_lock", NULL, MTX_DEF); knlist_init_mtx(&si->si.si_note, m); } void nm_os_selinfo_uninit(NM_SELINFO_T *si) { /* XXX kqueue(9) needed; these will mirror knlist_init. */ knlist_delete(&si->si.si_note, curthread, 0 /* not locked */ ); knlist_destroy(&si->si.si_note); /* now we don't need the mutex anymore */ mtx_destroy(&si->m); } void * nm_os_malloc(size_t size) { return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); } void * nm_os_realloc(void *addr, size_t new_size, size_t old_size __unused) { return realloc(addr, new_size, M_DEVBUF, M_NOWAIT | M_ZERO); } void nm_os_free(void *addr) { free(addr, M_DEVBUF); } void nm_os_ifnet_lock(void) { IFNET_RLOCK(); } void nm_os_ifnet_unlock(void) { IFNET_RUNLOCK(); } static int netmap_use_count = 0; void nm_os_get_module(void) { netmap_use_count++; } void nm_os_put_module(void) { netmap_use_count--; } static void netmap_ifnet_arrival_handler(void *arg __unused, struct ifnet *ifp) { netmap_undo_zombie(ifp); } static void netmap_ifnet_departure_handler(void *arg __unused, struct ifnet *ifp) { netmap_make_zombie(ifp); } static eventhandler_tag nm_ifnet_ah_tag; static eventhandler_tag nm_ifnet_dh_tag; int nm_os_ifnet_init(void) { nm_ifnet_ah_tag = EVENTHANDLER_REGISTER(ifnet_arrival_event, netmap_ifnet_arrival_handler, NULL, EVENTHANDLER_PRI_ANY); nm_ifnet_dh_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, netmap_ifnet_departure_handler, NULL, EVENTHANDLER_PRI_ANY); return 0; } void nm_os_ifnet_fini(void) { EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nm_ifnet_ah_tag); EVENTHANDLER_DEREGISTER(ifnet_departure_event, nm_ifnet_dh_tag); } unsigned nm_os_ifnet_mtu(struct ifnet *ifp) { #if __FreeBSD_version < 1100030 return ifp->if_data.ifi_mtu; #else /* __FreeBSD_version >= 1100030 */ return ifp->if_mtu; #endif } rawsum_t nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum) { /* TODO XXX please use the FreeBSD implementation for this. */ uint16_t *words = (uint16_t *)data; int nw = len / 2; int i; for (i = 0; i < nw; i++) cur_sum += be16toh(words[i]); if (len & 1) cur_sum += (data[len-1] << 8); return cur_sum; } /* Fold a raw checksum: 'cur_sum' is in host byte order, while the * return value is in network byte order. */ uint16_t nm_os_csum_fold(rawsum_t cur_sum) { /* TODO XXX please use the FreeBSD implementation for this. 
*/ while (cur_sum >> 16) cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16); return htobe16((~cur_sum) & 0xFFFF); } uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph) { #if 0 return in_cksum_hdr((void *)iph); #else return nm_os_csum_fold(nm_os_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0)); #endif } void nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data, size_t datalen, uint16_t *check) { #ifdef INET uint16_t pseudolen = datalen + iph->protocol; /* Compute and insert the pseudo-header cheksum. */ *check = in_pseudo(iph->saddr, iph->daddr, htobe16(pseudolen)); /* Compute the checksum on TCP/UDP header + payload * (includes the pseudo-header). */ *check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0)); #else static int notsupported = 0; if (!notsupported) { notsupported = 1; D("inet4 segmentation not supported"); } #endif } void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data, size_t datalen, uint16_t *check) { #ifdef INET6 *check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0); *check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0)); #else static int notsupported = 0; if (!notsupported) { notsupported = 1; D("inet6 segmentation not supported"); } #endif } /* on FreeBSD we send up one packet at a time */ void * nm_os_send_up(struct ifnet *ifp, struct mbuf *m, struct mbuf *prev) { NA(ifp)->if_input(ifp, m); return NULL; } int nm_os_mbuf_has_offld(struct mbuf *m) { return m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_SCTP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6 | CSUM_TSO); } static void freebsd_generic_rx_handler(struct ifnet *ifp, struct mbuf *m) { int stolen; if (!NM_NA_VALID(ifp)) { RD(1, "Warning: got RX packet for invalid emulated adapter"); return; } stolen = generic_rx_handler(ifp, m); if (!stolen) { struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)NA(ifp); gna->save_if_input(ifp, m); } } /* * Intercept the rx routine in the standard device driver. * Second argument is non-zero to intercept, 0 to restore */ int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept) { struct netmap_adapter *na = &gna->up.up; struct ifnet *ifp = na->ifp; int ret = 0; nm_os_ifnet_lock(); if (intercept) { if (gna->save_if_input) { D("cannot intercept again"); ret = EINVAL; /* already set */ goto out; } gna->save_if_input = ifp->if_input; ifp->if_input = freebsd_generic_rx_handler; } else { if (!gna->save_if_input){ D("cannot restore"); ret = EINVAL; /* not saved */ goto out; } ifp->if_input = gna->save_if_input; gna->save_if_input = NULL; } out: nm_os_ifnet_unlock(); return ret; } /* * Intercept the packet steering routine in the tx path, * so that we can decide which queue is used for an mbuf. * Second argument is non-zero to intercept, 0 to restore. * On freebsd we just intercept if_transmit. */ int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept) { struct netmap_adapter *na = &gna->up.up; struct ifnet *ifp = netmap_generic_getifp(gna); nm_os_ifnet_lock(); if (intercept) { na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; } else { ifp->if_transmit = na->if_transmit; } nm_os_ifnet_unlock(); return 0; } /* * Transmit routine used by generic_netmap_txsync(). Returns 0 on success * and non-zero on error (which may be packet drops or other errors). * addr and len identify the netmap buffer, m is the (preallocated) * mbuf to use for transmissions. * * We should add a reference to the mbuf so the m_freem() at the end * of the transmission does not consume resources. 
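nm_os_csum_raw() and nm_os_csum_fold() above are the textbook Internet one's-complement sum: accumulate 16-bit words into a wider register, then fold the carries back until the value fits in 16 bits and complement it. The fold step with a worked value:

#include <stdint.h>

/* Fold a 32-bit accumulated sum into the final 16-bit Internet checksum
 * (host byte order in, host byte order out). */
static uint16_t
csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return ((uint16_t)~sum);
}

/* Example: sum = 0x246AF -> 0x46AF + 0x2 = 0x46B1 -> ~0x46B1 = 0xB94E. */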
* * On FreeBSD, and on multiqueue cards, we can force the queue using * if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) * i = m->m_pkthdr.flowid % adapter->num_queues; * else * i = curcpu % adapter->num_queues; * */ int nm_os_generic_xmit_frame(struct nm_os_gen_arg *a) { int ret; u_int len = a->len; struct ifnet *ifp = a->ifp; struct mbuf *m = a->m; #if __FreeBSD_version < 1100000 /* * Old FreeBSD versions. The mbuf has a cluster attached, * we need to copy from the cluster to the netmap buffer. */ if (MBUF_REFCNT(m) != 1) { D("invalid refcnt %d for %p", MBUF_REFCNT(m), m); panic("in generic_xmit_frame"); } if (m->m_ext.ext_size < len) { RD(5, "size %d < len %d", m->m_ext.ext_size, len); len = m->m_ext.ext_size; } bcopy(a->addr, m->m_data, len); #else /* __FreeBSD_version >= 1100000 */ /* New FreeBSD versions. Link the external storage to * the netmap buffer, so that no copy is necessary. */ m->m_ext.ext_buf = m->m_data = a->addr; m->m_ext.ext_size = len; #endif /* __FreeBSD_version >= 1100000 */ m->m_len = m->m_pkthdr.len = len; /* mbuf refcnt is not contended, no need to use atomic * (a memory barrier is enough). */ SET_MBUF_REFCNT(m, 2); M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); m->m_pkthdr.flowid = a->ring_nr; m->m_pkthdr.rcvif = ifp; /* used for tx notification */ ret = NA(ifp)->if_transmit(ifp, m); return ret ? -1 : 0; } #if __FreeBSD_version >= 1100005 struct netmap_adapter * netmap_getna(if_t ifp) { return (NA((struct ifnet *)ifp)); } #endif /* __FreeBSD_version >= 1100005 */ /* * The following two functions are empty until we have a generic * way to extract the info from the ifp */ int nm_os_generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx) { return 0; } void nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq) { unsigned num_rings = netmap_generic_rings ? netmap_generic_rings : 1; *txq = num_rings; *rxq = num_rings; } void nm_os_generic_set_features(struct netmap_generic_adapter *gna) { gna->rxsg = 1; /* Supported through m_copydata. */ gna->txqdisc = 0; /* Not supported. */ } void nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na) { ND("called"); mit->mit_pending = 0; mit->mit_ring_idx = idx; mit->mit_na = na; } void nm_os_mitigation_start(struct nm_generic_mit *mit) { ND("called"); } void nm_os_mitigation_restart(struct nm_generic_mit *mit) { ND("called"); } int nm_os_mitigation_active(struct nm_generic_mit *mit) { ND("called"); return 0; } void nm_os_mitigation_cleanup(struct nm_generic_mit *mit) { ND("called"); } static int nm_vi_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr) { return EINVAL; } static void nm_vi_start(struct ifnet *ifp) { panic("nm_vi_start() must not be called"); } /* * Index manager of persistent virtual interfaces. * It is used to decide the lowest byte of the MAC address. * We use the same algorithm with management of bridge port index. */ #define NM_VI_MAX 255 static struct { uint8_t index[NM_VI_MAX]; /* XXX just for a reasonable number */ uint8_t active; struct mtx lock; } nm_vi_indices; void nm_os_vi_init_index(void) { int i; for (i = 0; i < NM_VI_MAX; i++) nm_vi_indices.index[i] = i; nm_vi_indices.active = 0; mtx_init(&nm_vi_indices.lock, "nm_vi_indices_lock", NULL, MTX_DEF); } /* return -1 if no index available */ static int nm_vi_get_index(void) { int ret; mtx_lock(&nm_vi_indices.lock); ret = nm_vi_indices.active == NM_VI_MAX ? 
-1 : nm_vi_indices.index[nm_vi_indices.active++]; mtx_unlock(&nm_vi_indices.lock); return ret; } static void nm_vi_free_index(uint8_t val) { int i, lim; mtx_lock(&nm_vi_indices.lock); lim = nm_vi_indices.active; for (i = 0; i < lim; i++) { if (nm_vi_indices.index[i] == val) { /* swap index[lim-1] and j */ int tmp = nm_vi_indices.index[lim-1]; nm_vi_indices.index[lim-1] = val; nm_vi_indices.index[i] = tmp; nm_vi_indices.active--; break; } } if (lim == nm_vi_indices.active) D("funny, index %u didn't found", val); mtx_unlock(&nm_vi_indices.lock); } #undef NM_VI_MAX /* * Implementation of a netmap-capable virtual interface that * registered to the system. * It is based on if_tap.c and ip_fw_log.c in FreeBSD 9. * * Note: Linux sets refcount to 0 on allocation of net_device, * then increments it on registration to the system. * FreeBSD sets refcount to 1 on if_alloc(), and does not * increment this refcount on if_attach(). */ int nm_os_vi_persist(const char *name, struct ifnet **ret) { struct ifnet *ifp; u_short macaddr_hi; uint32_t macaddr_mid; u_char eaddr[6]; int unit = nm_vi_get_index(); /* just to decide MAC address */ if (unit < 0) return EBUSY; /* * We use the same MAC address generation method with tap * except for the highest octet is 00:be instead of 00:bd */ macaddr_hi = htons(0x00be); /* XXX tap + 1 */ macaddr_mid = (uint32_t) ticks; bcopy(&macaddr_hi, eaddr, sizeof(short)); bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t)); eaddr[5] = (uint8_t)unit; ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { D("if_alloc failed"); return ENOMEM; } if_initname(ifp, name, IF_DUNIT_NONE); ifp->if_mtu = 65536; ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = (void *)nm_vi_dummy; ifp->if_ioctl = nm_vi_dummy; ifp->if_start = nm_vi_start; ifp->if_mtu = ETHERMTU; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_capabilities |= IFCAP_LINKSTATE; ifp->if_capenable |= IFCAP_LINKSTATE; ether_ifattach(ifp, eaddr); *ret = ifp; return 0; } /* unregister from the system and drop the final refcount */ void nm_os_vi_detach(struct ifnet *ifp) { nm_vi_free_index(((char *)IF_LLADDR(ifp))[5]); ether_ifdetach(ifp); if_free(ifp); } +#ifdef WITH_EXTMEM +#include +#include +struct nm_os_extmem { + vm_object_t obj; + vm_offset_t kva; + vm_offset_t size; + vm_pindex_t scan; +}; + +void +nm_os_extmem_delete(struct nm_os_extmem *e) +{ + D("freeing %lx bytes", e->size); + vm_map_remove(kernel_map, e->kva, e->kva + e->size); + nm_os_free(e); +} + +char * +nm_os_extmem_nextpage(struct nm_os_extmem *e) +{ + char *rv = NULL; + if (e->scan < e->kva + e->size) { + rv = (char *)e->scan; + e->scan += PAGE_SIZE; + } + return rv; +} + +int +nm_os_extmem_isequal(struct nm_os_extmem *e1, struct nm_os_extmem *e2) +{ + return (e1->obj == e1->obj); +} + +int +nm_os_extmem_nr_pages(struct nm_os_extmem *e) +{ + return e->size >> PAGE_SHIFT; +} + +struct nm_os_extmem * +nm_os_extmem_create(unsigned long p, struct nmreq_pools_info *pi, int *perror) +{ + vm_map_t map; + vm_map_entry_t entry; + vm_object_t obj; + vm_prot_t prot; + vm_pindex_t index; + boolean_t wired; + struct nm_os_extmem *e = NULL; + int rv, error = 0; + + e = nm_os_malloc(sizeof(*e)); + if (e == NULL) { + error = ENOMEM; + goto out; + } + + map = &curthread->td_proc->p_vmspace->vm_map; + rv = vm_map_lookup(&map, p, VM_PROT_RW, &entry, + &obj, &index, &prot, &wired); + if (rv != KERN_SUCCESS) { + D("address %lx not found", p); + goto out_free; + } + /* check that we are given the whole vm_object ? 
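+	 * (One way this could be checked, an untested sketch: compare the
+	 * extent of the mapping with the size of the backing object,
+	 *	if (entry->end - entry->start != (obj->size << PAGE_SHIFT))
+	 *		reject the request,
+	 * using the entry and obj returned by vm_map_lookup() above.)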
*/ + vm_map_lookup_done(map, entry); + + // XXX can we really use obj after releasing the map lock? + e->obj = obj; + vm_object_reference(obj); + /* wire the memory and add the vm_object to the kernel map, + * to make sure that it is not fred even if the processes that + * are mmap()ing it all exit + */ + e->kva = vm_map_min(kernel_map); + e->size = obj->size << PAGE_SHIFT; + rv = vm_map_find(kernel_map, obj, 0, &e->kva, e->size, 0, + VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_READ | VM_PROT_WRITE, 0); + if (rv != KERN_SUCCESS) { + D("vm_map_find(%lx) failed", e->size); + goto out_rel; + } + rv = vm_map_wire(kernel_map, e->kva, e->kva + e->size, + VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); + if (rv != KERN_SUCCESS) { + D("vm_map_wire failed"); + goto out_rem; + } + + e->scan = e->kva; + + return e; + +out_rem: + vm_map_remove(kernel_map, e->kva, e->kva + e->size); + e->obj = NULL; +out_rel: + vm_object_deallocate(e->obj); +out_free: + nm_os_free(e); +out: + if (perror) + *perror = error; + return NULL; +} +#endif /* WITH_EXTMEM */ + /* ======================== PTNETMAP SUPPORT ========================== */ #ifdef WITH_PTNETMAP_GUEST #include #include #include /* bus_dmamap_* */ #include #include #include /* * ptnetmap memory device (memdev) for freebsd guest, * ssed to expose host netmap memory to the guest through a PCI BAR. */ /* * ptnetmap memdev private data structure */ struct ptnetmap_memdev { device_t dev; struct resource *pci_io; struct resource *pci_mem; struct netmap_mem_d *nm_mem; }; static int ptn_memdev_probe(device_t); static int ptn_memdev_attach(device_t); static int ptn_memdev_detach(device_t); static int ptn_memdev_shutdown(device_t); static device_method_t ptn_memdev_methods[] = { DEVMETHOD(device_probe, ptn_memdev_probe), DEVMETHOD(device_attach, ptn_memdev_attach), DEVMETHOD(device_detach, ptn_memdev_detach), DEVMETHOD(device_shutdown, ptn_memdev_shutdown), DEVMETHOD_END }; static driver_t ptn_memdev_driver = { PTNETMAP_MEMDEV_NAME, ptn_memdev_methods, sizeof(struct ptnetmap_memdev), }; /* We use (SI_ORDER_MIDDLE+1) here, see DEV_MODULE_ORDERED() invocation * below. */ static devclass_t ptnetmap_devclass; DRIVER_MODULE_ORDERED(ptn_memdev, pci, ptn_memdev_driver, ptnetmap_devclass, NULL, NULL, SI_ORDER_MIDDLE + 1); /* * Map host netmap memory through PCI-BAR in the guest OS, * returning physical (nm_paddr) and virtual (nm_addr) addresses * of the netmap memory mapped in the guest. */ int nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev, vm_paddr_t *nm_paddr, void **nm_addr, uint64_t *mem_size) { int rid; D("ptn_memdev_driver iomap"); rid = PCIR_BAR(PTNETMAP_MEM_PCI_BAR); *mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_HI); *mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_LO) | (*mem_size << 32); /* map memory allocator */ ptn_dev->pci_mem = bus_alloc_resource(ptn_dev->dev, SYS_RES_MEMORY, &rid, 0, ~0, *mem_size, RF_ACTIVE); if (ptn_dev->pci_mem == NULL) { *nm_paddr = 0; *nm_addr = NULL; return ENOMEM; } *nm_paddr = rman_get_start(ptn_dev->pci_mem); *nm_addr = rman_get_virtual(ptn_dev->pci_mem); D("=== BAR %d start %lx len %lx mem_size %lx ===", PTNETMAP_MEM_PCI_BAR, (unsigned long)(*nm_paddr), (unsigned long)rman_get_size(ptn_dev->pci_mem), (unsigned long)*mem_size); return (0); } uint32_t nm_os_pt_memdev_ioread(struct ptnetmap_memdev *ptn_dev, unsigned int reg) { return bus_read_4(ptn_dev->pci_io, reg); } /* Unmap host netmap memory. 
*/ void nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev) { D("ptn_memdev_driver iounmap"); if (ptn_dev->pci_mem) { bus_release_resource(ptn_dev->dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem); ptn_dev->pci_mem = NULL; } } /* Device identification routine, return BUS_PROBE_DEFAULT on success, * positive on failure */ static int ptn_memdev_probe(device_t dev) { char desc[256]; if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID) return (ENXIO); if (pci_get_device(dev) != PTNETMAP_PCI_DEVICE_ID) return (ENXIO); snprintf(desc, sizeof(desc), "%s PCI adapter", PTNETMAP_MEMDEV_NAME); device_set_desc_copy(dev, desc); return (BUS_PROBE_DEFAULT); } /* Device initialization routine. */ static int ptn_memdev_attach(device_t dev) { struct ptnetmap_memdev *ptn_dev; int rid; uint16_t mem_id; D("ptn_memdev_driver attach"); ptn_dev = device_get_softc(dev); ptn_dev->dev = dev; pci_enable_busmaster(dev); rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); ptn_dev->pci_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (ptn_dev->pci_io == NULL) { device_printf(dev, "cannot map I/O space\n"); return (ENXIO); } mem_id = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMID); /* create guest allocator */ ptn_dev->nm_mem = netmap_mem_pt_guest_attach(ptn_dev, mem_id); if (ptn_dev->nm_mem == NULL) { ptn_memdev_detach(dev); return (ENOMEM); } netmap_mem_get(ptn_dev->nm_mem); D("ptn_memdev_driver probe OK - host_mem_id: %d", mem_id); return (0); } /* Device removal routine. */ static int ptn_memdev_detach(device_t dev) { struct ptnetmap_memdev *ptn_dev; D("ptn_memdev_driver detach"); ptn_dev = device_get_softc(dev); if (ptn_dev->nm_mem) { netmap_mem_put(ptn_dev->nm_mem); ptn_dev->nm_mem = NULL; } if (ptn_dev->pci_mem) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem); ptn_dev->pci_mem = NULL; } if (ptn_dev->pci_io) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(PTNETMAP_IO_PCI_BAR), ptn_dev->pci_io); ptn_dev->pci_io = NULL; } return (0); } static int ptn_memdev_shutdown(device_t dev) { D("ptn_memdev_driver shutdown"); return bus_generic_shutdown(dev); } #endif /* WITH_PTNETMAP_GUEST */ /* * In order to track whether pages are still mapped, we hook into * the standard cdev_pager and intercept the constructor and * destructor. 
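 *
 * The flow, as implemented below: netmap_mmap_single() wraps the per-fd
 * priv and the cdev in a netmap_vm_handle_t and hands it to
 * cdev_pager_allocate() together with netmap_cdev_pager_ops; the pager
 * constructor then takes a reference on the device, and the pager
 * destructor runs netmap_dtor() and drops that reference once the last
 * mapping of the object is gone.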
*/ struct netmap_vm_handle_t { struct cdev *dev; struct netmap_priv_d *priv; }; static int netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { struct netmap_vm_handle_t *vmh = handle; if (netmap_verbose) D("handle %p size %jd prot %d foff %jd", handle, (intmax_t)size, prot, (intmax_t)foff); if (color) *color = 0; dev_ref(vmh->dev); return 0; } static void netmap_dev_pager_dtor(void *handle) { struct netmap_vm_handle_t *vmh = handle; struct cdev *dev = vmh->dev; struct netmap_priv_d *priv = vmh->priv; if (netmap_verbose) D("handle %p", handle); netmap_dtor(priv); free(vmh, M_DEVBUF); dev_rel(dev); } static int netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct netmap_vm_handle_t *vmh = object->handle; struct netmap_priv_d *priv = vmh->priv; struct netmap_adapter *na = priv->np_na; vm_paddr_t paddr; vm_page_t page; vm_memattr_t memattr; vm_pindex_t pidx; ND("object %p offset %jd prot %d mres %p", object, (intmax_t)offset, prot, mres); memattr = object->memattr; pidx = OFF_TO_IDX(offset); paddr = netmap_mem_ofstophys(na->nm_mem, offset); if (paddr == 0) return VM_PAGER_FAIL; if (((*mres)->flags & PG_FICTITIOUS) != 0) { /* * If the passed in result page is a fake page, update it with * the new physical address. */ page = *mres; vm_page_updatefake(page, paddr, memattr); } else { /* * Replace the passed in reqpage page with our own fake page and * free up the all of the original pages. */ #ifndef VM_OBJECT_WUNLOCK /* FreeBSD < 10.x */ #define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK #define VM_OBJECT_WLOCK VM_OBJECT_LOCK #endif /* VM_OBJECT_WUNLOCK */ VM_OBJECT_WUNLOCK(object); page = vm_page_getfake(paddr, memattr); VM_OBJECT_WLOCK(object); vm_page_lock(*mres); vm_page_free(*mres); vm_page_unlock(*mres); *mres = page; vm_page_insert(page, object, pidx); } page->valid = VM_PAGE_BITS_ALL; return (VM_PAGER_OK); } static struct cdev_pager_ops netmap_cdev_pager_ops = { .cdev_pg_ctor = netmap_dev_pager_ctor, .cdev_pg_dtor = netmap_dev_pager_dtor, .cdev_pg_fault = netmap_dev_pager_fault, }; static int netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff, vm_size_t objsize, vm_object_t *objp, int prot) { int error; struct netmap_vm_handle_t *vmh; struct netmap_priv_d *priv; vm_object_t obj; if (netmap_verbose) D("cdev %p foff %jd size %jd objp %p prot %d", cdev, (intmax_t )*foff, (intmax_t )objsize, objp, prot); vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (vmh == NULL) return ENOMEM; vmh->dev = cdev; NMG_LOCK(); error = devfs_get_cdevpriv((void**)&priv); if (error) goto err_unlock; if (priv->np_nifp == NULL) { error = EINVAL; goto err_unlock; } vmh->priv = priv; priv->np_refs++; NMG_UNLOCK(); obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &netmap_cdev_pager_ops, objsize, prot, *foff, NULL); if (obj == NULL) { D("cdev_pager_allocate failed"); error = EINVAL; goto err_deref; } *objp = obj; return 0; err_deref: NMG_LOCK(); priv->np_refs--; err_unlock: NMG_UNLOCK(); // err: free(vmh, M_DEVBUF); return error; } /* * On FreeBSD the close routine is only called on the last close on * the device (/dev/netmap) so we cannot do anything useful. * To track close() on individual file descriptors we pass netmap_dtor() to * devfs_set_cdevpriv() on open(). The FreeBSD kernel will call the destructor * when the last fd pointing to the device is closed. 
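 * The pattern, as used by netmap_open() and the other cdevsw entry points
 * below, is:
 *	priv = netmap_priv_new();
 *	error = devfs_set_cdevpriv(priv, netmap_dtor);
 * after which each entry point retrieves the per-fd state with
 * devfs_get_cdevpriv((void **)&priv).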
* * Note that FreeBSD does not even munmap() on close() so we also have * to track mmap() ourselves, and postpone the call to * netmap_dtor() is called when the process has no open fds and no active * memory maps on /dev/netmap, as in linux. */ static int netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { if (netmap_verbose) D("dev %p fflag 0x%x devtype %d td %p", dev, fflag, devtype, td); return 0; } static int netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct netmap_priv_d *priv; int error; (void)dev; (void)oflags; (void)devtype; (void)td; NMG_LOCK(); priv = netmap_priv_new(); if (priv == NULL) { error = ENOMEM; goto out; } error = devfs_set_cdevpriv(priv, netmap_dtor); if (error) { netmap_priv_delete(priv); } out: NMG_UNLOCK(); return error; } /******************** kthread wrapper ****************/ #include u_int nm_os_ncpus(void) { return mp_maxid + 1; } struct nm_kctx_ctx { struct thread *user_td; /* thread user-space (kthread creator) to send ioctl */ struct ptnetmap_cfgentry_bhyve cfg; /* worker function and parameter */ nm_kctx_worker_fn_t worker_fn; void *worker_private; struct nm_kctx *nmk; /* integer to manage multiple worker contexts (e.g., RX or TX on ptnetmap) */ long type; }; struct nm_kctx { struct thread *worker; struct mtx worker_lock; uint64_t scheduled; /* pending wake_up request */ struct nm_kctx_ctx worker_ctx; int run; /* used to stop kthread */ int attach_user; /* kthread attached to user_process */ int affinity; }; void inline nm_os_kctx_worker_wakeup(struct nm_kctx *nmk) { /* * There may be a race between FE and BE, * which call both this function, and worker kthread, * that reads nmk->scheduled. * * For us it is not important the counter value, * but simply that it has changed since the last * time the kthread saw it. 
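 * The consumer side is nm_kctx_worker() below: it compares nmk->scheduled
 * with its local old_scheduled while holding the same worker_lock and,
 * if nothing new is pending, msleep()s on cfg.wchan with a one second
 * timeout, so a wakeup issued here cannot be lost between the check and
 * the sleep.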
*/ mtx_lock(&nmk->worker_lock); nmk->scheduled++; if (nmk->worker_ctx.cfg.wchan) { wakeup((void *)(uintptr_t)nmk->worker_ctx.cfg.wchan); } mtx_unlock(&nmk->worker_lock); } void inline nm_os_kctx_send_irq(struct nm_kctx *nmk) { struct nm_kctx_ctx *ctx = &nmk->worker_ctx; int err; if (ctx->user_td && ctx->cfg.ioctl_fd > 0) { err = kern_ioctl(ctx->user_td, ctx->cfg.ioctl_fd, ctx->cfg.ioctl_cmd, (caddr_t)&ctx->cfg.ioctl_data); if (err) { D("kern_ioctl error: %d ioctl parameters: fd %d com %lu data %p", err, ctx->cfg.ioctl_fd, (unsigned long)ctx->cfg.ioctl_cmd, &ctx->cfg.ioctl_data); } } } static void nm_kctx_worker(void *data) { struct nm_kctx *nmk = data; struct nm_kctx_ctx *ctx = &nmk->worker_ctx; uint64_t old_scheduled = nmk->scheduled; if (nmk->affinity >= 0) { thread_lock(curthread); sched_bind(curthread, nmk->affinity); thread_unlock(curthread); } while (nmk->run) { /* * check if the parent process dies * (when kthread is attached to user process) */ if (ctx->user_td) { PROC_LOCK(curproc); thread_suspend_check(0); PROC_UNLOCK(curproc); } else { kthread_suspend_check(); } /* * if wchan is not defined, we don't have notification * mechanism and we continually execute worker_fn() */ if (!ctx->cfg.wchan) { ctx->worker_fn(ctx->worker_private, 1); /* worker body */ } else { /* checks if there is a pending notification */ mtx_lock(&nmk->worker_lock); if (likely(nmk->scheduled != old_scheduled)) { old_scheduled = nmk->scheduled; mtx_unlock(&nmk->worker_lock); ctx->worker_fn(ctx->worker_private, 1); /* worker body */ continue; } else if (nmk->run) { /* wait on event with one second timeout */ msleep((void *)(uintptr_t)ctx->cfg.wchan, &nmk->worker_lock, 0, "nmk_ev", hz); nmk->scheduled++; } mtx_unlock(&nmk->worker_lock); } } kthread_exit(); } void nm_os_kctx_worker_setaff(struct nm_kctx *nmk, int affinity) { nmk->affinity = affinity; } struct nm_kctx * -nm_os_kctx_create(struct nm_kctx_cfg *cfg, unsigned int cfgtype, - void *opaque) +nm_os_kctx_create(struct nm_kctx_cfg *cfg, void *opaque) { struct nm_kctx *nmk = NULL; - if (cfgtype != PTNETMAP_CFGTYPE_BHYVE) { - D("Unsupported cfgtype %u", cfgtype); - return NULL; - } - nmk = malloc(sizeof(*nmk), M_DEVBUF, M_NOWAIT | M_ZERO); if (!nmk) return NULL; mtx_init(&nmk->worker_lock, "nm_kthread lock", NULL, MTX_DEF); nmk->worker_ctx.worker_fn = cfg->worker_fn; nmk->worker_ctx.worker_private = cfg->worker_private; nmk->worker_ctx.type = cfg->type; nmk->affinity = -1; /* attach kthread to user process (ptnetmap) */ nmk->attach_user = cfg->attach_user; /* store kick/interrupt configuration */ if (opaque) { nmk->worker_ctx.cfg = *((struct ptnetmap_cfgentry_bhyve *)opaque); } return nmk; } int nm_os_kctx_worker_start(struct nm_kctx *nmk) { struct proc *p = NULL; int error = 0; if (nmk->worker) { return EBUSY; } /* check if we want to attach kthread to user process */ if (nmk->attach_user) { nmk->worker_ctx.user_td = curthread; p = curthread->td_proc; } /* enable kthread main loop */ nmk->run = 1; /* create kthread */ if((error = kthread_add(nm_kctx_worker, nmk, p, &nmk->worker, RFNOWAIT /* to be checked */, 0, "nm-kthread-%ld", nmk->worker_ctx.type))) { goto err; } D("nm_kthread started td %p", nmk->worker); return 0; err: D("nm_kthread start failed err %d", error); nmk->worker = NULL; return error; } void nm_os_kctx_worker_stop(struct nm_kctx *nmk) { if (!nmk->worker) { return; } /* tell to kthread to exit from main loop */ nmk->run = 0; /* wake up kthread if it sleeps */ kthread_resume(nmk->worker); nm_os_kctx_worker_wakeup(nmk); nmk->worker = NULL; } void 
nm_os_kctx_destroy(struct nm_kctx *nmk) { if (!nmk) return; if (nmk->worker) { nm_os_kctx_worker_stop(nmk); } memset(&nmk->worker_ctx.cfg, 0, sizeof(nmk->worker_ctx.cfg)); free(nmk, M_DEVBUF); } /******************** kqueue support ****************/ /* * nm_os_selwakeup also needs to issue a KNOTE_UNLOCKED. * We use a non-zero argument to distinguish the call from the one * in kevent_scan() which instead also needs to run netmap_poll(). * The knote uses a global mutex for the time being. We might * try to reuse the one in the si, but it is not allocated * permanently so it might be a bit tricky. * * The *kqfilter function registers one or another f_event * depending on read or write mode. * In the call to f_event() td_fpop is NULL so any child function * calling devfs_get_cdevpriv() would fail - and we need it in * netmap_poll(). As a workaround we store priv into kn->kn_hook * and pass it as first argument to netmap_poll(), which then * uses the failure to tell that we are called from f_event() * and do not need the selrecord(). */ void nm_os_selwakeup(struct nm_selinfo *si) { if (netmap_verbose) D("on knote %p", &si->si.si_note); selwakeuppri(&si->si, PI_NET); /* use a non-zero hint to tell the notification from the * call done in kqueue_scan() which uses 0 */ KNOTE_UNLOCKED(&si->si.si_note, 0x100 /* notification */); } void nm_os_selrecord(struct thread *td, struct nm_selinfo *si) { selrecord(td, &si->si); } static void netmap_knrdetach(struct knote *kn) { struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook; struct selinfo *si = &priv->np_si[NR_RX]->si; D("remove selinfo %p", si); knlist_remove(&si->si_note, kn, 0); } static void netmap_knwdetach(struct knote *kn) { struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook; struct selinfo *si = &priv->np_si[NR_TX]->si; D("remove selinfo %p", si); knlist_remove(&si->si_note, kn, 0); } /* * callback from notifies (generated externally) and our * calls to kevent(). The former we just return 1 (ready) * since we do not know better. * In the latter we call netmap_poll and return 0/1 accordingly. */ static int netmap_knrw(struct knote *kn, long hint, int events) { struct netmap_priv_d *priv; int revents; if (hint != 0) { ND(5, "call from notify"); return 1; /* assume we are ready */ } priv = kn->kn_hook; /* the notification may come from an external thread, * in which case we do not want to run the netmap_poll * This should be filtered above, but check just in case. */ if (curthread != priv->np_td) { /* should not happen */ RD(5, "curthread changed %p %p", curthread, priv->np_td); return 1; } else { revents = netmap_poll(priv, events, NULL); return (events & revents) ? 1 : 0; } } static int netmap_knread(struct knote *kn, long hint) { return netmap_knrw(kn, hint, POLLIN); } static int netmap_knwrite(struct knote *kn, long hint) { return netmap_knrw(kn, hint, POLLOUT); } static struct filterops netmap_rfiltops = { .f_isfd = 1, .f_detach = netmap_knrdetach, .f_event = netmap_knread, }; static struct filterops netmap_wfiltops = { .f_isfd = 1, .f_detach = netmap_knwdetach, .f_event = netmap_knwrite, }; /* * This is called when a thread invokes kevent() to record * a change in the configuration of the kqueue(). * The 'priv' should be the same as in the netmap device. 
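 *
 * From userspace this is reached through the usual kqueue(2) pattern,
 * e.g. (sketch, error handling omitted, 'nmfd' is the /dev/netmap fd):
 *	struct kevent ev;
 *	int kq = kqueue();
 *	EV_SET(&ev, nmfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 * which arrives here with kn->kn_filter == EVFILT_READ.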
*/ static int netmap_kqfilter(struct cdev *dev, struct knote *kn) { struct netmap_priv_d *priv; int error; struct netmap_adapter *na; struct nm_selinfo *si; int ev = kn->kn_filter; if (ev != EVFILT_READ && ev != EVFILT_WRITE) { D("bad filter request %d", ev); return 1; } error = devfs_get_cdevpriv((void**)&priv); if (error) { D("device not yet setup"); return 1; } na = priv->np_na; if (na == NULL) { D("no netmap adapter for this file descriptor"); return 1; } /* the si is indicated in the priv */ si = priv->np_si[(ev == EVFILT_WRITE) ? NR_TX : NR_RX]; // XXX lock(priv) ? kn->kn_fop = (ev == EVFILT_WRITE) ? &netmap_wfiltops : &netmap_rfiltops; kn->kn_hook = priv; knlist_add(&si->si.si_note, kn, 1); // XXX unlock(priv) ND("register %p %s td %p priv %p kn %p np_nifp %p kn_fp/fpop %s", na, na->ifp->if_xname, curthread, priv, kn, priv->np_nifp, kn->kn_fp == curthread->td_fpop ? "match" : "MISMATCH"); return 0; } static int freebsd_netmap_poll(struct cdev *cdevi __unused, int events, struct thread *td) { struct netmap_priv_d *priv; if (devfs_get_cdevpriv((void **)&priv)) { return POLLERR; } return netmap_poll(priv, events, td); } static int freebsd_netmap_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data, int ffla __unused, struct thread *td) { int error; struct netmap_priv_d *priv; CURVNET_SET(TD_TO_VNET(td)); error = devfs_get_cdevpriv((void **)&priv); if (error) { /* XXX ENOENT should be impossible, since the priv * is now created in the open */ if (error == ENOENT) error = ENXIO; goto out; } - error = netmap_ioctl(priv, cmd, data, td); + error = netmap_ioctl(priv, cmd, data, td, /*nr_body_is_user=*/1); out: CURVNET_RESTORE(); return error; } extern struct cdevsw netmap_cdevsw; /* XXX used in netmap.c, should go elsewhere */ struct cdevsw netmap_cdevsw = { .d_version = D_VERSION, .d_name = "netmap", .d_open = netmap_open, .d_mmap_single = netmap_mmap_single, .d_ioctl = freebsd_netmap_ioctl, .d_poll = freebsd_netmap_poll, .d_kqfilter = netmap_kqfilter, .d_close = netmap_close, }; /*--- end of kqueue support ----*/ /* * Kernel entry point. * * Initialize/finalize the module and return. * * Return 0 on success, errno on failure. */ static int netmap_loader(__unused struct module *module, int event, __unused void *arg) { int error = 0; switch (event) { case MOD_LOAD: error = netmap_init(); break; case MOD_UNLOAD: /* * if some one is still using netmap, * then the module can not be unloaded. */ if (netmap_use_count) { D("netmap module can not be unloaded - netmap_use_count: %d", netmap_use_count); error = EBUSY; break; } netmap_fini(); break; default: error = EOPNOTSUPP; break; } return (error); } #ifdef DEV_MODULE_ORDERED /* * The netmap module contains three drivers: (i) the netmap character device * driver; (ii) the ptnetmap memdev PCI device driver, (iii) the ptnet PCI * device driver. The attach() routines of both (ii) and (iii) need the * lock of the global allocator, and such lock is initialized in netmap_init(), * which is part of (i). * Therefore, we make sure that (i) is loaded before (ii) and (iii), using * the 'order' parameter of driver declaration macros. For (i), we specify * SI_ORDER_MIDDLE, while higher orders are used with the DRIVER_MODULE_ORDERED * macros for (ii) and (iii). 
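 * (The ptn_memdev declaration earlier in this file uses
 * DRIVER_MODULE_ORDERED(..., SI_ORDER_MIDDLE + 1) for exactly this
 * reason.)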
*/ DEV_MODULE_ORDERED(netmap, netmap_loader, NULL, SI_ORDER_MIDDLE); #else /* !DEV_MODULE_ORDERED */ DEV_MODULE(netmap, netmap_loader, NULL); #endif /* DEV_MODULE_ORDERED */ MODULE_DEPEND(netmap, pci, 1, 1, 1); MODULE_VERSION(netmap, 1); /* reduce conditional code */ // linux API, use for the knlist in FreeBSD /* use a private mutex for the knlist */ Index: head/sys/dev/netmap/netmap_generic.c =================================================================== --- head/sys/dev/netmap/netmap_generic.c (revision 332422) +++ head/sys/dev/netmap/netmap_generic.c (revision 332423) @@ -1,1267 +1,1267 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2013-2016 Vincenzo Maffione * Copyright (C) 2013-2016 Luigi Rizzo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This module implements netmap support on top of standard, * unmodified device drivers. * * A NIOCREGIF request is handled here if the device does not * have native support. TX and RX rings are emulated as follows: * * NIOCREGIF * We preallocate a block of TX mbufs (roughly as many as * tx descriptors; the number is not critical) to speed up * operation during transmissions. The refcount on most of * these buffers is artificially bumped up so we can recycle * them more easily. Also, the destructor is intercepted * so we use it as an interrupt notification to wake up * processes blocked on a poll(). * * For each receive ring we allocate one "struct mbq" * (an mbuf tailq plus a spinlock). We intercept packets * (through if_input) * on the receive path and put them in the mbq from which * netmap receive routines can grab them. * * TX: * in the generic_txsync() routine, netmap buffers are copied * (or linked, in a future) to the preallocated mbufs * and pushed to the transmit queue. Some of these mbufs * (those with NS_REPORT, or otherwise every half ring) * have the refcount=1, others have refcount=2. * When the destructor is invoked, we take that as * a notification that all mbufs up to that one in * the specific ring have been completed, and generate * the equivalent of a transmit interrupt. 
* * RX: * */ #ifdef __FreeBSD__ #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include #include /* PROT_EXEC */ #include #include /* sockaddrs */ #include #include #include #include #include /* bus_dmamap_* in netmap_kern.h */ // XXX temporary - D() defined here #include #include #include #define MBUF_RXQ(m) ((m)->m_pkthdr.flowid) #define smp_mb() /* * FreeBSD mbuf allocator/deallocator in emulation mode: */ #if __FreeBSD_version < 1100000 /* * For older versions of FreeBSD: * * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE * so that the destructor, if invoked, will not free the packet. * In principle we should set the destructor only on demand, * but since there might be a race we better do it on allocation. * As a consequence, we also need to set the destructor or we * would leak buffers. */ /* mbuf destructor, also need to change the type to EXT_EXTREF, * add an M_NOFREE flag, and then clear the flag and * chain into uma_zfree(zone_pack, mf) * (or reinstall the buffer ?) */ #define SET_MBUF_DESTRUCTOR(m, fn) do { \ (m)->m_ext.ext_free = (void *)fn; \ (m)->m_ext.ext_type = EXT_EXTREF; \ } while (0) static int void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { /* restore original mbuf */ m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1; m->m_ext.ext_arg1 = NULL; m->m_ext.ext_type = EXT_PACKET; m->m_ext.ext_free = NULL; if (MBUF_REFCNT(m) == 0) SET_MBUF_REFCNT(m, 1); uma_zfree(zone_pack, m); return 0; } static inline struct mbuf * nm_os_get_mbuf(struct ifnet *ifp, int len) { struct mbuf *m; (void)ifp; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m) { /* m_getcl() (mb_ctor_mbuf) has an assert that checks that * M_NOFREE flag is not specified as third argument, * so we have to set M_NOFREE after m_getcl(). */ m->m_flags |= M_NOFREE; m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save m->m_ext.ext_free = (void *)void_mbuf_dtor; m->m_ext.ext_type = EXT_EXTREF; ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m)); } return m; } #else /* __FreeBSD_version >= 1100000 */ /* * Newer versions of FreeBSD, using a straightforward scheme. * * We allocate mbufs with m_gethdr(), since the mbuf header is needed * by the driver. We also attach a customly-provided external storage, * which in this case is a netmap buffer. When calling m_extadd(), however * we pass a NULL address, since the real address (and length) will be * filled in by nm_os_generic_xmit_frame() right before calling * if_transmit(). * * The dtor function does nothing, however we need it since mb_free_ext() * has a KASSERT(), checking that the mbuf dtor function is not NULL. */ #if __FreeBSD_version <= 1200050 static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { } #else /* __FreeBSD_version >= 1200051 */ /* The arg1 and arg2 pointers argument were removed by r324446, which * in included since version 1200051. */ static void void_mbuf_dtor(struct mbuf *m) { } #endif /* __FreeBSD_version >= 1200051 */ #define SET_MBUF_DESTRUCTOR(m, fn) do { \ (m)->m_ext.ext_free = (fn != NULL) ? 
\ (void *)fn : (void *)void_mbuf_dtor; \ } while (0) static inline struct mbuf * nm_os_get_mbuf(struct ifnet *ifp, int len) { struct mbuf *m; (void)ifp; (void)len; m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { return m; } m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor, NULL, NULL, 0, EXT_NET_DRV); return m; } #endif /* __FreeBSD_version >= 1100000 */ #elif defined _WIN32 #include "win_glue.h" #define MBUF_TXQ(m) 0//((m)->m_pkthdr.flowid) #define MBUF_RXQ(m) 0//((m)->m_pkthdr.flowid) #define smp_mb() //XXX: to be correctly defined #else /* linux */ #include "bsd_glue.h" #include /* struct ethtool_ops, get_ringparam */ #include static inline struct mbuf * nm_os_get_mbuf(struct ifnet *ifp, int len) { return alloc_skb(ifp->needed_headroom + len + ifp->needed_tailroom, GFP_ATOMIC); } #endif /* linux */ /* Common headers. */ #include #include #include #define for_each_kring_n(_i, _k, _karr, _n) \ - for (_k=_karr, _i = 0; _i < _n; (_k)++, (_i)++) + for ((_k)=*(_karr), (_i) = 0; (_i) < (_n); (_i)++, (_k) = (_karr)[(_i)]) #define for_each_tx_kring(_i, _k, _na) \ for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings) #define for_each_tx_kring_h(_i, _k, _na) \ for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1) #define for_each_rx_kring(_i, _k, _na) \ for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings) #define for_each_rx_kring_h(_i, _k, _na) \ for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1) /* ======================== PERFORMANCE STATISTICS =========================== */ #ifdef RATE_GENERIC #define IFRATE(x) x struct rate_stats { unsigned long txpkt; unsigned long txsync; unsigned long txirq; unsigned long txrepl; unsigned long txdrop; unsigned long rxpkt; unsigned long rxirq; unsigned long rxsync; }; struct rate_context { unsigned refcount; struct timer_list timer; struct rate_stats new; struct rate_stats old; }; #define RATE_PRINTK(_NAME_) \ printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD); #define RATE_PERIOD 2 static void rate_callback(unsigned long arg) { struct rate_context * ctx = (struct rate_context *)arg; struct rate_stats cur = ctx->new; int r; RATE_PRINTK(txpkt); RATE_PRINTK(txsync); RATE_PRINTK(txirq); RATE_PRINTK(txrepl); RATE_PRINTK(txdrop); RATE_PRINTK(rxpkt); RATE_PRINTK(rxsync); RATE_PRINTK(rxirq); printk("\n"); ctx->old = cur; r = mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(RATE_PERIOD * 1000)); if (unlikely(r)) D("[v1000] Error: mod_timer()"); } static struct rate_context rate_ctx; void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi) { if (txp) rate_ctx.new.txpkt++; if (txs) rate_ctx.new.txsync++; if (txi) rate_ctx.new.txirq++; if (rxp) rate_ctx.new.rxpkt++; if (rxs) rate_ctx.new.rxsync++; if (rxi) rate_ctx.new.rxirq++; } #else /* !RATE */ #define IFRATE(x) #endif /* !RATE */ /* ========== GENERIC (EMULATED) NETMAP ADAPTER SUPPORT ============= */ /* * Wrapper used by the generic adapter layer to notify * the poller threads. Differently from netmap_rx_irq(), we check * only NAF_NETMAP_ON instead of NAF_NATIVE_ON to enable the irq. 
*/ void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done) { if (unlikely(!nm_netmap_on(na))) return; netmap_common_irq(na, q, work_done); #ifdef RATE_GENERIC if (work_done) rate_ctx.new.rxirq++; else rate_ctx.new.txirq++; #endif /* RATE_GENERIC */ } static int generic_netmap_unregister(struct netmap_adapter *na) { struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na; struct netmap_kring *kring = NULL; int i, r; if (na->active_fds == 0) { na->na_flags &= ~NAF_NETMAP_ON; /* Stop intercepting packets on the RX path. */ nm_os_catch_rx(gna, 0); /* Release packet steering control. */ nm_os_catch_tx(gna, 0); } for_each_rx_kring_h(r, kring, na) { if (nm_kring_pending_off(kring)) { D("Emulated adapter: ring '%s' deactivated", kring->name); kring->nr_mode = NKR_NETMAP_OFF; } } for_each_tx_kring_h(r, kring, na) { if (nm_kring_pending_off(kring)) { kring->nr_mode = NKR_NETMAP_OFF; D("Emulated adapter: ring '%s' deactivated", kring->name); } } for_each_rx_kring(r, kring, na) { /* Free the mbufs still pending in the RX queues, * that did not end up into the corresponding netmap * RX rings. */ mbq_safe_purge(&kring->rx_queue); nm_os_mitigation_cleanup(&gna->mit[r]); } /* Decrement reference counter for the mbufs in the * TX pools. These mbufs can be still pending in drivers, * (e.g. this happens with virtio-net driver, which * does lazy reclaiming of transmitted mbufs). */ for_each_tx_kring(r, kring, na) { /* We must remove the destructor on the TX event, * because the destructor invokes netmap code, and * the netmap module may disappear before the * TX event is consumed. */ mtx_lock_spin(&kring->tx_event_lock); if (kring->tx_event) { SET_MBUF_DESTRUCTOR(kring->tx_event, NULL); } kring->tx_event = NULL; mtx_unlock_spin(&kring->tx_event_lock); } if (na->active_fds == 0) { nm_os_free(gna->mit); for_each_rx_kring(r, kring, na) { mbq_safe_fini(&kring->rx_queue); } for_each_tx_kring(r, kring, na) { mtx_destroy(&kring->tx_event_lock); if (kring->tx_pool == NULL) { continue; } for (i=0; inum_tx_desc; i++) { if (kring->tx_pool[i]) { m_freem(kring->tx_pool[i]); } } nm_os_free(kring->tx_pool); kring->tx_pool = NULL; } #ifdef RATE_GENERIC if (--rate_ctx.refcount == 0) { D("del_timer()"); del_timer(&rate_ctx.timer); } #endif D("Emulated adapter for %s deactivated", na->name); } return 0; } /* Enable/disable netmap mode for a generic network interface. */ static int generic_netmap_register(struct netmap_adapter *na, int enable) { struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na; struct netmap_kring *kring = NULL; int error; int i, r; if (!na) { return EINVAL; } if (!enable) { /* This is actually an unregif. */ return generic_netmap_unregister(na); } if (na->active_fds == 0) { D("Emulated adapter for %s activated", na->name); /* Do all memory allocations when (na->active_fds == 0), to * simplify error management. */ /* Allocate memory for mitigation support on all the rx queues. */ gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit)); if (!gna->mit) { D("mitigation allocation failed"); error = ENOMEM; goto out; } for_each_rx_kring(r, kring, na) { /* Init mitigation support. */ nm_os_mitigation_init(&gna->mit[r], r, na); /* Initialize the rx queue, as generic_rx_handler() can * be called as soon as nm_os_catch_rx() returns. */ mbq_safe_init(&kring->rx_queue); } /* * Prepare mbuf pools (parallel to the tx rings), for packet * transmission. Don't preallocate the mbufs here, it's simpler * to leave this task to txsync. 
*/ for_each_tx_kring(r, kring, na) { kring->tx_pool = NULL; } for_each_tx_kring(r, kring, na) { kring->tx_pool = nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *)); if (!kring->tx_pool) { D("tx_pool allocation failed"); error = ENOMEM; goto free_tx_pools; } mtx_init(&kring->tx_event_lock, "tx_event_lock", NULL, MTX_SPIN); } } for_each_rx_kring_h(r, kring, na) { if (nm_kring_pending_on(kring)) { D("Emulated adapter: ring '%s' activated", kring->name); kring->nr_mode = NKR_NETMAP_ON; } } for_each_tx_kring_h(r, kring, na) { if (nm_kring_pending_on(kring)) { D("Emulated adapter: ring '%s' activated", kring->name); kring->nr_mode = NKR_NETMAP_ON; } } for_each_tx_kring(r, kring, na) { /* Initialize tx_pool and tx_event. */ for (i=0; inum_tx_desc; i++) { kring->tx_pool[i] = NULL; } kring->tx_event = NULL; } if (na->active_fds == 0) { /* Prepare to intercept incoming traffic. */ error = nm_os_catch_rx(gna, 1); if (error) { D("nm_os_catch_rx(1) failed (%d)", error); goto free_tx_pools; } /* Let netmap control the packet steering. */ error = nm_os_catch_tx(gna, 1); if (error) { D("nm_os_catch_tx(1) failed (%d)", error); goto catch_rx; } na->na_flags |= NAF_NETMAP_ON; #ifdef RATE_GENERIC if (rate_ctx.refcount == 0) { D("setup_timer()"); memset(&rate_ctx, 0, sizeof(rate_ctx)); setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx); if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) { D("Error: mod_timer()"); } } rate_ctx.refcount++; #endif /* RATE */ } return 0; /* Here (na->active_fds == 0) holds. */ catch_rx: nm_os_catch_rx(gna, 0); free_tx_pools: for_each_tx_kring(r, kring, na) { mtx_destroy(&kring->tx_event_lock); if (kring->tx_pool == NULL) { continue; } nm_os_free(kring->tx_pool); kring->tx_pool = NULL; } for_each_rx_kring(r, kring, na) { mbq_safe_fini(&kring->rx_queue); } nm_os_free(gna->mit); out: return error; } /* * Callback invoked when the device driver frees an mbuf used * by netmap to transmit a packet. This usually happens when * the NIC notifies the driver that transmission is completed. */ static void generic_mbuf_destructor(struct mbuf *m) { struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m)); struct netmap_kring *kring; unsigned int r = MBUF_TXQ(m); unsigned int r_orig = r; if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) { D("Error: no netmap adapter on device %p", GEN_TX_MBUF_IFP(m)); return; } /* * First, clear the event mbuf. * In principle, the event 'm' should match the one stored * on ring 'r'. However we check it explicitely to stay * safe against lower layers (qdisc, driver, etc.) changing * MBUF_TXQ(m) under our feet. If the match is not found * on 'r', we try to see if it belongs to some other ring. */ for (;;) { bool match = false; - kring = &na->tx_rings[r]; + kring = na->tx_rings[r]; mtx_lock_spin(&kring->tx_event_lock); if (kring->tx_event == m) { kring->tx_event = NULL; match = true; } mtx_unlock_spin(&kring->tx_event_lock); if (match) { if (r != r_orig) { RD(1, "event %p migrated: ring %u --> %u", m, r_orig, r); } break; } if (++r == na->num_tx_rings) r = 0; if (r == r_orig) { RD(1, "Cannot match event %p", m); return; } } /* Second, wake up clients. They will reclaim the event through * txsync. */ netmap_generic_irq(na, r, NULL); #ifdef __FreeBSD__ #if __FreeBSD_version <= 1200050 void_mbuf_dtor(m, NULL, NULL); #else /* __FreeBSD_version >= 1200051 */ void_mbuf_dtor(m); #endif /* __FreeBSD_version >= 1200051 */ #endif } /* Record completed transmissions and update hwtail. 
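 * (Ring indices wrap modulo the ring size through the nm_next()/nm_prev()
 * helpers; for example, with 1024 slots, nr_hwtail = 1023 and
 * nr_hwcur = 5, the not-yet-completed buffers described below are the
 * ones in slots 0..4.)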
* * The oldest tx buffer not yet completed is at nr_hwtail + 1, * nr_hwcur is the first unsent buffer. */ static u_int generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc) { u_int const lim = kring->nkr_num_slots - 1; u_int nm_i = nm_next(kring->nr_hwtail, lim); u_int hwcur = kring->nr_hwcur; u_int n = 0; struct mbuf **tx_pool = kring->tx_pool; ND("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail); while (nm_i != hwcur) { /* buffers not completed */ struct mbuf *m = tx_pool[nm_i]; if (txqdisc) { if (m == NULL) { /* Nothing to do, this is going * to be replenished. */ RD(3, "Is this happening?"); } else if (MBUF_QUEUED(m)) { break; /* Not dequeued yet. */ } else if (MBUF_REFCNT(m) != 1) { /* This mbuf has been dequeued but is still busy * (refcount is 2). * Leave it to the driver and replenish. */ m_freem(m); tx_pool[nm_i] = NULL; } } else { if (unlikely(m == NULL)) { int event_consumed; /* This slot was used to place an event. */ mtx_lock_spin(&kring->tx_event_lock); event_consumed = (kring->tx_event == NULL); mtx_unlock_spin(&kring->tx_event_lock); if (!event_consumed) { /* The event has not been consumed yet, * still busy in the driver. */ break; } /* The event has been consumed, we can go * ahead. */ } else if (MBUF_REFCNT(m) != 1) { /* This mbuf is still busy: its refcnt is 2. */ break; } } n++; nm_i = nm_next(nm_i, lim); } kring->nr_hwtail = nm_prev(nm_i, lim); ND("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail); return n; } /* Compute a slot index in the middle between inf and sup. */ static inline u_int ring_middle(u_int inf, u_int sup, u_int lim) { u_int n = lim + 1; u_int e; if (sup >= inf) { e = (sup + inf) / 2; } else { /* wrap around */ e = (sup + n + inf) / 2; if (e >= n) { e -= n; } } if (unlikely(e >= n)) { D("This cannot happen"); e = 0; } return e; } static void generic_set_tx_event(struct netmap_kring *kring, u_int hwcur) { u_int lim = kring->nkr_num_slots - 1; struct mbuf *m; u_int e; u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */ if (ntc == hwcur) { return; /* all buffers are free */ } /* * We have pending packets in the driver between hwtail+1 * and hwcur, and we have to chose one of these slot to * generate a notification. * There is a race but this is only called within txsync which * does a double check. */ #if 0 /* Choose a slot in the middle, so that we don't risk ending * up in a situation where the client continuously wake up, * fills one or a few TX slots and go to sleep again. */ e = ring_middle(ntc, hwcur, lim); #else /* Choose the first pending slot, to be safe against driver * reordering mbuf transmissions. */ e = ntc; #endif m = kring->tx_pool[e]; if (m == NULL) { /* An event is already in place. */ return; } mtx_lock_spin(&kring->tx_event_lock); if (kring->tx_event) { /* An event is already in place. */ mtx_unlock_spin(&kring->tx_event_lock); return; } SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor); kring->tx_event = m; mtx_unlock_spin(&kring->tx_event_lock); kring->tx_pool[e] = NULL; ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2 ); /* Decrement the refcount. This will free it if we lose the race * with the driver. */ m_freem(m); smp_mb(); } /* * generic_netmap_txsync() transforms netmap buffers into mbufs * and passes them to the standard device driver * (ndo_start_xmit() or ifp->if_transmit() ). * On linux this is not done directly, but using dev_queue_xmit(), * since it implements the TX flow control (and takes some locks). 
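 *
 * The routine below has two parts: it first walks the slots from
 * nr_hwcur to rhead, wrapping each one in an mbuf taken from the
 * per-ring tx_pool and passing it to nm_os_generic_xmit_frame(); it then
 * reclaims completed transmissions with generic_netmap_tx_clean(),
 * planting a notification event via generic_set_tx_event() when the ring
 * runs out of space or a transmission fails.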
*/ static int generic_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na; struct ifnet *ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int nm_i; /* index into the netmap ring */ // j u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; u_int ring_nr = kring->ring_id; IFRATE(rate_ctx.new.txsync++); rmb(); /* * First part: process new packets to send. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* we have new packets to send */ struct nm_os_gen_arg a; u_int event = -1; if (gna->txqdisc && nm_kr_txempty(kring)) { /* In txqdisc mode, we ask for a delayed notification, * but only when cur == hwtail, which means that the * client is going to block. */ event = ring_middle(nm_i, head, lim); ND(3, "Place txqdisc event (hwcur=%u,event=%u," "head=%u,hwtail=%u)", nm_i, event, head, kring->nr_hwtail); } a.ifp = ifp; a.ring_nr = ring_nr; a.head = a.tail = NULL; while (nm_i != head) { struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; void *addr = NMB(na, slot); /* device-specific */ struct mbuf *m; int tx_ret; NM_CHECK_ADDR_LEN(na, addr, len); /* Tale a mbuf from the tx pool (replenishing the pool * entry if necessary) and copy in the user packet. */ m = kring->tx_pool[nm_i]; if (unlikely(m == NULL)) { kring->tx_pool[nm_i] = m = nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na)); if (m == NULL) { RD(2, "Failed to replenish mbuf"); /* Here we could schedule a timer which * retries to replenish after a while, * and notifies the client when it * manages to replenish some slots. In * any case we break early to avoid * crashes. */ break; } IFRATE(rate_ctx.new.txrepl++); } a.m = m; a.addr = addr; a.len = len; a.qevent = (nm_i == event); /* When not in txqdisc mode, we should ask * notifications when NS_REPORT is set, or roughly * every half ring. To optimize this, we set a * notification event when the client runs out of * TX ring space, or when transmission fails. In * the latter case we also break early. */ tx_ret = nm_os_generic_xmit_frame(&a); if (unlikely(tx_ret)) { if (!gna->txqdisc) { /* * No room for this mbuf in the device driver. * Request a notification FOR A PREVIOUS MBUF, * then call generic_netmap_tx_clean(kring) to do the * double check and see if we can free more buffers. * If there is space continue, else break; * NOTE: the double check is necessary if the problem * occurs in the txsync call after selrecord(). * Also, we need some way to tell the caller that not * all buffers were queued onto the device (this was * not a problem with native netmap driver where space * is preallocated). The bridge has a similar problem * and we solve it there by dropping the excess packets. */ generic_set_tx_event(kring, nm_i); if (generic_netmap_tx_clean(kring, gna->txqdisc)) { /* space now available */ continue; } else { break; } } /* In txqdisc mode, the netmap-aware qdisc * queue has the same length as the number of * netmap slots (N). Since tail is advanced * only when packets are dequeued, qdisc * queue overrun cannot happen, so * nm_os_generic_xmit_frame() did not fail * because of that. * However, packets can be dropped because * carrier is off, or because our qdisc is * being deactivated, or possibly for other * reasons. In these cases, we just let the * packet to be dropped. 
*/ IFRATE(rate_ctx.new.txdrop++); } slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); nm_i = nm_next(nm_i, lim); IFRATE(rate_ctx.new.txpkt++); } if (a.head != NULL) { a.addr = NULL; nm_os_generic_xmit_frame(&a); } /* Update hwcur to the next slot to transmit. Here nm_i * is not necessarily head, we could break early. */ kring->nr_hwcur = nm_i; } /* * Second, reclaim completed buffers */ if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) { /* No more available slots? Set a notification event * on a netmap slot that will be cleaned in the future. * No doublecheck is performed, since txsync() will be * called twice by netmap_poll(). */ generic_set_tx_event(kring, nm_i); } generic_netmap_tx_clean(kring, gna->txqdisc); return 0; } /* * This handler is registered (through nm_os_catch_rx()) * within the attached network interface * in the RX subsystem, so that every mbuf passed up by * the driver can be stolen to the network stack. * Stolen packets are put in a queue where the * generic_netmap_rxsync() callback can extract them. * Returns 1 if the packet was stolen, 0 otherwise. */ int generic_rx_handler(struct ifnet *ifp, struct mbuf *m) { struct netmap_adapter *na = NA(ifp); struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na; struct netmap_kring *kring; u_int work_done; u_int r = MBUF_RXQ(m); /* receive ring number */ if (r >= na->num_rx_rings) { r = r % na->num_rx_rings; } - kring = &na->rx_rings[r]; + kring = na->rx_rings[r]; if (kring->nr_mode == NKR_NETMAP_OFF) { /* We must not intercept this mbuf. */ return 0; } /* limit the size of the queue */ if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) { /* This may happen when GRO/LRO features are enabled for * the NIC driver when the generic adapter does not * support RX scatter-gather. */ RD(2, "Warning: driver pushed up big packet " "(size=%d)", (int)MBUF_LEN(m)); m_freem(m); } else if (unlikely(mbq_len(&kring->rx_queue) > 1024)) { m_freem(m); } else { mbq_safe_enqueue(&kring->rx_queue, m); } if (netmap_generic_mit < 32768) { /* no rx mitigation, pass notification up */ netmap_generic_irq(na, r, &work_done); } else { /* same as send combining, filter notification if there is a * pending timer, otherwise pass it up and start a timer. */ if (likely(nm_os_mitigation_active(&gna->mit[r]))) { /* Record that there is some pending work. */ gna->mit[r].mit_pending = 1; } else { netmap_generic_irq(na, r, &work_done); nm_os_mitigation_start(&gna->mit[r]); } } /* We have intercepted the mbuf. */ return 1; } /* * generic_netmap_rxsync() extracts mbufs from the queue filled by * generic_netmap_rx_handler() and puts their content in the netmap * receive ring. * Access must be protected because the rx handler is asynchronous, */ static int generic_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_ring *ring = kring->ring; struct netmap_adapter *na = kring->na; u_int nm_i; /* index into the netmap ring */ //j, u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; /* Adapter-specific variables. */ u_int nm_buf_len = NETMAP_BUF_SIZE(na); struct mbq tmpq; struct mbuf *m; int avail; /* in bytes */ int mlen; int copy; if (head > lim) return netmap_ring_reinit(kring); IFRATE(rate_ctx.new.rxsync++); /* * First part: skip past packets that userspace has released. * This can possibly make room for the second part. 
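 * In the second part below, the free space is first computed in slots,
 *	avail = nm_prev(nr_hwcur, lim) - nr_hwtail    (plus lim + 1 if negative)
 * and then multiplied by the netmap buffer size.  For instance, with
 * lim = 1023, nr_hwcur = 10 and nr_hwtail = 1000 this gives
 * 9 - 1000 + 1024 = 33 free slots.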
*/ nm_i = kring->nr_hwcur; if (nm_i != head) { /* Userspace has released some packets. */ for (n = 0; nm_i != head; n++) { struct netmap_slot *slot = &ring->slot[nm_i]; slot->flags &= ~NS_BUF_CHANGED; nm_i = nm_next(nm_i, lim); } kring->nr_hwcur = head; } /* * Second part: import newly received packets. */ if (!netmap_no_pendintr && !force_update) { return 0; } nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */ /* Compute the available space (in bytes) in this netmap ring. * The first slot that is not considered in is the one before * nr_hwcur. */ avail = nm_prev(kring->nr_hwcur, lim) - nm_i; if (avail < 0) avail += lim + 1; avail *= nm_buf_len; /* First pass: While holding the lock on the RX mbuf queue, * extract as many mbufs as they fit the available space, * and put them in a temporary queue. * To avoid performing a per-mbuf division (mlen / nm_buf_len) to * to update avail, we do the update in a while loop that we * also use to set the RX slots, but without performing the copy. */ mbq_init(&tmpq); mbq_lock(&kring->rx_queue); for (n = 0;; n++) { m = mbq_peek(&kring->rx_queue); if (!m) { /* No more packets from the driver. */ break; } mlen = MBUF_LEN(m); if (mlen > avail) { /* No more space in the ring. */ break; } mbq_dequeue(&kring->rx_queue); while (mlen) { copy = nm_buf_len; if (mlen < copy) { copy = mlen; } mlen -= copy; avail -= nm_buf_len; ring->slot[nm_i].len = copy; ring->slot[nm_i].flags = (mlen ? NS_MOREFRAG : 0); nm_i = nm_next(nm_i, lim); } mbq_enqueue(&tmpq, m); } mbq_unlock(&kring->rx_queue); /* Second pass: Drain the temporary queue, going over the used RX slots, * and perform the copy out of the RX queue lock. */ nm_i = kring->nr_hwtail; for (;;) { void *nmaddr; int ofs = 0; int morefrag; m = mbq_dequeue(&tmpq); if (!m) { break; } do { nmaddr = NMB(na, &ring->slot[nm_i]); /* We only check the address here on generic rx rings. */ if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */ m_freem(m); mbq_purge(&tmpq); mbq_fini(&tmpq); return netmap_ring_reinit(kring); } copy = ring->slot[nm_i].len; m_copydata(m, ofs, copy, nmaddr); ofs += copy; morefrag = ring->slot[nm_i].flags & NS_MOREFRAG; nm_i = nm_next(nm_i, lim); } while (morefrag); m_freem(m); } mbq_fini(&tmpq); if (n) { kring->nr_hwtail = nm_i; IFRATE(rate_ctx.new.rxpkt += n); } kring->nr_kflags &= ~NKR_PENDINTR; return 0; } static void generic_netmap_dtor(struct netmap_adapter *na) { struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na; struct ifnet *ifp = netmap_generic_getifp(gna); struct netmap_adapter *prev_na = gna->prev; if (prev_na != NULL) { netmap_adapter_put(prev_na); if (nm_iszombie(na)) { /* * The driver has been removed without releasing * the reference so we need to do it here. */ netmap_adapter_put(prev_na); } D("Native netmap adapter %p restored", prev_na); } NM_ATTACH_NA(ifp, prev_na); /* * netmap_detach_common(), that it's called after this function, * overrides WNA(ifp) if na->ifp is not NULL. */ na->ifp = NULL; D("Emulated netmap adapter for %s destroyed", na->name); } int na_is_generic(struct netmap_adapter *na) { return na->nm_register == generic_netmap_register; } /* * generic_netmap_attach() makes it possible to use netmap on * a device without native netmap support. * This is less performant than native support but potentially * faster than raw sockets or similar schemes. * * In this "emulated" mode, netmap rings do not necessarily * have the same size as those in the NIC. 
We use a default * value and possibly override it if the OS has ways to fetch the * actual configuration. */ int generic_netmap_attach(struct ifnet *ifp) { struct netmap_adapter *na; struct netmap_generic_adapter *gna; int retval; u_int num_tx_desc, num_rx_desc; #ifdef __FreeBSD__ if (ifp->if_type == IFT_LOOP) { D("if_loop is not supported by %s", __func__); return EINVAL; } #endif if (NA(ifp) && !NM_NA_VALID(ifp)) { /* If NA(ifp) is not null but there is no valid netmap * adapter it means that someone else is using the same * pointer (e.g. ax25_ptr on linux). This happens for * instance when also PF_RING is in use. */ D("Error: netmap adapter hook is busy"); return EBUSY; } num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */ nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */ ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc); if (num_tx_desc == 0 || num_rx_desc == 0) { D("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc); return EINVAL; } gna = nm_os_malloc(sizeof(*gna)); if (gna == NULL) { D("no memory on attach, give up"); return ENOMEM; } na = (struct netmap_adapter *)gna; strncpy(na->name, ifp->if_xname, sizeof(na->name)); na->ifp = ifp; na->num_tx_desc = num_tx_desc; na->num_rx_desc = num_rx_desc; na->nm_register = &generic_netmap_register; na->nm_txsync = &generic_netmap_txsync; na->nm_rxsync = &generic_netmap_rxsync; na->nm_dtor = &generic_netmap_dtor; /* when using generic, NAF_NETMAP_ON is set so we force * NAF_SKIP_INTR to use the regular interrupt handler */ na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS; ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)", ifp->num_tx_queues, ifp->real_num_tx_queues, ifp->tx_queue_len); ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)", ifp->num_rx_queues, ifp->real_num_rx_queues); nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings); retval = netmap_attach_common(na); if (retval) { nm_os_free(gna); return retval; } gna->prev = NA(ifp); /* save old na */ if (gna->prev != NULL) { netmap_adapter_get(gna->prev); } NM_ATTACH_NA(ifp, na); nm_os_generic_set_features(gna); D("Emulated adapter for %s created (prev was %p)", na->name, gna->prev); return retval; } Index: head/sys/dev/netmap/netmap_kern.h =================================================================== --- head/sys/dev/netmap/netmap_kern.h (revision 332422) +++ head/sys/dev/netmap/netmap_kern.h (revision 332423) @@ -1,2178 +1,2227 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo * Copyright (C) 2013-2016 Universita` di Pisa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * The header contains the definitions of constants and function * prototypes used only in kernelspace. */ #ifndef _NET_NETMAP_KERN_H_ #define _NET_NETMAP_KERN_H_ #if defined(linux) #if defined(CONFIG_NETMAP_EXTMEM) #define WITH_EXTMEM #endif #if defined(CONFIG_NETMAP_VALE) #define WITH_VALE #endif #if defined(CONFIG_NETMAP_PIPE) #define WITH_PIPES #endif #if defined(CONFIG_NETMAP_MONITOR) #define WITH_MONITOR #endif #if defined(CONFIG_NETMAP_GENERIC) #define WITH_GENERIC #endif #if defined(CONFIG_NETMAP_PTNETMAP_GUEST) #define WITH_PTNETMAP_GUEST #endif #if defined(CONFIG_NETMAP_PTNETMAP_HOST) #define WITH_PTNETMAP_HOST #endif #if defined(CONFIG_NETMAP_SINK) #define WITH_SINK #endif #elif defined (_WIN32) #define WITH_VALE // comment out to disable VALE support #define WITH_PIPES #define WITH_MONITOR #define WITH_GENERIC #else /* neither linux nor windows */ #define WITH_VALE // comment out to disable VALE support #define WITH_PIPES #define WITH_MONITOR #define WITH_GENERIC #define WITH_PTNETMAP_HOST /* ptnetmap host support */ #define WITH_PTNETMAP_GUEST /* ptnetmap guest support */ - +#define WITH_EXTMEM #endif #if defined(__FreeBSD__) #include #define likely(x) __builtin_expect((long)!!(x), 1L) #define unlikely(x) __builtin_expect((long)!!(x), 0L) #define __user #define NM_LOCK_T struct mtx /* low level spinlock, used to protect queues */ #define NM_MTX_T struct sx /* OS-specific mutex (sleepable) */ #define NM_MTX_INIT(m) sx_init(&(m), #m) #define NM_MTX_DESTROY(m) sx_destroy(&(m)) #define NM_MTX_LOCK(m) sx_xlock(&(m)) #define NM_MTX_SPINLOCK(m) while (!sx_try_xlock(&(m))) ; #define NM_MTX_UNLOCK(m) sx_xunlock(&(m)) #define NM_MTX_ASSERT(m) sx_assert(&(m), SA_XLOCKED) #define NM_SELINFO_T struct nm_selinfo #define NM_SELRECORD_T struct thread #define MBUF_LEN(m) ((m)->m_pkthdr.len) #define MBUF_TXQ(m) ((m)->m_pkthdr.flowid) #define MBUF_TRANSMIT(na, ifp, m) ((na)->if_transmit(ifp, m)) #define GEN_TX_MBUF_IFP(m) ((m)->m_pkthdr.rcvif) #define NM_ATOMIC_T volatile int /* required by atomic/bitops.h */ /* atomic operations */ #include #define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1)) #define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0) #if __FreeBSD_version >= 1100030 #define WNA(_ifp) (_ifp)->if_netmap #else /* older FreeBSD */ #define WNA(_ifp) (_ifp)->if_pspare[0] #endif /* older FreeBSD */ #if __FreeBSD_version >= 1100005 struct netmap_adapter *netmap_getna(if_t ifp); #endif #if __FreeBSD_version >= 1100027 #define MBUF_REFCNT(m) ((m)->m_ext.ext_count) #define SET_MBUF_REFCNT(m, x) (m)->m_ext.ext_count = x #else #define MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1) #define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ref_cnt) = x #endif #define MBUF_QUEUED(m) 1 struct nm_selinfo { struct selinfo si; struct mtx m; }; struct hrtimer { /* Not used in FreeBSD. 
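	 * (Declared empty so that code shared with the Linux backend,
	 * e.g. struct nm_generic_mit which embeds a struct hrtimer,
	 * still compiles on FreeBSD.)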
*/ }; #define NM_BNS_GET(b) #define NM_BNS_PUT(b) #elif defined (linux) #define NM_LOCK_T safe_spinlock_t // see bsd_glue.h #define NM_SELINFO_T wait_queue_head_t #define MBUF_LEN(m) ((m)->len) #define MBUF_TRANSMIT(na, ifp, m) \ ({ \ /* Avoid infinite recursion with generic. */ \ m->priority = NM_MAGIC_PRIORITY_TX; \ (((struct net_device_ops *)(na)->if_transmit)->ndo_start_xmit(m, ifp)); \ 0; \ }) /* See explanation in nm_os_generic_xmit_frame. */ #define GEN_TX_MBUF_IFP(m) ((struct ifnet *)skb_shinfo(m)->destructor_arg) #define NM_ATOMIC_T volatile long unsigned int #define NM_MTX_T struct mutex /* OS-specific sleepable lock */ #define NM_MTX_INIT(m) mutex_init(&(m)) #define NM_MTX_DESTROY(m) do { (void)(m); } while (0) #define NM_MTX_LOCK(m) mutex_lock(&(m)) #define NM_MTX_UNLOCK(m) mutex_unlock(&(m)) #define NM_MTX_ASSERT(m) mutex_is_locked(&(m)) #ifndef DEV_NETMAP #define DEV_NETMAP #endif /* DEV_NETMAP */ #elif defined (__APPLE__) #warning apple support is incomplete. #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define NM_LOCK_T IOLock * #define NM_SELINFO_T struct selinfo #define MBUF_LEN(m) ((m)->m_pkthdr.len) #elif defined (_WIN32) #include "../../../WINDOWS/win_glue.h" #define NM_SELRECORD_T IO_STACK_LOCATION #define NM_SELINFO_T win_SELINFO // see win_glue.h #define NM_LOCK_T win_spinlock_t // see win_glue.h #define NM_MTX_T KGUARDED_MUTEX /* OS-specific mutex (sleepable) */ #define NM_MTX_INIT(m) KeInitializeGuardedMutex(&m); #define NM_MTX_DESTROY(m) do { (void)(m); } while (0) #define NM_MTX_LOCK(m) KeAcquireGuardedMutex(&(m)) #define NM_MTX_UNLOCK(m) KeReleaseGuardedMutex(&(m)) #define NM_MTX_ASSERT(m) assert(&m.Count>0) //These linknames are for the NDIS driver #define NETMAP_NDIS_LINKNAME_STRING L"\\DosDevices\\NMAPNDIS" #define NETMAP_NDIS_NTDEVICE_STRING L"\\Device\\NMAPNDIS" //Definition of internal driver-to-driver ioctl codes #define NETMAP_KERNEL_XCHANGE_POINTERS _IO('i', 180) #define NETMAP_KERNEL_SEND_SHUTDOWN_SIGNAL _IO_direct('i', 195) typedef struct hrtimer{ KTIMER timer; BOOLEAN active; KDPC deferred_proc; }; /* MSVC does not have likely/unlikely support */ #ifdef _MSC_VER #define likely(x) (x) #define unlikely(x) (x) #else #define likely(x) __builtin_expect((long)!!(x), 1L) #define unlikely(x) __builtin_expect((long)!!(x), 0L) #endif //_MSC_VER #else #error unsupported platform #endif /* end - platform-specific code */ #ifndef _WIN32 /* support for emulated sysctl */ #define SYSBEGIN(x) #define SYSEND #endif /* _WIN32 */ #define NM_ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x)) #define NMG_LOCK_T NM_MTX_T #define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock) #define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock) #define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock) #define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock) #define NMG_LOCK_ASSERT() NM_MTX_ASSERT(netmap_global_lock) #if defined(__FreeBSD__) #define nm_prerr printf #define nm_prinf printf #elif defined (_WIN32) #define nm_prerr DbgPrint #define nm_prinf DbgPrint #elif defined(linux) #define nm_prerr(fmt, arg...) printk(KERN_ERR fmt, ##arg) #define nm_prinf(fmt, arg...) printk(KERN_INFO fmt, ##arg) #endif #define ND(format, ...) #define D(format, ...) \ do { \ struct timeval __xxts; \ microtime(&__xxts); \ nm_prerr("%03d.%06d [%4d] %-25s " format "\n", \ (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \ __LINE__, __FUNCTION__, ##__VA_ARGS__); \ } while (0) /* rate limited, lps indicates how many per second */ #define RD(lps, format, ...) 
\ do { \ static int t0, __cnt; \ if (t0 != time_second) { \ t0 = time_second; \ __cnt = 0; \ } \ if (__cnt++ < lps) \ D(format, ##__VA_ARGS__); \ } while (0) struct netmap_adapter; struct nm_bdg_fwd; struct nm_bridge; struct netmap_priv_d; /* os-specific NM_SELINFO_T initialzation/destruction functions */ void nm_os_selinfo_init(NM_SELINFO_T *); void nm_os_selinfo_uninit(NM_SELINFO_T *); const char *nm_dump_buf(char *p, int len, int lim, char *dst); void nm_os_selwakeup(NM_SELINFO_T *si); void nm_os_selrecord(NM_SELRECORD_T *sr, NM_SELINFO_T *si); int nm_os_ifnet_init(void); void nm_os_ifnet_fini(void); void nm_os_ifnet_lock(void); void nm_os_ifnet_unlock(void); unsigned nm_os_ifnet_mtu(struct ifnet *ifp); void nm_os_get_module(void); void nm_os_put_module(void); void netmap_make_zombie(struct ifnet *); void netmap_undo_zombie(struct ifnet *); /* os independent alloc/realloc/free */ void *nm_os_malloc(size_t); void *nm_os_vmalloc(size_t); void *nm_os_realloc(void *, size_t new_size, size_t old_size); void nm_os_free(void *); void nm_os_vfree(void *); /* passes a packet up to the host stack. * If the packet is sent (or dropped) immediately it returns NULL, * otherwise it links the packet to prev and returns m. * In this case, a final call with m=NULL and prev != NULL will send up * the entire chain to the host stack. */ void *nm_os_send_up(struct ifnet *, struct mbuf *m, struct mbuf *prev); int nm_os_mbuf_has_offld(struct mbuf *m); #include "netmap_mbq.h" extern NMG_LOCK_T netmap_global_lock; enum txrx { NR_RX = 0, NR_TX = 1, NR_TXRX }; static __inline const char* nm_txrx2str(enum txrx t) { return (t== NR_RX ? "RX" : "TX"); } static __inline enum txrx nm_txrx_swap(enum txrx t) { return (t== NR_RX ? NR_TX : NR_RX); } #define for_rx_tx(t) for ((t) = 0; (t) < NR_TXRX; (t)++) #ifdef WITH_MONITOR struct netmap_zmon_list { struct netmap_kring *next; struct netmap_kring *prev; }; #endif /* WITH_MONITOR */ /* * private, kernel view of a ring. Keeps track of the status of * a ring across system calls. * * nr_hwcur index of the next buffer to refill. * It corresponds to ring->head * at the time the system call returns. * * nr_hwtail index of the first buffer owned by the kernel. * On RX, hwcur->hwtail are receive buffers * not yet released. hwcur is advanced following * ring->head, hwtail is advanced on incoming packets, * and a wakeup is generated when hwtail passes ring->cur * On TX, hwcur->rcur have been filled by the sender * but not sent yet to the NIC; rcur->hwtail are available * for new transmissions, and hwtail->hwcur-1 are pending * transmissions not yet acknowledged. * * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots. * This is so that, on a reset, buffers owned by userspace are not * modified by the kernel. In particular: * RX rings: the next empty buffer (hwtail + hwofs) coincides with * the next empty buffer as known by the hardware (next_to_check or so). * TX rings: hwcur + hwofs coincides with next_to_send * - * For received packets, slot->flags is set to nkr_slot_flags - * so we can provide a proper initial value. - * * The following fields are used to implement lock-free copy of packets * from input to output ports in VALE switch: * nkr_hwlease buffer after the last one being copied. * A writer in nm_bdg_flush reserves N buffers * from nr_hwlease, advances it, then does the * copy outside the lock. 
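 *	(Example: with nkr_hwlease = 10 and N = 4, the writer is assigned
 *	slots 10..13, nkr_hwlease becomes 14, and the copy into those
 *	slots happens outside the lock; completion is then reported
 *	through nkr_leases[], described below.)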
* In RX rings (used for VALE ports), * nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1 * In TX rings (used for NIC or host stack ports) * nkr_hwcur <= nkr_hwlease < nkr_hwtail * nkr_leases array of nkr_num_slots where writers can report * completion of their block. NR_NOSLOT (~0) indicates * that the writer has not finished yet * nkr_lease_idx index of next free slot in nr_leases, to be assigned * * The kring is manipulated by txsync/rxsync and generic netmap function. * * Concurrent rxsync or txsync on the same ring are prevented through * by nm_kr_(try)lock() which in turn uses nr_busy. This is all we need * for NIC rings, and for TX rings attached to the host stack. * * RX rings attached to the host stack use an mbq (rx_queue) on both * rxsync_from_host() and netmap_transmit(). The mbq is protected * by its internal lock. * * RX rings attached to the VALE switch are accessed by both senders * and receiver. They are protected through the q_lock on the RX ring. */ struct netmap_kring { struct netmap_ring *ring; - uint32_t nr_hwcur; + uint32_t nr_hwcur; /* should be nr_hwhead */ uint32_t nr_hwtail; /* * Copies of values in user rings, so we do not need to look * at the ring (which could be modified). These are set in the * *sync_prologue()/finalize() routines. */ uint32_t rhead; uint32_t rcur; uint32_t rtail; uint32_t nr_kflags; /* private driver flags */ #define NKR_PENDINTR 0x1 // Pending interrupt. #define NKR_EXCLUSIVE 0x2 /* exclusive binding */ #define NKR_FORWARD 0x4 /* (host ring only) there are packets to forward */ #define NKR_NEEDRING 0x8 /* ring needed even if users==0 * (used internally by pipes and * by ptnetmap host ports) */ #define NKR_NOINTR 0x10 /* don't use interrupts on this ring */ +#define NKR_FAKERING 0x20 /* don't allocate/free buffers */ uint32_t nr_mode; uint32_t nr_pending_mode; #define NKR_NETMAP_OFF 0x0 #define NKR_NETMAP_ON 0x1 uint32_t nkr_num_slots; /* * On a NIC reset, the NIC ring indexes may be reset but the * indexes in the netmap rings remain the same. nkr_hwofs * keeps track of the offset between the two. */ int32_t nkr_hwofs; /* last_reclaim is opaque marker to help reduce the frequency * of operations such as reclaiming tx buffers. A possible use * is set it to ticks and do the reclaim only once per tick. */ uint64_t last_reclaim; NM_SELINFO_T si; /* poll/select wait queue */ NM_LOCK_T q_lock; /* protects kring and ring. */ NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */ + /* the adapter the owns this kring */ struct netmap_adapter *na; + + /* the adapter that wants to be notified when this kring has + * new slots avaialable. This is usually the same as the above, + * but wrappers may let it point to themselves + */ + struct netmap_adapter *notify_na; /* The following fields are for VALE switch support */ struct nm_bdg_fwd *nkr_ft; uint32_t *nkr_leases; #define NR_NOSLOT ((uint32_t)~0) /* used in nkr_*lease* */ uint32_t nkr_hwlease; uint32_t nkr_lease_idx; /* while nkr_stopped is set, no new [tr]xsync operations can * be started on this kring. * This is used by netmap_disable_all_rings() * to find a synchronization point where critical data * structures pointed to by the kring can be added or removed */ volatile int nkr_stopped; /* Support for adapters without native netmap support. * On tx rings we preallocate an array of tx buffers * (same size as the netmap ring), on rx rings we * store incoming mbufs in a queue that is drained by * a rxsync. 
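	 * tx_event is an mbuf on which a completion notification is
	 * requested (see generic_set_tx_event()): when the driver
	 * eventually frees it, the mbuf destructor wakes up the netmap
	 * client. Since that destructor runs asynchronously with
	 * respect to txsync, tx_event is protected by tx_event_lock.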
*/ struct mbuf **tx_pool; struct mbuf *tx_event; /* TX event used as a notification */ NM_LOCK_T tx_event_lock; /* protects the tx_event mbuf */ struct mbq rx_queue; /* intercepted rx mbufs. */ uint32_t users; /* existing bindings for this ring */ uint32_t ring_id; /* kring identifier */ enum txrx tx; /* kind of ring (tx or rx) */ char name[64]; /* diagnostic */ /* [tx]sync callback for this kring. * The default nm_kring_create callback (netmap_krings_create) * sets the nm_sync callback of each hardware tx(rx) kring to * the corresponding nm_txsync(nm_rxsync) taken from the * netmap_adapter; moreover, it sets the sync callback * of the host tx(rx) ring to netmap_txsync_to_host * (netmap_rxsync_from_host). * * Overrides: the above configuration is not changed by * any of the nm_krings_create callbacks. */ int (*nm_sync)(struct netmap_kring *kring, int flags); int (*nm_notify)(struct netmap_kring *kring, int flags); #ifdef WITH_PIPES struct netmap_kring *pipe; /* if this is a pipe ring, * pointer to the other end */ #endif /* WITH_PIPES */ #ifdef WITH_VALE int (*save_notify)(struct netmap_kring *kring, int flags); #endif #ifdef WITH_MONITOR /* array of krings that are monitoring this kring */ struct netmap_kring **monitors; uint32_t max_monitors; /* current size of the monitors array */ uint32_t n_monitors; /* next unused entry in the monitor array */ uint32_t mon_pos[NR_TXRX]; /* index of this ring in the monitored ring array */ uint32_t mon_tail; /* last seen slot on rx */ /* circular list of zero-copy monitors */ struct netmap_zmon_list zmon_list[NR_TXRX]; /* * Monitors work by intercepting the sync and notify callbacks of the * monitored krings. This is implemented by replacing the pointers * above and saving the previous ones in mon_* pointers below */ int (*mon_sync)(struct netmap_kring *kring, int flags); int (*mon_notify)(struct netmap_kring *kring, int flags); #endif } #ifdef _WIN32 __declspec(align(64)); #else __attribute__((__aligned__(64))); #endif /* return 1 iff the kring needs to be turned on */ static inline int nm_kring_pending_on(struct netmap_kring *kring) { return kring->nr_pending_mode == NKR_NETMAP_ON && kring->nr_mode == NKR_NETMAP_OFF; } /* return 1 iff the kring needs to be turned off */ static inline int nm_kring_pending_off(struct netmap_kring *kring) { return kring->nr_pending_mode == NKR_NETMAP_OFF && kring->nr_mode == NKR_NETMAP_ON; } /* return the next index, with wraparound */ static inline uint32_t nm_next(uint32_t i, uint32_t lim) { return unlikely (i == lim) ? 0 : i + 1; } /* return the previous index, with wraparound */ static inline uint32_t nm_prev(uint32_t i, uint32_t lim) { return unlikely (i == 0) ? lim : i - 1; } /* * * Here is the layout for the Rx and Tx rings. RxRING TxRING +-----------------+ +-----------------+ | | | | | free | | free | +-----------------+ +-----------------+ head->| owned by user |<-hwcur | not sent to nic |<-hwcur | | | yet | +-----------------+ | | cur->| available to | | | | user, not read | +-----------------+ | yet | cur->| (being | | | | prepared) | | | | | +-----------------+ + ------ + tail->| |<-hwtail | |<-hwlease | (being | ... | | ... | prepared) | ... | | ... +-----------------+ ... | | ... | |<-hwlease +-----------------+ | | tail->| |<-hwtail | | | | | | | | | | | | +-----------------+ +-----------------+ * The cur/tail (user view) and hwcur/hwtail (kernel view) * are used in the normal operation of the card. 
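 *
 * (A worked example: with nkr_num_slots = 8, nr_hwcur = 6 and
 *  nr_hwtail = 2, an RX ring has 4 slots owned by userspace
 *  (6, 7, 0 and 1), which is the count returned by nm_kr_rxspace()
 *  below.)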
* * When a ring is the output of a switch port (Rx ring for * a VALE port, Tx ring for the host stack or NIC), slots * are reserved in blocks through 'hwlease' which points * to the next unused slot. * On an Rx ring, hwlease is always after hwtail, * and completions cause hwtail to advance. * On a Tx ring, hwlease is always between cur and hwtail, * and completions cause cur to advance. * * nm_kr_space() returns the maximum number of slots that * can be assigned. * nm_kr_lease() reserves the required number of buffers, * advances nkr_hwlease and also returns an entry in * a circular array where completions should be reported. */ struct lut_entry; #ifdef __FreeBSD__ #define plut_entry lut_entry #endif struct netmap_lut { struct lut_entry *lut; struct plut_entry *plut; uint32_t objtotal; /* max buffer index */ uint32_t objsize; /* buffer size */ }; struct netmap_vp_adapter; // forward +/* Struct to be filled by nm_config callbacks. */ +struct nm_config_info { + unsigned num_tx_rings; + unsigned num_rx_rings; + unsigned num_tx_descs; + unsigned num_rx_descs; + unsigned rx_buf_maxsize; +}; + /* * The "struct netmap_adapter" extends the "struct adapter" * (or equivalent) device descriptor. * It contains all base fields needed to support netmap operation. * There are in fact different types of netmap adapters * (native, generic, VALE switch...) so a netmap_adapter is * just the first field in the derived type. */ struct netmap_adapter { /* * On linux we do not have a good way to tell if an interface * is netmap-capable. So we always use the following trick: * NA(ifp) points here, and the first entry (which hopefully * always exists and is at least 32 bits) contains a magic * value which we can use to detect that the interface is good. */ uint32_t magic; uint32_t na_flags; /* enabled, and other flags */ #define NAF_SKIP_INTR 1 /* use the regular interrupt handler. * useful during initialization */ #define NAF_SW_ONLY 2 /* forward packets only to sw adapter */ #define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when * forwarding packets coming from this * interface */ #define NAF_MEM_OWNER 8 /* the adapter uses its own memory area * that cannot be changed */ #define NAF_NATIVE 16 /* the adapter is native. * Virtual ports (non persistent vale ports, * pipes, monitors...) should never use * this flag. */ #define NAF_NETMAP_ON 32 /* netmap is active (either native or * emulated). Where possible (e.g. FreeBSD) * IFCAP_NETMAP also mirrors this flag. */ #define NAF_HOST_RINGS 64 /* the adapter supports the host rings */ #define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */ #define NAF_PTNETMAP_HOST 256 /* the adapter supports ptnetmap in the host */ #define NAF_MOREFRAG 512 /* the adapter supports NS_MOREFRAG */ #define NAF_ZOMBIE (1U<<30) /* the nic driver has been unloaded */ #define NAF_BUSY (1U<<31) /* the adapter is used internally and * cannot be registered from userspace */ int active_fds; /* number of user-space descriptors using this interface, which is equal to the number of struct netmap_if objs in the mapped region. */ u_int num_rx_rings; /* number of adapter receive rings */ u_int num_tx_rings; /* number of adapter transmit rings */ u_int num_tx_desc; /* number of descriptor in each queue */ u_int num_rx_desc; /* tx_rings and rx_rings are private but allocated * as a contiguous chunk of memory. Each array has * N+1 entries, for the adapter queues and for the host queue. */ - struct netmap_kring *tx_rings; /* array of TX rings. 
*/ - struct netmap_kring *rx_rings; /* array of RX rings. */ + struct netmap_kring **tx_rings; /* array of TX rings. */ + struct netmap_kring **rx_rings; /* array of RX rings. */ void *tailroom; /* space below the rings array */ /* (used for leases) */ NM_SELINFO_T si[NR_TXRX]; /* global wait queues */ /* count users of the global wait queues */ int si_users[NR_TXRX]; void *pdev; /* used to store pci device */ /* copy of if_qflush and if_transmit pointers, to intercept * packets from the network stack when netmap is active. */ int (*if_transmit)(struct ifnet *, struct mbuf *); /* copy of if_input for netmap_send_up() */ void (*if_input)(struct ifnet *, struct mbuf *); /* Back reference to the parent ifnet struct. Used for * hardware ports (emulated netmap included). */ struct ifnet *ifp; /* adapter is ifp->if_softc */ /*---- callbacks for this netmap adapter -----*/ /* * nm_dtor() is the cleanup routine called when destroying * the adapter. * Called with NMG_LOCK held. * * nm_register() is called on NIOCREGIF and close() to enter * or exit netmap mode on the NIC * Called with NNG_LOCK held. * * nm_txsync() pushes packets to the underlying hw/switch * * nm_rxsync() collects packets from the underlying hw/switch * * nm_config() returns configuration information from the OS * Called with NMG_LOCK held. * * nm_krings_create() create and init the tx_rings and * rx_rings arrays of kring structures. In particular, * set the nm_sync callbacks for each ring. * There is no need to also allocate the corresponding * netmap_rings, since netmap_mem_rings_create() will always * be called to provide the missing ones. * Called with NNG_LOCK held. * * nm_krings_delete() cleanup and delete the tx_rings and rx_rings * arrays * Called with NMG_LOCK held. * * nm_notify() is used to act after data have become available * (or the stopped state of the ring has changed) * For hw devices this is typically a selwakeup(), * but for NIC/host ports attached to a switch (or vice-versa) * we also need to invoke the 'txsync' code downstream. * This callback pointer is actually used only to initialize * kring->nm_notify. * Return values are the same as for netmap_rx_irq(). */ void (*nm_dtor)(struct netmap_adapter *); int (*nm_register)(struct netmap_adapter *, int onoff); void (*nm_intr)(struct netmap_adapter *, int onoff); int (*nm_txsync)(struct netmap_kring *kring, int flags); int (*nm_rxsync)(struct netmap_kring *kring, int flags); int (*nm_notify)(struct netmap_kring *kring, int flags); #define NAF_FORCE_READ 1 #define NAF_FORCE_RECLAIM 2 #define NAF_CAN_FORWARD_DOWN 4 /* return configuration information */ - int (*nm_config)(struct netmap_adapter *, - u_int *txr, u_int *txd, u_int *rxr, u_int *rxd); + int (*nm_config)(struct netmap_adapter *, struct nm_config_info *info); int (*nm_krings_create)(struct netmap_adapter *); void (*nm_krings_delete)(struct netmap_adapter *); #ifdef WITH_VALE /* * nm_bdg_attach() initializes the na_vp field to point * to an adapter that can be attached to a VALE switch. If the * current adapter is already a VALE port, na_vp is simply a cast; * otherwise, na_vp points to a netmap_bwrap_adapter. * If applicable, this callback also initializes na_hostvp, * that can be used to connect the adapter host rings to the * switch. * Called with NMG_LOCK held. * * nm_bdg_ctl() is called on the actual attach/detach to/from * to/from the switch, to perform adapter-specific * initializations * Called with NMG_LOCK held. 
*/ int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *); - int (*nm_bdg_ctl)(struct netmap_adapter *, struct nmreq *, int); + int (*nm_bdg_ctl)(struct nmreq_header *, struct netmap_adapter *); /* adapter used to attach this adapter to a VALE switch (if any) */ struct netmap_vp_adapter *na_vp; /* adapter used to attach the host rings of this adapter * to a VALE switch (if any) */ struct netmap_vp_adapter *na_hostvp; #endif /* standard refcount to control the lifetime of the adapter * (it should be equal to the lifetime of the corresponding ifp) */ int na_refcount; /* memory allocator (opaque) * We also cache a pointer to the lut_entry for translating * buffer addresses, the total number of buffers and the buffer size. */ struct netmap_mem_d *nm_mem; struct netmap_mem_d *nm_mem_prev; struct netmap_lut na_lut; /* additional information attached to this adapter * by other netmap subsystems. Currently used by * bwrap, LINUX/v1000 and ptnetmap */ void *na_private; /* array of pipes that have this adapter as a parent */ struct netmap_pipe_adapter **na_pipes; int na_next_pipe; /* next free slot in the array */ int na_max_pipes; /* size of the array */ /* Offset of ethernet header for each packet. */ u_int virt_hdr_len; - char name[64]; + /* Max number of bytes that the NIC can store in the buffer + * referenced by each RX descriptor. This translates to the maximum + * bytes that a single netmap slot can reference. Larger packets + * require NS_MOREFRAG support. */ + unsigned rx_buf_maxsize; + + char name[NETMAP_REQ_IFNAMSIZ]; /* used at least by pipes */ }; static __inline u_int nma_get_ndesc(struct netmap_adapter *na, enum txrx t) { return (t == NR_TX ? na->num_tx_desc : na->num_rx_desc); } static __inline void nma_set_ndesc(struct netmap_adapter *na, enum txrx t, u_int v) { if (t == NR_TX) na->num_tx_desc = v; else na->num_rx_desc = v; } static __inline u_int nma_get_nrings(struct netmap_adapter *na, enum txrx t) { return (t == NR_TX ? na->num_tx_rings : na->num_rx_rings); } static __inline void nma_set_nrings(struct netmap_adapter *na, enum txrx t, u_int v) { if (t == NR_TX) na->num_tx_rings = v; else na->num_rx_rings = v; } -static __inline struct netmap_kring* +static __inline struct netmap_kring** NMR(struct netmap_adapter *na, enum txrx t) { return (t == NR_TX ? na->tx_rings : na->rx_rings); } int nma_intr_enable(struct netmap_adapter *na, int onoff); /* * If the NIC is owned by the kernel * (i.e., bridge), neither another bridge nor user can use it; * if the NIC is owned by a user, only users can share it. * Evaluation must be done under NMG_LOCK(). */ #define NETMAP_OWNED_BY_KERN(na) ((na)->na_flags & NAF_BUSY) #define NETMAP_OWNED_BY_ANY(na) \ (NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0)) /* * derived netmap adapters for various types of ports */ struct netmap_vp_adapter { /* VALE software port */ struct netmap_adapter up; /* * Bridge support: * * bdg_port is the port number used in the bridge; * na_bdg points to the bridge this NA is attached to. 
*/ int bdg_port; struct nm_bridge *na_bdg; int retry; int autodelete; /* remove the ifp on last reference */ /* Maximum Frame Size, used in bdg_mismatch_datapath() */ u_int mfs; /* Last source MAC on this port */ uint64_t last_smac; }; struct netmap_hw_adapter { /* physical device */ struct netmap_adapter up; #ifdef linux struct net_device_ops nm_ndo; struct ethtool_ops nm_eto; #endif const struct ethtool_ops* save_ethtool; int (*nm_hw_register)(struct netmap_adapter *, int onoff); }; #ifdef WITH_GENERIC /* Mitigation support. */ struct nm_generic_mit { struct hrtimer mit_timer; int mit_pending; int mit_ring_idx; /* index of the ring being mitigated */ struct netmap_adapter *mit_na; /* backpointer */ }; struct netmap_generic_adapter { /* emulated device */ struct netmap_hw_adapter up; /* Pointer to a previously used netmap adapter. */ struct netmap_adapter *prev; /* Emulated netmap adapters support: * - save_if_input saves the if_input hook (FreeBSD); * - mit implements rx interrupt mitigation; */ void (*save_if_input)(struct ifnet *, struct mbuf *); struct nm_generic_mit *mit; #ifdef linux netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *); #endif /* Is the adapter able to use multiple RX slots to scatter * each packet pushed up by the driver? */ int rxsg; /* Is the transmission path controlled by a netmap-aware * device queue (i.e. qdisc on linux)? */ int txqdisc; }; #endif /* WITH_GENERIC */ static __inline int netmap_real_rings(struct netmap_adapter *na, enum txrx t) { return nma_get_nrings(na, t) + !!(na->na_flags & NAF_HOST_RINGS); } #ifdef WITH_VALE struct nm_bdg_polling_state; /* * Bridge wrapper for non VALE ports attached to a VALE switch. * * The real device must already have its own netmap adapter (hwna). * The bridge wrapper and the hwna adapter share the same set of * netmap rings and buffers, but they have two separate sets of * krings descriptors, with tx/rx meanings swapped: * * netmap * bwrap krings rings krings hwna * +------+ +------+ +-----+ +------+ +------+ * |tx_rings->| |\ /| |----| |<-tx_rings| * | | +------+ \ / +-----+ +------+ | | * | | X | | * | | / \ | | * | | +------+/ \+-----+ +------+ | | * |rx_rings->| | | |----| |<-rx_rings| * | | +------+ +-----+ +------+ | | * +------+ +------+ * * - packets coming from the bridge go to the brwap rx rings, * which are also the hwna tx rings. The bwrap notify callback * will then complete the hwna tx (see netmap_bwrap_notify). * * - packets coming from the outside go to the hwna rx rings, * which are also the bwrap tx rings. The (overwritten) hwna * notify method will then complete the bridge tx * (see netmap_bwrap_intr_notify). * * The bridge wrapper may optionally connect the hwna 'host' rings * to the bridge. This is done by using a second port in the * bridge and connecting it to the 'host' netmap_vp_adapter * contained in the netmap_bwrap_adapter. The brwap host adapter * cross-links the hwna host rings in the same way as shown above. * * - packets coming from the bridge and directed to the host stack * are handled by the bwrap host notify callback * (see netmap_bwrap_host_notify) * * - packets coming from the host stack are still handled by the * overwritten hwna notify callback (netmap_bwrap_intr_notify), * but are diverted to the host adapter depending on the ring number. 
* */ struct netmap_bwrap_adapter { struct netmap_vp_adapter up; struct netmap_vp_adapter host; /* for host rings */ struct netmap_adapter *hwna; /* the underlying device */ /* * When we attach a physical interface to the bridge, we * allow the controlling process to terminate, so we need * a place to store the n_detmap_priv_d data structure. * This is only done when physical interfaces * are attached to a bridge. */ struct netmap_priv_d *na_kpriv; struct nm_bdg_polling_state *na_polling_state; + /* we overwrite the hwna->na_vp pointer, so we save + * here its original value, to be restored at detach + */ + struct netmap_vp_adapter *saved_na_vp; }; +int nm_bdg_ctl_attach(struct nmreq_header *hdr, void *auth_token); +int nm_bdg_ctl_detach(struct nmreq_header *hdr, void *auth_token); +int nm_bdg_polling(struct nmreq_header *hdr); int netmap_bwrap_attach(const char *name, struct netmap_adapter *); -int netmap_vi_create(struct nmreq *, int); +int netmap_vi_create(struct nmreq_header *hdr, int); +int nm_vi_create(struct nmreq_header *); +int nm_vi_destroy(const char *name); +int netmap_bdg_list(struct nmreq_header *hdr); #else /* !WITH_VALE */ -#define netmap_vi_create(nmr, a) (EOPNOTSUPP) +#define netmap_vi_create(hdr, a) (EOPNOTSUPP) #endif /* WITH_VALE */ #ifdef WITH_PIPES #define NM_MAXPIPES 64 /* max number of pipes per adapter */ struct netmap_pipe_adapter { + /* pipe identifier is up.name */ struct netmap_adapter up; - u_int id; /* pipe identifier */ - int role; /* either NR_REG_PIPE_MASTER or NR_REG_PIPE_SLAVE */ +#define NM_PIPE_ROLE_MASTER 0x1 +#define NM_PIPE_ROLE_SLAVE 0x2 + int role; /* either NM_PIPE_ROLE_MASTER or NM_PIPE_ROLE_SLAVE */ struct netmap_adapter *parent; /* adapter that owns the memory */ struct netmap_pipe_adapter *peer; /* the other end of the pipe */ int peer_ref; /* 1 iff we are holding a ref to the peer */ struct ifnet *parent_ifp; /* maybe null */ u_int parent_slot; /* index in the parent pipe array */ }; #endif /* WITH_PIPES */ /* return slots reserved to rx clients; used in drivers */ static inline uint32_t nm_kr_rxspace(struct netmap_kring *k) { int space = k->nr_hwtail - k->nr_hwcur; if (space < 0) space += k->nkr_num_slots; ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail); return space; } /* return slots reserved to tx clients */ #define nm_kr_txspace(_k) nm_kr_rxspace(_k) /* True if no space in the tx ring, only valid after txsync_prologue */ static inline int nm_kr_txempty(struct netmap_kring *kring) { return kring->rcur == kring->nr_hwtail; } /* True if no more completed slots in the rx ring, only valid after * rxsync_prologue */ #define nm_kr_rxempty(_k) nm_kr_txempty(_k) /* * protect against multiple threads using the same ring. * also check that the ring has not been stopped or locked */ #define NM_KR_BUSY 1 /* some other thread is syncing the ring */ #define NM_KR_STOPPED 2 /* unbounded stop (ifconfig down or driver unload) */ #define NM_KR_LOCKED 3 /* bounded, brief stop for mutual exclusion */ /* release the previously acquired right to use the *sync() methods of the ring */ static __inline void nm_kr_put(struct netmap_kring *kr) { NM_ATOMIC_CLEAR(&kr->nr_busy); } /* true if the ifp that backed the adapter has disappeared (e.g., the * driver has been unloaded) */ static inline int nm_iszombie(struct netmap_adapter *na); /* try to obtain exclusive right to issue the *sync() operations on the ring. * The right is obtained and must be later relinquished via nm_kr_put() if and * only if nm_kr_tryget() returns 0. 
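 *
 * Typical usage, as in the poll/ioctl paths (a sketch; here 'revents'
 * is the caller's poll result word and error handling is simplified):
 *
 *	if (nm_kr_tryget(kring, 1, &revents) == 0) {
 *		kring->nm_sync(kring, 0);
 *		nm_kr_put(kring);
 *	}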
* If can_sleep is 1 there are only two other possible outcomes: * - the function returns NM_KR_BUSY * - the function returns NM_KR_STOPPED and sets the POLLERR bit in *perr * (if non-null) * In both cases the caller will typically skip the ring, possibly collecting * errors along the way. * If the calling context does not allow sleeping, the caller must pass 0 in can_sleep. * In the latter case, the function may also return NM_KR_LOCKED and leave *perr * untouched: ideally, the caller should try again at a later time. */ static __inline int nm_kr_tryget(struct netmap_kring *kr, int can_sleep, int *perr) { int busy = 1, stopped; /* check a first time without taking the lock * to avoid starvation for nm_kr_get() */ retry: stopped = kr->nkr_stopped; if (unlikely(stopped)) { goto stop; } busy = NM_ATOMIC_TEST_AND_SET(&kr->nr_busy); /* we should not return NM_KR_BUSY if the ring was * actually stopped, so check another time after * the barrier provided by the atomic operation */ stopped = kr->nkr_stopped; if (unlikely(stopped)) { goto stop; } if (unlikely(nm_iszombie(kr->na))) { stopped = NM_KR_STOPPED; goto stop; } return unlikely(busy) ? NM_KR_BUSY : 0; stop: if (!busy) nm_kr_put(kr); if (stopped == NM_KR_STOPPED) { /* if POLLERR is defined we want to use it to simplify netmap_poll(). * Otherwise, any non-zero value will do. */ #ifdef POLLERR #define NM_POLLERR POLLERR #else #define NM_POLLERR 1 #endif /* POLLERR */ if (perr) *perr |= NM_POLLERR; #undef NM_POLLERR } else if (can_sleep) { tsleep(kr, 0, "NM_KR_TRYGET", 4); goto retry; } return stopped; } /* put the ring in the 'stopped' state and wait for the current user (if any) to * notice. stopped must be either NM_KR_STOPPED or NM_KR_LOCKED */ static __inline void nm_kr_stop(struct netmap_kring *kr, int stopped) { kr->nkr_stopped = stopped; while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)) tsleep(kr, 0, "NM_KR_GET", 4); } /* restart a ring after a stop */ static __inline void nm_kr_start(struct netmap_kring *kr) { kr->nkr_stopped = 0; nm_kr_put(kr); } /* * The following functions are used by individual drivers to * support netmap operation. * * netmap_attach() initializes a struct netmap_adapter, allocating the * struct netmap_ring's and the struct selinfo. * * netmap_detach() frees the memory allocated by netmap_attach(). * * netmap_transmit() replaces the if_transmit routine of the interface, * and is used to intercept packets coming from the stack. * * netmap_load_map/netmap_reload_map are helper routines to set/reset * the dmamap for a packet buffer * * netmap_reset() is a helper routine to be called in the hw driver * when reinitializing a ring. It should not be called by * virtual ports (vale, pipes, monitor) */ int netmap_attach(struct netmap_adapter *); int netmap_attach_ext(struct netmap_adapter *, size_t size, int override_reg); void netmap_detach(struct ifnet *); int netmap_transmit(struct ifnet *, struct mbuf *); struct netmap_slot *netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n, u_int new_cur); int netmap_ring_reinit(struct netmap_kring *); +int netmap_rings_config_get(struct netmap_adapter *, struct nm_config_info *); /* Return codes for netmap_*x_irq. */ enum { /* Driver should do normal interrupt processing, e.g. because * the interface is not in netmap mode. */ NM_IRQ_PASS = 0, /* Port is in netmap mode, and the interrupt work has been * completed. The driver does not have to notify netmap * again before the next interrupt. 
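	 * (A driver will typically just return from its interrupt
	 * handler, skipping its normal RX/TX processing, when it gets
	 * this value.)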
*/ NM_IRQ_COMPLETED = -1, /* Port is in netmap mode, but the interrupt work has not been * completed. The driver has to make sure netmap will be * notified again soon, even if no more interrupts come (e.g. * on Linux the driver should not call napi_complete()). */ NM_IRQ_RESCHED = -2, }; /* default functions to handle rx/tx interrupts */ int netmap_rx_irq(struct ifnet *, u_int, u_int *); #define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL) int netmap_common_irq(struct netmap_adapter *, u_int, u_int *work_done); #ifdef WITH_VALE /* functions used by external modules to interface with VALE */ #define netmap_vp_to_ifp(_vp) ((_vp)->up.ifp) #define netmap_ifp_to_vp(_ifp) (NA(_ifp)->na_vp) #define netmap_ifp_to_host_vp(_ifp) (NA(_ifp)->na_hostvp) #define netmap_bdg_idx(_vp) ((_vp)->bdg_port) const char *netmap_bdg_name(struct netmap_vp_adapter *); #else /* !WITH_VALE */ #define netmap_vp_to_ifp(_vp) NULL #define netmap_ifp_to_vp(_ifp) NULL #define netmap_ifp_to_host_vp(_ifp) NULL #define netmap_bdg_idx(_vp) -1 #define netmap_bdg_name(_vp) NULL #endif /* WITH_VALE */ static inline int nm_netmap_on(struct netmap_adapter *na) { return na && na->na_flags & NAF_NETMAP_ON; } static inline int nm_native_on(struct netmap_adapter *na) { return nm_netmap_on(na) && (na->na_flags & NAF_NATIVE); } static inline int nm_iszombie(struct netmap_adapter *na) { return na == NULL || (na->na_flags & NAF_ZOMBIE); } static inline void nm_update_hostrings_mode(struct netmap_adapter *na) { /* Process nr_mode and nr_pending_mode for host rings. */ - na->tx_rings[na->num_tx_rings].nr_mode = - na->tx_rings[na->num_tx_rings].nr_pending_mode; - na->rx_rings[na->num_rx_rings].nr_mode = - na->rx_rings[na->num_rx_rings].nr_pending_mode; + na->tx_rings[na->num_tx_rings]->nr_mode = + na->tx_rings[na->num_tx_rings]->nr_pending_mode; + na->rx_rings[na->num_rx_rings]->nr_mode = + na->rx_rings[na->num_rx_rings]->nr_pending_mode; } /* set/clear native flags and if_transmit/netdev_ops */ static inline void nm_set_native_flags(struct netmap_adapter *na) { struct ifnet *ifp = na->ifp; /* We do the setup for intercepting packets only if we are the * first user of this adapapter. */ if (na->active_fds > 0) { return; } na->na_flags |= NAF_NETMAP_ON; #ifdef IFCAP_NETMAP /* or FreeBSD ? */ ifp->if_capenable |= IFCAP_NETMAP; #endif #if defined (__FreeBSD__) na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; #elif defined (_WIN32) (void)ifp; /* prevent a warning */ #elif defined (linux) na->if_transmit = (void *)ifp->netdev_ops; ifp->netdev_ops = &((struct netmap_hw_adapter *)na)->nm_ndo; ((struct netmap_hw_adapter *)na)->save_ethtool = ifp->ethtool_ops; ifp->ethtool_ops = &((struct netmap_hw_adapter*)na)->nm_eto; #endif /* linux */ nm_update_hostrings_mode(na); } static inline void nm_clear_native_flags(struct netmap_adapter *na) { struct ifnet *ifp = na->ifp; /* We undo the setup for intercepting packets only if we are the * last user of this adapapter. */ if (na->active_fds > 0) { return; } nm_update_hostrings_mode(na); #if defined(__FreeBSD__) ifp->if_transmit = na->if_transmit; #elif defined(_WIN32) (void)ifp; /* prevent a warning */ #else ifp->netdev_ops = (void *)na->if_transmit; ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool; #endif na->na_flags &= ~NAF_NETMAP_ON; #ifdef IFCAP_NETMAP /* or FreeBSD ? 
*/ ifp->if_capenable &= ~IFCAP_NETMAP; #endif } +#ifdef linux +int netmap_linux_config(struct netmap_adapter *na, + struct nm_config_info *info); +#endif /* linux */ + /* * nm_*sync_prologue() functions are used in ioctl/poll and ptnetmap * kthreads. * We need netmap_ring* parameter, because in ptnetmap it is decoupled * from host kring. * The user-space ring pointers (head/cur/tail) are shared through * CSB between host and guest. */ /* * validates parameters in the ring/kring, returns a value for head * If any error, returns ring_size to force a reinit. */ uint32_t nm_txsync_prologue(struct netmap_kring *, struct netmap_ring *); /* * validates parameters in the ring/kring, returns a value for head * If any error, returns ring_size lim to force a reinit. */ uint32_t nm_rxsync_prologue(struct netmap_kring *, struct netmap_ring *); /* check/fix address and len in tx rings */ #if 1 /* debug version */ #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \ if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \ RD(5, "bad addr/len ring %d slot %d idx %d len %d", \ kring->ring_id, nm_i, slot->buf_idx, len); \ if (_l > NETMAP_BUF_SIZE(_na)) \ _l = NETMAP_BUF_SIZE(_na); \ } } while (0) #else /* no debug version */ #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \ if (_l > NETMAP_BUF_SIZE(_na)) \ _l = NETMAP_BUF_SIZE(_na); \ } while (0) #endif /*---------------------------------------------------------------*/ /* * Support routines used by netmap subsystems * (native drivers, VALE, generic, pipes, monitors, ...) */ /* common routine for all functions that create a netmap adapter. It performs * two main tasks: * - if the na points to an ifp, mark the ifp as netmap capable * using na as its native adapter; * - provide defaults for the setup callbacks and the memory allocator */ int netmap_attach_common(struct netmap_adapter *); /* fill priv->np_[tr]xq{first,last} using the ringid and flags information - * coming from a struct nmreq + * coming from a struct nmreq_register */ -int netmap_interp_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags); +int netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, + uint16_t nr_ringid, uint64_t nr_flags); /* update the ring parameters (number and size of tx and rx rings). * It calls the nm_config callback, if available. */ int netmap_update_config(struct netmap_adapter *na); /* create and initialize the common fields of the krings array. * using the information that must be already available in the na. * tailroom can be used to request the allocation of additional * tailroom bytes after the krings array. This is used by * netmap_vp_adapter's (i.e., VALE ports) to make room for * leasing-related data structures */ int netmap_krings_create(struct netmap_adapter *na, u_int tailroom); /* deletes the kring array of the adapter. The array must have * been created using netmap_krings_create */ void netmap_krings_delete(struct netmap_adapter *na); int netmap_hw_krings_create(struct netmap_adapter *na); void netmap_hw_krings_delete(struct netmap_adapter *na); /* set the stopped/enabled status of ring * When stopping, they also wait for all current activity on the ring to * terminate. The status change is then notified using the na nm_notify * callback. */ void netmap_set_ring(struct netmap_adapter *, u_int ring_id, enum txrx, int stopped); /* set the stopped/enabled status of all rings of the adapter. 
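 * The usual pattern for going over all rings (host rings included),
 * with krings accessed through the per-ring pointers, is sketched
 * below ('t' is an enum txrx and 'i' a ring index):
 *
 *	for_rx_tx(t) {
 *		for (i = 0; i < netmap_real_rings(na, t); i++) {
 *			struct netmap_kring *kring = NMR(na, t)[i];
 *			...
 *		}
 *	}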
*/ void netmap_set_all_rings(struct netmap_adapter *, int stopped); /* convenience wrappers for netmap_set_all_rings */ void netmap_disable_all_rings(struct ifnet *); void netmap_enable_all_rings(struct ifnet *); int netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na, - uint16_t ringid, uint32_t flags); + uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags); void netmap_do_unregif(struct netmap_priv_d *priv); u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg); -int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, - struct ifnet **ifp, struct netmap_mem_d *nmd, int create); +int netmap_get_na(struct nmreq_header *hdr, struct netmap_adapter **na, + struct ifnet **ifp, struct netmap_mem_d *nmd, int create); void netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp); int netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na); #ifdef WITH_VALE /* * The following bridge-related functions are used by other * kernel modules. * * VALE only supports unicast or broadcast. The lookup * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports, * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 to indicate * drop. */ -typedef u_int (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr, - struct netmap_vp_adapter *); +typedef uint32_t (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr, + struct netmap_vp_adapter *, void *private_data); typedef int (*bdg_config_fn_t)(struct nm_ifreq *); typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *); +typedef void *(*bdg_update_private_data_fn_t)(void *private_data, void *callback_data, int *error); struct netmap_bdg_ops { bdg_lookup_fn_t lookup; bdg_config_fn_t config; bdg_dtor_fn_t dtor; }; -u_int netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring, - struct netmap_vp_adapter *); +uint32_t netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring, + struct netmap_vp_adapter *, void *private_data); #define NM_BRIDGES 8 /* number of bridges */ #define NM_BDG_MAXPORTS 254 /* up to 254 */ #define NM_BDG_BROADCAST NM_BDG_MAXPORTS #define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1) /* these are redefined in case of no VALE support */ -int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, +int netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create); struct nm_bridge *netmap_init_bridges2(u_int); void netmap_uninit_bridges2(struct nm_bridge *, u_int); int netmap_init_bridges(void); void netmap_uninit_bridges(void); -int netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops); -int netmap_bdg_config(struct nmreq *nmr); +int netmap_bdg_regops(const char *name, struct netmap_bdg_ops *bdg_ops, void *private_data, void *auth_token); +int nm_bdg_update_private_data(const char *name, bdg_update_private_data_fn_t callback, + void *callback_data, void *auth_token); +int netmap_bdg_config(struct nm_ifreq *nifr); +void *netmap_bdg_create(const char *bdg_name, int *return_status); +int netmap_bdg_destroy(const char *bdg_name, void *auth_token); #else /* !WITH_VALE */ #define netmap_get_bdg_na(_1, _2, _3, _4) 0 #define netmap_init_bridges(_1) 0 #define netmap_uninit_bridges() -#define netmap_bdg_ctl(_1, _2) EINVAL +#define netmap_bdg_regops(_1, _2) EINVAL #endif /* !WITH_VALE */ #ifdef WITH_PIPES /* max number of pipes per device */ #define NM_MAXPIPES 64 /* XXX this should probably be a sysctl */ void netmap_pipe_dealloc(struct netmap_adapter *); -int 
netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, - struct netmap_mem_d *nmd, int create); +int netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na, + struct netmap_mem_d *nmd, int create); #else /* !WITH_PIPES */ #define NM_MAXPIPES 0 #define netmap_pipe_alloc(_1, _2) 0 #define netmap_pipe_dealloc(_1) -#define netmap_get_pipe_na(nmr, _2, _3, _4) \ - ({ int role__ = (nmr)->nr_flags & NR_REG_MASK; \ - (role__ == NR_REG_PIPE_MASTER || \ - role__ == NR_REG_PIPE_SLAVE) ? EOPNOTSUPP : 0; }) +#define netmap_get_pipe_na(hdr, _2, _3, _4) \ + ((strchr(hdr->nr_name, '{') != NULL || strchr(hdr->nr_name, '}') != NULL) ? EOPNOTSUPP : 0) #endif #ifdef WITH_MONITOR -int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, +int netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create); void netmap_monitor_stop(struct netmap_adapter *na); #else -#define netmap_get_monitor_na(nmr, _2, _3, _4) \ - ((nmr)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0) +#define netmap_get_monitor_na(hdr, _2, _3, _4) \ + (((struct nmreq_register *)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0) #endif #ifdef CONFIG_NET_NS struct net *netmap_bns_get(void); void netmap_bns_put(struct net *); void netmap_bns_getbridges(struct nm_bridge **, u_int *); #else #define netmap_bns_get() #define netmap_bns_put(_1) #define netmap_bns_getbridges(b, n) \ do { *b = nm_bridges; *n = NM_BRIDGES; } while (0) #endif /* Various prototypes */ int netmap_poll(struct netmap_priv_d *, int events, NM_SELRECORD_T *td); int netmap_init(void); void netmap_fini(void); int netmap_get_memory(struct netmap_priv_d* p); void netmap_dtor(void *data); -int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread *); +int netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, + struct thread *, int nr_body_is_user); +int netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data, + struct thread *td); +size_t nmreq_size_by_type(uint16_t nr_reqtype); /* netmap_adapter creation/destruction */ // #define NM_DEBUG_PUTGET 1 #ifdef NM_DEBUG_PUTGET #define NM_DBG(f) __##f void __netmap_adapter_get(struct netmap_adapter *na); #define netmap_adapter_get(na) \ do { \ struct netmap_adapter *__na = na; \ D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ __netmap_adapter_get(__na); \ } while (0) int __netmap_adapter_put(struct netmap_adapter *na); #define netmap_adapter_put(na) \ ({ \ struct netmap_adapter *__na = na; \ D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ __netmap_adapter_put(__na); \ }) #else /* !NM_DEBUG_PUTGET */ #define NM_DBG(f) f void netmap_adapter_get(struct netmap_adapter *na); int netmap_adapter_put(struct netmap_adapter *na); #endif /* !NM_DEBUG_PUTGET */ /* * module variables */ #define NETMAP_BUF_BASE(_na) ((_na)->na_lut.lut[0].vaddr) #define NETMAP_BUF_SIZE(_na) ((_na)->na_lut.objsize) extern int netmap_no_pendintr; extern int netmap_mitigate; extern int netmap_verbose; /* for debugging */ enum { /* verbose flags */ NM_VERB_ON = 1, /* generic verbose */ NM_VERB_HOST = 0x2, /* verbose host stack */ NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */ NM_VERB_TXSYNC = 0x20, NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */ NM_VERB_TXINTR = 0x200, NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on rx/tx intr (driver) */ NM_VERB_NIC_TXSYNC = 0x2000, }; extern int netmap_txsync_retry; extern int netmap_flags; extern 
int netmap_generic_mit; extern int netmap_generic_ringsize; extern int netmap_generic_rings; #ifdef linux extern int netmap_generic_txqdisc; #endif extern int ptnetmap_tx_workers; /* * NA returns a pointer to the struct netmap adapter from the ifp, * WNA is used to write it. */ #define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp)) /* * On old versions of FreeBSD, NA(ifp) is a pspare. On linux we * overload another pointer in the netdev. * * We check if NA(ifp) is set and its first element has a related * magic value. The capenable is within the struct netmap_adapter. */ #define NETMAP_MAGIC 0x52697a7a #define NM_NA_VALID(ifp) (NA(ifp) && \ ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC ) #define NM_ATTACH_NA(ifp, na) do { \ WNA(ifp) = na; \ if (NA(ifp)) \ NA(ifp)->magic = \ ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC; \ } while(0) #define NM_IS_NATIVE(ifp) (NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor) #if defined(__FreeBSD__) /* Assigns the device IOMMU domain to an allocator. * Returns -ENOMEM in case the domain is different */ #define nm_iommu_group_id(dev) (0) /* Callback invoked by the dma machinery after a successful dmamap_load */ static void netmap_dmamap_cb(__unused void *arg, __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error) { } /* bus_dmamap_load wrapper: call aforementioned function if map != NULL. * XXX can we do it without a callback ? */ static inline int netmap_load_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, void *buf) { if (map) bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); return 0; } static inline void netmap_unload_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map) { if (map) bus_dmamap_unload(tag, map); } #define netmap_sync_map(na, tag, map, sz, t) /* update the map when a buffer changes. 
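 * Drivers typically do this from their txsync/rxsync routines when a
 * slot carries NS_BUF_CHANGED, e.g. (txr->txtag and txbuf->map are
 * driver-specific names, shown only as a sketch):
 *
 *	if (slot->flags & NS_BUF_CHANGED) {
 *		netmap_reload_map(na, txr->txtag, txbuf->map, addr);
 *		slot->flags &= ~NS_BUF_CHANGED;
 *	}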
*/ static inline void netmap_reload_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, void *buf) { if (map) { bus_dmamap_unload(tag, map); bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); } } #elif defined(_WIN32) #else /* linux */ int nm_iommu_group_id(bus_dma_tag_t dev); #include /* * on linux we need * dma_map_single(&pdev->dev, virt_addr, len, direction) * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction) */ #if 0 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l]; /* set time_stamp *before* dma to help avoid a possible race */ buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = false; buffer_info->length = len; //buffer_info->next_to_watch = l; /* reload dma map */ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, NETMAP_BUF_SIZE, DMA_TO_DEVICE); buffer_info->dma = dma_map_single(&adapter->pdev->dev, addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { D("dma mapping error"); /* goto dma_error; See e1000_put_txbuf() */ /* XXX reset */ } tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX #endif static inline int netmap_load_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, void *buf, u_int size) { if (map) { *map = dma_map_single(na->pdev, buf, size, DMA_BIDIRECTIONAL); if (dma_mapping_error(na->pdev, *map)) { *map = 0; return ENOMEM; } } return 0; } static inline void netmap_unload_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, u_int sz) { if (*map) { dma_unmap_single(na->pdev, *map, sz, DMA_BIDIRECTIONAL); } } static inline void netmap_sync_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, u_int sz, enum txrx t) { if (*map) { if (t == NR_RX) dma_sync_single_for_cpu(na->pdev, *map, sz, DMA_FROM_DEVICE); else dma_sync_single_for_device(na->pdev, *map, sz, DMA_TO_DEVICE); } } static inline void netmap_reload_map(struct netmap_adapter *na, bus_dma_tag_t tag, bus_dmamap_t map, void *buf) { u_int sz = NETMAP_BUF_SIZE(na); if (*map) { dma_unmap_single(na->pdev, *map, sz, DMA_BIDIRECTIONAL); } *map = dma_map_single(na->pdev, buf, sz, DMA_BIDIRECTIONAL); } #endif /* linux */ /* * functions to map NIC to KRING indexes (n2k) and vice versa (k2n) */ static inline int netmap_idx_n2k(struct netmap_kring *kr, int idx) { int n = kr->nkr_num_slots; idx += kr->nkr_hwofs; if (idx < 0) return idx + n; else if (idx < n) return idx; else return idx - n; } static inline int netmap_idx_k2n(struct netmap_kring *kr, int idx) { int n = kr->nkr_num_slots; idx -= kr->nkr_hwofs; if (idx < 0) return idx + n; else if (idx < n) return idx; else return idx - n; } /* Entries of the look-up table. */ #ifdef __FreeBSD__ struct lut_entry { void *vaddr; /* virtual address. */ vm_paddr_t paddr; /* physical address. */ }; #else /* linux & _WIN32 */ /* dma-mapping in linux can assign a buffer a different address * depending on the device, so we need to have a separate * physical-address look-up table for each na. * We can still share the vaddrs, though, therefore we split * the lut_entry structure. */ struct lut_entry { void *vaddr; /* virtual address. */ }; struct plut_entry { vm_paddr_t paddr; /* physical address. 
*/ }; #endif /* linux & _WIN32 */ struct netmap_obj_pool; /* * NMB return the virtual address of a buffer (buffer 0 on bad index) * PNMB also fills the physical address */ static inline void * NMB(struct netmap_adapter *na, struct netmap_slot *slot) { struct lut_entry *lut = na->na_lut.lut; uint32_t i = slot->buf_idx; return (unlikely(i >= na->na_lut.objtotal)) ? lut[0].vaddr : lut[i].vaddr; } static inline void * PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp) { uint32_t i = slot->buf_idx; struct lut_entry *lut = na->na_lut.lut; struct plut_entry *plut = na->na_lut.plut; void *ret = (i >= na->na_lut.objtotal) ? lut[0].vaddr : lut[i].vaddr; #ifdef _WIN32 *pp = (i >= na->na_lut.objtotal) ? (uint64_t)plut[0].paddr.QuadPart : (uint64_t)plut[i].paddr.QuadPart; #else *pp = (i >= na->na_lut.objtotal) ? plut[0].paddr : plut[i].paddr; #endif return ret; } /* * Structure associated to each netmap file descriptor. * It is created on open and left unbound (np_nifp == NULL). * A successful NIOCREGIF will set np_nifp and the first few fields; * this is protected by a global lock (NMG_LOCK) due to low contention. * * np_refs counts the number of references to the structure: one for the fd, * plus (on FreeBSD) one for each active mmap which we track ourselves * (linux automatically tracks them, but FreeBSD does not). * np_refs is protected by NMG_LOCK. * * Read access to the structure is lock free, because ni_nifp once set * can only go to 0 when nobody is using the entry anymore. Readers * must check that np_nifp != NULL before using the other fields. */ struct netmap_priv_d { struct netmap_if * volatile np_nifp; /* netmap if descriptor. */ struct netmap_adapter *np_na; struct ifnet *np_ifp; uint32_t np_flags; /* from the ioctl */ u_int np_qfirst[NR_TXRX], np_qlast[NR_TXRX]; /* range of tx/rx rings to scan */ uint16_t np_txpoll; int np_sync_flags; /* to be passed to nm_sync */ int np_refs; /* use with NMG_LOCK held */ /* pointers to the selinfo to be used for selrecord. * Either the local or the global one depending on the * number of rings. */ NM_SELINFO_T *np_si[NR_TXRX]; struct thread *np_td; /* kqueue, just debugging */ }; struct netmap_priv_d *netmap_priv_new(void); void netmap_priv_delete(struct netmap_priv_d *); static inline int nm_kring_pending(struct netmap_priv_d *np) { struct netmap_adapter *na = np->np_na; enum txrx t; int i; for_rx_tx(t) { for (i = np->np_qfirst[t]; i < np->np_qlast[t]; i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; if (kring->nr_mode != kring->nr_pending_mode) { return 1; } } } return 0; } #ifdef WITH_PIPES int netmap_pipe_txsync(struct netmap_kring *txkring, int flags); int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags); #endif /* WITH_PIPES */ #ifdef WITH_MONITOR struct netmap_monitor_adapter { struct netmap_adapter up; struct netmap_priv_d priv; uint32_t flags; }; #endif /* WITH_MONITOR */ #ifdef WITH_GENERIC /* * generic netmap emulation for devices that do not have * native netmap support. */ int generic_netmap_attach(struct ifnet *ifp); int generic_rx_handler(struct ifnet *ifp, struct mbuf *m);; int nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept); int nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept); int na_is_generic(struct netmap_adapter *na); /* * the generic transmit routine is passed a structure to optionally * build a queue of descriptors, in an OS-specific way. 
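 * (The caller normally reuses one nm_os_gen_arg instance for all the
 * packets of a batch, so the head/tail fields can persist across calls.)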
* The payload is at addr, if non-null, and the routine should send or queue * the packet, returning 0 if successful, 1 on failure. * * At the end, if head is non-null, there will be an additional call * to the function with addr = NULL; this should tell the OS-specific * routine to send the queue and free any resources. Failure is ignored. */ struct nm_os_gen_arg { struct ifnet *ifp; void *m; /* os-specific mbuf-like object */ void *head, *tail; /* tailq, if the OS-specific routine needs to build one */ void *addr; /* payload of current packet */ u_int len; /* packet length */ u_int ring_nr; /* packet length */ u_int qevent; /* in txqdisc mode, place an event on this mbuf */ }; int nm_os_generic_xmit_frame(struct nm_os_gen_arg *); int nm_os_generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx); void nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq); void nm_os_generic_set_features(struct netmap_generic_adapter *gna); static inline struct ifnet* netmap_generic_getifp(struct netmap_generic_adapter *gna) { if (gna->prev) return gna->prev->ifp; return gna->up.up.ifp; } void netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done); //#define RATE_GENERIC /* Enables communication statistics for generic. */ #ifdef RATE_GENERIC void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi); #else #define generic_rate(txp, txs, txi, rxp, rxs, rxi) #endif /* * netmap_mitigation API. This is used by the generic adapter * to reduce the number of interrupt requests/selwakeup * to clients on incoming packets. */ void nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na); void nm_os_mitigation_start(struct nm_generic_mit *mit); void nm_os_mitigation_restart(struct nm_generic_mit *mit); int nm_os_mitigation_active(struct nm_generic_mit *mit); void nm_os_mitigation_cleanup(struct nm_generic_mit *mit); #else /* !WITH_GENERIC */ #define generic_netmap_attach(ifp) (EOPNOTSUPP) #define na_is_generic(na) (0) #endif /* WITH_GENERIC */ /* Shared declarations for the VALE switch. */ /* * Each transmit queue accumulates a batch of packets into * a structure before forwarding. Packets to the same * destination are put in a list using ft_next as a link field. * ft_frags and ft_next are valid only on the first fragment. */ struct nm_bdg_fwd { /* forwarding entry for a bridge */ void *ft_buf; /* netmap or indirect buffer */ uint8_t ft_frags; /* how many fragments (only on 1st frag) */ - uint8_t _ft_port; /* dst port (unused) */ + uint16_t ft_offset; /* dst port (unused) */ uint16_t ft_flags; /* flags, e.g. indirect */ uint16_t ft_len; /* src fragment len */ uint16_t ft_next; /* next packet to same destination */ }; /* struct 'virtio_net_hdr' from linux. */ struct nm_vnet_hdr { #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */ #define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */ uint8_t flags; #define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */ #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */ #define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */ #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */ #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */ uint8_t gso_type; uint16_t hdr_len; uint16_t gso_size; uint16_t csum_start; uint16_t csum_offset; }; #define WORST_CASE_GSO_HEADER (14+40+60) /* IPv6 + TCP */ /* Private definitions for IPv4, IPv6, UDP and TCP headers. 
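 * These mirror the on-wire layouts, with multi-byte fields kept in
 * network byte order, so that the checksum helpers below and the VALE
 * mismatch datapath do not have to include OS-specific network headers.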
*/ struct nm_iphdr { uint8_t version_ihl; uint8_t tos; uint16_t tot_len; uint16_t id; uint16_t frag_off; uint8_t ttl; uint8_t protocol; uint16_t check; uint32_t saddr; uint32_t daddr; /*The options start here. */ }; struct nm_tcphdr { uint16_t source; uint16_t dest; uint32_t seq; uint32_t ack_seq; uint8_t doff; /* Data offset + Reserved */ uint8_t flags; uint16_t window; uint16_t check; uint16_t urg_ptr; }; struct nm_udphdr { uint16_t source; uint16_t dest; uint16_t len; uint16_t check; }; struct nm_ipv6hdr { uint8_t priority_version; uint8_t flow_lbl[3]; uint16_t payload_len; uint8_t nexthdr; uint8_t hop_limit; uint8_t saddr[16]; uint8_t daddr[16]; }; /* Type used to store a checksum (in host byte order) that hasn't been * folded yet. */ #define rawsum_t uint32_t rawsum_t nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum); uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph); void nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data, size_t datalen, uint16_t *check); void nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data, size_t datalen, uint16_t *check); uint16_t nm_os_csum_fold(rawsum_t cur_sum); void bdg_mismatch_datapath(struct netmap_vp_adapter *na, struct netmap_vp_adapter *dst_na, const struct nm_bdg_fwd *ft_p, struct netmap_ring *dst_ring, u_int *j, u_int lim, u_int *howmany); /* persistent virtual port routines */ int nm_os_vi_persist(const char *, struct ifnet **); void nm_os_vi_detach(struct ifnet *); void nm_os_vi_init_index(void); /* * kernel thread routines */ struct nm_kctx; /* OS-specific kernel context - opaque */ typedef void (*nm_kctx_worker_fn_t)(void *data, int is_kthread); typedef void (*nm_kctx_notify_fn_t)(void *data); /* kthread configuration */ struct nm_kctx_cfg { long type; /* kthread type/identifier */ nm_kctx_worker_fn_t worker_fn; /* worker function */ void *worker_private;/* worker parameter */ nm_kctx_notify_fn_t notify_fn; /* notify function */ int attach_user; /* attach kthread to user process */ int use_kthread; /* use a kthread for the context */ }; /* kthread configuration */ struct nm_kctx *nm_os_kctx_create(struct nm_kctx_cfg *cfg, - unsigned int cfgtype, void *opaque); int nm_os_kctx_worker_start(struct nm_kctx *); void nm_os_kctx_worker_stop(struct nm_kctx *); void nm_os_kctx_destroy(struct nm_kctx *); void nm_os_kctx_worker_wakeup(struct nm_kctx *nmk); void nm_os_kctx_send_irq(struct nm_kctx *); void nm_os_kctx_worker_setaff(struct nm_kctx *, int); u_int nm_os_ncpus(void); #ifdef WITH_PTNETMAP_HOST /* * netmap adapter for host ptnetmap ports */ struct netmap_pt_host_adapter { struct netmap_adapter up; /* the passed-through adapter */ struct netmap_adapter *parent; /* parent->na_flags, saved at NETMAP_PT_HOST_CREATE time, * and restored at NETMAP_PT_HOST_DELETE time */ uint32_t parent_na_flags; int (*parent_nm_notify)(struct netmap_kring *kring, int flags); void *ptns; }; -/* ptnetmap HOST routines */ -int netmap_get_pt_host_na(struct nmreq *nmr, struct netmap_adapter **na, - struct netmap_mem_d * nmd, int create); -int ptnetmap_ctl(struct nmreq *nmr, struct netmap_adapter *na); + +/* ptnetmap host-side routines */ +int netmap_get_pt_host_na(struct nmreq_header *hdr, struct netmap_adapter **na, + struct netmap_mem_d * nmd, int create); +int ptnetmap_ctl(const char *nr_name, int create, struct netmap_adapter *na); + static inline int nm_ptnetmap_host_on(struct netmap_adapter *na) { return na && na->na_flags & NAF_PTNETMAP_HOST; } #else /* !WITH_PTNETMAP_HOST */ -#define netmap_get_pt_host_na(nmr, _2, _3, _4) \ - ((nmr)->nr_flags & 
(NR_PTNETMAP_HOST) ? EOPNOTSUPP : 0) -#define ptnetmap_ctl(_1, _2) EINVAL +#define netmap_get_pt_host_na(hdr, _2, _3, _4) \ + (((struct nmreq_register *)hdr->nr_body)->nr_flags & (NR_PTNETMAP_HOST) ? EOPNOTSUPP : 0) +#define ptnetmap_ctl(_1, _2, _3) EINVAL #define nm_ptnetmap_host_on(_1) EINVAL #endif /* !WITH_PTNETMAP_HOST */ #ifdef WITH_PTNETMAP_GUEST /* ptnetmap GUEST routines */ /* * netmap adapter for guest ptnetmap ports */ struct netmap_pt_guest_adapter { /* The netmap adapter to be used by netmap applications. * This field must be the first, to allow upcast. */ struct netmap_hw_adapter hwup; /* The netmap adapter to be used by the driver. */ struct netmap_hw_adapter dr; /* Reference counter to track users of backend netmap port: the * network stack and netmap clients. * Used to decide when we need (de)allocate krings/rings and * start (stop) ptnetmap kthreads. */ int backend_regifs; }; int netmap_pt_guest_attach(struct netmap_adapter *na, unsigned int nifp_offset, unsigned int memid); struct ptnet_csb_gh; struct ptnet_csb_hg; bool netmap_pt_guest_txsync(struct ptnet_csb_gh *ptgh, struct ptnet_csb_hg *pthg, struct netmap_kring *kring, int flags); bool netmap_pt_guest_rxsync(struct ptnet_csb_gh *ptgh, struct ptnet_csb_hg *pthg, struct netmap_kring *kring, int flags); int ptnet_nm_krings_create(struct netmap_adapter *na); void ptnet_nm_krings_delete(struct netmap_adapter *na); void ptnet_nm_dtor(struct netmap_adapter *na); #endif /* WITH_PTNETMAP_GUEST */ + +struct nmreq_option * nmreq_findoption(struct nmreq_option *, uint16_t); +int nmreq_checkduplicate(struct nmreq_option *); #endif /* _NET_NETMAP_KERN_H_ */ Index: head/sys/dev/netmap/netmap_legacy.c =================================================================== --- head/sys/dev/netmap/netmap_legacy.c (nonexistent) +++ head/sys/dev/netmap/netmap_legacy.c (revision 332423) @@ -0,0 +1,428 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (C) 2018 Vincenzo Maffione + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/* $FreeBSD$ */ + +#if defined(__FreeBSD__) +#include /* prerequisite */ +#include +#include /* defines used in kernel.h */ +#include /* FIONBIO */ +#include +#include /* struct socket */ +#include /* sockaddrs */ +#include +#include +#include +#include /* BIOCIMMEDIATE */ +#include /* bus_dmamap_* */ +#include +#elif defined(linux) +#include "bsd_glue.h" +#elif defined(__APPLE__) +#warning OSX support is only partial +#include "osx_glue.h" +#elif defined (_WIN32) +#include "win_glue.h" +#endif + +/* + * common headers + */ +#include +#include + +static int +nmreq_register_from_legacy(struct nmreq *nmr, struct nmreq_header *hdr, + struct nmreq_register *req) +{ + req->nr_offset = nmr->nr_offset; + req->nr_memsize = nmr->nr_memsize; + req->nr_tx_slots = nmr->nr_tx_slots; + req->nr_rx_slots = nmr->nr_rx_slots; + req->nr_tx_rings = nmr->nr_tx_rings; + req->nr_rx_rings = nmr->nr_rx_rings; + req->nr_mem_id = nmr->nr_arg2; + req->nr_ringid = nmr->nr_ringid & NETMAP_RING_MASK; + if ((nmr->nr_flags & NR_REG_MASK) == NR_REG_DEFAULT) { + /* Convert the older nmr->nr_ringid (original + * netmap control API) to nmr->nr_flags. */ + u_int regmode = NR_REG_DEFAULT; + if (req->nr_ringid & NETMAP_SW_RING) { + regmode = NR_REG_SW; + } else if (req->nr_ringid & NETMAP_HW_RING) { + regmode = NR_REG_ONE_NIC; + } else { + regmode = NR_REG_ALL_NIC; + } + nmr->nr_flags = regmode | + (nmr->nr_flags & (~NR_REG_MASK)); + } + req->nr_mode = nmr->nr_flags & NR_REG_MASK; + /* Fix nr_name, nr_mode and nr_ringid to handle pipe requests. */ + if (req->nr_mode == NR_REG_PIPE_MASTER || + req->nr_mode == NR_REG_PIPE_SLAVE) { + char suffix[10]; + snprintf(suffix, sizeof(suffix), "%c%d", + (req->nr_mode == NR_REG_PIPE_MASTER ? '{' : '}'), + req->nr_ringid); + if (strlen(hdr->nr_name) + strlen(suffix) + >= sizeof(hdr->nr_name)) { + /* No space for the pipe suffix. */ + return ENOBUFS; + } + strncat(hdr->nr_name, suffix, strlen(suffix)); + req->nr_mode = NR_REG_ALL_NIC; + req->nr_ringid = 0; + } + req->nr_flags = nmr->nr_flags & (~NR_REG_MASK); + if (nmr->nr_ringid & NETMAP_NO_TX_POLL) { + req->nr_flags |= NR_NO_TX_POLL; + } + if (nmr->nr_ringid & NETMAP_DO_RX_POLL) { + req->nr_flags |= NR_DO_RX_POLL; + } + /* nmr->nr_arg1 (nr_pipes) ignored */ + req->nr_extra_bufs = nmr->nr_arg3; + + return 0; +} + +/* Convert the legacy 'nmr' struct into one of the nmreq_xyz structs + * (new API). The new struct is dynamically allocated. */ +static struct nmreq_header * +nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd) +{ + struct nmreq_header *hdr = nm_os_malloc(sizeof(*hdr)); + + if (hdr == NULL) { + goto oom; + } + + /* Sanitize nmr->nr_name by adding the string terminator. */ + if (ioctl_cmd == NIOCGINFO || ioctl_cmd == NIOCREGIF) { + nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0'; + } + + /* First prepare the request header. */ + hdr->nr_version = NETMAP_API; /* new API */ + strncpy(hdr->nr_name, nmr->nr_name, sizeof(nmr->nr_name)); + hdr->nr_options = (uint64_t)NULL; + hdr->nr_body = (uint64_t)NULL; + + switch (ioctl_cmd) { + case NIOCREGIF: { + switch (nmr->nr_cmd) { + case 0: { + /* Regular NIOCREGIF operation. 
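+ * (nmr->nr_cmd == 0): converted into a NETMAP_REQ_REGISTER
+ * request through nmreq_register_from_legacy().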
*/ + struct nmreq_register *req = nm_os_malloc(sizeof(*req)); + if (!req) { goto oom; } + hdr->nr_body = (uint64_t)req; + hdr->nr_reqtype = NETMAP_REQ_REGISTER; + if (nmreq_register_from_legacy(nmr, hdr, req)) { + goto oom; + } + break; + } + case NETMAP_BDG_ATTACH: { + struct nmreq_vale_attach *req = nm_os_malloc(sizeof(*req)); + if (!req) { goto oom; } + hdr->nr_body = (uint64_t)req; + hdr->nr_reqtype = NETMAP_REQ_VALE_ATTACH; + if (nmreq_register_from_legacy(nmr, hdr, &req->reg)) { + goto oom; + } + /* Fix nr_mode, starting from nr_arg1. */ + if (nmr->nr_arg1 & NETMAP_BDG_HOST) { + req->reg.nr_mode = NR_REG_NIC_SW; + } else { + req->reg.nr_mode = NR_REG_ALL_NIC; + } + break; + } + case NETMAP_BDG_DETACH: { + hdr->nr_reqtype = NETMAP_REQ_VALE_DETACH; + hdr->nr_body = (uint64_t)nm_os_malloc(sizeof(struct nmreq_vale_detach)); + break; + } + case NETMAP_BDG_VNET_HDR: + case NETMAP_VNET_HDR_GET: { + struct nmreq_port_hdr *req = nm_os_malloc(sizeof(*req)); + if (!req) { goto oom; } + hdr->nr_body = (uint64_t)req; + hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_VNET_HDR) ? + NETMAP_REQ_PORT_HDR_SET : NETMAP_REQ_PORT_HDR_GET; + req->nr_hdr_len = nmr->nr_arg1; + break; + } + case NETMAP_BDG_NEWIF : { + struct nmreq_vale_newif *req = nm_os_malloc(sizeof(*req)); + if (!req) { goto oom; } + hdr->nr_body = (uint64_t)req; + hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF; + req->nr_tx_slots = nmr->nr_tx_slots; + req->nr_rx_slots = nmr->nr_rx_slots; + req->nr_tx_rings = nmr->nr_tx_rings; + req->nr_rx_rings = nmr->nr_rx_rings; + req->nr_mem_id = nmr->nr_arg2; + break; + } + case NETMAP_BDG_DELIF: { + hdr->nr_reqtype = NETMAP_REQ_VALE_DELIF; + break; + } + case NETMAP_BDG_POLLING_ON: + case NETMAP_BDG_POLLING_OFF: { + struct nmreq_vale_polling *req = nm_os_malloc(sizeof(*req)); + if (!req) { goto oom; } + hdr->nr_body = (uint64_t)req; + hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_POLLING_ON) ? + NETMAP_REQ_VALE_POLLING_ENABLE : + NETMAP_REQ_VALE_POLLING_DISABLE; + switch (nmr->nr_flags & NR_REG_MASK) { + default: + req->nr_mode = 0; /* invalid */ + break; + case NR_REG_ONE_NIC: + req->nr_mode = NETMAP_POLLING_MODE_MULTI_CPU; + break; + case NR_REG_ALL_NIC: + req->nr_mode = NETMAP_POLLING_MODE_SINGLE_CPU; + break; + } + req->nr_first_cpu_id = nmr->nr_ringid & NETMAP_RING_MASK; + req->nr_num_polling_cpus = nmr->nr_arg1; + break; + } + case NETMAP_PT_HOST_CREATE: + case NETMAP_PT_HOST_DELETE: { + D("Netmap passthrough not supported yet"); + return NULL; + break; + } + } + break; + } + case NIOCGINFO: { + if (nmr->nr_cmd == NETMAP_BDG_LIST) { + struct nmreq_vale_list *req = nm_os_malloc(sizeof(*req)); + if (!req) { goto oom; } + hdr->nr_body = (uint64_t)req; + hdr->nr_reqtype = NETMAP_REQ_VALE_LIST; + req->nr_bridge_idx = nmr->nr_arg1; + req->nr_port_idx = nmr->nr_arg2; + } else { + /* Regular NIOCGINFO. 
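+ * (nr_cmd other than NETMAP_BDG_LIST): converted into a
+ * NETMAP_REQ_PORT_INFO_GET request.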
*/ + struct nmreq_port_info_get *req = nm_os_malloc(sizeof(*req)); + if (!req) { goto oom; } + hdr->nr_body = (uint64_t)req; + hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; + req->nr_offset = nmr->nr_offset; + req->nr_memsize = nmr->nr_memsize; + req->nr_tx_slots = nmr->nr_tx_slots; + req->nr_rx_slots = nmr->nr_rx_slots; + req->nr_tx_rings = nmr->nr_tx_rings; + req->nr_rx_rings = nmr->nr_rx_rings; + req->nr_mem_id = nmr->nr_arg2; + } + break; + } + } + + return hdr; +oom: + if (hdr) { + if (hdr->nr_body) { + nm_os_free((void *)hdr->nr_body); + } + nm_os_free(hdr); + } + D("Failed to allocate memory for nmreq_xyz struct"); + + return NULL; +} + +static void +nmreq_register_to_legacy(const struct nmreq_register *req, struct nmreq *nmr) +{ + nmr->nr_offset = req->nr_offset; + nmr->nr_memsize = req->nr_memsize; + nmr->nr_tx_slots = req->nr_tx_slots; + nmr->nr_rx_slots = req->nr_rx_slots; + nmr->nr_tx_rings = req->nr_tx_rings; + nmr->nr_rx_rings = req->nr_rx_rings; + nmr->nr_arg2 = req->nr_mem_id; + nmr->nr_arg3 = req->nr_extra_bufs; +} + +/* Convert a nmreq_xyz struct (new API) to the legacy 'nmr' struct. + * It also frees the nmreq_xyz struct, as it was allocated by + * nmreq_from_legacy(). */ +static int +nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr) +{ + int ret = 0; + + /* We only write-back the fields that the user expects to be + * written back. */ + switch (hdr->nr_reqtype) { + case NETMAP_REQ_REGISTER: { + struct nmreq_register *req = + (struct nmreq_register *)hdr->nr_body; + nmreq_register_to_legacy(req, nmr); + break; + } + case NETMAP_REQ_PORT_INFO_GET: { + struct nmreq_port_info_get *req = + (struct nmreq_port_info_get *)hdr->nr_body; + nmr->nr_offset = req->nr_offset; + nmr->nr_memsize = req->nr_memsize; + nmr->nr_tx_slots = req->nr_tx_slots; + nmr->nr_rx_slots = req->nr_rx_slots; + nmr->nr_tx_rings = req->nr_tx_rings; + nmr->nr_rx_rings = req->nr_rx_rings; + nmr->nr_arg2 = req->nr_mem_id; + break; + } + case NETMAP_REQ_VALE_ATTACH: { + struct nmreq_vale_attach *req = + (struct nmreq_vale_attach *)hdr->nr_body; + nmreq_register_to_legacy(&req->reg, nmr); + break; + } + case NETMAP_REQ_VALE_DETACH: { + break; + } + case NETMAP_REQ_VALE_LIST: { + struct nmreq_vale_list *req = + (struct nmreq_vale_list *)hdr->nr_body; + strncpy(nmr->nr_name, hdr->nr_name, sizeof(nmr->nr_name)); + nmr->nr_arg1 = req->nr_bridge_idx; + nmr->nr_arg2 = req->nr_port_idx; + break; + } + case NETMAP_REQ_PORT_HDR_SET: + case NETMAP_REQ_PORT_HDR_GET: { + struct nmreq_port_hdr *req = + (struct nmreq_port_hdr *)hdr->nr_body; + nmr->nr_arg1 = req->nr_hdr_len; + break; + } + case NETMAP_REQ_VALE_NEWIF: { + struct nmreq_vale_newif *req = + (struct nmreq_vale_newif *)hdr->nr_body; + nmr->nr_tx_slots = req->nr_tx_slots; + nmr->nr_rx_slots = req->nr_rx_slots; + nmr->nr_tx_rings = req->nr_tx_rings; + nmr->nr_rx_rings = req->nr_rx_rings; + nmr->nr_arg2 = req->nr_mem_id; + break; + } + case NETMAP_REQ_VALE_DELIF: + case NETMAP_REQ_VALE_POLLING_ENABLE: + case NETMAP_REQ_VALE_POLLING_DISABLE: { + break; + } + } + + return ret; +} + +int +netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data, + struct thread *td) +{ + int error = 0; + + switch (cmd) { + case NIOCGINFO: + case NIOCREGIF: { + /* Request for the legacy control API. Convert it to a + * NIOCCTRL request. 
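+ * The conversion runs in three steps: nmreq_from_legacy() builds a
+ * struct nmreq_header (plus body) from the legacy nmreq, the header is
+ * handed to the regular netmap_ioctl(), and nmreq_to_legacy() copies
+ * the results back into the legacy struct before everything is freed.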
*/ + struct nmreq *nmr = (struct nmreq *) data; + struct nmreq_header *hdr = nmreq_from_legacy(nmr, cmd); + if (hdr == NULL) { /* out of memory */ + return ENOMEM; + } + error = netmap_ioctl(priv, NIOCCTRL, (caddr_t)hdr, td, + /*nr_body_is_user=*/0); + if (error == 0) { + nmreq_to_legacy(hdr, nmr); + } + if (hdr->nr_body) { + nm_os_free((void *)hdr->nr_body); + } + nm_os_free(hdr); + break; + } +#ifdef WITH_VALE + case NIOCCONFIG: { + struct nm_ifreq *nr = (struct nm_ifreq *)data; + error = netmap_bdg_config(nr); + break; + } +#endif +#ifdef __FreeBSD__ + case FIONBIO: + case FIOASYNC: + ND("FIONBIO/FIOASYNC are no-ops"); + break; + + case BIOCIMMEDIATE: + case BIOCGHDRCMPLT: + case BIOCSHDRCMPLT: + case BIOCSSEESENT: + D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT"); + break; + + default: /* allow device-specific ioctls */ + { + struct nmreq *nmr = (struct nmreq *)data; + struct ifnet *ifp = ifunit_ref(nmr->nr_name); + if (ifp == NULL) { + error = ENXIO; + } else { + struct socket so; + + bzero(&so, sizeof(so)); + so.so_vnet = ifp->if_vnet; + // so->so_proto not null. + error = ifioctl(&so, cmd, data, td); + if_rele(ifp); + } + break; + } + +#else /* linux */ + default: + error = EOPNOTSUPP; +#endif /* linux */ + } + + return error; +} Property changes on: head/sys/dev/netmap/netmap_legacy.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/dev/netmap/netmap_mem2.c =================================================================== --- head/sys/dev/netmap/netmap_mem2.c (revision 332422) +++ head/sys/dev/netmap/netmap_mem2.c (revision 332423) @@ -1,2891 +1,2829 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2012-2014 Matteo Landi * Copyright (C) 2012-2016 Luigi Rizzo * Copyright (C) 2012-2016 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef linux #include "bsd_glue.h" #endif /* linux */ #ifdef __APPLE__ #include "osx_glue.h" #endif /* __APPLE__ */ #ifdef __FreeBSD__ #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include /* MALLOC_DEFINE */ #include #include /* vtophys */ #include /* vtophys */ #include /* sockaddrs */ #include #include #include #include #include #include /* bus_dmamap_* */ /* M_NETMAP only used in here */ MALLOC_DECLARE(M_NETMAP); MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); #endif /* __FreeBSD__ */ #ifdef _WIN32 #include #endif #include #include #include #include "netmap_mem2.h" #ifdef _WIN32_USE_SMALL_GENERIC_DEVICES_MEMORY #define NETMAP_BUF_MAX_NUM 8*4096 /* if too big takes too much time to allocate */ #else #define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ #endif #define NETMAP_POOL_MAX_NAMSZ 32 enum { NETMAP_IF_POOL = 0, NETMAP_RING_POOL, NETMAP_BUF_POOL, NETMAP_POOLS_NR }; struct netmap_obj_params { u_int size; u_int num; u_int last_size; u_int last_num; }; struct netmap_obj_pool { char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */ /* ---------------------------------------------------*/ /* these are only meaningful if the pool is finalized */ /* (see 'finalized' field in netmap_mem_d) */ u_int objtotal; /* actual total number of objects. */ u_int memtotal; /* actual total memory space */ u_int numclusters; /* actual number of clusters */ u_int objfree; /* number of free objects. */ struct lut_entry *lut; /* virt,phys addresses, objtotal entries */ uint32_t *bitmap; /* one bit per buffer, 1 means free */ uint32_t *invalid_bitmap;/* one bit per buffer, 1 means invalid */ uint32_t bitmap_slots; /* number of uint32 entries in bitmap */ + int alloc_done; /* we have allocated the memory */ /* ---------------------------------------------------*/ /* limits */ u_int objminsize; /* minimum object size */ u_int objmaxsize; /* maximum object size */ u_int nummin; /* minimum number of objects */ u_int nummax; /* maximum number of objects */ /* these are changed only by config */ u_int _objtotal; /* total number of objects */ u_int _objsize; /* object size */ u_int _clustsize; /* cluster size */ u_int _clustentries; /* objects per cluster */ u_int _numclusters; /* number of clusters */ /* requested values */ u_int r_objtotal; u_int r_objsize; }; #define NMA_LOCK_T NM_MTX_T +#define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx) +#define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx) +#define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx) +#define NMA_SPINLOCK(n) NM_MTX_SPINLOCK((n)->nm_mtx) +#define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx) - struct netmap_mem_ops { int (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*); int (*nmd_get_info)(struct netmap_mem_d *, uint64_t *size, u_int *memflags, uint16_t *id); vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t); int (*nmd_config)(struct netmap_mem_d *); int (*nmd_finalize)(struct netmap_mem_d *); void (*nmd_deref)(struct netmap_mem_d *); ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr); void (*nmd_delete)(struct netmap_mem_d *); struct netmap_if * (*nmd_if_new)(struct netmap_adapter *, struct netmap_priv_d *); void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *); int (*nmd_rings_create)(struct netmap_adapter *); void (*nmd_rings_delete)(struct netmap_adapter *); }; struct netmap_mem_d { NMA_LOCK_T nm_mtx; /* protect the allocator */ u_int nm_totalsize; /* shorthand */ u_int flags; #define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */ #define NETMAP_MEM_HIDDEN 
0x8 /* beeing prepared */ int lasterr; /* last error for curr config */ int active; /* active users */ int refcount; /* the three allocators */ struct netmap_obj_pool pools[NETMAP_POOLS_NR]; nm_memid_t nm_id; /* allocator identifier */ int nm_grp; /* iommu groupd id */ /* list of all existing allocators, sorted by nm_id */ struct netmap_mem_d *prev, *next; struct netmap_mem_ops *ops; struct netmap_obj_params params[NETMAP_POOLS_NR]; #define NM_MEM_NAMESZ 16 char name[NM_MEM_NAMESZ]; }; -/* - * XXX need to fix the case of t0 == void - */ -#define NMD_DEFCB(t0, name) \ -t0 \ -netmap_mem_##name(struct netmap_mem_d *nmd) \ -{ \ - return nmd->ops->nmd_##name(nmd); \ +int +netmap_mem_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) +{ + int rv; + + NMA_LOCK(nmd); + rv = nmd->ops->nmd_get_lut(nmd, lut); + NMA_UNLOCK(nmd); + + return rv; } -#define NMD_DEFCB1(t0, name, t1) \ -t0 \ -netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1) \ -{ \ - return nmd->ops->nmd_##name(nmd, a1); \ +int +netmap_mem_get_info(struct netmap_mem_d *nmd, uint64_t *size, + u_int *memflags, nm_memid_t *memid) +{ + int rv; + + NMA_LOCK(nmd); + rv = nmd->ops->nmd_get_info(nmd, size, memflags, memid); + NMA_UNLOCK(nmd); + + return rv; } -#define NMD_DEFCB3(t0, name, t1, t2, t3) \ -t0 \ -netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1, t2 a2, t3 a3) \ -{ \ - return nmd->ops->nmd_##name(nmd, a1, a2, a3); \ +vm_paddr_t +netmap_mem_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off) +{ + vm_paddr_t pa; + +#if defined(__FreeBSD__) + /* This function is called by netmap_dev_pager_fault(), which holds a + * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we + * spin on the trylock. */ + NMA_SPINLOCK(nmd); +#else + NMA_LOCK(nmd); +#endif + pa = nmd->ops->nmd_ofstophys(nmd, off); + NMA_UNLOCK(nmd); + + return pa; } -#define NMD_DEFNACB(t0, name) \ -t0 \ -netmap_mem_##name(struct netmap_adapter *na) \ -{ \ - return na->nm_mem->ops->nmd_##name(na); \ +static int +netmap_mem_config(struct netmap_mem_d *nmd) +{ + if (nmd->active) { + /* already in use. 
Not fatal, but we + * cannot change the configuration + */ + return 0; + } + + return nmd->ops->nmd_config(nmd); } -#define NMD_DEFNACB1(t0, name, t1) \ -t0 \ -netmap_mem_##name(struct netmap_adapter *na, t1 a1) \ -{ \ - return na->nm_mem->ops->nmd_##name(na, a1); \ +ssize_t +netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *off) +{ + ssize_t rv; + + NMA_LOCK(nmd); + rv = nmd->ops->nmd_if_offset(nmd, off); + NMA_UNLOCK(nmd); + + return rv; } -NMD_DEFCB1(int, get_lut, struct netmap_lut *); -NMD_DEFCB3(int, get_info, uint64_t *, u_int *, uint16_t *); -NMD_DEFCB1(vm_paddr_t, ofstophys, vm_ooffset_t); -static int netmap_mem_config(struct netmap_mem_d *); -NMD_DEFCB(int, config); -NMD_DEFCB1(ssize_t, if_offset, const void *); -NMD_DEFCB(void, delete); +static void +netmap_mem_delete(struct netmap_mem_d *nmd) +{ + nmd->ops->nmd_delete(nmd); +} -NMD_DEFNACB1(struct netmap_if *, if_new, struct netmap_priv_d *); -NMD_DEFNACB1(void, if_delete, struct netmap_if *); -NMD_DEFNACB(int, rings_create); -NMD_DEFNACB(void, rings_delete); +struct netmap_if * +netmap_mem_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) +{ + struct netmap_if *nifp; + struct netmap_mem_d *nmd = na->nm_mem; + NMA_LOCK(nmd); + nifp = nmd->ops->nmd_if_new(na, priv); + NMA_UNLOCK(nmd); + + return nifp; +} + +void +netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nif) +{ + struct netmap_mem_d *nmd = na->nm_mem; + + NMA_LOCK(nmd); + nmd->ops->nmd_if_delete(na, nif); + NMA_UNLOCK(nmd); +} + +int +netmap_mem_rings_create(struct netmap_adapter *na) +{ + int rv; + struct netmap_mem_d *nmd = na->nm_mem; + + NMA_LOCK(nmd); + rv = nmd->ops->nmd_rings_create(na); + NMA_UNLOCK(nmd); + + return rv; +} + +void +netmap_mem_rings_delete(struct netmap_adapter *na) +{ + struct netmap_mem_d *nmd = na->nm_mem; + + NMA_LOCK(nmd); + nmd->ops->nmd_rings_delete(na); + NMA_UNLOCK(nmd); +} + static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *); static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *); static int nm_mem_assign_group(struct netmap_mem_d *, struct device *); static void nm_mem_release_id(struct netmap_mem_d *); nm_memid_t netmap_mem_get_id(struct netmap_mem_d *nmd) { return nmd->nm_id; } -#define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx) -#define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx) -#define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx) -#define NMA_SPINLOCK(n) NM_MTX_SPINLOCK((n)->nm_mtx) -#define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx) - #ifdef NM_DEBUG_MEM_PUTGET #define NM_DBG_REFC(nmd, func, line) \ nm_prinf("%s:%d mem[%d] -> %d\n", func, line, (nmd)->nm_id, (nmd)->refcount); #else #define NM_DBG_REFC(nmd, func, line) #endif /* circular list of all existing allocators */ static struct netmap_mem_d *netmap_last_mem_d = &nm_mem; NM_MTX_T nm_mem_list_lock; struct netmap_mem_d * __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line) { NM_MTX_LOCK(nm_mem_list_lock); nmd->refcount++; NM_DBG_REFC(nmd, func, line); NM_MTX_UNLOCK(nm_mem_list_lock); return nmd; } void __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line) { int last; NM_MTX_LOCK(nm_mem_list_lock); last = (--nmd->refcount == 0); if (last) nm_mem_release_id(nmd); NM_DBG_REFC(nmd, func, line); NM_MTX_UNLOCK(nm_mem_list_lock); if (last) netmap_mem_delete(nmd); } int netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na) { + int lasterr = 0; if (nm_mem_assign_group(nmd, na->pdev) < 0) { return ENOMEM; - } else { - NMA_LOCK(nmd); - nmd->lasterr = 
nmd->ops->nmd_finalize(nmd); - NMA_UNLOCK(nmd); } + NMA_LOCK(nmd); + + if (netmap_mem_config(nmd)) + goto out; + + nmd->active++; + + nmd->lasterr = nmd->ops->nmd_finalize(nmd); + if (!nmd->lasterr && na->pdev) { nmd->lasterr = netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na); - if (nmd->lasterr) { - netmap_mem_deref(nmd, na); - } } - return nmd->lasterr; +out: + lasterr = nmd->lasterr; + NMA_UNLOCK(nmd); + + if (lasterr) + netmap_mem_deref(nmd, na); + + return lasterr; } static int nm_isset(uint32_t *bitmap, u_int i) { return bitmap[ (i>>5) ] & ( 1U << (i & 31U) ); } static int netmap_init_obj_allocator_bitmap(struct netmap_obj_pool *p) { u_int n, j; if (p->bitmap == NULL) { /* Allocate the bitmap */ n = (p->objtotal + 31) / 32; p->bitmap = nm_os_malloc(sizeof(uint32_t) * n); if (p->bitmap == NULL) { D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, p->name); return ENOMEM; } p->bitmap_slots = n; } else { memset(p->bitmap, 0, p->bitmap_slots); } p->objfree = 0; /* * Set all the bits in the bitmap that have * corresponding buffers to 1 to indicate they are * free. */ for (j = 0; j < p->objtotal; j++) { if (p->invalid_bitmap && nm_isset(p->invalid_bitmap, j)) { D("skipping %s %d", p->name, j); continue; } p->bitmap[ (j>>5) ] |= ( 1U << (j & 31U) ); p->objfree++; } ND("%s free %u", p->name, p->objfree); if (p->objfree == 0) return ENOMEM; return 0; } static int netmap_mem_init_bitmaps(struct netmap_mem_d *nmd) { int i, error = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = &nmd->pools[i]; error = netmap_init_obj_allocator_bitmap(p); if (error) return error; } /* * buffers 0 and 1 are reserved */ if (nmd->pools[NETMAP_BUF_POOL].objfree < 2) { return ENOMEM; } nmd->pools[NETMAP_BUF_POOL].objfree -= 2; if (nmd->pools[NETMAP_BUF_POOL].bitmap) { /* XXX This check is a workaround that prevents a * NULL pointer crash which currently happens only * with ptnetmap guests. * Removed shared-info --> is the bug still there? */ nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3U; } return 0; } int netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na) { int last_user = 0; NMA_LOCK(nmd); if (na->active_fds <= 0) netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na); if (nmd->active == 1) { last_user = 1; /* * Reset the allocator when it falls out of use so that any * pool resources leaked by unclean application exits are * reclaimed. */ netmap_mem_init_bitmaps(nmd); } nmd->ops->nmd_deref(nmd); + nmd->active--; + if (!nmd->active) + nmd->nm_grp = -1; + NMA_UNLOCK(nmd); return last_user; } /* accessor functions */ static int netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) { lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; #ifdef __FreeBSD__ lut->plut = lut->lut; #endif lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; return 0; } static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = { [NETMAP_IF_POOL] = { .size = 1024, .num = 2, }, [NETMAP_RING_POOL] = { .size = 5*PAGE_SIZE, .num = 4, }, [NETMAP_BUF_POOL] = { .size = 2048, .num = 4098, }, }; /* * nm_mem is the memory allocator used for all physical interfaces * running in netmap mode. * Virtual (VALE) ports will have each its own allocator. */ extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */ struct netmap_mem_d nm_mem = { /* Our memory allocator. 
*/ .pools = { [NETMAP_IF_POOL] = { .name = "netmap_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 10, /* don't be stingy */ .nummax = 10000, /* XXX very large */ }, [NETMAP_RING_POOL] = { .name = "netmap_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "netmap_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! */ }, }, .params = { [NETMAP_IF_POOL] = { .size = 1024, .num = 100, }, [NETMAP_RING_POOL] = { .size = 9*PAGE_SIZE, .num = 200, }, [NETMAP_BUF_POOL] = { .size = 2048, .num = NETMAP_BUF_MAX_NUM, }, }, .nm_id = 1, .nm_grp = -1, .prev = &nm_mem, .next = &nm_mem, .ops = &netmap_mem_global_ops, .name = "1" }; /* blueprint for the private memory allocators */ /* XXX clang is not happy about using name as a print format */ static const struct netmap_mem_d nm_blueprint = { .pools = { [NETMAP_IF_POOL] = { .name = "%s_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 1, .nummax = 100, }, [NETMAP_RING_POOL] = { .name = "%s_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "%s_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! */ }, }, .nm_grp = -1, .flags = NETMAP_MEM_PRIVATE, .ops = &netmap_mem_global_ops, }; /* memory allocator related sysctls */ #define STRINGIFY(x) #x #define DECLARE_SYSCTLS(id, name) \ SYSBEGIN(mem2_ ## name); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ CTLFLAG_RW, &nm_mem.params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ CTLFLAG_RW, &nm_mem.params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \ CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \ "Default size of private netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \ CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \ "Default number of private netmap " STRINGIFY(name) "s"); \ SYSEND SYSCTL_DECL(_dev_netmap); DECLARE_SYSCTLS(NETMAP_IF_POOL, if); DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); /* call with nm_mem_list_lock held */ static int nm_mem_assign_id_locked(struct netmap_mem_d *nmd) { nm_memid_t id; struct netmap_mem_d *scan = netmap_last_mem_d; int error = ENOMEM; do { /* we rely on unsigned wrap around */ id = scan->nm_id + 1; if (id == 0) /* reserve 0 as error value */ id = 1; scan = scan->next; if (id != scan->nm_id) { nmd->nm_id = id; nmd->prev = scan->prev; nmd->next = scan; scan->prev->next = nmd; scan->prev = nmd; netmap_last_mem_d = nmd; nmd->refcount = 1; NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); error = 0; break; } } while (scan != netmap_last_mem_d); return error; } /* call with nm_mem_list_lock *not* held */ static int nm_mem_assign_id(struct netmap_mem_d *nmd) { int ret; NM_MTX_LOCK(nm_mem_list_lock); ret = nm_mem_assign_id_locked(nmd); NM_MTX_UNLOCK(nm_mem_list_lock); return ret; } /* call with nm_mem_list_lock held */ static void nm_mem_release_id(struct netmap_mem_d *nmd) { 
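	/* Unlink nmd from the circular list of allocators and, if it was
	 * the most recently added entry, move netmap_last_mem_d back to
	 * its predecessor. */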
nmd->prev->next = nmd->next; nmd->next->prev = nmd->prev; if (netmap_last_mem_d == nmd) netmap_last_mem_d = nmd->prev; nmd->prev = nmd->next = NULL; } struct netmap_mem_d * netmap_mem_find(nm_memid_t id) { struct netmap_mem_d *nmd; NM_MTX_LOCK(nm_mem_list_lock); nmd = netmap_last_mem_d; do { if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_id == id) { nmd->refcount++; NM_DBG_REFC(nmd, __FUNCTION__, __LINE__); NM_MTX_UNLOCK(nm_mem_list_lock); return nmd; } nmd = nmd->next; } while (nmd != netmap_last_mem_d); NM_MTX_UNLOCK(nm_mem_list_lock); return NULL; } static int nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev) { int err = 0, id; id = nm_iommu_group_id(dev); if (netmap_verbose) D("iommu_group %d", id); NMA_LOCK(nmd); if (nmd->nm_grp < 0) nmd->nm_grp = id; if (nmd->nm_grp != id) nmd->lasterr = err = ENOMEM; NMA_UNLOCK(nmd); return err; } static struct lut_entry * nm_alloc_lut(u_int nobj) { size_t n = sizeof(struct lut_entry) * nobj; struct lut_entry *lut; #ifdef linux lut = vmalloc(n); #else lut = nm_os_malloc(n); #endif return lut; } static void nm_free_lut(struct lut_entry *lut, u_int objtotal) { bzero(lut, sizeof(struct lut_entry) * objtotal); #ifdef linux vfree(lut); #else nm_os_free(lut); #endif } #if defined(linux) || defined(_WIN32) static struct plut_entry * nm_alloc_plut(u_int nobj) { size_t n = sizeof(struct plut_entry) * nobj; struct plut_entry *lut; lut = vmalloc(n); return lut; } static void nm_free_plut(struct plut_entry * lut) { vfree(lut); } #endif /* linux or _WIN32 */ /* * First, find the allocator that contains the requested offset, * then locate the cluster through a lookup table. */ static vm_paddr_t netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) { int i; vm_ooffset_t o = offset; vm_paddr_t pa; struct netmap_obj_pool *p; -#if defined(__FreeBSD__) - /* This function is called by netmap_dev_pager_fault(), which holds a - * non-sleepable lock since FreeBSD 12. Since we cannot sleep, we - * spin on the trylock. 
*/ - NMA_SPINLOCK(nmd); -#else - NMA_LOCK(nmd); -#endif p = nmd->pools; for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { if (offset >= p[i].memtotal) continue; // now lookup the cluster's address #ifndef _WIN32 pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) + offset % p[i]._objsize; #else pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr); pa.QuadPart += offset % p[i]._objsize; #endif - NMA_UNLOCK(nmd); return pa; } /* this is only in case of errors */ D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, p[NETMAP_IF_POOL].memtotal, p[NETMAP_IF_POOL].memtotal + p[NETMAP_RING_POOL].memtotal, p[NETMAP_IF_POOL].memtotal + p[NETMAP_RING_POOL].memtotal + p[NETMAP_BUF_POOL].memtotal); - NMA_UNLOCK(nmd); #ifndef _WIN32 return 0; /* bad address */ #else vm_paddr_t res; res.QuadPart = 0; return res; #endif } #ifdef _WIN32 /* * win32_build_virtual_memory_for_userspace * * This function get all the object making part of the pools and maps * a contiguous virtual memory space for the userspace * It works this way * 1 - allocate a Memory Descriptor List wide as the sum * of the memory needed for the pools * 2 - cycle all the objects in every pool and for every object do * * 2a - cycle all the objects in every pool, get the list * of the physical address descriptors * 2b - calculate the offset in the array of pages desciptor in the * main MDL * 2c - copy the descriptors of the object in the main MDL * * 3 - return the resulting MDL that needs to be mapped in userland * * In this way we will have an MDL that describes all the memory for the * objects in a single object */ PMDL win32_build_user_vm_map(struct netmap_mem_d* nmd) { - int i, j; - size_t memsize; u_int memflags, ofs = 0; PMDL mainMdl, tempMdl; + uint64_t memsize; + int i, j; if (netmap_mem_get_info(nmd, &memsize, &memflags, NULL)) { D("memory not finalised yet"); return NULL; } mainMdl = IoAllocateMdl(NULL, memsize, FALSE, FALSE, NULL); if (mainMdl == NULL) { D("failed to allocate mdl"); return NULL; } NMA_LOCK(nmd); for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = &nmd->pools[i]; int clsz = p->_clustsize; int clobjs = p->_clustentries; /* objects per cluster */ int mdl_len = sizeof(PFN_NUMBER) * BYTES_TO_PAGES(clsz); PPFN_NUMBER pSrc, pDst; /* each pool has a different cluster size so we need to reallocate */ tempMdl = IoAllocateMdl(p->lut[0].vaddr, clsz, FALSE, FALSE, NULL); if (tempMdl == NULL) { NMA_UNLOCK(nmd); D("fail to allocate tempMdl"); IoFreeMdl(mainMdl); return NULL; } pSrc = MmGetMdlPfnArray(tempMdl); /* create one entry per cluster, the lut[] has one entry per object */ for (j = 0; j < p->numclusters; j++, ofs += clsz) { pDst = &MmGetMdlPfnArray(mainMdl)[BYTES_TO_PAGES(ofs)]; MmInitializeMdl(tempMdl, p->lut[j*clobjs].vaddr, clsz); MmBuildMdlForNonPagedPool(tempMdl); /* compute physical page addresses */ RtlCopyMemory(pDst, pSrc, mdl_len); /* copy the page descriptors */ mainMdl->MdlFlags = tempMdl->MdlFlags; /* XXX what is in here ? */ } IoFreeMdl(tempMdl); } NMA_UNLOCK(nmd); return mainMdl; } #endif /* _WIN32 */ /* * helper function for OS-specific mmap routines (currently only windows). * Given an nmd and a pool index, returns the cluster size and number of clusters. * Returns 0 if memory is finalised and the pool is valid, otherwise 1. * It should be called under NMA_LOCK(nmd) otherwise the underlying info can change. 
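 * Illustrative use, assuming the caller already holds NMA_LOCK(nmd)
 * (total_bytes is a hypothetical local, not part of this API):
 *
 *	u_int clustsize, nclusters;
 *	uint64_t total_bytes;
 *	if (netmap_mem2_get_pool_info(nmd, NETMAP_BUF_POOL,
 *	    &clustsize, &nclusters) == 0)
 *		total_bytes = (uint64_t)clustsize * nclusters;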
*/ int netmap_mem2_get_pool_info(struct netmap_mem_d* nmd, u_int pool, u_int *clustsize, u_int *numclusters) { if (!nmd || !clustsize || !numclusters || pool >= NETMAP_POOLS_NR) return 1; /* invalid arguments */ // NMA_LOCK_ASSERT(nmd); if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { *clustsize = *numclusters = 0; return 1; /* not ready yet */ } *clustsize = nmd->pools[pool]._clustsize; *numclusters = nmd->pools[pool].numclusters; return 0; /* success */ } static int -netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size, u_int *memflags, - nm_memid_t *id) +netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size, + u_int *memflags, nm_memid_t *id) { int error = 0; - NMA_LOCK(nmd); error = netmap_mem_config(nmd); if (error) goto out; if (size) { if (nmd->flags & NETMAP_MEM_FINALIZED) { *size = nmd->nm_totalsize; } else { int i; *size = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = nmd->pools + i; *size += (p->_numclusters * p->_clustsize); } } } if (memflags) *memflags = nmd->flags; if (id) *id = nmd->nm_id; out: - NMA_UNLOCK(nmd); return error; } /* * we store objects by kernel address, need to find the offset * within the pool to export the value to userspace. * Algorithm: scan until we find the cluster, then add the * actual offset in the cluster */ static ssize_t netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) { int i, k = p->_clustentries, n = p->objtotal; ssize_t ofs = 0; for (i = 0; i < n; i += k, ofs += p->_clustsize) { const char *base = p->lut[i].vaddr; ssize_t relofs = (const char *) vaddr - base; if (relofs < 0 || relofs >= p->_clustsize) continue; ofs = ofs + relofs; ND("%s: return offset %d (cluster %d) for pointer %p", p->name, ofs, i, vaddr); return ofs; } D("address %p is not contained inside any cluster (%s)", vaddr, p->name); return 0; /* An error occurred */ } /* Helper functions which convert virtual addresses to offsets */ #define netmap_if_offset(n, v) \ netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) #define netmap_ring_offset(n, v) \ ((n)->pools[NETMAP_IF_POOL].memtotal + \ netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) static ssize_t netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr) { - ssize_t v; - NMA_LOCK(nmd); - v = netmap_if_offset(nmd, addr); - NMA_UNLOCK(nmd); - return v; + return netmap_if_offset(nmd, addr); } /* * report the index, and use start position as a hint, * otherwise buffer allocation becomes terribly expensive. */ static void * netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) { uint32_t i = 0; /* index in the bitmap */ uint32_t mask, j = 0; /* slot counter */ void *vaddr = NULL; if (len > p->_objsize) { D("%s request size %d too large", p->name, len); return NULL; } if (p->objfree == 0) { D("no more %s objects", p->name); return NULL; } if (start) i = *start; /* termination is guaranteed by p->free, but better check bounds on i */ while (vaddr == NULL && i < p->bitmap_slots) { uint32_t cur = p->bitmap[i]; if (cur == 0) { /* bitmask is fully used */ i++; continue; } /* locate a slot */ for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) ; p->bitmap[i] &= ~mask; /* mark object as in use */ p->objfree--; vaddr = p->lut[i * 32 + j].vaddr; if (index) *index = i * 32 + j; } ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr); if (start) *start = i; return vaddr; } /* * free by index, not by address. * XXX should we also cleanup the content ? 
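 * Returns 0 on success, or 1 if the index is out of range or the
 * object was already free (double free).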
*/ static int netmap_obj_free(struct netmap_obj_pool *p, uint32_t j) { uint32_t *ptr, mask; if (j >= p->objtotal) { D("invalid index %u, max %u", j, p->objtotal); return 1; } ptr = &p->bitmap[j / 32]; mask = (1 << (j % 32)); if (*ptr & mask) { D("ouch, double free on buffer %d", j); return 1; } else { *ptr |= mask; p->objfree++; return 0; } } /* * free by address. This is slow but is only used for a few * objects (rings, nifp) */ static void netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) { u_int i, j, n = p->numclusters; for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { void *base = p->lut[i * p->_clustentries].vaddr; ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; /* Given address, is out of the scope of the current cluster.*/ if (base == NULL || vaddr < base || relofs >= p->_clustsize) continue; j = j + relofs / p->_objsize; /* KASSERT(j != 0, ("Cannot free object 0")); */ netmap_obj_free(p, j); return; } D("address %p is not contained inside any cluster (%s)", vaddr, p->name); } unsigned netmap_mem_bufsize(struct netmap_mem_d *nmd) { return nmd->pools[NETMAP_BUF_POOL]._objsize; } #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) #define netmap_buf_malloc(n, _pos, _index) \ netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index) #if 0 /* currently unused */ /* Return the index associated to the given packet buffer */ #define netmap_buf_index(n, v) \ (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) #endif /* * allocate extra buffers in a linked list. * returns the actual number. 
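 * The list is threaded through the first word of each buffer: *head
 * is set to the head of the chain, each buffer stores the index of
 * the next one, and 0 terminates the list (buffers 0 and 1 are
 * reserved). A sketch of a walk (lut being na->na_lut.lut), mirroring
 * netmap_extra_free() below:
 *
 *	for (i = head; i >= 2 && i < p->objtotal;
 *	     i = *(uint32_t *)lut[i].vaddr)
 *		; /* use buffer i */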
*/ uint32_t netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n) { struct netmap_mem_d *nmd = na->nm_mem; uint32_t i, pos = 0; /* opaque, scan position in the bitmap */ NMA_LOCK(nmd); *head = 0; /* default, 'null' index ie empty list */ for (i = 0 ; i < n; i++) { uint32_t cur = *head; /* save current head */ uint32_t *p = netmap_buf_malloc(nmd, &pos, head); if (p == NULL) { D("no more buffers after %d of %d", i, n); *head = cur; /* restore */ break; } ND(5, "allocate buffer %d -> %d", *head, cur); *p = cur; /* link to previous head */ } NMA_UNLOCK(nmd); return i; } static void netmap_extra_free(struct netmap_adapter *na, uint32_t head) { struct lut_entry *lut = na->na_lut.lut; struct netmap_mem_d *nmd = na->nm_mem; struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; uint32_t i, cur, *buf; ND("freeing the extra list"); for (i = 0; head >=2 && head < p->objtotal; i++) { cur = head; buf = lut[head].vaddr; head = *buf; *buf = 0; if (netmap_obj_free(p, cur)) break; } if (head != 0) D("breaking with head %d", head); if (netmap_verbose) D("freed %d buffers", i); } /* Return nonzero on error */ static int netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; u_int i = 0; /* slot counter */ uint32_t pos = 0; /* slot in p->bitmap */ uint32_t index = 0; /* buffer index */ for (i = 0; i < n; i++) { void *vaddr = netmap_buf_malloc(nmd, &pos, &index); if (vaddr == NULL) { D("no more buffers after %d of %d", i, n); goto cleanup; } slot[i].buf_idx = index; slot[i].len = p->_objsize; slot[i].flags = 0; slot[i].ptr = 0; } - ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos); + ND("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos); return (0); cleanup: while (i > 0) { i--; netmap_obj_free(p, slot[i].buf_idx); } bzero(slot, n * sizeof(slot[0])); return (ENOMEM); } static void netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; u_int i; for (i = 0; i < n; i++) { slot[i].buf_idx = index; slot[i].len = p->_objsize; slot[i].flags = 0; } } static void netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; if (i < 2 || i >= p->objtotal) { D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); return; } netmap_obj_free(p, i); } static void netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) { u_int i; for (i = 0; i < n; i++) { - if (slot[i].buf_idx > 2) + if (slot[i].buf_idx > 1) netmap_free_buf(nmd, slot[i].buf_idx); } + ND("%s: released some buffers, available: %u", + p->name, p->objfree); } static void netmap_reset_obj_allocator(struct netmap_obj_pool *p) { if (p == NULL) return; if (p->bitmap) nm_os_free(p->bitmap); p->bitmap = NULL; if (p->invalid_bitmap) nm_os_free(p->invalid_bitmap); p->invalid_bitmap = NULL; + if (!p->alloc_done) { + /* allocation was done by somebody else. + * Let them clean up after themselves. + */ + return; + } if (p->lut) { u_int i; /* * Free each cluster allocated in * netmap_finalize_obj_allocator(). The cluster start * addresses are stored at multiples of p->_clusterentries * in the lut. 
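 * (This is why the loop below advances i by p->_clustentries and
 * calls contigfree() once per cluster rather than once per object.)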
*/ for (i = 0; i < p->objtotal; i += p->_clustentries) { contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP); } nm_free_lut(p->lut, p->objtotal); } p->lut = NULL; p->objtotal = 0; p->memtotal = 0; p->numclusters = 0; p->objfree = 0; + p->alloc_done = 0; } /* * Free all resources related to an allocator. */ static void netmap_destroy_obj_allocator(struct netmap_obj_pool *p) { if (p == NULL) return; netmap_reset_obj_allocator(p); } /* * We receive a request for objtotal objects, of size objsize each. * Internally we may round up both numbers, as we allocate objects * in small clusters multiple of the page size. * We need to keep track of objtotal and clustentries, * as they are needed when freeing memory. * * XXX note -- userspace needs the buffers to be contiguous, * so we cannot afford gaps at the end of a cluster. */ /* call with NMA_LOCK held */ static int netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) { int i; u_int clustsize; /* the cluster size, multiple of page size */ u_int clustentries; /* how many objects per entry */ /* we store the current request, so we can * detect configuration changes later */ p->r_objtotal = objtotal; p->r_objsize = objsize; #define MAX_CLUSTSIZE (1<<22) // 4 MB #define LINE_ROUND NM_CACHE_ALIGN // 64 if (objsize >= MAX_CLUSTSIZE) { /* we could do it but there is no point */ D("unsupported allocation for %d bytes", objsize); return EINVAL; } /* make sure objsize is a multiple of LINE_ROUND */ i = (objsize & (LINE_ROUND - 1)); if (i) { D("XXX aligning object by %d bytes", LINE_ROUND - i); objsize += LINE_ROUND - i; } if (objsize < p->objminsize || objsize > p->objmaxsize) { D("requested objsize %d out of range [%d, %d]", objsize, p->objminsize, p->objmaxsize); return EINVAL; } if (objtotal < p->nummin || objtotal > p->nummax) { D("requested objtotal %d out of range [%d, %d]", objtotal, p->nummin, p->nummax); return EINVAL; } /* * Compute number of objects using a brute-force approach: * given a max cluster size, * we try to fill it with objects keeping track of the * wasted space to the next page boundary. */ for (clustentries = 0, i = 1;; i++) { u_int delta, used = i * objsize; if (used > MAX_CLUSTSIZE) break; delta = used % PAGE_SIZE; if (delta == 0) { // exact solution clustentries = i; break; } } /* exact solution not found */ if (clustentries == 0) { D("unsupported allocation for %d bytes", objsize); return EINVAL; } /* compute clustsize */ clustsize = clustentries * objsize; if (netmap_verbose) D("objsize %d clustsize %d objects %d", objsize, clustsize, clustentries); /* * The number of clusters is n = ceil(objtotal/clustentries) * objtotal' = n * clustentries */ p->_clustentries = clustentries; p->_clustsize = clustsize; p->_numclusters = (objtotal + clustentries - 1) / clustentries; /* actual values (may be larger than requested) */ p->_objsize = objsize; p->_objtotal = p->_numclusters * clustentries; return 0; } /* call with NMA_LOCK held */ static int netmap_finalize_obj_allocator(struct netmap_obj_pool *p) { int i; /* must be signed */ size_t n; if (p->lut) { - /* already finalized, nothing to do */ + /* if the lut is already there we assume that also all the + * clusters have already been allocated, possibily by somebody + * else (e.g., extmem). In the latter case, the alloc_done flag + * will remain at zero, so that we will not attempt to + * deallocate the clusters by ourselves in + * netmap_reset_obj_allocator. 
+ */ return 0; } /* optimistically assume we have enough memory */ p->numclusters = p->_numclusters; p->objtotal = p->_objtotal; + p->alloc_done = 1; p->lut = nm_alloc_lut(p->objtotal); if (p->lut == NULL) { D("Unable to create lookup table for '%s'", p->name); goto clean; } /* * Allocate clusters, init pointers */ n = p->_clustsize; for (i = 0; i < (int)p->objtotal;) { int lim = i + p->_clustentries; char *clust; /* * XXX Note, we only need contigmalloc() for buffers attached * to native interfaces. In all other cases (nifp, netmap rings * and even buffers for VALE ports or emulated interfaces) we * can live with standard malloc, because the hardware will not * access the pages directly. */ clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); if (clust == NULL) { /* * If we get here, there is a severe memory shortage, * so halve the allocated memory to reclaim some. */ D("Unable to create cluster at %d for '%s' allocator", i, p->name); if (i < 2) /* nothing to halve */ goto out; lim = i / 2; for (i--; i >= lim; i--) { if (i % p->_clustentries == 0 && p->lut[i].vaddr) contigfree(p->lut[i].vaddr, n, M_NETMAP); p->lut[i].vaddr = NULL; } out: p->objtotal = i; /* we may have stopped in the middle of a cluster */ p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; break; } /* * Set lut state for all buffers in the current cluster. * * [i, lim) is the set of buffer indexes that cover the * current cluster. * * 'clust' is really the address of the current buffer in * the current cluster as we index through it with a stride * of p->_objsize. */ for (; i < lim; i++, clust += p->_objsize) { p->lut[i].vaddr = clust; #if !defined(linux) && !defined(_WIN32) p->lut[i].paddr = vtophys(clust); #endif } } p->memtotal = p->numclusters * p->_clustsize; if (netmap_verbose) D("Pre-allocated %d clusters (%d/%dKB) for '%s'", p->numclusters, p->_clustsize >> 10, p->memtotal >> 10, p->name); return 0; clean: netmap_reset_obj_allocator(p); return ENOMEM; } /* call with lock held */ static int netmap_mem_params_changed(struct netmap_obj_params* p) { int i, rv = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { if (p[i].last_size != p[i].size || p[i].last_num != p[i].num) { p[i].last_size = p[i].size; p[i].last_num = p[i].num; rv = 1; } } return rv; } static void netmap_mem_reset_all(struct netmap_mem_d *nmd) { int i; if (netmap_verbose) D("resetting %p", nmd); for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->flags &= ~NETMAP_MEM_FINALIZED; } static int netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na) { - int i, lim = p->_objtotal; + int i, lim = p->objtotal; struct netmap_lut *lut = &na->na_lut; if (na == NULL || na->pdev == NULL) return 0; #if defined(__FreeBSD__) (void)i; (void)lim; (void)lut; D("unsupported on FreeBSD"); #elif defined(_WIN32) (void)i; (void)lim; (void)lut; D("unsupported on Windows"); #else /* linux */ ND("unmapping and freeing plut for %s", na->name); if (lut->plut == NULL) return 0; for (i = 0; i < lim; i += p->_clustentries) { if (lut->plut[i].paddr) netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->_clustsize); } nm_free_plut(lut->plut); lut->plut = NULL; #endif /* linux */ return 0; } static int netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na) { int error = 0; int i, lim = p->objtotal; struct netmap_lut *lut = &na->na_lut; if (na->pdev == NULL) return 0; #if defined(__FreeBSD__) (void)i; (void)lim; (void)lut; D("unsupported on FreeBSD"); #elif 
defined(_WIN32) (void)i; (void)lim; (void)lut; D("unsupported on Windows"); #else /* linux */ if (lut->plut != NULL) { ND("plut already allocated for %s", na->name); return 0; } ND("allocating physical lut for %s", na->name); lut->plut = nm_alloc_plut(lim); if (lut->plut == NULL) { D("Failed to allocate physical lut for %s", na->name); return ENOMEM; } for (i = 0; i < lim; i += p->_clustentries) { lut->plut[i].paddr = 0; } for (i = 0; i < lim; i += p->_clustentries) { int j; if (p->lut[i].vaddr == NULL) continue; error = netmap_load_map(na, (bus_dma_tag_t) na->pdev, &lut->plut[i].paddr, p->lut[i].vaddr, p->_clustsize); if (error) { D("Failed to map cluster #%d from the %s pool", i, p->name); break; } for (j = 1; j < p->_clustentries; j++) { lut->plut[i + j].paddr = lut->plut[i + j - 1].paddr + p->_objsize; } } if (error) netmap_mem_unmap(p, na); #endif /* linux */ return error; } static int netmap_mem_finalize_all(struct netmap_mem_d *nmd) { int i; if (nmd->flags & NETMAP_MEM_FINALIZED) return 0; nmd->lasterr = 0; nmd->nm_totalsize = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]); if (nmd->lasterr) goto error; nmd->nm_totalsize += nmd->pools[i].memtotal; } nmd->lasterr = netmap_mem_init_bitmaps(nmd); if (nmd->lasterr) goto error; nmd->flags |= NETMAP_MEM_FINALIZED; if (netmap_verbose) D("interfaces %d KB, rings %d KB, buffers %d MB", nmd->pools[NETMAP_IF_POOL].memtotal >> 10, nmd->pools[NETMAP_RING_POOL].memtotal >> 10, nmd->pools[NETMAP_BUF_POOL].memtotal >> 20); if (netmap_verbose) D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree); return 0; error: netmap_mem_reset_all(nmd); return nmd->lasterr; } /* * allocator for private memory */ static void * _netmap_mem_private_new(size_t size, struct netmap_obj_params *p, struct netmap_mem_ops *ops, int *perr) { struct netmap_mem_d *d = NULL; int i, err = 0; d = nm_os_malloc(size); if (d == NULL) { err = ENOMEM; goto error; } *d = nm_blueprint; d->ops = ops; err = nm_mem_assign_id(d); if (err) goto error_free; snprintf(d->name, NM_MEM_NAMESZ, "%d", d->nm_id); for (i = 0; i < NETMAP_POOLS_NR; i++) { snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, nm_blueprint.pools[i].name, d->name); d->params[i].num = p[i].num; d->params[i].size = p[i].size; } NMA_LOCK_INIT(d); err = netmap_mem_config(d); if (err) goto error_rel_id; d->flags &= ~NETMAP_MEM_FINALIZED; return d; error_rel_id: NMA_LOCK_DESTROY(d); nm_mem_release_id(d); error_free: nm_os_free(d); error: if (perr) *perr = err; return NULL; } struct netmap_mem_d * netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr) { struct netmap_mem_d *d = NULL; struct netmap_obj_params p[NETMAP_POOLS_NR]; int i; u_int v, maxd; /* account for the fake host rings */ txr++; rxr++; /* copy the min values */ for (i = 0; i < NETMAP_POOLS_NR; i++) { p[i] = netmap_min_priv_params[i]; } /* possibly increase them to fit user request */ v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr); if (p[NETMAP_IF_POOL].size < v) p[NETMAP_IF_POOL].size = v; v = 2 + 4 * npipes; if (p[NETMAP_IF_POOL].num < v) p[NETMAP_IF_POOL].num = v; maxd = (txd > rxd) ? 
txd : rxd; v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd; if (p[NETMAP_RING_POOL].size < v) p[NETMAP_RING_POOL].size = v; /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake) * and two rx rings (again, 1 normal and 1 fake host) */ v = txr + rxr + 8 * npipes; if (p[NETMAP_RING_POOL].num < v) p[NETMAP_RING_POOL].num = v; /* for each pipe we only need the buffers for the 4 "real" rings. * On the other end, the pipe ring dimension may be different from * the parent port ring dimension. As a compromise, we allocate twice the * space actually needed if the pipe rings were the same size as the parent rings */ v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs; /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */ if (p[NETMAP_BUF_POOL].num < v) p[NETMAP_BUF_POOL].num = v; if (netmap_verbose) D("req if %d*%d ring %d*%d buf %d*%d", p[NETMAP_IF_POOL].num, p[NETMAP_IF_POOL].size, p[NETMAP_RING_POOL].num, p[NETMAP_RING_POOL].size, p[NETMAP_BUF_POOL].num, p[NETMAP_BUF_POOL].size); d = _netmap_mem_private_new(sizeof(*d), p, &netmap_mem_global_ops, perr); return d; } /* call with lock held */ static int netmap_mem2_config(struct netmap_mem_d *nmd) { int i; - if (nmd->active) - /* already in use, we cannot change the configuration */ - goto out; - if (!netmap_mem_params_changed(nmd->params)) goto out; ND("reconfiguring"); if (nmd->flags & NETMAP_MEM_FINALIZED) { /* reset previous allocation */ for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->flags &= ~NETMAP_MEM_FINALIZED; } for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], nmd->params[i].num, nmd->params[i].size); if (nmd->lasterr) goto out; } out: return nmd->lasterr; } static int netmap_mem2_finalize(struct netmap_mem_d *nmd) { - int err; - - /* update configuration if changed */ - if (netmap_mem_config(nmd)) - goto out1; - - nmd->active++; - - if (nmd->flags & NETMAP_MEM_FINALIZED) { - /* may happen if config is not changed */ - D("nothing to do"); + if (nmd->flags & NETMAP_MEM_FINALIZED) goto out; - } if (netmap_mem_finalize_all(nmd)) goto out; nmd->lasterr = 0; out: - if (nmd->lasterr) - nmd->active--; -out1: - err = nmd->lasterr; - - return err; - + return nmd->lasterr; } static void netmap_mem2_delete(struct netmap_mem_d *nmd) { int i; for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_destroy_obj_allocator(&nmd->pools[i]); } NMA_LOCK_DESTROY(nmd); if (nmd != &nm_mem) nm_os_free(nmd); } #ifdef WITH_EXTMEM /* doubly linekd list of all existing external allocators */ static struct netmap_mem_ext *netmap_mem_ext_list = NULL; NM_MTX_T nm_mem_ext_list_lock; #endif /* WITH_EXTMEM */ int netmap_mem_init(void) { NM_MTX_INIT(nm_mem_list_lock); NMA_LOCK_INIT(&nm_mem); netmap_mem_get(&nm_mem); #ifdef WITH_EXTMEM NM_MTX_INIT(nm_mem_ext_list_lock); #endif /* WITH_EXTMEM */ return (0); } void netmap_mem_fini(void) { netmap_mem_put(&nm_mem); } static void netmap_free_rings(struct netmap_adapter *na) { enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; struct netmap_ring *ring = kring->ring; if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) { if (netmap_verbose) D("NOT deleting ring %s (ring %p, users %d neekring %d)", kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING); continue; } if (netmap_verbose) D("deleting ring %s", kring->name); - if (i 
!= nma_get_nrings(na, t) || na->na_flags & NAF_HOST_RINGS) + if (!(kring->nr_kflags & NKR_FAKERING)) { + ND("freeing bufs for %s", kring->name); netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots); + } else { + ND("NOT freeing bufs for %s", kring->name); + } netmap_ring_free(na->nm_mem, ring); kring->ring = NULL; } } } /* call with NMA_LOCK held * * * Allocate netmap rings and buffers for this card * The rings are contiguous, but have variable size. * The kring array must follow the layout described * in netmap_krings_create(). */ static int netmap_mem2_rings_create(struct netmap_adapter *na) { enum txrx t; - NMA_LOCK(na->nm_mem); - for_rx_tx(t) { u_int i; for (i = 0; i <= nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; struct netmap_ring *ring = kring->ring; u_int len, ndesc; if (ring || (!kring->users && !(kring->nr_kflags & NKR_NEEDRING))) { /* uneeded, or already created by somebody else */ if (netmap_verbose) D("NOT creating ring %s (ring %p, users %d neekring %d)", kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING); continue; } if (netmap_verbose) D("creating %s", kring->name); ndesc = kring->nkr_num_slots; len = sizeof(struct netmap_ring) + ndesc * sizeof(struct netmap_slot); ring = netmap_ring_malloc(na->nm_mem, len); if (ring == NULL) { D("Cannot allocate %s_ring", nm_txrx2str(t)); goto cleanup; } ND("txring at %p", ring); kring->ring = ring; *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc; *(int64_t *)(uintptr_t)&ring->buf_ofs = (na->nm_mem->pools[NETMAP_IF_POOL].memtotal + na->nm_mem->pools[NETMAP_RING_POOL].memtotal) - netmap_ring_offset(na->nm_mem, ring); /* copy values from kring */ ring->head = kring->rhead; ring->cur = kring->rcur; ring->tail = kring->rtail; *(uint32_t *)(uintptr_t)&ring->nr_buf_size = netmap_mem_bufsize(na->nm_mem); ND("%s h %d c %d t %d", kring->name, ring->head, ring->cur, ring->tail); ND("initializing slots for %s_ring", nm_txrx2str(txrx)); - if (i != nma_get_nrings(na, t) || (na->na_flags & NAF_HOST_RINGS)) { + if (!(kring->nr_kflags & NKR_FAKERING)) { /* this is a real ring */ + ND("allocating buffers for %s", kring->name); if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) { D("Cannot allocate buffers for %s_ring", nm_txrx2str(t)); goto cleanup; } } else { /* this is a fake ring, set all indices to 0 */ + ND("NOT allocating buffers for %s", kring->name); netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0); } /* ring info */ *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id; *(uint16_t *)(uintptr_t)&ring->dir = kring->tx; } } - NMA_UNLOCK(na->nm_mem); - return 0; cleanup: netmap_free_rings(na); - NMA_UNLOCK(na->nm_mem); - return ENOMEM; } static void netmap_mem2_rings_delete(struct netmap_adapter *na) { /* last instance, release bufs and rings */ - NMA_LOCK(na->nm_mem); - netmap_free_rings(na); - - NMA_UNLOCK(na->nm_mem); } /* call with NMA_LOCK held */ /* * Allocate the per-fd structure netmap_if. * * We assume that the configuration stored in na * (number of tx/rx rings and descs) does not change while * the interface is in netmap mode. 
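The buf_ofs value stored into each ring above is the distance from the ring to the start of the buffer pool, so a buffer can be reached with pure pointer arithmetic relative to the ring, both in the kernel and in the userspace mapping. A hedged sketch of that computation follows; it assumes the userspace <net/netmap.h> header for struct netmap_ring and is essentially what the NETMAP_BUF() macro in net/netmap_user.h reduces to.

#include <stddef.h>
#include <stdint.h>
#include <net/netmap.h>         /* struct netmap_ring */

/* Address of buffer 'idx', computed relative to the ring that uses it. */
static inline char *
demo_buf_addr(struct netmap_ring *ring, uint32_t idx)
{
        return (char *)ring + ring->buf_ofs +
            (size_t)idx * ring->nr_buf_size;
}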
*/ static struct netmap_if * netmap_mem2_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) { struct netmap_if *nifp; ssize_t base; /* handy for relative offsets between rings and nifp */ u_int i, len, n[NR_TXRX], ntot; enum txrx t; ntot = 0; for_rx_tx(t) { /* account for the (eventually fake) host rings */ n[t] = nma_get_nrings(na, t) + 1; ntot += n[t]; } /* * the descriptor is followed inline by an array of offsets * to the tx and rx rings in the shared memory region. */ - NMA_LOCK(na->nm_mem); - len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t)); nifp = netmap_if_malloc(na->nm_mem, len); if (nifp == NULL) { NMA_UNLOCK(na->nm_mem); return NULL; } /* initialize base fields -- override const */ *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ); /* * fill the slots for the rx and tx rings. They contain the offset * between the ring and nifp, so the information is usable in * userspace to reach the ring from the nifp. */ base = netmap_if_offset(na->nm_mem, nifp); for (i = 0; i < n[NR_TX]; i++) { /* XXX instead of ofs == 0 maybe use the offset of an error * ring, like we do for buffers? */ ssize_t ofs = 0; - if (na->tx_rings[i].ring != NULL && i >= priv->np_qfirst[NR_TX] + if (na->tx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_TX] && i < priv->np_qlast[NR_TX]) { ofs = netmap_ring_offset(na->nm_mem, - na->tx_rings[i].ring) - base; + na->tx_rings[i]->ring) - base; } *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = ofs; } for (i = 0; i < n[NR_RX]; i++) { /* XXX instead of ofs == 0 maybe use the offset of an error * ring, like we do for buffers? */ ssize_t ofs = 0; - if (na->rx_rings[i].ring != NULL && i >= priv->np_qfirst[NR_RX] + if (na->rx_rings[i]->ring != NULL && i >= priv->np_qfirst[NR_RX] && i < priv->np_qlast[NR_RX]) { ofs = netmap_ring_offset(na->nm_mem, - na->rx_rings[i].ring) - base; + na->rx_rings[i]->ring) - base; } *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = ofs; } - NMA_UNLOCK(na->nm_mem); - return (nifp); } static void netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) { if (nifp == NULL) /* nothing to do */ return; - NMA_LOCK(na->nm_mem); if (nifp->ni_bufs_head) netmap_extra_free(na, nifp->ni_bufs_head); netmap_if_free(na->nm_mem, nifp); - - NMA_UNLOCK(na->nm_mem); } static void netmap_mem2_deref(struct netmap_mem_d *nmd) { - nmd->active--; - if (!nmd->active) - nmd->nm_grp = -1; if (netmap_verbose) D("active = %d", nmd->active); } struct netmap_mem_ops netmap_mem_global_ops = { .nmd_get_lut = netmap_mem2_get_lut, .nmd_get_info = netmap_mem2_get_info, .nmd_ofstophys = netmap_mem2_ofstophys, .nmd_config = netmap_mem2_config, .nmd_finalize = netmap_mem2_finalize, .nmd_deref = netmap_mem2_deref, .nmd_delete = netmap_mem2_delete, .nmd_if_offset = netmap_mem2_if_offset, .nmd_if_new = netmap_mem2_if_new, .nmd_if_delete = netmap_mem2_if_delete, .nmd_rings_create = netmap_mem2_rings_create, .nmd_rings_delete = netmap_mem2_rings_delete }; int -netmap_mem_pools_info_get(struct nmreq *nmr, struct netmap_mem_d *nmd) +netmap_mem_pools_info_get(struct nmreq_pools_info *req, + struct netmap_mem_d *nmd) { - uintptr_t *pp = (uintptr_t *)&nmr->nr_arg1; - struct netmap_pools_info *upi = (struct netmap_pools_info *)(*pp); - struct netmap_pools_info pi; - uint64_t memsize; - uint16_t memid; int ret; - ret = netmap_mem_get_info(nmd, &memsize, NULL, &memid); + ret = netmap_mem_get_info(nmd, &req->nr_memsize, NULL, + &req->nr_mem_id); 
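netmap_mem2_if_new() above records, for every ring, its offset relative to the netmap_if structure. Userspace recovers the ring pointers by adding those offsets back to the nifp address; the hedged sketch below shows the arithmetic, which is what the NETMAP_TXRING()/NETMAP_RXRING() macros in net/netmap_user.h boil down to. The demo_* wrappers are illustrative only and assume the userspace <net/netmap.h> header.

#include <net/netmap.h>         /* struct netmap_if, struct netmap_ring */

static inline struct netmap_ring *
demo_txring(struct netmap_if *nifp, unsigned int i)
{
        return (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[i]);
}

static inline struct netmap_ring *
demo_rxring(struct netmap_if *nifp, unsigned int i)
{
        /* rx offsets follow the ni_tx_rings + 1 tx entries (host ring included) */
        return (struct netmap_ring *)((char *)nifp +
            nifp->ring_ofs[i + nifp->ni_tx_rings + 1]);
}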
if (ret) { return ret; } - pi.memsize = memsize; - pi.memid = memid; NMA_LOCK(nmd); - pi.if_pool_offset = 0; - pi.if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal; - pi.if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize; + req->nr_if_pool_offset = 0; + req->nr_if_pool_objtotal = nmd->pools[NETMAP_IF_POOL].objtotal; + req->nr_if_pool_objsize = nmd->pools[NETMAP_IF_POOL]._objsize; - pi.ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal; - pi.ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal; - pi.ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize; + req->nr_ring_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal; + req->nr_ring_pool_objtotal = nmd->pools[NETMAP_RING_POOL].objtotal; + req->nr_ring_pool_objsize = nmd->pools[NETMAP_RING_POOL]._objsize; - pi.buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal + + req->nr_buf_pool_offset = nmd->pools[NETMAP_IF_POOL].memtotal + nmd->pools[NETMAP_RING_POOL].memtotal; - pi.buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; - pi.buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; + req->nr_buf_pool_objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; + req->nr_buf_pool_objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; NMA_UNLOCK(nmd); - ret = copyout(&pi, upi, sizeof(pi)); - if (ret) { - return ret; - } - return 0; } #ifdef WITH_EXTMEM struct netmap_mem_ext { struct netmap_mem_d up; - struct page **pages; - int nr_pages; + struct nm_os_extmem *os; struct netmap_mem_ext *next, *prev; }; /* call with nm_mem_list_lock held */ static void netmap_mem_ext_register(struct netmap_mem_ext *e) { NM_MTX_LOCK(nm_mem_ext_list_lock); if (netmap_mem_ext_list) netmap_mem_ext_list->prev = e; e->next = netmap_mem_ext_list; netmap_mem_ext_list = e; e->prev = NULL; NM_MTX_UNLOCK(nm_mem_ext_list_lock); } /* call with nm_mem_list_lock held */ static void netmap_mem_ext_unregister(struct netmap_mem_ext *e) { if (e->prev) e->prev->next = e->next; else netmap_mem_ext_list = e->next; if (e->next) e->next->prev = e->prev; e->prev = e->next = NULL; } -static int -netmap_mem_ext_same_pages(struct netmap_mem_ext *e, struct page **pages, int nr_pages) -{ - int i; - - if (e->nr_pages != nr_pages) - return 0; - - for (i = 0; i < nr_pages; i++) - if (pages[i] != e->pages[i]) - return 0; - - return 1; -} - static struct netmap_mem_ext * -netmap_mem_ext_search(struct page **pages, int nr_pages) +netmap_mem_ext_search(struct nm_os_extmem *os) { struct netmap_mem_ext *e; NM_MTX_LOCK(nm_mem_ext_list_lock); for (e = netmap_mem_ext_list; e; e = e->next) { - if (netmap_mem_ext_same_pages(e, pages, nr_pages)) { + if (nm_os_extmem_isequal(e->os, os)) { netmap_mem_get(&e->up); break; } } NM_MTX_UNLOCK(nm_mem_ext_list_lock); return e; } static void -netmap_mem_ext_free_pages(struct page **pages, int nr_pages) -{ - int i; - - for (i = 0; i < nr_pages; i++) { - kunmap(pages[i]); - put_page(pages[i]); - } - nm_os_vfree(pages); -} - -static void netmap_mem_ext_delete(struct netmap_mem_d *d) { int i; struct netmap_mem_ext *e = (struct netmap_mem_ext *)d; netmap_mem_ext_unregister(e); for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = &d->pools[i]; if (p->lut) { nm_free_lut(p->lut, p->objtotal); p->lut = NULL; } } - if (e->pages) { - netmap_mem_ext_free_pages(e->pages, e->nr_pages); - e->pages = NULL; - e->nr_pages = 0; - } + if (e->os) + nm_os_extmem_delete(e->os); netmap_mem2_delete(d); } static int netmap_mem_ext_config(struct netmap_mem_d *nmd) { return 0; } struct netmap_mem_ops netmap_mem_ext_ops = { .nmd_get_lut = netmap_mem2_get_lut, 
.nmd_get_info = netmap_mem2_get_info, .nmd_ofstophys = netmap_mem2_ofstophys, .nmd_config = netmap_mem_ext_config, .nmd_finalize = netmap_mem2_finalize, .nmd_deref = netmap_mem2_deref, .nmd_delete = netmap_mem_ext_delete, .nmd_if_offset = netmap_mem2_if_offset, .nmd_if_new = netmap_mem2_if_new, .nmd_if_delete = netmap_mem2_if_delete, .nmd_rings_create = netmap_mem2_rings_create, .nmd_rings_delete = netmap_mem2_rings_delete }; struct netmap_mem_d * -netmap_mem_ext_create(struct nmreq *nmr, int *perror) +netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror) { - uintptr_t p = *(uintptr_t *)&nmr->nr_arg1; - struct netmap_pools_info pi; int error = 0; - unsigned long end, start; - int nr_pages, res, i, j; - struct page **pages = NULL; + int i, j; struct netmap_mem_ext *nme; char *clust; size_t off; + struct nm_os_extmem *os = NULL; + int nr_pages; - error = copyin((void *)p, &pi, sizeof(pi)); - if (error) - goto out; - // XXX sanity checks - if (pi.if_pool_objtotal == 0) - pi.if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num; - if (pi.if_pool_objsize == 0) - pi.if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size; - if (pi.ring_pool_objtotal == 0) - pi.ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num; - if (pi.ring_pool_objsize == 0) - pi.ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size; - if (pi.buf_pool_objtotal == 0) - pi.buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num; - if (pi.buf_pool_objsize == 0) - pi.buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size; + if (pi->nr_if_pool_objtotal == 0) + pi->nr_if_pool_objtotal = netmap_min_priv_params[NETMAP_IF_POOL].num; + if (pi->nr_if_pool_objsize == 0) + pi->nr_if_pool_objsize = netmap_min_priv_params[NETMAP_IF_POOL].size; + if (pi->nr_ring_pool_objtotal == 0) + pi->nr_ring_pool_objtotal = netmap_min_priv_params[NETMAP_RING_POOL].num; + if (pi->nr_ring_pool_objsize == 0) + pi->nr_ring_pool_objsize = netmap_min_priv_params[NETMAP_RING_POOL].size; + if (pi->nr_buf_pool_objtotal == 0) + pi->nr_buf_pool_objtotal = netmap_min_priv_params[NETMAP_BUF_POOL].num; + if (pi->nr_buf_pool_objsize == 0) + pi->nr_buf_pool_objsize = netmap_min_priv_params[NETMAP_BUF_POOL].size; D("if %d %d ring %d %d buf %d %d", - pi.if_pool_objtotal, pi.if_pool_objsize, - pi.ring_pool_objtotal, pi.ring_pool_objsize, - pi.buf_pool_objtotal, pi.buf_pool_objsize); + pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize, + pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize, + pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize); - end = (p + pi.memsize + PAGE_SIZE - 1) >> PAGE_SHIFT; - start = p >> PAGE_SHIFT; - nr_pages = end - start; - - pages = nm_os_vmalloc(nr_pages * sizeof(*pages)); - if (pages == NULL) { - error = ENOMEM; + os = nm_os_extmem_create(usrptr, pi, &error); + if (os == NULL) { + D("os extmem creation failed"); goto out; } -#ifdef NETMAP_LINUX_HAVE_GUP_4ARGS - res = get_user_pages_unlocked( - p, - nr_pages, - pages, - FOLL_WRITE | FOLL_GET | FOLL_SPLIT | FOLL_POPULATE); // XXX check other flags -#elif defined(NETMAP_LINUX_HAVE_GUP_5ARGS) - res = get_user_pages_unlocked( - p, - nr_pages, - 1, /* write */ - 0, /* don't force */ - pages); -#elif defined(NETMAP_LINUX_HAVE_GUP_7ARGS) - res = get_user_pages_unlocked( - current, - current->mm, - p, - nr_pages, - 1, /* write */ - 0, /* don't force */ - pages); -#else - down_read(¤t->mm->mmap_sem); - res = get_user_pages( - current, - current->mm, - p, - nr_pages, - 1, /* write */ - 0, /* don't force */ - pages, - NULL); 
- up_read(¤t->mm->mmap_sem); -#endif /* NETMAP_LINUX_GUP */ - - if (res < nr_pages) { - error = EFAULT; - goto out_unmap; - } - - nme = netmap_mem_ext_search(pages, nr_pages); + nme = netmap_mem_ext_search(os); if (nme) { - netmap_mem_ext_free_pages(pages, nr_pages); + nm_os_extmem_delete(os); return &nme->up; } D("not found, creating new"); nme = _netmap_mem_private_new(sizeof(*nme), (struct netmap_obj_params[]){ - { pi.if_pool_objsize, pi.if_pool_objtotal }, - { pi.ring_pool_objsize, pi.ring_pool_objtotal }, - { pi.buf_pool_objsize, pi.buf_pool_objtotal }}, + { pi->nr_if_pool_objsize, pi->nr_if_pool_objtotal }, + { pi->nr_ring_pool_objsize, pi->nr_ring_pool_objtotal }, + { pi->nr_buf_pool_objsize, pi->nr_buf_pool_objtotal }}, &netmap_mem_ext_ops, &error); if (nme == NULL) goto out_unmap; + nr_pages = nm_os_extmem_nr_pages(os); + /* from now on pages will be released by nme destructor; * we let res = 0 to prevent release in out_unmap below */ - res = 0; - nme->pages = pages; - nme->nr_pages = nr_pages; - nme->up.flags |= NETMAP_MEM_EXT; + nme->os = os; + os = NULL; /* pass ownership */ - clust = kmap(*pages); + clust = nm_os_extmem_nextpage(nme->os); off = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = &nme->up.pools[i]; struct netmap_obj_params *o = &nme->up.params[i]; p->_objsize = o->size; p->_clustsize = o->size; p->_clustentries = 1; p->lut = nm_alloc_lut(o->num); if (p->lut == NULL) { error = ENOMEM; goto out_delete; } p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t); p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots); if (p->invalid_bitmap == NULL) { error = ENOMEM; goto out_delete; } if (nr_pages == 0) { p->objtotal = 0; p->memtotal = 0; p->objfree = 0; continue; } for (j = 0; j < o->num && nr_pages > 0; j++) { size_t noff; - size_t skip; p->lut[j].vaddr = clust + off; +#if !defined(linux) && !defined(_WIN32) + p->lut[j].paddr = vtophys(p->lut[j].vaddr); +#endif ND("%s %d at %p", p->name, j, p->lut[j].vaddr); noff = off + p->_objsize; if (noff < PAGE_SIZE) { off = noff; continue; } ND("too big, recomputing offset..."); - skip = PAGE_SIZE - (off & PAGE_MASK); while (noff >= PAGE_SIZE) { - noff -= skip; - pages++; + char *old_clust = clust; + noff -= PAGE_SIZE; + clust = nm_os_extmem_nextpage(nme->os); nr_pages--; ND("noff %zu page %p nr_pages %d", noff, page_to_virt(*pages), nr_pages); if (noff > 0 && !nm_isset(p->invalid_bitmap, j) && - (nr_pages == 0 || *pages != *(pages - 1) + 1)) + (nr_pages == 0 || + old_clust + PAGE_SIZE != clust)) { /* out of space or non contiguous, * drop this object * */ p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U); ND("non contiguous at off %zu, drop", noff); } if (nr_pages == 0) break; - skip = PAGE_SIZE; } off = noff; - if (nr_pages > 0) - clust = kmap(*pages); } p->objtotal = j; p->numclusters = p->objtotal; p->memtotal = j * p->_objsize; ND("%d memtotal %u", j, p->memtotal); } - /* skip the first netmap_if, where the pools info reside */ - { - struct netmap_obj_pool *p = &nme->up.pools[NETMAP_IF_POOL]; - p->invalid_bitmap[0] |= 1U; - } - netmap_mem_ext_register(nme); return &nme->up; out_delete: netmap_mem_put(&nme->up); out_unmap: - for (i = 0; i < res; i++) - put_page(pages[i]); - if (res) - nm_os_free(pages); + if (os) + nm_os_extmem_delete(os); out: if (perror) *perror = error; return NULL; } #endif /* WITH_EXTMEM */ #ifdef WITH_PTNETMAP_GUEST struct mem_pt_if { struct mem_pt_if *next; struct ifnet *ifp; unsigned int nifp_offset; }; /* Netmap allocator for ptnetmap guests. 
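The extmem path above marks objects that would straddle non-contiguous user pages in invalid_bitmap, one bit per object packed into 32-bit words. The sketch below isolates that indexing (word j>>5, bit j&31), matching the expression used in netmap_mem_ext_create() and presumably what nm_isset() tests; the demo_* names are hypothetical.

#include <stdint.h>

static inline void
demo_bitmap_set(uint32_t *bitmap, uint32_t j)
{
        bitmap[j >> 5] |= 1U << (j & 31U);
}

static inline int
demo_bitmap_isset(const uint32_t *bitmap, uint32_t j)
{
        return (bitmap[j >> 5] >> (j & 31U)) & 1U;
}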
*/ struct netmap_mem_ptg { struct netmap_mem_d up; vm_paddr_t nm_paddr; /* physical address in the guest */ void *nm_addr; /* virtual address in the guest */ struct netmap_lut buf_lut; /* lookup table for BUF pool in the guest */ nm_memid_t host_mem_id; /* allocator identifier in the host */ struct ptnetmap_memdev *ptn_dev;/* ptnetmap memdev */ struct mem_pt_if *pt_ifs; /* list of interfaces in passthrough */ }; /* Link a passthrough interface to a passthrough netmap allocator. */ static int netmap_mem_pt_guest_ifp_add(struct netmap_mem_d *nmd, struct ifnet *ifp, unsigned int nifp_offset) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; struct mem_pt_if *ptif = nm_os_malloc(sizeof(*ptif)); if (!ptif) { return ENOMEM; } NMA_LOCK(nmd); ptif->ifp = ifp; ptif->nifp_offset = nifp_offset; if (ptnmd->pt_ifs) { ptif->next = ptnmd->pt_ifs; } ptnmd->pt_ifs = ptif; NMA_UNLOCK(nmd); D("added (ifp=%p,nifp_offset=%u)", ptif->ifp, ptif->nifp_offset); return 0; } /* Called with NMA_LOCK(nmd) held. */ static struct mem_pt_if * netmap_mem_pt_guest_ifp_lookup(struct netmap_mem_d *nmd, struct ifnet *ifp) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; struct mem_pt_if *curr; for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { if (curr->ifp == ifp) { return curr; } } return NULL; } /* Unlink a passthrough interface from a passthrough netmap allocator. */ int netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; struct mem_pt_if *prev = NULL; struct mem_pt_if *curr; int ret = -1; NMA_LOCK(nmd); for (curr = ptnmd->pt_ifs; curr; curr = curr->next) { if (curr->ifp == ifp) { if (prev) { prev->next = curr->next; } else { ptnmd->pt_ifs = curr->next; } D("removed (ifp=%p,nifp_offset=%u)", curr->ifp, curr->nifp_offset); nm_os_free(curr); ret = 0; break; } prev = curr; } NMA_UNLOCK(nmd); return ret; } static int netmap_mem_pt_guest_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; if (!(nmd->flags & NETMAP_MEM_FINALIZED)) { return EINVAL; } *lut = ptnmd->buf_lut; return 0; } static int netmap_mem_pt_guest_get_info(struct netmap_mem_d *nmd, uint64_t *size, u_int *memflags, uint16_t *id) { int error = 0; - NMA_LOCK(nmd); - error = nmd->ops->nmd_config(nmd); if (error) goto out; if (size) *size = nmd->nm_totalsize; if (memflags) *memflags = nmd->flags; if (id) *id = nmd->nm_id; out: - NMA_UNLOCK(nmd); return error; } static vm_paddr_t netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; vm_paddr_t paddr; /* if the offset is valid, just return csb->base_addr + off */ paddr = (vm_paddr_t)(ptnmd->nm_paddr + off); ND("off %lx padr %lx", off, (unsigned long)paddr); return paddr; } static int netmap_mem_pt_guest_config(struct netmap_mem_d *nmd) { /* nothing to do, we are configured on creation * and configuration never changes thereafter */ return 0; } static int netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; uint64_t mem_size; uint32_t bufsize; uint32_t nbuffers; uint32_t poolofs; vm_paddr_t paddr; char *vaddr; int i; int error = 0; - nmd->active++; - if (nmd->flags & NETMAP_MEM_FINALIZED) goto out; if (ptnmd->ptn_dev == NULL) { D("ptnetmap memdev not attached"); error = ENOMEM; - goto err; + goto out; } /* Map memory through ptnetmap-memdev BAR. 
*/ error = nm_os_pt_memdev_iomap(ptnmd->ptn_dev, &ptnmd->nm_paddr, &ptnmd->nm_addr, &mem_size); if (error) - goto err; + goto out; /* Initialize the lut using the information contained in the * ptnetmap memory device. */ bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, PTNET_MDEV_IO_BUF_POOL_OBJSZ); nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, PTNET_MDEV_IO_BUF_POOL_OBJNUM); /* allocate the lut */ if (ptnmd->buf_lut.lut == NULL) { D("allocating lut"); ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers); if (ptnmd->buf_lut.lut == NULL) { D("lut allocation failed"); return ENOMEM; } } /* we have physically contiguous memory mapped through PCI BAR */ poolofs = nm_os_pt_memdev_ioread(ptnmd->ptn_dev, PTNET_MDEV_IO_BUF_POOL_OFS); vaddr = (char *)(ptnmd->nm_addr) + poolofs; paddr = ptnmd->nm_paddr + poolofs; for (i = 0; i < nbuffers; i++) { ptnmd->buf_lut.lut[i].vaddr = vaddr; vaddr += bufsize; paddr += bufsize; } ptnmd->buf_lut.objtotal = nbuffers; ptnmd->buf_lut.objsize = bufsize; nmd->nm_totalsize = (unsigned int)mem_size; + /* Initialize these fields as are needed by + * netmap_mem_bufsize(). + * XXX please improve this, why do we need this + * replication? maybe we nmd->pools[] should no be + * there for the guest allocator? */ + nmd->pools[NETMAP_BUF_POOL]._objsize = bufsize; + nmd->pools[NETMAP_BUF_POOL]._objtotal = nbuffers; + nmd->flags |= NETMAP_MEM_FINALIZED; out: - return 0; -err: - nmd->active--; return error; } static void netmap_mem_pt_guest_deref(struct netmap_mem_d *nmd) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; - nmd->active--; - if (nmd->active <= 0 && + if (nmd->active == 1 && (nmd->flags & NETMAP_MEM_FINALIZED)) { nmd->flags &= ~NETMAP_MEM_FINALIZED; /* unmap ptnetmap-memdev memory */ if (ptnmd->ptn_dev) { nm_os_pt_memdev_iounmap(ptnmd->ptn_dev); } ptnmd->nm_addr = NULL; ptnmd->nm_paddr = 0; } } static ssize_t netmap_mem_pt_guest_if_offset(struct netmap_mem_d *nmd, const void *vaddr) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)nmd; return (const char *)(vaddr) - (char *)(ptnmd->nm_addr); } static void netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd) { if (nmd == NULL) return; if (netmap_verbose) D("deleting %p", nmd); if (nmd->active > 0) D("bug: deleting mem allocator with active=%d!", nmd->active); if (netmap_verbose) D("done deleting %p", nmd); NMA_LOCK_DESTROY(nmd); nm_os_free(nmd); } static struct netmap_if * netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem; struct mem_pt_if *ptif; struct netmap_if *nifp = NULL; - NMA_LOCK(na->nm_mem); - ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); if (ptif == NULL) { D("Error: interface %p is not in passthrough", na->ifp); goto out; } nifp = (struct netmap_if *)((char *)(ptnmd->nm_addr) + ptif->nifp_offset); - NMA_UNLOCK(na->nm_mem); out: return nifp; } static void netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) { struct mem_pt_if *ptif; - NMA_LOCK(na->nm_mem); ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); if (ptif == NULL) { D("Error: interface %p is not in passthrough", na->ifp); } - NMA_UNLOCK(na->nm_mem); } static int netmap_mem_pt_guest_rings_create(struct netmap_adapter *na) { struct netmap_mem_ptg *ptnmd = (struct netmap_mem_ptg *)na->nm_mem; struct mem_pt_if *ptif; struct netmap_if *nifp; int i, error = -1; - NMA_LOCK(na->nm_mem); - ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp); if (ptif == NULL) { D("Error: interface 
%p is not in passthrough", na->ifp); goto out; } /* point each kring to the corresponding backend ring */ nifp = (struct netmap_if *)((char *)ptnmd->nm_addr + ptif->nifp_offset); for (i = 0; i <= na->num_tx_rings; i++) { - struct netmap_kring *kring = na->tx_rings + i; + struct netmap_kring *kring = na->tx_rings[i]; if (kring->ring) continue; kring->ring = (struct netmap_ring *) ((char *)nifp + nifp->ring_ofs[i]); } for (i = 0; i <= na->num_rx_rings; i++) { - struct netmap_kring *kring = na->rx_rings + i; + struct netmap_kring *kring = na->rx_rings[i]; if (kring->ring) continue; kring->ring = (struct netmap_ring *) ((char *)nifp + nifp->ring_ofs[i + na->num_tx_rings + 1]); } error = 0; out: - NMA_UNLOCK(na->nm_mem); - return error; } static void netmap_mem_pt_guest_rings_delete(struct netmap_adapter *na) { #if 0 enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { struct netmap_kring *kring = &NMR(na, t)[i]; kring->ring = NULL; } } #endif } static struct netmap_mem_ops netmap_mem_pt_guest_ops = { .nmd_get_lut = netmap_mem_pt_guest_get_lut, .nmd_get_info = netmap_mem_pt_guest_get_info, .nmd_ofstophys = netmap_mem_pt_guest_ofstophys, .nmd_config = netmap_mem_pt_guest_config, .nmd_finalize = netmap_mem_pt_guest_finalize, .nmd_deref = netmap_mem_pt_guest_deref, .nmd_if_offset = netmap_mem_pt_guest_if_offset, .nmd_delete = netmap_mem_pt_guest_delete, .nmd_if_new = netmap_mem_pt_guest_if_new, .nmd_if_delete = netmap_mem_pt_guest_if_delete, .nmd_rings_create = netmap_mem_pt_guest_rings_create, .nmd_rings_delete = netmap_mem_pt_guest_rings_delete }; /* Called with nm_mem_list_lock held. */ static struct netmap_mem_d * netmap_mem_pt_guest_find_memid(nm_memid_t mem_id) { struct netmap_mem_d *mem = NULL; struct netmap_mem_d *scan = netmap_last_mem_d; do { /* find ptnetmap allocator through host ID */ if (scan->ops->nmd_deref == netmap_mem_pt_guest_deref && ((struct netmap_mem_ptg *)(scan))->host_mem_id == mem_id) { mem = scan; mem->refcount++; NM_DBG_REFC(mem, __FUNCTION__, __LINE__); break; } scan = scan->next; } while (scan != netmap_last_mem_d); return mem; } /* Called with nm_mem_list_lock held. */ static struct netmap_mem_d * netmap_mem_pt_guest_create(nm_memid_t mem_id) { struct netmap_mem_ptg *ptnmd; int err = 0; ptnmd = nm_os_malloc(sizeof(struct netmap_mem_ptg)); if (ptnmd == NULL) { err = ENOMEM; goto error; } ptnmd->up.ops = &netmap_mem_pt_guest_ops; ptnmd->host_mem_id = mem_id; ptnmd->pt_ifs = NULL; /* Assign new id in the guest (We have the lock) */ err = nm_mem_assign_id_locked(&ptnmd->up); if (err) goto error; ptnmd->up.flags &= ~NETMAP_MEM_FINALIZED; ptnmd->up.flags |= NETMAP_MEM_IO; NMA_LOCK_INIT(&ptnmd->up); snprintf(ptnmd->up.name, NM_MEM_NAMESZ, "%d", ptnmd->up.nm_id); return &ptnmd->up; error: netmap_mem_pt_guest_delete(&ptnmd->up); return NULL; } /* * find host id in guest allocators and create guest allocator * if it is not there */ static struct netmap_mem_d * netmap_mem_pt_guest_get(nm_memid_t mem_id) { struct netmap_mem_d *nmd; NM_MTX_LOCK(nm_mem_list_lock); nmd = netmap_mem_pt_guest_find_memid(mem_id); if (nmd == NULL) { nmd = netmap_mem_pt_guest_create(mem_id); } NM_MTX_UNLOCK(nm_mem_list_lock); return nmd; } /* * The guest allocator can be created by ptnetmap_memdev (during the device * attach) or by ptnetmap device (ptnet), during the netmap_attach. * * The order is not important (we have different order in LINUX and FreeBSD). * The first one, creates the device, and the second one simply attaches it. 
*/ /* Called when ptnetmap_memdev is attaching, to attach a new allocator in * the guest */ struct netmap_mem_d * netmap_mem_pt_guest_attach(struct ptnetmap_memdev *ptn_dev, nm_memid_t mem_id) { struct netmap_mem_d *nmd; struct netmap_mem_ptg *ptnmd; nmd = netmap_mem_pt_guest_get(mem_id); /* assign this device to the guest allocator */ if (nmd) { ptnmd = (struct netmap_mem_ptg *)nmd; ptnmd->ptn_dev = ptn_dev; } return nmd; } /* Called when ptnet device is attaching */ struct netmap_mem_d * netmap_mem_pt_guest_new(struct ifnet *ifp, unsigned int nifp_offset, unsigned int memid) { struct netmap_mem_d *nmd; if (ifp == NULL) { return NULL; } nmd = netmap_mem_pt_guest_get((nm_memid_t)memid); if (nmd) { netmap_mem_pt_guest_ifp_add(nmd, ifp, nifp_offset); } return nmd; } #endif /* WITH_PTNETMAP_GUEST */ Index: head/sys/dev/netmap/netmap_mem2.h =================================================================== --- head/sys/dev/netmap/netmap_mem2.h (revision 332422) +++ head/sys/dev/netmap/netmap_mem2.h (revision 332423) @@ -1,178 +1,189 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2012-2014 Matteo Landi * Copyright (C) 2012-2016 Luigi Rizzo * Copyright (C) 2012-2016 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * (New) memory allocator for netmap */ /* * This allocator creates three memory pools: * nm_if_pool for the struct netmap_if * nm_ring_pool for the struct netmap_ring * nm_buf_pool for the packet buffers. * * that contain netmap objects. Each pool is made of a number of clusters, * multiple of a page size, each containing an integer number of objects. * The clusters are contiguous in user space but not in the kernel. * Only nm_buf_pool needs to be dma-able, * but for convenience use the same type of allocator for all. * * Once mapped, the three pools are exported to userspace * as a contiguous block, starting from nm_if_pool. Each * cluster (and pool) is an integral number of pages. * [ . . . ][ . . . . . .][ . . . . . . . . . .] * nm_if nm_ring nm_buf * * The userspace areas contain offsets of the objects in userspace. * When (at init time) we write these offsets, we find out the index * of the object, and from there locate the offset from the beginning * of the region. 
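The header comment above describes how the three pools are exported to userspace as one contiguous region in the fixed order if / ring / buf. The per-pool offsets are therefore just running sums of the preceding pools' sizes, which is what netmap_mem_pools_info_get() reports earlier in this change. A minimal sketch of that arithmetic, with hypothetical demo_* names:

#include <stdint.h>

struct demo_layout {
        uint64_t if_ofs, ring_ofs, buf_ofs, total;
};

static struct demo_layout
demo_pool_layout(uint64_t if_memtotal, uint64_t ring_memtotal,
    uint64_t buf_memtotal)
{
        struct demo_layout l;

        l.if_ofs = 0;                             /* nm_if_pool comes first */
        l.ring_ofs = if_memtotal;                 /* then nm_ring_pool */
        l.buf_ofs = if_memtotal + ring_memtotal;  /* then nm_buf_pool */
        l.total = l.buf_ofs + buf_memtotal;
        return l;
}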
* * The invididual allocators manage a pool of memory for objects of * the same size. * The pool is split into smaller clusters, whose size is a * multiple of the page size. The cluster size is chosen * to minimize the waste for a given max cluster size * (we do it by brute force, as we have relatively few objects * per cluster). * * Objects are aligned to the cache line (64 bytes) rounding up object * sizes when needed. A bitmap contains the state of each object. * Allocation scans the bitmap; this is done only on attach, so we are not * too worried about performance * * For each allocator we can define (thorugh sysctl) the size and * number of each object. Memory is allocated at the first use of a * netmap file descriptor, and can be freed when all such descriptors * have been released (including unmapping the memory). * If memory is scarce, the system tries to get as much as possible * and the sysctl values reflect the actual allocation. * Together with desired values, the sysctl export also absolute * min and maximum values that cannot be overridden. * * struct netmap_if: * variable size, max 16 bytes per ring pair plus some fixed amount. * 1024 bytes should be large enough in practice. * * In the worst case we have one netmap_if per ring in the system. * * struct netmap_ring * variable size, 8 byte per slot plus some fixed amount. * Rings can be large (e.g. 4k slots, or >32Kbytes). * We default to 36 KB (9 pages), and a few hundred rings. * * struct netmap_buffer * The more the better, both because fast interfaces tend to have * many slots, and because we may want to use buffers to store * packets in userspace avoiding copies. * Must contain a full frame (eg 1518, or more for vlans, jumbo * frames etc.) plus be nicely aligned, plus some NICs restrict * the size to multiple of 1K or so. Default to 2K */ #ifndef _NET_NETMAP_MEM2_H_ #define _NET_NETMAP_MEM2_H_ /* We implement two kinds of netmap_mem_d structures: * * - global: used by hardware NICS; * * - private: used by VALE ports. * * In both cases, the netmap_mem_d structure has the same lifetime as the * netmap_adapter of the corresponding NIC or port. It is the responsibility of * the client code to delete the private allocator when the associated * netmap_adapter is freed (this is implemented by the NAF_MEM_OWNER flag in * netmap.c). The 'refcount' field counts the number of active users of the * structure. The global allocator uses this information to prevent/allow * reconfiguration. The private allocators release all their memory when there * are no active users. By 'active user' we mean an existing netmap_priv * structure holding a reference to the allocator. 
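Among the details above, object sizes are rounded up to the cache line before the cluster computation. A one-line sketch of that rounding, assuming the usual 64-byte NM_CACHE_ALIGN noted in the allocator code:

/* Round an object size up to the next multiple of 64 bytes. */
static inline unsigned int
demo_round_objsize(unsigned int objsize)
{
        return (objsize + 63u) & ~63u;
}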
*/ extern struct netmap_mem_d nm_mem; typedef uint16_t nm_memid_t; int netmap_mem_get_lut(struct netmap_mem_d *, struct netmap_lut *); nm_memid_t netmap_mem_get_id(struct netmap_mem_d *); vm_paddr_t netmap_mem_ofstophys(struct netmap_mem_d *, vm_ooffset_t); #ifdef _WIN32 PMDL win32_build_user_vm_map(struct netmap_mem_d* nmd); #endif int netmap_mem_finalize(struct netmap_mem_d *, struct netmap_adapter *); int netmap_mem_init(void); void netmap_mem_fini(void); struct netmap_if * netmap_mem_if_new(struct netmap_adapter *, struct netmap_priv_d *); void netmap_mem_if_delete(struct netmap_adapter *, struct netmap_if *); int netmap_mem_rings_create(struct netmap_adapter *); void netmap_mem_rings_delete(struct netmap_adapter *); int netmap_mem_deref(struct netmap_mem_d *, struct netmap_adapter *); -int netmap_mem2_get_pool_info(struct netmap_mem_d *, u_int, u_int *, u_int *); -int netmap_mem_get_info(struct netmap_mem_d *, uint64_t *size, u_int *memflags, uint16_t *id); +int netmap_mem2_get_pool_info(struct netmap_mem_d *, u_int, u_int *, u_int *); +int netmap_mem_get_info(struct netmap_mem_d *, uint64_t *size, + u_int *memflags, nm_memid_t *id); ssize_t netmap_mem_if_offset(struct netmap_mem_d *, const void *vaddr); struct netmap_mem_d* netmap_mem_private_new( u_int txr, u_int txd, u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int* error); -void netmap_mem_delete(struct netmap_mem_d *); #define netmap_mem_get(d) __netmap_mem_get(d, __FUNCTION__, __LINE__) #define netmap_mem_put(d) __netmap_mem_put(d, __FUNCTION__, __LINE__) struct netmap_mem_d* __netmap_mem_get(struct netmap_mem_d *, const char *, int); void __netmap_mem_put(struct netmap_mem_d *, const char *, int); struct netmap_mem_d* netmap_mem_find(nm_memid_t); unsigned netmap_mem_bufsize(struct netmap_mem_d *nmd); #ifdef WITH_EXTMEM -struct netmap_mem_d* netmap_mem_ext_create(struct nmreq *, int *); +struct netmap_mem_d* netmap_mem_ext_create(uint64_t, struct nmreq_pools_info *, int *); #else /* !WITH_EXTMEM */ #define netmap_mem_ext_create(nmr, _perr) \ ({ int *perr = _perr; if (perr) *(perr) = EOPNOTSUPP; NULL; }) #endif /* WITH_EXTMEM */ #ifdef WITH_PTNETMAP_GUEST struct netmap_mem_d* netmap_mem_pt_guest_new(struct ifnet *, unsigned int nifp_offset, unsigned int memid); struct ptnetmap_memdev; struct netmap_mem_d* netmap_mem_pt_guest_attach(struct ptnetmap_memdev *, uint16_t); int netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *, struct ifnet *); #endif /* WITH_PTNETMAP_GUEST */ -int netmap_mem_pools_info_get(struct nmreq *, struct netmap_mem_d *); +int netmap_mem_pools_info_get(struct nmreq_pools_info *, + struct netmap_mem_d *); #define NETMAP_MEM_PRIVATE 0x2 /* allocator uses private address space */ #define NETMAP_MEM_IO 0x4 /* the underlying memory is mmapped I/O */ #define NETMAP_MEM_EXT 0x10 /* external memory (not remappable) */ uint32_t netmap_extra_alloc(struct netmap_adapter *, uint32_t *, uint32_t n); + +#ifdef WITH_EXTMEM +#include +struct nm_os_extmem; /* opaque */ +struct nm_os_extmem *nm_os_extmem_create(unsigned long, struct nmreq_pools_info *, int *perror); +char *nm_os_extmem_nextpage(struct nm_os_extmem *); +int nm_os_extmem_nr_pages(struct nm_os_extmem *); +int nm_os_extmem_isequal(struct nm_os_extmem *, struct nm_os_extmem *); +void nm_os_extmem_delete(struct nm_os_extmem *); +#endif /* WITH_EXTMEM */ #endif Index: head/sys/dev/netmap/netmap_monitor.c =================================================================== --- head/sys/dev/netmap/netmap_monitor.c (revision 332422) +++ 
head/sys/dev/netmap/netmap_monitor.c (revision 332423) @@ -1,977 +1,981 @@ /* * Copyright (C) 2014-2016 Giuseppe Lettieri * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * Monitors * * netmap monitors can be used to do monitoring of network traffic * on another adapter, when the latter adapter is working in netmap mode. * * Monitors offer to userspace the same interface as any other netmap port, * with as many pairs of netmap rings as the monitored adapter. * However, only the rx rings are actually used. Each monitor rx ring receives * the traffic transiting on both the tx and rx corresponding rings in the * monitored adapter. During registration, the user can choose if she wants * to intercept tx only, rx only, or both tx and rx traffic. * * If the monitor is not able to cope with the stream of frames, excess traffic * will be dropped. * * If the monitored adapter leaves netmap mode, the monitor has to be restarted. * * Monitors can be either zero-copy or copy-based. * * Copy monitors see the frames before they are consumed: * * - For tx traffic, this is when the application sends them, before they are * passed down to the adapter. * * - For rx traffic, this is when they are received by the adapter, before * they are sent up to the application, if any (note that, if no * application is reading from a monitored ring, the ring will eventually * fill up and traffic will stop). * * Zero-copy monitors only see the frames after they have been consumed: * * - For tx traffic, this is after the slots containing the frames have been * marked as free. Note that this may happen at a considerably delay after * frame transmission, since freeing of slots is often done lazily. * * - For rx traffic, this is after the consumer on the monitored adapter * has released them. In most cases, the consumer is a userspace * application which may have modified the frame contents. * * Several copy or zero-copy monitors may be active on any ring. 
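The comment above explains what monitor ports provide; from an application's point of view they are opened like any other netmap port, with a suffix selecting tx, rx, or zero-copy monitoring. The hedged sketch below uses the nm_open()/nm_nextpkt() helpers from net/netmap_user.h and assumes the '/r', '/t' and '/z' monitor suffixes described in netmap(4); the interface name em0 is an example, and the man page is the authoritative reference for the suffix syntax.

#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>

int
main(void)
{
        /* copy monitor of both the tx and rx traffic of em0 (assumed name) */
        struct nm_desc *d = nm_open("netmap:em0/rt", NULL, 0, NULL);
        struct nm_pkthdr h;
        const unsigned char *buf;

        if (d == NULL)
                return 1;
        /* a real application would poll() d->fd for POLLIN before scanning */
        while ((buf = nm_nextpkt(d, &h)) != NULL) {
                /* h.len bytes of the monitored frame are available at buf */
        }
        nm_close(d);
        return 0;
}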
* */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include #include #include #include #include #include #include /* sockaddrs */ #include #include #include /* bus_dmamap_* */ #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #ifdef WITH_MONITOR #define NM_MONITOR_MAXSLOTS 4096 /* ******************************************************************** * functions common to both kind of monitors ******************************************************************** */ static int netmap_zmon_reg(struct netmap_adapter *, int); static int nm_is_zmon(struct netmap_adapter *na) { return na->nm_register == netmap_zmon_reg; } /* nm_sync callback for the monitor's own tx rings. * This makes no sense and always returns error */ static int netmap_monitor_txsync(struct netmap_kring *kring, int flags) { RD(1, "%s %x", kring->name, flags); return EIO; } /* nm_sync callback for the monitor's own rx rings. * Note that the lock in netmap_zmon_parent_sync only protects * writers among themselves. Synchronization between writers * (i.e., netmap_zmon_parent_txsync and netmap_zmon_parent_rxsync) * and readers (i.e., netmap_zmon_rxsync) relies on memory barriers. */ static int netmap_monitor_rxsync(struct netmap_kring *kring, int flags) { ND("%s %x", kring->name, flags); kring->nr_hwcur = kring->rhead; mb(); return 0; } /* nm_krings_create callbacks for monitors. */ static int netmap_monitor_krings_create(struct netmap_adapter *na) { int error = netmap_krings_create(na, 0); if (error) return error; /* override the host rings callbacks */ - na->tx_rings[na->num_tx_rings].nm_sync = netmap_monitor_txsync; - na->rx_rings[na->num_rx_rings].nm_sync = netmap_monitor_rxsync; + na->tx_rings[na->num_tx_rings]->nm_sync = netmap_monitor_txsync; + na->rx_rings[na->num_rx_rings]->nm_sync = netmap_monitor_rxsync; return 0; } /* nm_krings_delete callback for monitors */ static void netmap_monitor_krings_delete(struct netmap_adapter *na) { netmap_krings_delete(na); } static u_int nm_txrx2flag(enum txrx t) { return (t == NR_RX ? 
NR_MONITOR_RX : NR_MONITOR_TX); } /* allocate the monitors array in the monitored kring */ static int nm_monitor_alloc(struct netmap_kring *kring, u_int n) { size_t old_len, len; struct netmap_kring **nm; if (n <= kring->max_monitors) /* we already have more entries that requested */ return 0; old_len = sizeof(struct netmap_kring *)*kring->max_monitors; len = sizeof(struct netmap_kring *) * n; nm = nm_os_realloc(kring->monitors, len, old_len); if (nm == NULL) return ENOMEM; kring->monitors = nm; kring->max_monitors = n; return 0; } /* deallocate the parent array in the parent adapter */ static void nm_monitor_dealloc(struct netmap_kring *kring) { if (kring->monitors) { if (kring->n_monitors > 0) { D("freeing not empty monitor array for %s (%d dangling monitors)!", kring->name, kring->n_monitors); } nm_os_free(kring->monitors); kring->monitors = NULL; kring->max_monitors = 0; kring->n_monitors = 0; } } /* returns 1 iff kring has no monitors */ static inline int nm_monitor_none(struct netmap_kring *kring) { return kring->n_monitors == 0 && kring->zmon_list[NR_TX].next == NULL && kring->zmon_list[NR_RX].next == NULL; } /* * monitors work by replacing the nm_sync() and possibly the * nm_notify() callbacks in the monitored rings. */ static int netmap_zmon_parent_txsync(struct netmap_kring *, int); static int netmap_zmon_parent_rxsync(struct netmap_kring *, int); static int netmap_monitor_parent_txsync(struct netmap_kring *, int); static int netmap_monitor_parent_rxsync(struct netmap_kring *, int); static int netmap_monitor_parent_notify(struct netmap_kring *, int); /* add the monitor mkring to the list of monitors of kring. * If this is the first monitor, intercept the callbacks */ static int netmap_monitor_add(struct netmap_kring *mkring, struct netmap_kring *kring, int zmon) { int error = NM_IRQ_COMPLETED; enum txrx t = kring->tx; struct netmap_zmon_list *z = &kring->zmon_list[t]; struct netmap_zmon_list *mz = &mkring->zmon_list[t]; /* a zero-copy monitor which is not the first in the list * must monitor the previous monitor */ if (zmon && z->prev != NULL) kring = z->prev; /* synchronize with concurrently running nm_sync()s */ nm_kr_stop(kring, NM_KR_LOCKED); if (nm_monitor_none(kring)) { /* this is the first monitor, intercept callbacks */ ND("intercept callbacks on %s", kring->name); kring->mon_sync = kring->nm_sync; kring->mon_notify = kring->nm_notify; if (kring->tx == NR_TX) { kring->nm_sync = netmap_monitor_parent_txsync; } else { kring->nm_sync = netmap_monitor_parent_rxsync; kring->nm_notify = netmap_monitor_parent_notify; kring->mon_tail = kring->nr_hwtail; } } if (zmon) { /* append the zmon to the list */ struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)mkring->na; struct netmap_adapter *pna; if (z->prev != NULL) z->prev->zmon_list[t].next = mkring; mz->prev = z->prev; z->prev = mkring; if (z->next == NULL) z->next = mkring; /* grap a reference to the previous netmap adapter * in the chain (this may be the monitored port * or another zero-copy monitor) */ pna = kring->na; netmap_adapter_get(pna); netmap_adapter_put(mna->priv.np_na); mna->priv.np_na = pna; } else { /* make sure the monitor array exists and is big enough */ error = nm_monitor_alloc(kring, kring->n_monitors + 1); if (error) goto out; kring->monitors[kring->n_monitors] = mkring; mkring->mon_pos[kring->tx] = kring->n_monitors; kring->n_monitors++; } out: nm_kr_start(kring); return error; } /* remove the monitor mkring from the list of monitors of kring. 
* If this is the last monitor, restore the original callbacks */ static void netmap_monitor_del(struct netmap_kring *mkring, struct netmap_kring *kring) { struct netmap_zmon_list *mz = &mkring->zmon_list[kring->tx]; int zmon = nm_is_zmon(mkring->na); if (zmon && mz->prev != NULL) kring = mz->prev; /* synchronize with concurrently running nm_sync()s */ nm_kr_stop(kring, NM_KR_LOCKED); if (zmon) { /* remove the monitor from the list */ if (mz->prev != NULL) mz->prev->zmon_list[kring->tx].next = mz->next; else kring->zmon_list[kring->tx].next = mz->next; if (mz->next != NULL) { mz->next->zmon_list[kring->tx].prev = mz->prev; } else { kring->zmon_list[kring->tx].prev = mz->prev; } } else { /* this is a copy monitor */ uint32_t mon_pos = mkring->mon_pos[kring->tx]; kring->n_monitors--; if (mon_pos != kring->n_monitors) { kring->monitors[mon_pos] = kring->monitors[kring->n_monitors]; kring->monitors[mon_pos]->mon_pos[kring->tx] = mon_pos; } kring->monitors[kring->n_monitors] = NULL; if (kring->n_monitors == 0) { nm_monitor_dealloc(kring); } } if (nm_monitor_none(kring)) { /* this was the last monitor, restore the callbacks */ ND("%s: restoring sync on %s: %p", mkring->name, kring->name, kring->mon_sync); kring->nm_sync = kring->mon_sync; kring->mon_sync = NULL; if (kring->tx == NR_RX) { ND("%s: restoring notify on %s: %p", mkring->name, kring->name, kring->mon_notify); kring->nm_notify = kring->mon_notify; kring->mon_notify = NULL; } } nm_kr_start(kring); } /* This is called when the monitored adapter leaves netmap mode * (see netmap_do_unregif). * We need to notify the monitors that the monitored rings are gone. * We do this by setting their mna->priv.np_na to NULL. * Note that the rings are already stopped when this happens, so * no monitor ring callback can be active. */ void netmap_monitor_stop(struct netmap_adapter *na) { enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; struct netmap_kring *zkring; u_int j; for (j = 0; j < kring->n_monitors; j++) { struct netmap_kring *mkring = kring->monitors[j]; struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)mkring->na; /* forget about this adapter */ if (mna->priv.np_na != NULL) { netmap_adapter_put(mna->priv.np_na); mna->priv.np_na = NULL; } } zkring = kring->zmon_list[kring->tx].next; if (zkring != NULL) { struct netmap_monitor_adapter *next = (struct netmap_monitor_adapter *)zkring->na; struct netmap_monitor_adapter *this = (struct netmap_monitor_adapter *)na; struct netmap_adapter *pna = this->priv.np_na; /* let the next monitor forget about us */ if (next->priv.np_na != NULL) { netmap_adapter_put(next->priv.np_na); } if (pna != NULL && nm_is_zmon(na)) { /* we are a monitor ourselves and we may * need to pass down the reference to * the previous adapter in the chain */ netmap_adapter_get(pna); next->priv.np_na = pna; continue; } next->priv.np_na = NULL; } } } } /* common functions for the nm_register() callbacks of both kind of * monitors. 
*/ static int netmap_monitor_reg_common(struct netmap_adapter *na, int onoff, int zmon) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)na; struct netmap_priv_d *priv = &mna->priv; struct netmap_adapter *pna = priv->np_na; struct netmap_kring *kring, *mkring; int i; enum txrx t, s; ND("%p: onoff %d", na, onoff); if (onoff) { if (pna == NULL) { /* parent left netmap mode, fatal */ D("%s: internal error", na->name); return ENXIO; } for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { - mkring = &NMR(na, t)[i]; + mkring = NMR(na, t)[i]; if (!nm_kring_pending_on(mkring)) continue; mkring->nr_mode = NKR_NETMAP_ON; if (t == NR_TX) continue; for_rx_tx(s) { if (i > nma_get_nrings(pna, s)) continue; if (mna->flags & nm_txrx2flag(s)) { - kring = &NMR(pna, s)[i]; + kring = NMR(pna, s)[i]; netmap_monitor_add(mkring, kring, zmon); } } } } na->na_flags |= NAF_NETMAP_ON; } else { if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { - mkring = &NMR(na, t)[i]; + mkring = NMR(na, t)[i]; if (!nm_kring_pending_off(mkring)) continue; mkring->nr_mode = NKR_NETMAP_OFF; if (t == NR_TX) continue; /* we cannot access the parent krings if the parent * has left netmap mode. This is signaled by a NULL * pna pointer */ if (pna == NULL) continue; for_rx_tx(s) { if (i > nma_get_nrings(pna, s)) continue; if (mna->flags & nm_txrx2flag(s)) { - kring = &NMR(pna, s)[i]; + kring = NMR(pna, s)[i]; netmap_monitor_del(mkring, kring); } } } } } return 0; } /* **************************************************************** * functions specific for zero-copy monitors **************************************************************** */ /* * Common function for both zero-copy tx and rx nm_sync() * callbacks */ static int netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx) { struct netmap_kring *mkring = kring->zmon_list[tx].next; struct netmap_ring *ring = kring->ring, *mring; int error = 0; int rel_slots, free_slots, busy, sent = 0; u_int beg, end, i; u_int lim = kring->nkr_num_slots - 1, mlim; // = mkring->nkr_num_slots - 1; if (mkring == NULL) { RD(5, "NULL monitor on %s", kring->name); return 0; } mring = mkring->ring; mlim = mkring->nkr_num_slots - 1; /* get the relased slots (rel_slots) */ if (tx == NR_TX) { beg = kring->nr_hwtail + 1; error = kring->mon_sync(kring, flags); if (error) return error; end = kring->nr_hwtail + 1; } else { /* NR_RX */ beg = kring->nr_hwcur; end = kring->rhead; } rel_slots = end - beg; if (rel_slots < 0) rel_slots += kring->nkr_num_slots; if (!rel_slots) { /* no released slots, but we still need * to call rxsync if this is a rx ring */ goto out_rxsync; } /* we need to lock the monitor receive ring, since it * is the target of bot tx and rx traffic from the monitored * adapter */ mtx_lock(&mkring->q_lock); /* get the free slots available on the monitor ring */ i = mkring->nr_hwtail; busy = i - mkring->nr_hwcur; if (busy < 0) busy += mkring->nkr_num_slots; free_slots = mlim - busy; if (!free_slots) goto out; /* swap min(free_slots, rel_slots) slots */ if (free_slots < rel_slots) { beg += (rel_slots - free_slots); rel_slots = free_slots; } if (unlikely(beg >= kring->nkr_num_slots)) beg -= kring->nkr_num_slots; sent = rel_slots; for ( ; rel_slots; rel_slots--) { struct netmap_slot *s = &ring->slot[beg]; struct netmap_slot *ms = &mring->slot[i]; uint32_t tmp; tmp = ms->buf_idx; ms->buf_idx = s->buf_idx; s->buf_idx = tmp; ND(5, "beg %d buf_idx %d", beg, tmp); tmp = ms->len; 
ms->len = s->len; s->len = tmp; s->flags |= NS_BUF_CHANGED; beg = nm_next(beg, lim); i = nm_next(i, mlim); } mb(); mkring->nr_hwtail = i; out: mtx_unlock(&mkring->q_lock); if (sent) { /* notify the new frames to the monitor */ mkring->nm_notify(mkring, 0); } out_rxsync: if (tx == NR_RX) error = kring->mon_sync(kring, flags); return error; } /* callback used to replace the nm_sync callback in the monitored tx rings */ static int netmap_zmon_parent_txsync(struct netmap_kring *kring, int flags) { return netmap_zmon_parent_sync(kring, flags, NR_TX); } /* callback used to replace the nm_sync callback in the monitored rx rings */ static int netmap_zmon_parent_rxsync(struct netmap_kring *kring, int flags) { return netmap_zmon_parent_sync(kring, flags, NR_RX); } static int netmap_zmon_reg(struct netmap_adapter *na, int onoff) { return netmap_monitor_reg_common(na, onoff, 1 /* zcopy */); } /* nm_dtor callback for monitors */ static void netmap_zmon_dtor(struct netmap_adapter *na) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)na; struct netmap_priv_d *priv = &mna->priv; struct netmap_adapter *pna = priv->np_na; netmap_adapter_put(pna); } /* **************************************************************** * functions specific for copy monitors **************************************************************** */ static void netmap_monitor_parent_sync(struct netmap_kring *kring, u_int first_new, int new_slots) { u_int j; for (j = 0; j < kring->n_monitors; j++) { struct netmap_kring *mkring = kring->monitors[j]; u_int i, mlim, beg; int free_slots, busy, sent = 0, m; u_int lim = kring->nkr_num_slots - 1; struct netmap_ring *ring = kring->ring, *mring = mkring->ring; u_int max_len = NETMAP_BUF_SIZE(mkring->na); mlim = mkring->nkr_num_slots - 1; /* we need to lock the monitor receive ring, since it * is the target of bot tx and rx traffic from the monitored * adapter */ mtx_lock(&mkring->q_lock); /* get the free slots available on the monitor ring */ i = mkring->nr_hwtail; busy = i - mkring->nr_hwcur; if (busy < 0) busy += mkring->nkr_num_slots; free_slots = mlim - busy; if (!free_slots) goto out; /* copy min(free_slots, new_slots) slots */ m = new_slots; beg = first_new; if (free_slots < m) { beg += (m - free_slots); if (beg >= kring->nkr_num_slots) beg -= kring->nkr_num_slots; m = free_slots; } for ( ; m; m--) { struct netmap_slot *s = &ring->slot[beg]; struct netmap_slot *ms = &mring->slot[i]; u_int copy_len = s->len; char *src = NMB(kring->na, s), *dst = NMB(mkring->na, ms); if (unlikely(copy_len > max_len)) { RD(5, "%s->%s: truncating %d to %d", kring->name, mkring->name, copy_len, max_len); copy_len = max_len; } memcpy(dst, src, copy_len); ms->len = copy_len; sent++; beg = nm_next(beg, lim); i = nm_next(i, mlim); } mb(); mkring->nr_hwtail = i; out: mtx_unlock(&mkring->q_lock); if (sent) { /* notify the new frames to the monitor */ mkring->nm_notify(mkring, 0); } } } /* callback used to replace the nm_sync callback in the monitored tx rings */ static int netmap_monitor_parent_txsync(struct netmap_kring *kring, int flags) { u_int first_new; int new_slots; /* get the new slots */ if (kring->n_monitors > 0) { first_new = kring->nr_hwcur; new_slots = kring->rhead - first_new; if (new_slots < 0) new_slots += kring->nkr_num_slots; if (new_slots) netmap_monitor_parent_sync(kring, first_new, new_slots); } if (kring->zmon_list[NR_TX].next != NULL) { return netmap_zmon_parent_txsync(kring, flags); } return kring->mon_sync(kring, flags); } /* callback used to replace the nm_sync callback 
in the monitored rx rings */ static int netmap_monitor_parent_rxsync(struct netmap_kring *kring, int flags) { u_int first_new; int new_slots, error; /* get the new slots */ if (kring->zmon_list[NR_RX].next != NULL) { error = netmap_zmon_parent_rxsync(kring, flags); } else { error = kring->mon_sync(kring, flags); } if (error) return error; if (kring->n_monitors > 0) { first_new = kring->mon_tail; new_slots = kring->nr_hwtail - first_new; if (new_slots < 0) new_slots += kring->nkr_num_slots; if (new_slots) netmap_monitor_parent_sync(kring, first_new, new_slots); kring->mon_tail = kring->nr_hwtail; } return 0; } /* callback used to replace the nm_notify() callback in the monitored rx rings */ static int netmap_monitor_parent_notify(struct netmap_kring *kring, int flags) { int (*notify)(struct netmap_kring*, int); ND(5, "%s %x", kring->name, flags); /* ?xsync callbacks have tryget called by their callers * (NIOCREGIF and poll()), but here we have to call it * by ourself */ if (nm_kr_tryget(kring, 0, NULL)) { /* in all cases, just skip the sync */ return NM_IRQ_COMPLETED; } if (kring->n_monitors > 0) { netmap_monitor_parent_rxsync(kring, NAF_FORCE_READ); } if (nm_monitor_none(kring)) { /* we are no longer monitoring this ring, so both * mon_sync and mon_notify are NULL */ notify = kring->nm_notify; } else { notify = kring->mon_notify; } nm_kr_put(kring); return notify(kring, flags); } static int netmap_monitor_reg(struct netmap_adapter *na, int onoff) { return netmap_monitor_reg_common(na, onoff, 0 /* no zcopy */); } static void netmap_monitor_dtor(struct netmap_adapter *na) { struct netmap_monitor_adapter *mna = (struct netmap_monitor_adapter *)na; struct netmap_priv_d *priv = &mna->priv; struct netmap_adapter *pna = priv->np_na; netmap_adapter_put(pna); } -/* check if nmr is a request for a monitor adapter that we can satisfy */ +/* check if req is a request for a monitor adapter that we can satisfy */ int -netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, - struct netmap_mem_d *nmd, int create) +netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na, + struct netmap_mem_d *nmd, int create) { - struct nmreq pnmr; + struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body; + struct nmreq_register preq; struct netmap_adapter *pna; /* parent adapter */ struct netmap_monitor_adapter *mna; struct ifnet *ifp = NULL; int error; - int zcopy = (nmr->nr_flags & NR_ZCOPY_MON); + int zcopy = (req->nr_flags & NR_ZCOPY_MON); char monsuff[10] = ""; if (zcopy) { - nmr->nr_flags |= (NR_MONITOR_TX | NR_MONITOR_RX); + req->nr_flags |= (NR_MONITOR_TX | NR_MONITOR_RX); } - if ((nmr->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX)) == 0) { + if ((req->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX)) == 0) { ND("not a monitor"); return 0; } /* this is a request for a monitor adapter */ - ND("flags %x", nmr->nr_flags); + ND("flags %lx", req->nr_flags); - /* first, try to find the adapter that we want to monitor - * We use the same nmr, after we have turned off the monitor flags. + /* First, try to find the adapter that we want to monitor. + * We use the same req, after we have turned off the monitor flags. * In this way we can potentially monitor everything netmap understands, * except other monitors. 
*/ - memcpy(&pnmr, nmr, sizeof(pnmr)); - pnmr.nr_flags &= ~(NR_MONITOR_TX | NR_MONITOR_RX | NR_ZCOPY_MON); - error = netmap_get_na(&pnmr, &pna, &ifp, nmd, create); + memcpy(&preq, req, sizeof(preq)); + preq.nr_flags &= ~(NR_MONITOR_TX | NR_MONITOR_RX | NR_ZCOPY_MON); + hdr->nr_body = (uint64_t)&preq; + error = netmap_get_na(hdr, &pna, &ifp, nmd, create); + hdr->nr_body = (uint64_t)req; if (error) { D("parent lookup failed: %d", error); return error; } ND("found parent: %s", pna->name); if (!nm_netmap_on(pna)) { /* parent not in netmap mode */ /* XXX we can wait for the parent to enter netmap mode, * by intercepting its nm_register callback (2014-03-16) */ D("%s not in netmap mode", pna->name); error = EINVAL; goto put_out; } mna = nm_os_malloc(sizeof(*mna)); if (mna == NULL) { D("memory error"); error = ENOMEM; goto put_out; } mna->priv.np_na = pna; /* grab all the rings we need in the parent */ - error = netmap_interp_ringid(&mna->priv, nmr->nr_ringid, nmr->nr_flags); + error = netmap_interp_ringid(&mna->priv, req->nr_mode, req->nr_ringid, + req->nr_flags); if (error) { D("ringid error"); goto free_out; } if (mna->priv.np_qlast[NR_TX] - mna->priv.np_qfirst[NR_TX] == 1) { snprintf(monsuff, 10, "-%d", mna->priv.np_qfirst[NR_TX]); } snprintf(mna->up.name, sizeof(mna->up.name), "%s%s/%s%s%s", pna->name, monsuff, zcopy ? "z" : "", - (nmr->nr_flags & NR_MONITOR_RX) ? "r" : "", - (nmr->nr_flags & NR_MONITOR_TX) ? "t" : ""); + (req->nr_flags & NR_MONITOR_RX) ? "r" : "", + (req->nr_flags & NR_MONITOR_TX) ? "t" : ""); /* the monitor supports the host rings iff the parent does */ mna->up.na_flags |= (pna->na_flags & NAF_HOST_RINGS); /* a do-nothing txsync: monitors cannot be used to inject packets */ mna->up.nm_txsync = netmap_monitor_txsync; mna->up.nm_rxsync = netmap_monitor_rxsync; mna->up.nm_krings_create = netmap_monitor_krings_create; mna->up.nm_krings_delete = netmap_monitor_krings_delete; mna->up.num_tx_rings = 1; // XXX what should we do here with chained zmons? 
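For context, a minimal userspace sketch of how such a monitor port can be requested through the nm_open() helper from <net/netmap_user.h>. This is illustrative only: the interface name em0, the iteration count and the print statement are placeholders, and the "/rt" name suffix corresponds to the NR_MONITOR_RX | NR_MONITOR_TX flags handled above (a "z" in the suffix would request the zero-copy variant, NR_ZCOPY_MON).

#include <poll.h>
#include <stdio.h>
#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>

int
main(void)
{
	/* "netmap:em0/rt": copy monitor of both the rx and tx rings of em0
	 * (placeholder interface name). */
	struct nm_desc *d = nm_open("netmap:em0/rt", NULL, 0, NULL);
	struct pollfd pfd;
	int i;

	if (d == NULL) {
		fprintf(stderr, "nm_open failed\n");
		return 1;
	}
	pfd.fd = d->fd;
	pfd.events = POLLIN;
	for (i = 0; i < 10; i++) {	/* poll a few times, then leave */
		unsigned char *buf;
		struct nm_pkthdr h;

		poll(&pfd, 1, 1000);
		while ((buf = nm_nextpkt(d, &h)) != NULL)
			printf("monitored %u bytes at %p\n",
			    (unsigned)h.len, (void *)buf);
	}
	nm_close(d);
	return 0;
}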
	/* we set the number of our rx_rings to be max(num_rx_rings, num_tx_rings)
	 * in the parent
	 */
	mna->up.num_rx_rings = pna->num_rx_rings;
	if (pna->num_tx_rings > pna->num_rx_rings)
		mna->up.num_rx_rings = pna->num_tx_rings;
	/* by default, the number of slots is the same as in
	 * the parent rings, but the user may ask for a different
	 * number
	 */
-	mna->up.num_tx_desc = nmr->nr_tx_slots;
+	mna->up.num_tx_desc = req->nr_tx_slots;
	nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc,
			1, NM_MONITOR_MAXSLOTS, NULL);
-	mna->up.num_rx_desc = nmr->nr_rx_slots;
+	mna->up.num_rx_desc = req->nr_rx_slots;
	nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc,
			1, NM_MONITOR_MAXSLOTS, NULL);
	if (zcopy) {
		mna->up.nm_register = netmap_zmon_reg;
		mna->up.nm_dtor = netmap_zmon_dtor;
		/* to have zero copy, we need to use the same memory allocator
		 * as the monitored port
		 */
		mna->up.nm_mem = netmap_mem_get(pna->nm_mem);
		/* and the allocator cannot be changed */
		mna->up.na_flags |= NAF_MEM_OWNER;
	} else {
		mna->up.nm_register = netmap_monitor_reg;
		mna->up.nm_dtor = netmap_monitor_dtor;
		mna->up.nm_mem = netmap_mem_private_new(
				mna->up.num_tx_rings,
				mna->up.num_tx_desc,
				mna->up.num_rx_rings,
				mna->up.num_rx_desc,
				0, /* extra bufs */
				0, /* pipes */
				&error);
		if (mna->up.nm_mem == NULL)
			goto put_out;
	}

	error = netmap_attach_common(&mna->up);
	if (error) {
		D("attach_common error");
		goto mem_put_out;
	}

	/* remember the traffic directions we have to monitor */
-	mna->flags = (nmr->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX | NR_ZCOPY_MON));
+	mna->flags = (req->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX | NR_ZCOPY_MON));

	*na = &mna->up;
	netmap_adapter_get(*na);
	/* keep the reference to the parent */
	ND("monitor ok");

	/* drop the reference to the ifp, if any */
	if (ifp)
		if_rele(ifp);

	return 0;

mem_put_out:
	netmap_mem_put(mna->up.nm_mem);
free_out:
	nm_os_free(mna);
put_out:
	netmap_unget_na(pna, ifp);
	return error;
}
#endif /* WITH_MONITOR */
Index: head/sys/dev/netmap/netmap_pipe.c
===================================================================
--- head/sys/dev/netmap/netmap_pipe.c (revision 332422)
+++ head/sys/dev/netmap/netmap_pipe.c (revision 332423)
@@ -1,712 +1,819 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2014-2016 Giuseppe Lettieri
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
*/ /* $FreeBSD$ */ #if defined(__FreeBSD__) #include /* prerequisite */ #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include #include #include #include #include #include #include /* sockaddrs */ #include #include #include /* bus_dmamap_* */ #include #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #ifdef WITH_PIPES #define NM_PIPE_MAXSLOTS 4096 +#define NM_PIPE_MAXRINGS 256 static int netmap_default_pipes = 0; /* ignored, kept for compatibility */ SYSBEGIN(vars_pipes); SYSCTL_DECL(_dev_netmap); SYSCTL_INT(_dev_netmap, OID_AUTO, default_pipes, CTLFLAG_RW, &netmap_default_pipes, 0, "For compatibility only"); SYSEND; /* allocate the pipe array in the parent adapter */ static int nm_pipe_alloc(struct netmap_adapter *na, u_int npipes) { size_t old_len, len; struct netmap_pipe_adapter **npa; if (npipes <= na->na_max_pipes) /* we already have more entries that requested */ return 0; if (npipes < na->na_next_pipe || npipes > NM_MAXPIPES) return EINVAL; old_len = sizeof(struct netmap_pipe_adapter *)*na->na_max_pipes; len = sizeof(struct netmap_pipe_adapter *) * npipes; npa = nm_os_realloc(na->na_pipes, len, old_len); if (npa == NULL) return ENOMEM; na->na_pipes = npa; na->na_max_pipes = npipes; return 0; } /* deallocate the parent array in the parent adapter */ void netmap_pipe_dealloc(struct netmap_adapter *na) { if (na->na_pipes) { if (na->na_next_pipe > 0) { D("freeing not empty pipe array for %s (%d dangling pipes)!", na->name, na->na_next_pipe); } nm_os_free(na->na_pipes); na->na_pipes = NULL; na->na_max_pipes = 0; na->na_next_pipe = 0; } } /* find a pipe endpoint with the given id among the parent's pipes */ static struct netmap_pipe_adapter * -netmap_pipe_find(struct netmap_adapter *parent, u_int pipe_id) +netmap_pipe_find(struct netmap_adapter *parent, const char *pipe_id) { int i; struct netmap_pipe_adapter *na; for (i = 0; i < parent->na_next_pipe; i++) { + const char *na_pipe_id; na = parent->na_pipes[i]; - if (na->id == pipe_id) { + na_pipe_id = strrchr(na->up.name, + na->role == NM_PIPE_ROLE_MASTER ? '{' : '}'); + KASSERT(na_pipe_id != NULL, ("Invalid pipe name")); + ++na_pipe_id; + if (!strcmp(na_pipe_id, pipe_id)) { return na; } } return NULL; } /* add a new pipe endpoint to the parent array */ static int netmap_pipe_add(struct netmap_adapter *parent, struct netmap_pipe_adapter *na) { if (parent->na_next_pipe >= parent->na_max_pipes) { u_int npipes = parent->na_max_pipes ? 
2*parent->na_max_pipes : 2; int error = nm_pipe_alloc(parent, npipes); if (error) return error; } parent->na_pipes[parent->na_next_pipe] = na; na->parent_slot = parent->na_next_pipe; parent->na_next_pipe++; return 0; } /* remove the given pipe endpoint from the parent array */ static void netmap_pipe_remove(struct netmap_adapter *parent, struct netmap_pipe_adapter *na) { u_int n; n = --parent->na_next_pipe; if (n != na->parent_slot) { struct netmap_pipe_adapter **p = &parent->na_pipes[na->parent_slot]; *p = parent->na_pipes[n]; (*p)->parent_slot = na->parent_slot; } parent->na_pipes[n] = NULL; } int netmap_pipe_txsync(struct netmap_kring *txkring, int flags) { struct netmap_kring *rxkring = txkring->pipe; - u_int limit; /* slots to transfer */ - u_int j, k, lim_tx = txkring->nkr_num_slots - 1, - lim_rx = rxkring->nkr_num_slots - 1; - int m, busy; + u_int k, lim = txkring->nkr_num_slots - 1; + int m; /* slots to transfer */ struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring; ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name); - ND(2, "before: hwcur %d hwtail %d cur %d head %d tail %d", txkring->nr_hwcur, txkring->nr_hwtail, + ND(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d", + txkring->nr_hwcur, txkring->nr_hwtail, txkring->rcur, txkring->rhead, txkring->rtail); - j = rxkring->nr_hwtail; /* RX */ - k = txkring->nr_hwcur; /* TX */ m = txkring->rhead - txkring->nr_hwcur; /* new slots */ if (m < 0) m += txkring->nkr_num_slots; - limit = m; - m = lim_rx; /* max avail space on destination */ - busy = j - rxkring->nr_hwcur; /* busy slots */ - if (busy < 0) - busy += rxkring->nkr_num_slots; - m -= busy; /* subtract busy slots */ - ND(2, "m %d limit %d", m, limit); - if (m < limit) - limit = m; - if (limit == 0) { - /* either the rxring is full, or nothing to send */ + if (m == 0) { + /* nothing to send */ return 0; } - while (limit-- > 0) { - struct netmap_slot *rs = &rxring->slot[j]; + for (k = txkring->nr_hwcur; m; m--, k = nm_next(k, lim)) { + struct netmap_slot *rs = &rxring->slot[k]; struct netmap_slot *ts = &txring->slot[k]; - struct netmap_slot tmp; - __builtin_prefetch(ts + 1); + rs->len = ts->len; + rs->ptr = ts->ptr; - /* swap the slots and report the buffer change */ - tmp = *rs; - tmp.flags |= NS_BUF_CHANGED; - *rs = *ts; - rs->flags |= NS_BUF_CHANGED; - *ts = tmp; - - j = nm_next(j, lim_rx); - k = nm_next(k, lim_tx); + if (ts->flags & NS_BUF_CHANGED) { + rs->buf_idx = ts->buf_idx; + rs->flags |= NS_BUF_CHANGED; + ts->flags &= ~NS_BUF_CHANGED; + } } mb(); /* make sure the slots are updated before publishing them */ - rxkring->nr_hwtail = j; + rxkring->nr_hwtail = k; txkring->nr_hwcur = k; - txkring->nr_hwtail = nm_prev(k, lim_tx); - ND(2, "after: hwcur %d hwtail %d cur %d head %d tail %d j %d", txkring->nr_hwcur, txkring->nr_hwtail, - txkring->rcur, txkring->rhead, txkring->rtail, j); + ND(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d", + txkring->nr_hwcur, txkring->nr_hwtail, + txkring->rcur, txkring->rhead, txkring->rtail, k); - mb(); /* make sure rxkring->nr_hwtail is updated before notifying */ rxkring->nm_notify(rxkring, 0); return 0; } int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags) { struct netmap_kring *txkring = rxkring->pipe; - uint32_t oldhwcur = rxkring->nr_hwcur; + u_int k, lim = rxkring->nkr_num_slots - 1; + int m; /* slots to release */ + struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring; - ND("%s %x <- %s", rxkring->name, flags, txkring->name); - rxkring->nr_hwcur = 
rxkring->rhead; /* recover user-relased slots */ - ND(5, "hwcur %d hwtail %d cur %d head %d tail %d", rxkring->nr_hwcur, rxkring->nr_hwtail, + ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name); + ND(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d", + rxkring->nr_hwcur, rxkring->nr_hwtail, rxkring->rcur, rxkring->rhead, rxkring->rtail); - mb(); /* paired with the first mb() in txsync */ - if (oldhwcur != rxkring->nr_hwcur) { - /* we have released some slots, notify the other end */ - mb(); /* make sure nr_hwcur is updated before notifying */ - txkring->nm_notify(txkring, 0); + m = rxkring->rhead - rxkring->nr_hwcur; /* released slots */ + if (m < 0) + m += rxkring->nkr_num_slots; + + if (m == 0) { + /* nothing to release */ + return 0; } - return 0; + + for (k = rxkring->nr_hwcur; m; m--, k = nm_next(k, lim)) { + struct netmap_slot *rs = &rxring->slot[k]; + struct netmap_slot *ts = &txring->slot[k]; + + if (rs->flags & NS_BUF_CHANGED) { + /* copy the slot and report the buffer change */ + *ts = *rs; + rs->flags &= ~NS_BUF_CHANGED; + } + } + + mb(); /* make sure the slots are updated before publishing them */ + txkring->nr_hwtail = nm_prev(k, lim); + rxkring->nr_hwcur = k; + + ND(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d", + rxkring->nr_hwcur, rxkring->nr_hwtail, + rxkring->rcur, rxkring->rhead, rxkring->rtail, k); + + txkring->nm_notify(txkring, 0); + + return 0; } /* Pipe endpoints are created and destroyed together, so that endopoints do not * have to check for the existence of their peer at each ?xsync. * * To play well with the existing netmap infrastructure (refcounts etc.), we * adopt the following strategy: * * 1) The first endpoint that is created also creates the other endpoint and * grabs a reference to it. * * state A) user1 --> endpoint1 --> endpoint2 * * 2) If, starting from state A, endpoint2 is then registered, endpoint1 gives * its reference to the user: * * state B) user1 --> endpoint1 endpoint2 <--- user2 * * 3) Assume that, starting from state B endpoint2 is closed. In the unregister * callback endpoint2 notes that endpoint1 is still active and adds a reference * from endpoint1 to itself. When user2 then releases her own reference, * endpoint2 is not destroyed and we are back to state A. A symmetrical state * would be reached if endpoint1 were released instead. * * 4) If, starting from state A, endpoint1 is closed, the destructor notes that * it owns a reference to endpoint2 and releases it. * * Something similar goes on for the creation and destruction of the krings. */ /* netmap_pipe_krings_create. * * There are two cases: * * 1) state is * * usr1 --> e1 --> e2 * * and we are e1. We have to create both sets * of krings. * * 2) state is * * usr1 --> e1 --> e2 * * and we are e2. e1 is certainly registered and our * krings already exist. Nothing to do. 
*/ static int netmap_pipe_krings_create(struct netmap_adapter *na) { struct netmap_pipe_adapter *pna = (struct netmap_pipe_adapter *)na; struct netmap_adapter *ona = &pna->peer->up; int error = 0; enum txrx t; if (pna->peer_ref) { int i; /* case 1) above */ ND("%p: case 1, create both ends", na); error = netmap_krings_create(na, 0); if (error) goto err; /* create the krings of the other end */ error = netmap_krings_create(ona, 0); if (error) goto del_krings1; /* cross link the krings */ for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ for (i = 0; i < nma_get_nrings(na, t); i++) { - NMR(na, t)[i].pipe = NMR(ona, r) + i; - NMR(ona, r)[i].pipe = NMR(na, t) + i; + NMR(na, t)[i]->pipe = NMR(ona, r)[i]; + NMR(ona, r)[i]->pipe = NMR(na, t)[i]; + /* mark all peer-adapter rings as fake */ + NMR(ona, r)[i]->nr_kflags |= NKR_FAKERING; } } } return 0; del_krings1: netmap_krings_delete(na); err: return error; } /* netmap_pipe_reg. * * There are two cases on registration (onoff==1) * * 1.a) state is * * usr1 --> e1 --> e2 * * and we are e1. Create the needed rings of the * other end. * * 1.b) state is * * usr1 --> e1 --> e2 <-- usr2 * * and we are e2. Drop the ref e1 is holding. * * There are two additional cases on unregister (onoff==0) * * 2.a) state is * * usr1 --> e1 --> e2 * * and we are e1. Nothing special to do, e2 will * be cleaned up by the destructor of e1. * * 2.b) state is * * usr1 --> e1 e2 <-- usr2 * * and we are either e1 or e2. Add a ref from the - * other end and hide our rings. + * other end. */ static int netmap_pipe_reg(struct netmap_adapter *na, int onoff) { struct netmap_pipe_adapter *pna = (struct netmap_pipe_adapter *)na; struct netmap_adapter *ona = &pna->peer->up; int i, error = 0; enum txrx t; ND("%p: onoff %d", na, onoff); if (onoff) { for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t); i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; if (nm_kring_pending_on(kring)) { /* mark the peer ring as needed */ kring->pipe->nr_kflags |= NKR_NEEDRING; } } } - /* create all missing needed rings on the other end */ + /* create all missing needed rings on the other end. + * Either our end, or the other, has been marked as + * fake, so the allocation will not be done twice. 
+ */ error = netmap_mem_rings_create(ona); if (error) return error; /* In case of no error we put our rings in netmap mode */ for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; - + struct netmap_kring *kring = NMR(na, t)[i]; if (nm_kring_pending_on(kring)) { + struct netmap_kring *sring, *dring; + + /* copy the buffers from the non-fake ring */ + if (kring->nr_kflags & NKR_FAKERING) { + sring = kring->pipe; + dring = kring; + } else { + sring = kring; + dring = kring->pipe; + } + memcpy(dring->ring->slot, + sring->ring->slot, + sizeof(struct netmap_slot) * + sring->nkr_num_slots); + /* mark both rings as fake and needed, + * so that buffers will not be + * deleted by the standard machinery + * (we will delete them by ourselves in + * netmap_pipe_krings_delete) + */ + sring->nr_kflags |= + (NKR_FAKERING | NKR_NEEDRING); + dring->nr_kflags |= + (NKR_FAKERING | NKR_NEEDRING); kring->nr_mode = NKR_NETMAP_ON; } } } if (na->active_fds == 0) na->na_flags |= NAF_NETMAP_ON; } else { if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; if (nm_kring_pending_off(kring)) { kring->nr_mode = NKR_NETMAP_OFF; - /* mark the peer ring as no longer needed by us - * (it may still be kept if sombody else is using it) - */ - if (kring->pipe) { - kring->pipe->nr_kflags &= ~NKR_NEEDRING; - } } } } - /* delete all the peer rings that are no longer needed */ - netmap_mem_rings_delete(ona); } if (na->active_fds) { ND("active_fds %d", na->active_fds); return 0; } if (pna->peer_ref) { ND("%p: case 1.a or 2.a, nothing to do", na); return 0; } if (onoff) { ND("%p: case 1.b, drop peer", na); pna->peer->peer_ref = 0; netmap_adapter_put(na); } else { ND("%p: case 2.b, grab peer", na); netmap_adapter_get(na); pna->peer->peer_ref = 1; } return error; } /* netmap_pipe_krings_delete. * * There are two cases: * * 1) state is * * usr1 --> e1 --> e2 * * and we are e1 (e2 is not registered, so krings_delete cannot be * called on it); * * 2) state is * * usr1 --> e1 e2 <-- usr2 * * and we are either e1 or e2. * * In the former case we have to also delete the krings of e2; - * in the latter case we do nothing (note that our krings - * have already been hidden in the unregister callback). + * in the latter case we do nothing. */ static void netmap_pipe_krings_delete(struct netmap_adapter *na) { struct netmap_pipe_adapter *pna = (struct netmap_pipe_adapter *)na; - struct netmap_adapter *ona; /* na of the other end */ + struct netmap_adapter *sna, *ona; /* na of the other end */ + enum txrx t; + int i; if (!pna->peer_ref) { ND("%p: case 2, kept alive by peer", na); return; } + ona = &pna->peer->up; /* case 1) above */ ND("%p: case 1, deleting everything", na); + /* To avoid double-frees we zero-out all the buffers in the kernel part + * of each ring. The reason is this: If the user is behaving correctly, + * all buffers are found in exactly one slot in the userspace part of + * some ring. If the user is not behaving correctly, we cannot release + * buffers cleanly anyway. In the latter case, the allocator will + * return to a clean state only when all its users will close. 
+ */ + sna = na; +cleanup: + for_rx_tx(t) { + for (i = 0; i < nma_get_nrings(sna, t) + 1; i++) { + struct netmap_kring *kring = NMR(sna, t)[i]; + struct netmap_ring *ring = kring->ring; + uint32_t j, lim = kring->nkr_num_slots - 1; + + ND("%s ring %p hwtail %u hwcur %u", + kring->name, ring, kring->nr_hwtail, kring->nr_hwcur); + + if (ring == NULL) + continue; + + if (kring->nr_hwtail == kring->nr_hwcur) + ring->slot[kring->nr_hwtail].buf_idx = 0; + + for (j = nm_next(kring->nr_hwtail, lim); + j != kring->nr_hwcur; + j = nm_next(j, lim)) + { + ND("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx); + ring->slot[j].buf_idx = 0; + } + kring->nr_kflags &= ~(NKR_FAKERING | NKR_NEEDRING); + } + + } + if (sna != ona && ona->tx_rings) { + sna = ona; + goto cleanup; + } + + netmap_mem_rings_delete(na); netmap_krings_delete(na); /* also zeroes tx_rings etc. */ - ona = &pna->peer->up; + if (ona->tx_rings == NULL) { /* already deleted, we must be on an * cleanup-after-error path */ return; } + netmap_mem_rings_delete(ona); netmap_krings_delete(ona); } static void netmap_pipe_dtor(struct netmap_adapter *na) { struct netmap_pipe_adapter *pna = (struct netmap_pipe_adapter *)na; ND("%p %p", na, pna->parent_ifp); if (pna->peer_ref) { ND("%p: clean up peer", na); pna->peer_ref = 0; netmap_adapter_put(&pna->peer->up); } - if (pna->role == NR_REG_PIPE_MASTER) + if (pna->role == NM_PIPE_ROLE_MASTER) netmap_pipe_remove(pna->parent, pna); if (pna->parent_ifp) if_rele(pna->parent_ifp); netmap_adapter_put(pna->parent); pna->parent = NULL; } int -netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, +netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create) { - struct nmreq pnmr; + struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body; struct netmap_adapter *pna; /* parent adapter */ - struct netmap_pipe_adapter *mna, *sna, *req; + struct netmap_pipe_adapter *mna, *sna, *reqna; struct ifnet *ifp = NULL; - u_int pipe_id; - int role = nmr->nr_flags & NR_REG_MASK; + const char *pipe_id = NULL; + int role = 0; int error, retries = 0; + char *cbra; - ND("flags %x", nmr->nr_flags); + /* Try to parse the pipe syntax 'xx{yy' or 'xx}yy'. */ + cbra = strrchr(hdr->nr_name, '{'); + if (cbra != NULL) { + role = NM_PIPE_ROLE_MASTER; + } else { + cbra = strrchr(hdr->nr_name, '}'); + if (cbra != NULL) { + role = NM_PIPE_ROLE_SLAVE; + } else { + ND("not a pipe"); + return 0; + } + } + pipe_id = cbra + 1; + if (*pipe_id == '\0' || cbra == hdr->nr_name) { + /* Bracket is the last character, so pipe name is missing; + * or bracket is the first character, so base port name + * is missing. */ + return EINVAL; + } - if (role != NR_REG_PIPE_MASTER && role != NR_REG_PIPE_SLAVE) { - ND("not a pipe"); - return 0; + if (req->nr_mode != NR_REG_ALL_NIC && req->nr_mode != NR_REG_ONE_NIC) { + /* We only accept modes involving hardware rings. */ + return EINVAL; } - role = nmr->nr_flags & NR_REG_MASK; /* first, try to find the parent adapter */ - bzero(&pnmr, sizeof(pnmr)); - memcpy(&pnmr.nr_name, nmr->nr_name, IFNAMSIZ); - /* pass to parent the requested number of pipes */ - pnmr.nr_arg1 = nmr->nr_arg1; for (;;) { + char nr_name_orig[NETMAP_REQ_IFNAMSIZ]; int create_error; - error = netmap_get_na(&pnmr, &pna, &ifp, nmd, create); + /* Temporarily remove the pipe suffix. */ + strncpy(nr_name_orig, hdr->nr_name, sizeof(nr_name_orig)); + *cbra = '\0'; + error = netmap_get_na(hdr, &pna, &ifp, nmd, create); + /* Restore the pipe suffix. 
*/ + strncpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name)); if (!error) break; if (error != ENXIO || retries++) { ND("parent lookup failed: %d", error); return error; } ND("try to create a persistent vale port"); /* create a persistent vale port and try again */ + *cbra = '\0'; NMG_UNLOCK(); - create_error = netmap_vi_create(&pnmr, 1 /* autodelete */); + create_error = netmap_vi_create(hdr, 1 /* autodelete */); NMG_LOCK(); + strncpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name)); if (create_error && create_error != EEXIST) { if (create_error != EOPNOTSUPP) { D("failed to create a persistent vale port: %d", create_error); } return error; } } if (NETMAP_OWNED_BY_KERN(pna)) { ND("parent busy"); error = EBUSY; goto put_out; } /* next, lookup the pipe id in the parent list */ - req = NULL; - pipe_id = nmr->nr_ringid & NETMAP_RING_MASK; + reqna = NULL; mna = netmap_pipe_find(pna, pipe_id); if (mna) { if (mna->role == role) { - ND("found %d directly at %d", pipe_id, mna->parent_slot); - req = mna; + ND("found %s directly at %d", pipe_id, mna->parent_slot); + reqna = mna; } else { - ND("found %d indirectly at %d", pipe_id, mna->parent_slot); - req = mna->peer; + ND("found %s indirectly at %d", pipe_id, mna->parent_slot); + reqna = mna->peer; } /* the pipe we have found already holds a ref to the parent, * so we need to drop the one we got from netmap_get_na() */ netmap_unget_na(pna, ifp); goto found; } - ND("pipe %d not found, create %d", pipe_id, create); + ND("pipe %s not found, create %d", pipe_id, create); if (!create) { error = ENODEV; goto put_out; } /* we create both master and slave. * The endpoint we were asked for holds a reference to * the other one. */ mna = nm_os_malloc(sizeof(*mna)); if (mna == NULL) { error = ENOMEM; goto put_out; } - snprintf(mna->up.name, sizeof(mna->up.name), "%s{%d", pna->name, pipe_id); + snprintf(mna->up.name, sizeof(mna->up.name), "%s{%s", pna->name, pipe_id); - mna->id = pipe_id; - mna->role = NR_REG_PIPE_MASTER; + mna->role = NM_PIPE_ROLE_MASTER; mna->parent = pna; mna->parent_ifp = ifp; mna->up.nm_txsync = netmap_pipe_txsync; mna->up.nm_rxsync = netmap_pipe_rxsync; mna->up.nm_register = netmap_pipe_reg; mna->up.nm_dtor = netmap_pipe_dtor; mna->up.nm_krings_create = netmap_pipe_krings_create; mna->up.nm_krings_delete = netmap_pipe_krings_delete; mna->up.nm_mem = netmap_mem_get(pna->nm_mem); mna->up.na_flags |= NAF_MEM_OWNER; mna->up.na_lut = pna->na_lut; - mna->up.num_tx_rings = 1; - mna->up.num_rx_rings = 1; - mna->up.num_tx_desc = nmr->nr_tx_slots; + mna->up.num_tx_rings = req->nr_tx_rings; + nm_bound_var(&mna->up.num_tx_rings, 1, + 1, NM_PIPE_MAXRINGS, NULL); + mna->up.num_rx_rings = req->nr_rx_rings; + nm_bound_var(&mna->up.num_rx_rings, 1, + 1, NM_PIPE_MAXRINGS, NULL); + mna->up.num_tx_desc = req->nr_tx_slots; nm_bound_var(&mna->up.num_tx_desc, pna->num_tx_desc, 1, NM_PIPE_MAXSLOTS, NULL); - mna->up.num_rx_desc = nmr->nr_rx_slots; + mna->up.num_rx_desc = req->nr_rx_slots; nm_bound_var(&mna->up.num_rx_desc, pna->num_rx_desc, 1, NM_PIPE_MAXSLOTS, NULL); error = netmap_attach_common(&mna->up); if (error) goto free_mna; /* register the master with the parent */ error = netmap_pipe_add(pna, mna); if (error) goto free_mna; /* create the slave */ sna = nm_os_malloc(sizeof(*mna)); if (sna == NULL) { error = ENOMEM; goto unregister_mna; } /* most fields are the same, copy from master and then fix */ *sna = *mna; sna->up.nm_mem = netmap_mem_get(mna->up.nm_mem); - snprintf(sna->up.name, sizeof(sna->up.name), "%s}%d", pna->name, pipe_id); - sna->role = 
NR_REG_PIPE_SLAVE; + /* swap the number of tx/rx rings */ + sna->up.num_tx_rings = mna->up.num_rx_rings; + sna->up.num_rx_rings = mna->up.num_tx_rings; + snprintf(sna->up.name, sizeof(sna->up.name), "%s}%s", pna->name, pipe_id); + sna->role = NM_PIPE_ROLE_SLAVE; error = netmap_attach_common(&sna->up); if (error) goto free_sna; /* join the two endpoints */ mna->peer = sna; sna->peer = mna; /* we already have a reference to the parent, but we * need another one for the other endpoint we created */ netmap_adapter_get(pna); /* likewise for the ifp, if any */ if (ifp) if_ref(ifp); - if (role == NR_REG_PIPE_MASTER) { - req = mna; + if (role == NM_PIPE_ROLE_MASTER) { + reqna = mna; mna->peer_ref = 1; netmap_adapter_get(&sna->up); } else { - req = sna; + reqna = sna; sna->peer_ref = 1; netmap_adapter_get(&mna->up); } ND("created master %p and slave %p", mna, sna); found: - ND("pipe %d %s at %p", pipe_id, - (req->role == NR_REG_PIPE_MASTER ? "master" : "slave"), req); - *na = &req->up; + ND("pipe %s %s at %p", pipe_id, + (reqna->role == NM_PIPE_ROLE_MASTER ? "master" : "slave"), reqna); + *na = &reqna->up; netmap_adapter_get(*na); /* keep the reference to the parent. * It will be released by the req destructor */ return 0; free_sna: nm_os_free(sna); unregister_mna: netmap_pipe_remove(pna, mna); free_mna: nm_os_free(mna); put_out: netmap_unget_na(pna, ifp); return error; } #endif /* WITH_PIPES */ Index: head/sys/dev/netmap/netmap_pt.c =================================================================== --- head/sys/dev/netmap/netmap_pt.c (revision 332422) +++ head/sys/dev/netmap/netmap_pt.c (revision 332423) @@ -1,1525 +1,1498 @@ /* * Copyright (C) 2015 Stefano Garzarella * Copyright (C) 2016 Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * common headers */ #if defined(__FreeBSD__) #include #include #include #include #include #include #include #include #include //#define usleep_range(_1, _2) #define usleep_range(_1, _2) \ pause_sbt("ptnetmap-sleep", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE) #elif defined(linux) #include #endif #include #include #include #include #ifdef WITH_PTNETMAP_HOST /* RX cycle without receive any packets */ #define PTN_RX_DRY_CYCLES_MAX 10 /* Limit Batch TX to half ring. 
* Currently disabled, since it does not manage NS_MOREFRAG, which * results in random drops in the VALE txsync. */ //#define PTN_TX_BATCH_LIM(_n) ((_n >> 1)) //#define BUSY_WAIT #define NETMAP_PT_DEBUG /* Enables communication debugging. */ #ifdef NETMAP_PT_DEBUG #define DBG(x) x #else #define DBG(x) #endif #undef RATE //#define RATE /* Enables communication statistics. */ #ifdef RATE #define IFRATE(x) x struct rate_batch_stats { unsigned long sync; unsigned long sync_dry; unsigned long pkt; }; struct rate_stats { unsigned long gtxk; /* Guest --> Host Tx kicks. */ unsigned long grxk; /* Guest --> Host Rx kicks. */ unsigned long htxk; /* Host --> Guest Tx kicks. */ unsigned long hrxk; /* Host --> Guest Rx Kicks. */ unsigned long btxwu; /* Backend Tx wake-up. */ unsigned long brxwu; /* Backend Rx wake-up. */ struct rate_batch_stats txbs; struct rate_batch_stats rxbs; }; struct rate_context { struct timer_list timer; struct rate_stats new; struct rate_stats old; }; #define RATE_PERIOD 2 static void rate_callback(unsigned long arg) { struct rate_context * ctx = (struct rate_context *)arg; struct rate_stats cur = ctx->new; struct rate_batch_stats *txbs = &cur.txbs; struct rate_batch_stats *rxbs = &cur.rxbs; struct rate_batch_stats *txbs_old = &ctx->old.txbs; struct rate_batch_stats *rxbs_old = &ctx->old.rxbs; uint64_t tx_batch, rx_batch; unsigned long txpkts, rxpkts; unsigned long gtxk, grxk; int r; txpkts = txbs->pkt - txbs_old->pkt; rxpkts = rxbs->pkt - rxbs_old->pkt; tx_batch = ((txbs->sync - txbs_old->sync) > 0) ? txpkts / (txbs->sync - txbs_old->sync): 0; rx_batch = ((rxbs->sync - rxbs_old->sync) > 0) ? rxpkts / (rxbs->sync - rxbs_old->sync): 0; /* Fix-up gtxk and grxk estimates. */ gtxk = (cur.gtxk - ctx->old.gtxk) - (cur.btxwu - ctx->old.btxwu); grxk = (cur.grxk - ctx->old.grxk) - (cur.brxwu - ctx->old.brxwu); printk("txpkts = %lu Hz\n", txpkts/RATE_PERIOD); printk("gtxk = %lu Hz\n", gtxk/RATE_PERIOD); printk("htxk = %lu Hz\n", (cur.htxk - ctx->old.htxk)/RATE_PERIOD); printk("btxw = %lu Hz\n", (cur.btxwu - ctx->old.btxwu)/RATE_PERIOD); printk("rxpkts = %lu Hz\n", rxpkts/RATE_PERIOD); printk("grxk = %lu Hz\n", grxk/RATE_PERIOD); printk("hrxk = %lu Hz\n", (cur.hrxk - ctx->old.hrxk)/RATE_PERIOD); printk("brxw = %lu Hz\n", (cur.brxwu - ctx->old.brxwu)/RATE_PERIOD); printk("txbatch = %llu avg\n", tx_batch); printk("rxbatch = %llu avg\n", rx_batch); printk("\n"); ctx->old = cur; r = mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(RATE_PERIOD * 1000)); if (unlikely(r)) D("[ptnetmap] Error: mod_timer()\n"); } static void rate_batch_stats_update(struct rate_batch_stats *bf, uint32_t pre_tail, uint32_t act_tail, uint32_t num_slots) { int n = (int)act_tail - pre_tail; if (n) { if (n < 0) n += num_slots; bf->sync++; bf->pkt += n; } else { bf->sync_dry++; } } #else /* !RATE */ #define IFRATE(x) #endif /* RATE */ struct ptnetmap_state { /* Kthreads. */ struct nm_kctx **kctxs; /* Shared memory with the guest (TX/RX) */ struct ptnet_csb_gh __user *csb_gh; struct ptnet_csb_hg __user *csb_hg; bool stopped; /* Netmap adapter wrapping the backend. 
*/ struct netmap_pt_host_adapter *pth_na; IFRATE(struct rate_context rate_ctx;) }; static inline void ptnetmap_kring_dump(const char *title, const struct netmap_kring *kring) { D("%s - name: %s hwcur: %d hwtail: %d rhead: %d rcur: %d" " rtail: %d head: %d cur: %d tail: %d", title, kring->name, kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail, kring->ring->head, kring->ring->cur, kring->ring->tail); } /* * TX functions to set/get and to handle host/guest kick. */ /* Enable or disable guest --> host kicks. */ static inline void pthg_kick_enable(struct ptnet_csb_hg __user *pthg, uint32_t val) { CSB_WRITE(pthg, host_need_kick, val); } /* Are guest interrupt enabled or disabled? */ static inline uint32_t ptgh_intr_enabled(struct ptnet_csb_gh __user *ptgh) { uint32_t v; CSB_READ(ptgh, guest_need_kick, v); return v; } /* Handle TX events: from the guest or from the backend */ static void ptnetmap_tx_handler(void *data, int is_kthread) { struct netmap_kring *kring = data; struct netmap_pt_host_adapter *pth_na = (struct netmap_pt_host_adapter *)kring->na->na_private; struct ptnetmap_state *ptns = pth_na->ptns; struct ptnet_csb_gh __user *ptgh; struct ptnet_csb_hg __user *pthg; struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */ bool more_txspace = false; struct nm_kctx *kth; uint32_t num_slots; int batch; IFRATE(uint32_t pre_tail); if (unlikely(!ptns)) { D("ERROR ptnetmap state is NULL"); return; } if (unlikely(ptns->stopped)) { RD(1, "backend netmap is being stopped"); return; } if (unlikely(nm_kr_tryget(kring, 1, NULL))) { D("ERROR nm_kr_tryget()"); return; } /* This is a guess, to be fixed in the rate callback. */ IFRATE(ptns->rate_ctx.new.gtxk++); /* Get TX ptgh/pthg pointer from the CSB. */ ptgh = ptns->csb_gh + kring->ring_id; pthg = ptns->csb_hg + kring->ring_id; kth = ptns->kctxs[kring->ring_id]; num_slots = kring->nkr_num_slots; /* Disable guest --> host notifications. */ pthg_kick_enable(pthg, 0); /* Copy the guest kring pointers from the CSB */ ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots); for (;;) { /* If guest moves ahead too fast, let's cut the move so * that we don't exceed our batch limit. */ batch = shadow_ring.head - kring->nr_hwcur; if (batch < 0) batch += num_slots; #ifdef PTN_TX_BATCH_LIM if (batch > PTN_TX_BATCH_LIM(num_slots)) { uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots); if (head_lim >= num_slots) head_lim -= num_slots; ND(1, "batch: %d head: %d head_lim: %d", batch, shadow_ring.head, head_lim); shadow_ring.head = head_lim; batch = PTN_TX_BATCH_LIM(num_slots); } #endif /* PTN_TX_BATCH_LIM */ if (nm_kr_txspace(kring) <= (num_slots >> 1)) { shadow_ring.flags |= NAF_FORCE_RECLAIM; } /* Netmap prologue */ shadow_ring.tail = kring->rtail; if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) { /* Reinit ring and enable notifications. */ netmap_ring_reinit(kring); pthg_kick_enable(pthg, 1); break; } if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) { ptnetmap_kring_dump("pre txsync", kring); } IFRATE(pre_tail = kring->rtail); if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) { /* Reenable notifications. */ pthg_kick_enable(pthg, 1); D("ERROR txsync()"); break; } /* * Finalize * Copy host hwcur and hwtail into the CSB for the guest sync(), and * do the nm_sync_finalize. */ ptnetmap_host_write_kring_csb(pthg, kring->nr_hwcur, kring->nr_hwtail); if (kring->rtail != kring->nr_hwtail) { /* Some more room available in the parent adapter. 
*/ kring->rtail = kring->nr_hwtail; more_txspace = true; } IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.txbs, pre_tail, kring->rtail, num_slots)); if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) { ptnetmap_kring_dump("post txsync", kring); } #ifndef BUSY_WAIT /* Interrupt the guest if needed. */ if (more_txspace && ptgh_intr_enabled(ptgh) && is_kthread) { /* Disable guest kick to avoid sending unnecessary kicks */ nm_os_kctx_send_irq(kth); IFRATE(ptns->rate_ctx.new.htxk++); more_txspace = false; } #endif /* Read CSB to see if there is more work to do. */ ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots); #ifndef BUSY_WAIT if (shadow_ring.head == kring->rhead) { /* * No more packets to transmit. We enable notifications and * go to sleep, waiting for a kick from the guest when new * new slots are ready for transmission. */ if (is_kthread) { usleep_range(1,1); } /* Reenable notifications. */ pthg_kick_enable(pthg, 1); /* Doublecheck. */ ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots); if (shadow_ring.head != kring->rhead) { /* We won the race condition, there are more packets to * transmit. Disable notifications and do another cycle */ pthg_kick_enable(pthg, 0); continue; } break; } if (nm_kr_txempty(kring)) { /* No more available TX slots. We stop waiting for a notification * from the backend (netmap_tx_irq). */ ND(1, "TX ring"); break; } #endif if (unlikely(ptns->stopped)) { D("backend netmap is being stopped"); break; } } nm_kr_put(kring); if (more_txspace && ptgh_intr_enabled(ptgh) && is_kthread) { nm_os_kctx_send_irq(kth); IFRATE(ptns->rate_ctx.new.htxk++); } } /* Called on backend nm_notify when there is no worker thread. */ static void ptnetmap_tx_nothread_notify(void *data) { struct netmap_kring *kring = data; struct netmap_pt_host_adapter *pth_na = (struct netmap_pt_host_adapter *)kring->na->na_private; struct ptnetmap_state *ptns = pth_na->ptns; if (unlikely(!ptns)) { D("ERROR ptnetmap state is NULL"); return; } if (unlikely(ptns->stopped)) { D("backend netmap is being stopped"); return; } /* We cannot access the CSB here (to check ptgh->guest_need_kick), * unless we switch address space to the one of the guest. For now * we unconditionally inject an interrupt. */ nm_os_kctx_send_irq(ptns->kctxs[kring->ring_id]); IFRATE(ptns->rate_ctx.new.htxk++); ND(1, "%s interrupt", kring->name); } /* * We need RX kicks from the guest when (tail == head-1), where we wait * for the guest to refill. */ #ifndef BUSY_WAIT static inline int ptnetmap_norxslots(struct netmap_kring *kring, uint32_t g_head) { return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head, kring->nkr_num_slots - 1)); } #endif /* !BUSY_WAIT */ /* Handle RX events: from the guest or from the backend */ static void ptnetmap_rx_handler(void *data, int is_kthread) { struct netmap_kring *kring = data; struct netmap_pt_host_adapter *pth_na = (struct netmap_pt_host_adapter *)kring->na->na_private; struct ptnetmap_state *ptns = pth_na->ptns; struct ptnet_csb_gh __user *ptgh; struct ptnet_csb_hg __user *pthg; struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */ struct nm_kctx *kth; uint32_t num_slots; int dry_cycles = 0; bool some_recvd = false; IFRATE(uint32_t pre_tail); if (unlikely(!ptns || !ptns->pth_na)) { D("ERROR ptnetmap state %p, ptnetmap host adapter %p", ptns, ptns ? 
ptns->pth_na : NULL); return; } if (unlikely(ptns->stopped)) { RD(1, "backend netmap is being stopped"); return; } if (unlikely(nm_kr_tryget(kring, 1, NULL))) { D("ERROR nm_kr_tryget()"); return; } /* This is a guess, to be fixed in the rate callback. */ IFRATE(ptns->rate_ctx.new.grxk++); /* Get RX ptgh and pthg pointers from the CSB. */ ptgh = ptns->csb_gh + (pth_na->up.num_tx_rings + kring->ring_id); pthg = ptns->csb_hg + (pth_na->up.num_tx_rings + kring->ring_id); kth = ptns->kctxs[pth_na->up.num_tx_rings + kring->ring_id]; num_slots = kring->nkr_num_slots; /* Disable notifications. */ pthg_kick_enable(pthg, 0); /* Copy the guest kring pointers from the CSB */ ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots); for (;;) { uint32_t hwtail; /* Netmap prologue */ shadow_ring.tail = kring->rtail; if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) { /* Reinit ring and enable notifications. */ netmap_ring_reinit(kring); pthg_kick_enable(pthg, 1); break; } if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) { ptnetmap_kring_dump("pre rxsync", kring); } IFRATE(pre_tail = kring->rtail); if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) { /* Reenable notifications. */ pthg_kick_enable(pthg, 1); D("ERROR rxsync()"); break; } /* * Finalize * Copy host hwcur and hwtail into the CSB for the guest sync() */ hwtail = NM_ACCESS_ONCE(kring->nr_hwtail); ptnetmap_host_write_kring_csb(pthg, kring->nr_hwcur, hwtail); if (kring->rtail != hwtail) { kring->rtail = hwtail; some_recvd = true; dry_cycles = 0; } else { dry_cycles++; } IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.rxbs, pre_tail, kring->rtail, num_slots)); if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) { ptnetmap_kring_dump("post rxsync", kring); } #ifndef BUSY_WAIT /* Interrupt the guest if needed. */ if (some_recvd && ptgh_intr_enabled(ptgh)) { /* Disable guest kick to avoid sending unnecessary kicks */ nm_os_kctx_send_irq(kth); IFRATE(ptns->rate_ctx.new.hrxk++); some_recvd = false; } #endif /* Read CSB to see if there is more work to do. */ ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots); #ifndef BUSY_WAIT if (ptnetmap_norxslots(kring, shadow_ring.head)) { /* * No more slots available for reception. We enable notification and * go to sleep, waiting for a kick from the guest when new receive * slots are available. */ usleep_range(1,1); /* Reenable notifications. */ pthg_kick_enable(pthg, 1); /* Doublecheck. */ ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots); if (!ptnetmap_norxslots(kring, shadow_ring.head)) { /* We won the race condition, more slots are available. Disable * notifications and do another cycle. */ pthg_kick_enable(pthg, 0); continue; } break; } hwtail = NM_ACCESS_ONCE(kring->nr_hwtail); if (unlikely(hwtail == kring->rhead || dry_cycles >= PTN_RX_DRY_CYCLES_MAX)) { /* No more packets to be read from the backend. We stop and * wait for a notification from the backend (netmap_rx_irq). */ ND(1, "nr_hwtail: %d rhead: %d dry_cycles: %d", hwtail, kring->rhead, dry_cycles); break; } #endif if (unlikely(ptns->stopped)) { D("backend netmap is being stopped"); break; } } nm_kr_put(kring); /* Interrupt the guest if needed. 
*/ if (some_recvd && ptgh_intr_enabled(ptgh)) { nm_os_kctx_send_irq(kth); IFRATE(ptns->rate_ctx.new.hrxk++); } } #ifdef NETMAP_PT_DEBUG static void ptnetmap_print_configuration(struct ptnetmap_cfg *cfg) { int k; D("ptnetmap configuration:"); D(" CSB @%p@:%p, num_rings=%u, cfgtype %08x", cfg->csb_gh, cfg->csb_hg, cfg->num_rings, cfg->cfgtype); for (k = 0; k < cfg->num_rings; k++) { switch (cfg->cfgtype) { case PTNETMAP_CFGTYPE_QEMU: { struct ptnetmap_cfgentry_qemu *e = (struct ptnetmap_cfgentry_qemu *)(cfg+1) + k; D(" ring #%d: ioeventfd=%lu, irqfd=%lu", k, (unsigned long)e->ioeventfd, (unsigned long)e->irqfd); break; } case PTNETMAP_CFGTYPE_BHYVE: { struct ptnetmap_cfgentry_bhyve *e = (struct ptnetmap_cfgentry_bhyve *)(cfg+1) + k; D(" ring #%d: wchan=%lu, ioctl_fd=%lu, " "ioctl_cmd=%lu, msix_msg_data=%lu, msix_addr=%lu", k, (unsigned long)e->wchan, (unsigned long)e->ioctl_fd, (unsigned long)e->ioctl_cmd, (unsigned long)e->ioctl_data.msg_data, (unsigned long)e->ioctl_data.addr); break; } } } } #endif /* NETMAP_PT_DEBUG */ /* Copy actual state of the host ring into the CSB for the guest init */ static int ptnetmap_kring_snapshot(struct netmap_kring *kring, struct ptnet_csb_gh __user *ptgh, struct ptnet_csb_hg __user *pthg) { if (CSB_WRITE(ptgh, head, kring->rhead)) goto err; if (CSB_WRITE(ptgh, cur, kring->rcur)) goto err; if (CSB_WRITE(pthg, hwcur, kring->nr_hwcur)) goto err; if (CSB_WRITE(pthg, hwtail, NM_ACCESS_ONCE(kring->nr_hwtail))) goto err; DBG(ptnetmap_kring_dump("ptnetmap_kring_snapshot", kring);) return 0; err: return EFAULT; } static struct netmap_kring * ptnetmap_kring(struct netmap_pt_host_adapter *pth_na, int k) { if (k < pth_na->up.num_tx_rings) { - return pth_na->up.tx_rings + k; + return pth_na->up.tx_rings[k]; } - return pth_na->up.rx_rings + k - pth_na->up.num_tx_rings; + return pth_na->up.rx_rings[k - pth_na->up.num_tx_rings]; } static int ptnetmap_krings_snapshot(struct netmap_pt_host_adapter *pth_na) { struct ptnetmap_state *ptns = pth_na->ptns; struct netmap_kring *kring; unsigned int num_rings; int err = 0, k; num_rings = pth_na->up.num_tx_rings + pth_na->up.num_rx_rings; for (k = 0; k < num_rings; k++) { kring = ptnetmap_kring(pth_na, k); err |= ptnetmap_kring_snapshot(kring, ptns->csb_gh + k, ptns->csb_hg + k); } return err; } /* * Functions to create kernel contexts, and start/stop the workers. 
*/ static int ptnetmap_create_kctxs(struct netmap_pt_host_adapter *pth_na, struct ptnetmap_cfg *cfg, int use_tx_kthreads) { struct ptnetmap_state *ptns = pth_na->ptns; struct nm_kctx_cfg nmk_cfg; unsigned int num_rings; uint8_t *cfg_entries = (uint8_t *)(cfg + 1); + unsigned int expected_cfgtype = 0; int k; +#if defined(__FreeBSD__) + expected_cfgtype = PTNETMAP_CFGTYPE_BHYVE; +#elif defined(linux) + expected_cfgtype = PTNETMAP_CFGTYPE_QEMU; +#endif + if (cfg->cfgtype != expected_cfgtype) { + D("Unsupported cfgtype %u", cfg->cfgtype); + return EINVAL; + } + num_rings = pth_na->up.num_tx_rings + pth_na->up.num_rx_rings; for (k = 0; k < num_rings; k++) { nmk_cfg.attach_user = 1; /* attach kthread to user process */ nmk_cfg.worker_private = ptnetmap_kring(pth_na, k); nmk_cfg.type = k; if (k < pth_na->up.num_tx_rings) { nmk_cfg.worker_fn = ptnetmap_tx_handler; nmk_cfg.use_kthread = use_tx_kthreads; nmk_cfg.notify_fn = ptnetmap_tx_nothread_notify; } else { nmk_cfg.worker_fn = ptnetmap_rx_handler; nmk_cfg.use_kthread = 1; } ptns->kctxs[k] = nm_os_kctx_create(&nmk_cfg, - cfg->cfgtype, cfg_entries + k * cfg->entry_size); + cfg_entries + k * cfg->entry_size); if (ptns->kctxs[k] == NULL) { goto err; } } return 0; err: for (k = 0; k < num_rings; k++) { if (ptns->kctxs[k]) { nm_os_kctx_destroy(ptns->kctxs[k]); ptns->kctxs[k] = NULL; } } return EFAULT; } static int ptnetmap_start_kctx_workers(struct netmap_pt_host_adapter *pth_na) { struct ptnetmap_state *ptns = pth_na->ptns; int num_rings; int error; int k; if (!ptns) { D("BUG ptns is NULL"); return EFAULT; } ptns->stopped = false; num_rings = ptns->pth_na->up.num_tx_rings + ptns->pth_na->up.num_rx_rings; for (k = 0; k < num_rings; k++) { //nm_os_kctx_worker_setaff(ptns->kctxs[k], xxx); error = nm_os_kctx_worker_start(ptns->kctxs[k]); if (error) { return error; } } return 0; } static void ptnetmap_stop_kctx_workers(struct netmap_pt_host_adapter *pth_na) { struct ptnetmap_state *ptns = pth_na->ptns; int num_rings; int k; if (!ptns) { /* Nothing to do. */ return; } ptns->stopped = true; num_rings = ptns->pth_na->up.num_tx_rings + ptns->pth_na->up.num_rx_rings; for (k = 0; k < num_rings; k++) { nm_os_kctx_worker_stop(ptns->kctxs[k]); } } -static struct ptnetmap_cfg * -ptnetmap_read_cfg(struct nmreq *nmr) -{ - uintptr_t *nmr_ptncfg = (uintptr_t *)&nmr->nr_arg1; - struct ptnetmap_cfg *cfg; - struct ptnetmap_cfg tmp; - size_t cfglen; - - if (copyin((const void *)*nmr_ptncfg, &tmp, sizeof(tmp))) { - D("Partial copyin() failed"); - return NULL; - } - - cfglen = sizeof(tmp) + tmp.num_rings * tmp.entry_size; - cfg = nm_os_malloc(cfglen); - if (!cfg) { - return NULL; - } - - if (copyin((const void *)*nmr_ptncfg, cfg, cfglen)) { - D("Full copyin() failed"); - nm_os_free(cfg); - return NULL; - } - - return cfg; -} - static int nm_unused_notify(struct netmap_kring *, int); static int nm_pt_host_notify(struct netmap_kring *, int); /* Create ptnetmap state and switch parent adapter to ptnetmap mode. */ static int ptnetmap_create(struct netmap_pt_host_adapter *pth_na, struct ptnetmap_cfg *cfg) { int use_tx_kthreads = ptnetmap_tx_workers; /* snapshot */ struct ptnetmap_state *ptns; unsigned int num_rings; int ret, i; /* Check if ptnetmap state is already there. 
*/ if (pth_na->ptns) { D("ERROR adapter %p already in ptnetmap mode", pth_na->parent); return EINVAL; } num_rings = pth_na->up.num_tx_rings + pth_na->up.num_rx_rings; if (num_rings != cfg->num_rings) { D("ERROR configuration mismatch, expected %u rings, found %u", num_rings, cfg->num_rings); return EINVAL; } if (!use_tx_kthreads && na_is_generic(pth_na->parent)) { D("ERROR ptnetmap direct transmission not supported with " "passed-through emulated adapters"); return EOPNOTSUPP; } ptns = nm_os_malloc(sizeof(*ptns) + num_rings * sizeof(*ptns->kctxs)); if (!ptns) { return ENOMEM; } ptns->kctxs = (struct nm_kctx **)(ptns + 1); ptns->stopped = true; /* Cross-link data structures. */ pth_na->ptns = ptns; ptns->pth_na = pth_na; /* Store the CSB address provided by the hypervisor. */ ptns->csb_gh = cfg->csb_gh; ptns->csb_hg = cfg->csb_hg; DBG(ptnetmap_print_configuration(cfg)); /* Create kernel contexts. */ if ((ret = ptnetmap_create_kctxs(pth_na, cfg, use_tx_kthreads))) { D("ERROR ptnetmap_create_kctxs()"); goto err; } /* Copy krings state into the CSB for the guest initialization */ if ((ret = ptnetmap_krings_snapshot(pth_na))) { D("ERROR ptnetmap_krings_snapshot()"); goto err; } /* Overwrite parent nm_notify krings callback, and * clear NAF_BDG_MAYSLEEP if needed. */ pth_na->parent->na_private = pth_na; pth_na->parent_nm_notify = pth_na->parent->nm_notify; pth_na->parent->nm_notify = nm_unused_notify; pth_na->parent_na_flags = pth_na->parent->na_flags; if (!use_tx_kthreads) { /* VALE port txsync is executed under spinlock on Linux, so * we need to make sure the bridge cannot sleep. */ pth_na->parent->na_flags &= ~NAF_BDG_MAYSLEEP; } for (i = 0; i < pth_na->parent->num_rx_rings; i++) { - pth_na->up.rx_rings[i].save_notify = - pth_na->up.rx_rings[i].nm_notify; - pth_na->up.rx_rings[i].nm_notify = nm_pt_host_notify; + pth_na->up.rx_rings[i]->save_notify = + pth_na->up.rx_rings[i]->nm_notify; + pth_na->up.rx_rings[i]->nm_notify = nm_pt_host_notify; } for (i = 0; i < pth_na->parent->num_tx_rings; i++) { - pth_na->up.tx_rings[i].save_notify = - pth_na->up.tx_rings[i].nm_notify; - pth_na->up.tx_rings[i].nm_notify = nm_pt_host_notify; + pth_na->up.tx_rings[i]->save_notify = + pth_na->up.tx_rings[i]->nm_notify; + pth_na->up.tx_rings[i]->nm_notify = nm_pt_host_notify; } #ifdef RATE memset(&ptns->rate_ctx, 0, sizeof(ptns->rate_ctx)); setup_timer(&ptns->rate_ctx.timer, &rate_callback, (unsigned long)&ptns->rate_ctx); if (mod_timer(&ptns->rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) D("[ptn] Error: mod_timer()\n"); #endif DBG(D("[%s] ptnetmap configuration DONE", pth_na->up.name)); return 0; err: pth_na->ptns = NULL; nm_os_free(ptns); return ret; } /* Switch parent adapter back to normal mode and destroy * ptnetmap state. */ static void ptnetmap_delete(struct netmap_pt_host_adapter *pth_na) { struct ptnetmap_state *ptns = pth_na->ptns; int num_rings; int i; if (!ptns) { /* Nothing to do. */ return; } /* Restore parent adapter callbacks. 
*/ pth_na->parent->nm_notify = pth_na->parent_nm_notify; pth_na->parent->na_private = NULL; pth_na->parent->na_flags = pth_na->parent_na_flags; for (i = 0; i < pth_na->parent->num_rx_rings; i++) { - pth_na->up.rx_rings[i].nm_notify = - pth_na->up.rx_rings[i].save_notify; - pth_na->up.rx_rings[i].save_notify = NULL; + pth_na->up.rx_rings[i]->nm_notify = + pth_na->up.rx_rings[i]->save_notify; + pth_na->up.rx_rings[i]->save_notify = NULL; } for (i = 0; i < pth_na->parent->num_tx_rings; i++) { - pth_na->up.tx_rings[i].nm_notify = - pth_na->up.tx_rings[i].save_notify; - pth_na->up.tx_rings[i].save_notify = NULL; + pth_na->up.tx_rings[i]->nm_notify = + pth_na->up.tx_rings[i]->save_notify; + pth_na->up.tx_rings[i]->save_notify = NULL; } /* Destroy kernel contexts. */ num_rings = ptns->pth_na->up.num_tx_rings + ptns->pth_na->up.num_rx_rings; for (i = 0; i < num_rings; i++) { nm_os_kctx_destroy(ptns->kctxs[i]); ptns->kctxs[i] = NULL; } IFRATE(del_timer(&ptns->rate_ctx.timer)); nm_os_free(ptns); pth_na->ptns = NULL; DBG(D("[%s] ptnetmap deleted", pth_na->up.name)); } /* * Called by netmap_ioctl(). - * Operation is indicated in nmr->nr_cmd. + * Operation is indicated in nr_name. * * Called without NMG_LOCK. */ int -ptnetmap_ctl(struct nmreq *nmr, struct netmap_adapter *na) +ptnetmap_ctl(const char *nr_name, int create, struct netmap_adapter *na) { - struct netmap_pt_host_adapter *pth_na; - struct ptnetmap_cfg *cfg; - char *name; - int cmd, error = 0; + struct netmap_pt_host_adapter *pth_na; + struct ptnetmap_cfg *cfg = NULL; + int error = 0; - name = nmr->nr_name; - cmd = nmr->nr_cmd; + DBG(D("name: %s", nr_name)); - DBG(D("name: %s", name)); + if (!nm_ptnetmap_host_on(na)) { + D("ERROR Netmap adapter %p is not a ptnetmap host adapter", + na); + return ENXIO; + } + pth_na = (struct netmap_pt_host_adapter *)na; - if (!nm_ptnetmap_host_on(na)) { - D("ERROR Netmap adapter %p is not a ptnetmap host adapter", na); - error = ENXIO; - goto done; - } - pth_na = (struct netmap_pt_host_adapter *)na; + NMG_LOCK(); + if (create) { + /* Read hypervisor configuration from userspace. */ + /* TODO */ + if (!cfg) { + goto out; + } + /* Create ptnetmap state (kctxs, ...) and switch parent + * adapter to ptnetmap mode. */ + error = ptnetmap_create(pth_na, cfg); + nm_os_free(cfg); + if (error) { + goto out; + } + /* Start kthreads. */ + error = ptnetmap_start_kctx_workers(pth_na); + if (error) + ptnetmap_delete(pth_na); + } else { + /* Stop kthreads. */ + ptnetmap_stop_kctx_workers(pth_na); + /* Switch parent adapter back to normal mode and destroy + * ptnetmap state (kthreads, ...). */ + ptnetmap_delete(pth_na); + } +out: + NMG_UNLOCK(); - NMG_LOCK(); - switch (cmd) { - case NETMAP_PT_HOST_CREATE: - /* Read hypervisor configuration from userspace. */ - cfg = ptnetmap_read_cfg(nmr); - if (!cfg) - break; - /* Create ptnetmap state (kctxs, ...) and switch parent - * adapter to ptnetmap mode. */ - error = ptnetmap_create(pth_na, cfg); - nm_os_free(cfg); - if (error) - break; - /* Start kthreads. */ - error = ptnetmap_start_kctx_workers(pth_na); - if (error) - ptnetmap_delete(pth_na); - break; - - case NETMAP_PT_HOST_DELETE: - /* Stop kthreads. */ - ptnetmap_stop_kctx_workers(pth_na); - /* Switch parent adapter back to normal mode and destroy - * ptnetmap state (kthreads, ...). 
*/ - ptnetmap_delete(pth_na); - break; - - default: - D("ERROR invalid cmd (nmr->nr_cmd) (0x%x)", cmd); - error = EINVAL; - break; - } - NMG_UNLOCK(); - -done: - return error; + return error; } /* nm_notify callbacks for ptnetmap */ static int nm_pt_host_notify(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_pt_host_adapter *pth_na = (struct netmap_pt_host_adapter *)na->na_private; struct ptnetmap_state *ptns; int k; /* First check that the passthrough port is not being destroyed. */ if (unlikely(!pth_na)) { return NM_IRQ_COMPLETED; } ptns = pth_na->ptns; if (unlikely(!ptns || ptns->stopped)) { return NM_IRQ_COMPLETED; } k = kring->ring_id; /* Notify kthreads (wake up if needed) */ if (kring->tx == NR_TX) { ND(1, "TX backend irq"); IFRATE(ptns->rate_ctx.new.btxwu++); } else { k += pth_na->up.num_tx_rings; ND(1, "RX backend irq"); IFRATE(ptns->rate_ctx.new.brxwu++); } nm_os_kctx_worker_wakeup(ptns->kctxs[k]); return NM_IRQ_COMPLETED; } static int nm_unused_notify(struct netmap_kring *kring, int flags) { D("BUG this should never be called"); return ENXIO; } /* nm_config callback for bwrap */ static int -nm_pt_host_config(struct netmap_adapter *na, u_int *txr, u_int *txd, - u_int *rxr, u_int *rxd) +nm_pt_host_config(struct netmap_adapter *na, struct nm_config_info *info) { struct netmap_pt_host_adapter *pth_na = (struct netmap_pt_host_adapter *)na; struct netmap_adapter *parent = pth_na->parent; int error; //XXX: maybe calling parent->nm_config is better /* forward the request */ error = netmap_update_config(parent); - *rxr = na->num_rx_rings = parent->num_rx_rings; - *txr = na->num_tx_rings = parent->num_tx_rings; - *txd = na->num_tx_desc = parent->num_tx_desc; - *rxd = na->num_rx_desc = parent->num_rx_desc; + info->num_rx_rings = na->num_rx_rings = parent->num_rx_rings; + info->num_tx_rings = na->num_tx_rings = parent->num_tx_rings; + info->num_tx_descs = na->num_tx_desc = parent->num_tx_desc; + info->num_rx_descs = na->num_rx_desc = parent->num_rx_desc; + info->rx_buf_maxsize = na->rx_buf_maxsize = parent->rx_buf_maxsize; - DBG(D("rxr: %d txr: %d txd: %d rxd: %d", *rxr, *txr, *txd, *rxd)); - return error; } /* nm_krings_create callback for ptnetmap */ static int nm_pt_host_krings_create(struct netmap_adapter *na) { struct netmap_pt_host_adapter *pth_na = (struct netmap_pt_host_adapter *)na; struct netmap_adapter *parent = pth_na->parent; enum txrx t; int error; DBG(D("%s", pth_na->up.name)); /* create the parent krings */ error = parent->nm_krings_create(parent); if (error) { return error; } /* A ptnetmap host adapter points the very same krings * as its parent adapter. These pointer are used in the * TX/RX worker functions. */ na->tx_rings = parent->tx_rings; na->rx_rings = parent->rx_rings; na->tailroom = parent->tailroom; for_rx_tx(t) { struct netmap_kring *kring; /* Parent's kring_create function will initialize * its own na->si. We have to init our na->si here. */ nm_os_selinfo_init(&na->si[t]); /* Force the mem_rings_create() method to create the * host rings independently on what the regif asked for: * these rings are needed by the guest ptnetmap adapter * anyway. 
*/ - kring = &NMR(na, t)[nma_get_nrings(na, t)]; + kring = NMR(na, t)[nma_get_nrings(na, t)]; kring->nr_kflags |= NKR_NEEDRING; } return 0; } /* nm_krings_delete callback for ptnetmap */ static void nm_pt_host_krings_delete(struct netmap_adapter *na) { struct netmap_pt_host_adapter *pth_na = (struct netmap_pt_host_adapter *)na; struct netmap_adapter *parent = pth_na->parent; DBG(D("%s", pth_na->up.name)); parent->nm_krings_delete(parent); na->tx_rings = na->rx_rings = na->tailroom = NULL; } /* nm_register callback */ static int nm_pt_host_register(struct netmap_adapter *na, int onoff) { struct netmap_pt_host_adapter *pth_na = (struct netmap_pt_host_adapter *)na; struct netmap_adapter *parent = pth_na->parent; int error; DBG(D("%s onoff %d", pth_na->up.name, onoff)); if (onoff) { /* netmap_do_regif has been called on the ptnetmap na. * We need to pass the information about the * memory allocator to the parent before * putting it in netmap mode */ parent->na_lut = na->na_lut; } /* forward the request to the parent */ error = parent->nm_register(parent, onoff); if (error) return error; if (onoff) { na->na_flags |= NAF_NETMAP_ON | NAF_PTNETMAP_HOST; } else { ptnetmap_delete(pth_na); na->na_flags &= ~(NAF_NETMAP_ON | NAF_PTNETMAP_HOST); } return 0; } /* nm_dtor callback */ static void nm_pt_host_dtor(struct netmap_adapter *na) { struct netmap_pt_host_adapter *pth_na = (struct netmap_pt_host_adapter *)na; struct netmap_adapter *parent = pth_na->parent; DBG(D("%s", pth_na->up.name)); /* The equivalent of NETMAP_PT_HOST_DELETE if the hypervisor * didn't do it. */ ptnetmap_stop_kctx_workers(pth_na); ptnetmap_delete(pth_na); parent->na_flags &= ~NAF_BUSY; netmap_adapter_put(pth_na->parent); pth_na->parent = NULL; } /* check if nmr is a request for a ptnetmap adapter that we can satisfy */ int -netmap_get_pt_host_na(struct nmreq *nmr, struct netmap_adapter **na, +netmap_get_pt_host_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create) { - struct nmreq parent_nmr; + struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body; + struct nmreq_register preq; struct netmap_adapter *parent; /* target adapter */ struct netmap_pt_host_adapter *pth_na; struct ifnet *ifp = NULL; int error; /* Check if it is a request for a ptnetmap adapter */ - if ((nmr->nr_flags & (NR_PTNETMAP_HOST)) == 0) { + if ((req->nr_flags & (NR_PTNETMAP_HOST)) == 0) { return 0; } D("Requesting a ptnetmap host adapter"); pth_na = nm_os_malloc(sizeof(*pth_na)); if (pth_na == NULL) { D("ERROR malloc"); return ENOMEM; } /* first, try to find the adapter that we want to passthrough - * We use the same nmr, after we have turned off the ptnetmap flag. + * We use the same req, after we have turned off the ptnetmap flag. * In this way we can potentially passthrough everything netmap understands. 
*/ - memcpy(&parent_nmr, nmr, sizeof(parent_nmr)); - parent_nmr.nr_flags &= ~(NR_PTNETMAP_HOST); - error = netmap_get_na(&parent_nmr, &parent, &ifp, nmd, create); + memcpy(&preq, req, sizeof(preq)); + preq.nr_flags &= ~(NR_PTNETMAP_HOST); + hdr->nr_body = (uint64_t)&preq; + error = netmap_get_na(hdr, &parent, &ifp, nmd, create); + hdr->nr_body = (uint64_t)req; if (error) { D("parent lookup failed: %d", error); goto put_out_noputparent; } DBG(D("found parent: %s", parent->name)); /* make sure the interface is not already in use */ if (NETMAP_OWNED_BY_ANY(parent)) { D("NIC %s busy, cannot ptnetmap", parent->name); error = EBUSY; goto put_out; } pth_na->parent = parent; /* Follow netmap_attach()-like operations for the host * ptnetmap adapter. */ //XXX pth_na->up.na_flags = parent->na_flags; pth_na->up.num_rx_rings = parent->num_rx_rings; pth_na->up.num_tx_rings = parent->num_tx_rings; pth_na->up.num_tx_desc = parent->num_tx_desc; pth_na->up.num_rx_desc = parent->num_rx_desc; pth_na->up.nm_dtor = nm_pt_host_dtor; pth_na->up.nm_register = nm_pt_host_register; /* Reuse parent's adapter txsync and rxsync methods. */ pth_na->up.nm_txsync = parent->nm_txsync; pth_na->up.nm_rxsync = parent->nm_rxsync; pth_na->up.nm_krings_create = nm_pt_host_krings_create; pth_na->up.nm_krings_delete = nm_pt_host_krings_delete; pth_na->up.nm_config = nm_pt_host_config; /* Set the notify method only or convenience, it will never * be used, since - differently from default krings_create - we * ptnetmap krings_create callback inits kring->nm_notify * directly. */ pth_na->up.nm_notify = nm_unused_notify; pth_na->up.nm_mem = netmap_mem_get(parent->nm_mem); pth_na->up.na_flags |= NAF_HOST_RINGS; error = netmap_attach_common(&pth_na->up); if (error) { D("ERROR netmap_attach_common()"); goto put_out; } *na = &pth_na->up; /* set parent busy, because attached for ptnetmap */ parent->na_flags |= NAF_BUSY; strncpy(pth_na->up.name, parent->name, sizeof(pth_na->up.name)); strcat(pth_na->up.name, "-PTN"); netmap_adapter_get(*na); DBG(D("%s ptnetmap request DONE", pth_na->up.name)); /* drop the reference to the ifp, if any */ if (ifp) if_rele(ifp); return 0; put_out: netmap_adapter_put(parent); if (ifp) if_rele(ifp); put_out_noputparent: nm_os_free(pth_na); return error; } #endif /* WITH_PTNETMAP_HOST */ #ifdef WITH_PTNETMAP_GUEST /* * Guest ptnetmap txsync()/rxsync() routines, used in ptnet device drivers. * These routines are reused across the different operating systems supported * by netmap. */ /* * Reconcile host and guest views of the transmit ring. * * Guest user wants to transmit packets up to the one before ring->head, * and guest kernel knows tx_ring->hwcur is the first packet unsent * by the host kernel. * * We push out as many packets as possible, and possibly * reclaim buffers from previously completed transmission. * * Notifications from the host are enabled only if the user guest would * block (no space in the ring). */ bool netmap_pt_guest_txsync(struct ptnet_csb_gh *ptgh, struct ptnet_csb_hg *pthg, struct netmap_kring *kring, int flags) { bool notify = false; /* Disable notifications */ ptgh->guest_need_kick = 0; /* * First part: tell the host (updating the CSB) to process the new * packets. */ kring->nr_hwcur = pthg->hwcur; ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead); /* Ask for a kick from a guest to the host if needed. 
*/ if (((kring->rhead != kring->nr_hwcur || nm_kr_txempty(kring)) && NM_ACCESS_ONCE(pthg->host_need_kick)) || (flags & NAF_FORCE_RECLAIM)) { ptgh->sync_flags = flags; notify = true; } /* * Second part: reclaim buffers for completed transmissions. */ if (nm_kr_txempty(kring) || (flags & NAF_FORCE_RECLAIM)) { ptnetmap_guest_read_kring_csb(pthg, kring); } /* * No more room in the ring for new transmissions. The user thread will * go to sleep and we need to be notified by the host when more free * space is available. */ if (nm_kr_txempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) { /* Reenable notifications. */ ptgh->guest_need_kick = 1; /* Double check */ ptnetmap_guest_read_kring_csb(pthg, kring); /* If there is new free space, disable notifications */ if (unlikely(!nm_kr_txempty(kring))) { ptgh->guest_need_kick = 0; } } ND(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)", kring->name, ptgh->head, ptgh->cur, pthg->hwtail, kring->rhead, kring->rcur, kring->nr_hwtail); return notify; } /* * Reconcile host and guest view of the receive ring. * * Update hwcur/hwtail from host (reading from CSB). * * If guest user has released buffers up to the one before ring->head, we * also give them to the host. * * Notifications from the host are enabled only if the user guest would * block (no more completed slots in the ring). */ bool netmap_pt_guest_rxsync(struct ptnet_csb_gh *ptgh, struct ptnet_csb_hg *pthg, struct netmap_kring *kring, int flags) { bool notify = false; /* Disable notifications */ ptgh->guest_need_kick = 0; /* * First part: import newly received packets, by updating the kring * hwtail to the hwtail known from the host (read from the CSB). * This also updates the kring hwcur. */ ptnetmap_guest_read_kring_csb(pthg, kring); kring->nr_kflags &= ~NKR_PENDINTR; /* * Second part: tell the host about the slots that guest user has * released, by updating cur and head in the CSB. */ if (kring->rhead != kring->nr_hwcur) { ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead); /* Ask for a kick from the guest to the host if needed. */ if (NM_ACCESS_ONCE(pthg->host_need_kick)) { ptgh->sync_flags = flags; notify = true; } } /* * No more completed RX slots. The user thread will go to sleep and * we need to be notified by the host when more RX slots have been * completed. */ if (nm_kr_rxempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) { /* Reenable notifications. */ ptgh->guest_need_kick = 1; /* Double check */ ptnetmap_guest_read_kring_csb(pthg, kring); /* If there are new slots, disable notifications. */ if (!nm_kr_rxempty(kring)) { ptgh->guest_need_kick = 0; } } ND(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)", kring->name, ptgh->head, ptgh->cur, pthg->hwtail, kring->rhead, kring->rcur, kring->nr_hwtail); return notify; } /* * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor. */ int ptnet_nm_krings_create(struct netmap_adapter *na) { struct netmap_pt_guest_adapter *ptna = (struct netmap_pt_guest_adapter *)na; /* Upcast. */ struct netmap_adapter *na_nm = &ptna->hwup.up; struct netmap_adapter *na_dr = &ptna->dr.up; int ret; if (ptna->backend_regifs) { return 0; } /* Create krings on the public netmap adapter. */ ret = netmap_hw_krings_create(na_nm); if (ret) { return ret; } /* Copy krings into the netmap adapter private to the driver. 
*/ na_dr->tx_rings = na_nm->tx_rings; na_dr->rx_rings = na_nm->rx_rings; return 0; } void ptnet_nm_krings_delete(struct netmap_adapter *na) { struct netmap_pt_guest_adapter *ptna = (struct netmap_pt_guest_adapter *)na; /* Upcast. */ struct netmap_adapter *na_nm = &ptna->hwup.up; struct netmap_adapter *na_dr = &ptna->dr.up; if (ptna->backend_regifs) { return; } na_dr->tx_rings = NULL; na_dr->rx_rings = NULL; netmap_hw_krings_delete(na_nm); } void ptnet_nm_dtor(struct netmap_adapter *na) { struct netmap_pt_guest_adapter *ptna = (struct netmap_pt_guest_adapter *)na; netmap_mem_put(ptna->dr.up.nm_mem); memset(&ptna->dr, 0, sizeof(ptna->dr)); netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp); } int netmap_pt_guest_attach(struct netmap_adapter *arg, unsigned int nifp_offset, unsigned int memid) { struct netmap_pt_guest_adapter *ptna; struct ifnet *ifp = arg ? arg->ifp : NULL; int error; /* get allocator */ arg->nm_mem = netmap_mem_pt_guest_new(ifp, nifp_offset, memid); if (arg->nm_mem == NULL) return ENOMEM; arg->na_flags |= NAF_MEM_OWNER; error = netmap_attach_ext(arg, sizeof(struct netmap_pt_guest_adapter), 1); if (error) return error; /* get the netmap_pt_guest_adapter */ ptna = (struct netmap_pt_guest_adapter *) NA(ifp); /* Initialize a separate pass-through netmap adapter that is going to * be used by the ptnet driver only, and so never exposed to netmap * applications. We only need a subset of the available fields. */ memset(&ptna->dr, 0, sizeof(ptna->dr)); ptna->dr.up.ifp = ifp; ptna->dr.up.nm_mem = netmap_mem_get(ptna->hwup.up.nm_mem); ptna->dr.up.nm_config = ptna->hwup.up.nm_config; ptna->backend_regifs = 0; return 0; } #endif /* WITH_PTNETMAP_GUEST */ Index: head/sys/dev/netmap/netmap_vale.c =================================================================== --- head/sys/dev/netmap/netmap_vale.c (revision 332422) +++ head/sys/dev/netmap/netmap_vale.c (revision 332423) @@ -1,2935 +1,3162 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2013-2016 Universita` di Pisa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This module implements the VALE switch for netmap --- VALE SWITCH --- NMG_LOCK() serializes all modifications to switches and ports. A switch cannot be deleted until all ports are gone. 
For each switch, an SX lock (RWlock on linux) protects deletion of ports. When configuring or deleting a new port, the lock is acquired in exclusive mode (after holding NMG_LOCK). When forwarding, the lock is acquired in shared mode (without NMG_LOCK). The lock is held throughout the entire forwarding cycle, during which the thread may incur in a page fault. Hence it is important that sleepable shared locks are used. On the rx ring, the per-port lock is grabbed initially to reserve a number of slot in the ring, then the lock is released, packets are copied from source to destination, and then the lock is acquired again and the receive ring is updated. (A similar thing is done on the tx ring for NIC and host stack ports attached to the switch) */ /* * OS-specific code that is used only within this file. * Other OS-specific code that must be accessed by drivers * is present in netmap_kern.h */ #if defined(__FreeBSD__) #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include /* defines used in kernel.h */ #include /* types used in module initialization */ #include /* cdevsw struct, UID, GID */ #include #include /* struct socket */ #include #include #include #include /* sockaddrs */ #include #include #include #include #include /* BIOCIMMEDIATE */ #include /* bus_dmamap_* */ #include #include #define BDG_RWLOCK_T struct rwlock // struct rwlock #define BDG_RWINIT(b) \ rw_init_flags(&(b)->bdg_lock, "bdg lock", RW_NOWITNESS) #define BDG_WLOCK(b) rw_wlock(&(b)->bdg_lock) #define BDG_WUNLOCK(b) rw_wunlock(&(b)->bdg_lock) #define BDG_RLOCK(b) rw_rlock(&(b)->bdg_lock) #define BDG_RTRYLOCK(b) rw_try_rlock(&(b)->bdg_lock) #define BDG_RUNLOCK(b) rw_runlock(&(b)->bdg_lock) #define BDG_RWDESTROY(b) rw_destroy(&(b)->bdg_lock) #elif defined(linux) #include "bsd_glue.h" #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #elif defined(_WIN32) #include "win_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include #ifdef WITH_VALE /* * system parameters (most of them in netmap_kern.h) * NM_BDG_NAME prefix for switch port names, default "vale" * NM_BDG_MAXPORTS number of ports * NM_BRIDGES max number of switches in the system. * XXX should become a sysctl or tunable * * Switch ports are named valeX:Y where X is the switch name and Y * is the port. If Y matches a physical interface name, the port is * connected to a physical device. * * Unlike physical interfaces, switch ports use their own memory region * for rings and buffers. * The virtual interfaces use per-queue lock instead of core lock. * In the tx loop, we aggregate traffic in batches to make all operations * faster. The batch size is bridge_batch. */ #define NM_BDG_MAXRINGS 16 /* XXX unclear how many. */ #define NM_BDG_MAXSLOTS 4096 /* XXX same as above */ #define NM_BRIDGE_RINGSIZE 1024 /* in the device */ #define NM_BDG_HASH 1024 /* forwarding table entries */ #define NM_BDG_BATCH 1024 /* entries in the forwarding buffer */ #define NM_MULTISEG 64 /* max size of a chain of bufs */ /* actual size of the tables */ #define NM_BDG_BATCH_MAX (NM_BDG_BATCH + NM_MULTISEG) /* NM_FT_NULL terminates a list of slots in the ft */ #define NM_FT_NULL NM_BDG_BATCH_MAX /* Default size for the Maximum Frame Size. */ #define NM_BDG_MFS_DEFAULT 1514 /* * bridge_batch is set via sysctl to the max batch size to be * used in the bridge. The actual value may be larger as the * last packet in the block may overflow the size. 
*/ static int bridge_batch = NM_BDG_BATCH; /* bridge batch size */ SYSBEGIN(vars_vale); SYSCTL_DECL(_dev_netmap); SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0, "Max batch size to be used in the bridge"); SYSEND; -static int netmap_vp_create(struct nmreq *, struct ifnet *, +static int netmap_vp_create(struct nmreq_header *hdr, struct ifnet *, struct netmap_mem_d *nmd, struct netmap_vp_adapter **); static int netmap_vp_reg(struct netmap_adapter *na, int onoff); static int netmap_bwrap_reg(struct netmap_adapter *, int onoff); /* * For each output interface, nm_bdg_q is used to construct a list. * bq_len is the number of output buffers (we can have coalescing * during the copy). */ struct nm_bdg_q { uint16_t bq_head; uint16_t bq_tail; uint32_t bq_len; /* number of buffers */ }; /* XXX revise this */ struct nm_hash_ent { uint64_t mac; /* the top 2 bytes are the epoch */ uint64_t ports; }; +/* Holds the default callbacks */ +static struct netmap_bdg_ops default_bdg_ops = {netmap_bdg_learning, NULL, NULL}; + /* * nm_bridge is a descriptor for a VALE switch. * Interfaces for a bridge are all in bdg_ports[]. * The array has fixed size, an empty entry does not terminate * the search, but lookups only occur on attach/detach so we * don't mind if they are slow. * * The bridge is non blocking on the transmit ports: excess * packets are dropped if there is no room on the output port. * * bdg_lock protects accesses to the bdg_ports array. * This is a rw lock (or equivalent). */ +#define NM_BDG_IFNAMSIZ IFNAMSIZ struct nm_bridge { /* XXX what is the proper alignment/layout ? */ BDG_RWLOCK_T bdg_lock; /* protects bdg_ports */ int bdg_namelen; - uint32_t bdg_active_ports; /* 0 means free */ - char bdg_basename[IFNAMSIZ]; + uint32_t bdg_active_ports; + char bdg_basename[NM_BDG_IFNAMSIZ]; /* Indexes of active ports (up to active_ports) * and all other remaining ports. */ - uint8_t bdg_port_index[NM_BDG_MAXPORTS]; + uint32_t bdg_port_index[NM_BDG_MAXPORTS]; + /* used by netmap_bdg_detach_common() */ + uint32_t tmp_bdg_port_index[NM_BDG_MAXPORTS]; struct netmap_vp_adapter *bdg_ports[NM_BDG_MAXPORTS]; - /* - * The function to decide the destination port. + * Programmable lookup functions to figure out the destination port. * It returns either of an index of the destination port, * NM_BDG_BROADCAST to broadcast this packet, or NM_BDG_NOPORT not to * forward this packet. ring_nr is the source ring index, and the * function may overwrite this value to forward this packet to a * different ring index. - * This function must be set by netmap_bdg_ctl(). + * The function is set by netmap_bdg_regops(). */ - struct netmap_bdg_ops bdg_ops; + struct netmap_bdg_ops *bdg_ops; + + /* + * Contains the data structure used by the bdg_ops.lookup function. + * By default points to *ht which is allocated on attach and used by the default lookup + * otherwise will point to the data structure received by netmap_bdg_regops(). + */ + void *private_data; + struct nm_hash_ent *ht; - /* the forwarding table, MAC+ports. - * XXX should be changed to an argument to be passed to - * the lookup function + /* Currently used to specify if the bridge is still in use while empty and + * if it has been put in exclusive mode by an external module, see netmap_bdg_regops() + * and netmap_bdg_create(). 
*/ - struct nm_hash_ent *ht; // allocated on attach +#define NM_BDG_ACTIVE 1 +#define NM_BDG_EXCLUSIVE 2 + uint8_t bdg_flags; + #ifdef CONFIG_NET_NS struct net *ns; #endif /* CONFIG_NET_NS */ }; const char* netmap_bdg_name(struct netmap_vp_adapter *vp) { struct nm_bridge *b = vp->na_bdg; if (b == NULL) return NULL; return b->bdg_basename; } #ifndef CONFIG_NET_NS /* * XXX in principle nm_bridges could be created dynamically * Right now we have a static array and deletions are protected * by an exclusive lock. */ static struct nm_bridge *nm_bridges; #endif /* !CONFIG_NET_NS */ /* * this is a slightly optimized copy routine which rounds * to multiple of 64 bytes and is often faster than dealing * with other odd sizes. We assume there is enough room * in the source and destination buffers. * * XXX only for multiples of 64 bytes, non overlapped. */ static inline void pkt_copy(void *_src, void *_dst, int l) { uint64_t *src = _src; uint64_t *dst = _dst; if (unlikely(l >= 1024)) { memcpy(dst, src, l); return; } for (; likely(l > 0); l-=64) { *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; } } static int nm_is_id_char(const char c) { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '_'); } /* Validate the name of a VALE bridge port and return the * position of the ":" character. */ static int nm_vale_name_validate(const char *name) { int colon_pos = -1; int i; if (!name || strlen(name) < strlen(NM_BDG_NAME)) { return -1; } - for (i = 0; name[i]; i++) { + for (i = 0; i < NM_BDG_IFNAMSIZ && name[i]; i++) { if (name[i] == ':') { - if (colon_pos != -1) { - return -1; - } colon_pos = i; + break; } else if (!nm_is_id_char(name[i])) { return -1; } } - if (i >= IFNAMSIZ) { + if (strlen(name) - colon_pos > IFNAMSIZ) { + /* interface name too long */ return -1; } return colon_pos; } /* * locate a bridge among the existing ones. * MUST BE CALLED WITH NMG_LOCK() * * a ':' in the name terminates the bridge name. Otherwise, just NM_NAME. * We assume that this is called with a name of at least NM_NAME chars. */ static struct nm_bridge * nm_find_bridge(const char *name, int create) { int i, namelen; struct nm_bridge *b = NULL, *bridges; u_int num_bridges; NMG_LOCK_ASSERT(); netmap_bns_getbridges(&bridges, &num_bridges); namelen = nm_vale_name_validate(name); if (namelen < 0) { D("invalid bridge name %s", name ? 
name : NULL); return NULL; } /* lookup the name, remember empty slot if there is one */ for (i = 0; i < num_bridges; i++) { struct nm_bridge *x = bridges + i; - if (x->bdg_active_ports == 0) { + if ((x->bdg_flags & NM_BDG_ACTIVE) + x->bdg_active_ports == 0) { if (create && b == NULL) b = x; /* record empty slot */ } else if (x->bdg_namelen != namelen) { continue; } else if (strncmp(name, x->bdg_basename, namelen) == 0) { ND("found '%.*s' at %d", namelen, name, i); b = x; break; } } if (i == num_bridges && b) { /* name not found, can create entry */ /* initialize the bridge */ ND("create new bridge %s with ports %d", b->bdg_basename, b->bdg_active_ports); b->ht = nm_os_malloc(sizeof(struct nm_hash_ent) * NM_BDG_HASH); if (b->ht == NULL) { D("failed to allocate hash table"); return NULL; } strncpy(b->bdg_basename, name, namelen); b->bdg_namelen = namelen; b->bdg_active_ports = 0; for (i = 0; i < NM_BDG_MAXPORTS; i++) b->bdg_port_index[i] = i; /* set the default function */ - b->bdg_ops.lookup = netmap_bdg_learning; + b->bdg_ops = &default_bdg_ops; + b->private_data = b->ht; + b->bdg_flags = 0; NM_BNS_GET(b); } return b; } /* * Free the forwarding tables for rings attached to switch ports. */ static void nm_free_bdgfwd(struct netmap_adapter *na) { int nrings, i; - struct netmap_kring *kring; + struct netmap_kring **kring; NMG_LOCK_ASSERT(); nrings = na->num_tx_rings; kring = na->tx_rings; for (i = 0; i < nrings; i++) { - if (kring[i].nkr_ft) { - nm_os_free(kring[i].nkr_ft); - kring[i].nkr_ft = NULL; /* protect from freeing twice */ + if (kring[i]->nkr_ft) { + nm_os_free(kring[i]->nkr_ft); + kring[i]->nkr_ft = NULL; /* protect from freeing twice */ } } } /* * Allocate the forwarding tables for the rings attached to the bridge ports. */ static int nm_alloc_bdgfwd(struct netmap_adapter *na) { int nrings, l, i, num_dstq; - struct netmap_kring *kring; + struct netmap_kring **kring; NMG_LOCK_ASSERT(); /* all port:rings + broadcast */ num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1; l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX; l += sizeof(struct nm_bdg_q) * num_dstq; l += sizeof(uint16_t) * NM_BDG_BATCH_MAX; nrings = netmap_real_rings(na, NR_TX); kring = na->tx_rings; for (i = 0; i < nrings; i++) { struct nm_bdg_fwd *ft; struct nm_bdg_q *dstq; int j; ft = nm_os_malloc(l); if (!ft) { nm_free_bdgfwd(na); return ENOMEM; } dstq = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX); for (j = 0; j < num_dstq; j++) { dstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL; dstq[j].bq_len = 0; } - kring[i].nkr_ft = ft; + kring[i]->nkr_ft = ft; } return 0; } +static int +netmap_bdg_free(struct nm_bridge *b) +{ + if ((b->bdg_flags & NM_BDG_ACTIVE) + b->bdg_active_ports != 0) { + return EBUSY; + } + ND("marking bridge %s as free", b->bdg_basename); + nm_os_free(b->ht); + b->bdg_ops = NULL; + b->bdg_flags = 0; + NM_BNS_PUT(b); + return 0; +} + + /* remove from bridge b the ports in slots hw and sw * (sw can be -1 if not needed) */ static void netmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw) { int s_hw = hw, s_sw = sw; int i, lim =b->bdg_active_ports; - uint8_t tmp[NM_BDG_MAXPORTS]; + uint32_t *tmp = b->tmp_bdg_port_index; /* New algorithm: make a copy of bdg_port_index; lookup NA(ifp)->bdg_port and SWNA(ifp)->bdg_port in the array of bdg_port_index, replacing them with entries from the bottom of the array; decrement bdg_active_ports; acquire BDG_WLOCK() and copy back the array. 
*/ if (netmap_verbose) D("detach %d and %d (lim %d)", hw, sw, lim); /* make a copy of the list of active ports, update it, * and then copy back within BDG_WLOCK(). */ - memcpy(tmp, b->bdg_port_index, sizeof(tmp)); + memcpy(b->tmp_bdg_port_index, b->bdg_port_index, sizeof(b->tmp_bdg_port_index)); for (i = 0; (hw >= 0 || sw >= 0) && i < lim; ) { if (hw >= 0 && tmp[i] == hw) { ND("detach hw %d at %d", hw, i); lim--; /* point to last active port */ tmp[i] = tmp[lim]; /* swap with i */ tmp[lim] = hw; /* now this is inactive */ hw = -1; } else if (sw >= 0 && tmp[i] == sw) { ND("detach sw %d at %d", sw, i); lim--; tmp[i] = tmp[lim]; tmp[lim] = sw; sw = -1; } else { i++; } } if (hw >= 0 || sw >= 0) { D("XXX delete failed hw %d sw %d, should panic...", hw, sw); } BDG_WLOCK(b); - if (b->bdg_ops.dtor) - b->bdg_ops.dtor(b->bdg_ports[s_hw]); + if (b->bdg_ops->dtor) + b->bdg_ops->dtor(b->bdg_ports[s_hw]); b->bdg_ports[s_hw] = NULL; if (s_sw >= 0) { b->bdg_ports[s_sw] = NULL; } - memcpy(b->bdg_port_index, tmp, sizeof(tmp)); + memcpy(b->bdg_port_index, b->tmp_bdg_port_index, sizeof(b->tmp_bdg_port_index)); b->bdg_active_ports = lim; BDG_WUNLOCK(b); ND("now %d active ports", lim); - if (lim == 0) { - ND("marking bridge %s as free", b->bdg_basename); - nm_os_free(b->ht); - bzero(&b->bdg_ops, sizeof(b->bdg_ops)); - NM_BNS_PUT(b); + netmap_bdg_free(b); +} + +static inline void * +nm_bdg_get_auth_token(struct nm_bridge *b) +{ + return b->ht; +} + +/* bridge not in exclusive mode ==> always valid + * bridge in exclusive mode (created through netmap_bdg_create()) ==> check authentication token + */ +static inline int +nm_bdg_valid_auth_token(struct nm_bridge *b, void *auth_token) +{ + return !(b->bdg_flags & NM_BDG_EXCLUSIVE) || b->ht == auth_token; +} + +/* Allows external modules to create bridges in exclusive mode, + * returns an authentication token that the external module will need + * to provide during nm_bdg_ctl_{attach, detach}(), netmap_bdg_regops(), + * and nm_bdg_update_private_data() operations. + * Successfully executed if ret != NULL and *return_status == 0. + */ +void * +netmap_bdg_create(const char *bdg_name, int *return_status) +{ + struct nm_bridge *b = NULL; + void *ret = NULL; + + NMG_LOCK(); + b = nm_find_bridge(bdg_name, 0 /* don't create */); + if (b) { + *return_status = EEXIST; + goto unlock_bdg_create; } + + b = nm_find_bridge(bdg_name, 1 /* create */); + if (!b) { + *return_status = ENOMEM; + goto unlock_bdg_create; + } + + b->bdg_flags |= NM_BDG_ACTIVE | NM_BDG_EXCLUSIVE; + ret = nm_bdg_get_auth_token(b); + *return_status = 0; + +unlock_bdg_create: + NMG_UNLOCK(); + return ret; } +/* Allows external modules to destroy a bridge created through + * netmap_bdg_create(), the bridge must be empty. 
+ */ +int +netmap_bdg_destroy(const char *bdg_name, void *auth_token) +{ + struct nm_bridge *b = NULL; + int ret = 0; + + NMG_LOCK(); + b = nm_find_bridge(bdg_name, 0 /* don't create */); + if (!b) { + ret = ENXIO; + goto unlock_bdg_free; + } + + if (!nm_bdg_valid_auth_token(b, auth_token)) { + ret = EACCES; + goto unlock_bdg_free; + } + if (!(b->bdg_flags & NM_BDG_EXCLUSIVE)) { + ret = EINVAL; + goto unlock_bdg_free; + } + + b->bdg_flags &= ~(NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE); + ret = netmap_bdg_free(b); + if (ret) { + b->bdg_flags |= NM_BDG_EXCLUSIVE | NM_BDG_ACTIVE; + } + +unlock_bdg_free: + NMG_UNLOCK(); + return ret; +} + + + /* nm_bdg_ctl callback for VALE ports */ static int -netmap_vp_bdg_ctl(struct netmap_adapter *na, struct nmreq *nmr, int attach) +netmap_vp_bdg_ctl(struct nmreq_header *hdr, struct netmap_adapter *na) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na; struct nm_bridge *b = vpna->na_bdg; - (void)nmr; // XXX merge ? - if (attach) + if (hdr->nr_reqtype == NETMAP_REQ_VALE_ATTACH) { return 0; /* nothing to do */ + } if (b) { netmap_set_all_rings(na, 0 /* disable */); netmap_bdg_detach_common(b, vpna->bdg_port, -1); vpna->na_bdg = NULL; netmap_set_all_rings(na, 1 /* enable */); } /* I have took reference just for attach */ netmap_adapter_put(na); return 0; } /* nm_dtor callback for ephemeral VALE ports */ static void netmap_vp_dtor(struct netmap_adapter *na) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na; struct nm_bridge *b = vpna->na_bdg; ND("%s has %d references", na->name, na->na_refcount); if (b) { netmap_bdg_detach_common(b, vpna->bdg_port, -1); } if (na->ifp != NULL && !nm_iszombie(na)) { WNA(na->ifp) = NULL; if (vpna->autodelete) { ND("releasing %s", na->ifp->if_xname); NMG_UNLOCK(); nm_os_vi_detach(na->ifp); NMG_LOCK(); } } } +/* creates a persistent VALE port */ +int +nm_vi_create(struct nmreq_header *hdr) +{ + struct nmreq_vale_newif *req = + (struct nmreq_vale_newif *)hdr->nr_body; + int error = 0; + /* Build a nmreq_register out of the nmreq_vale_newif, + * so that we can call netmap_get_bdg_na(). */ + struct nmreq_register regreq; + bzero(®req, sizeof(regreq)); + regreq.nr_tx_slots = req->nr_tx_slots; + regreq.nr_rx_slots = req->nr_rx_slots; + regreq.nr_tx_rings = req->nr_tx_rings; + regreq.nr_rx_rings = req->nr_rx_rings; + regreq.nr_mem_id = req->nr_mem_id; + hdr->nr_reqtype = NETMAP_REQ_REGISTER; + hdr->nr_body = (uint64_t)®req; + error = netmap_vi_create(hdr, 0 /* no autodelete */); + hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF; + hdr->nr_body = (uint64_t)req; + /* Write back to the original struct. */ + req->nr_tx_slots = regreq.nr_tx_slots; + req->nr_rx_slots = regreq.nr_rx_slots; + req->nr_tx_rings = regreq.nr_tx_rings; + req->nr_rx_rings = regreq.nr_rx_rings; + req->nr_mem_id = regreq.nr_mem_id; + return error; +} + /* remove a persistent VALE port from the system */ -static int +int nm_vi_destroy(const char *name) { struct ifnet *ifp; struct netmap_vp_adapter *vpna; int error; ifp = ifunit_ref(name); if (!ifp) return ENXIO; NMG_LOCK(); /* make sure this is actually a VALE port */ if (!NM_NA_VALID(ifp) || NA(ifp)->nm_register != netmap_vp_reg) { error = EINVAL; goto err; } vpna = (struct netmap_vp_adapter *)NA(ifp); /* we can only destroy ports that were created via NETMAP_BDG_NEWIF */ if (vpna->autodelete) { error = EINVAL; goto err; } /* also make sure that nobody is using the inferface */ if (NETMAP_OWNED_BY_ANY(&vpna->up) || vpna->up.na_refcount > 1 /* any ref besides the one in nm_vi_create()? 
*/) { error = EBUSY; goto err; } NMG_UNLOCK(); D("destroying a persistent vale interface %s", ifp->if_xname); /* Linux requires all the references are released * before unregister */ netmap_detach(ifp); if_rele(ifp); nm_os_vi_detach(ifp); return 0; err: NMG_UNLOCK(); if_rele(ifp); return error; } static int -nm_update_info(struct nmreq *nmr, struct netmap_adapter *na) +nm_update_info(struct nmreq_register *req, struct netmap_adapter *na) { - uint64_t memsize; - int ret; - nmr->nr_rx_rings = na->num_rx_rings; - nmr->nr_tx_rings = na->num_tx_rings; - nmr->nr_rx_slots = na->num_rx_desc; - nmr->nr_tx_slots = na->num_tx_desc; - ret = netmap_mem_get_info(na->nm_mem, &memsize, NULL, &nmr->nr_arg2); - nmr->nr_memsize = (uint32_t)memsize; - return ret; + req->nr_rx_rings = na->num_rx_rings; + req->nr_tx_rings = na->num_tx_rings; + req->nr_rx_slots = na->num_rx_desc; + req->nr_tx_slots = na->num_tx_desc; + return netmap_mem_get_info(na->nm_mem, &req->nr_memsize, NULL, + &req->nr_mem_id); } /* * Create a virtual interface registered to the system. * The interface will be attached to a bridge later. */ int -netmap_vi_create(struct nmreq *nmr, int autodelete) +netmap_vi_create(struct nmreq_header *hdr, int autodelete) { + struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body; struct ifnet *ifp; struct netmap_vp_adapter *vpna; struct netmap_mem_d *nmd = NULL; int error; + if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { + return EINVAL; + } + /* don't include VALE prefix */ - if (!strncmp(nmr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME))) + if (!strncmp(hdr->nr_name, NM_BDG_NAME, strlen(NM_BDG_NAME))) return EINVAL; - ifp = ifunit_ref(nmr->nr_name); + if (strlen(hdr->nr_name) >= IFNAMSIZ) { + return EINVAL; + } + ifp = ifunit_ref(hdr->nr_name); if (ifp) { /* already exist, cannot create new one */ error = EEXIST; NMG_LOCK(); if (NM_NA_VALID(ifp)) { - int update_err = nm_update_info(nmr, NA(ifp)); + int update_err = nm_update_info(req, NA(ifp)); if (update_err) error = update_err; } NMG_UNLOCK(); if_rele(ifp); return error; } - error = nm_os_vi_persist(nmr->nr_name, &ifp); + error = nm_os_vi_persist(hdr->nr_name, &ifp); if (error) return error; NMG_LOCK(); - if (nmr->nr_arg2) { - nmd = netmap_mem_find(nmr->nr_arg2); + if (req->nr_mem_id) { + nmd = netmap_mem_find(req->nr_mem_id); if (nmd == NULL) { error = EINVAL; goto err_1; } } /* netmap_vp_create creates a struct netmap_vp_adapter */ - error = netmap_vp_create(nmr, ifp, nmd, &vpna); + error = netmap_vp_create(hdr, ifp, nmd, &vpna); if (error) { D("error %d", error); goto err_1; } /* persist-specific routines */ vpna->up.nm_bdg_ctl = netmap_vp_bdg_ctl; if (!autodelete) { netmap_adapter_get(&vpna->up); } else { vpna->autodelete = 1; } NM_ATTACH_NA(ifp, &vpna->up); /* return the updated info */ - error = nm_update_info(nmr, &vpna->up); + error = nm_update_info(req, &vpna->up); if (error) { goto err_2; } - D("returning nr_arg2 %d", nmr->nr_arg2); + ND("returning nr_mem_id %d", req->nr_mem_id); if (nmd) netmap_mem_put(nmd); NMG_UNLOCK(); - D("created %s", ifp->if_xname); + ND("created %s", ifp->if_xname); return 0; err_2: netmap_detach(ifp); err_1: if (nmd) netmap_mem_put(nmd); NMG_UNLOCK(); nm_os_vi_detach(ifp); return error; } /* Try to get a reference to a netmap adapter attached to a VALE switch. * If the adapter is found (or is created), this function returns 0, a * non NULL pointer is returned into *na, and the caller holds a * reference to the adapter. 
* If an adapter is not found, then no reference is grabbed and the * function returns an error code, or 0 if there is just a VALE prefix * mismatch. Therefore the caller holds a reference when * (*na != NULL && return == 0). */ int -netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, +netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na, struct netmap_mem_d *nmd, int create) { - char *nr_name = nmr->nr_name; + char *nr_name = hdr->nr_name; const char *ifname; struct ifnet *ifp = NULL; int error = 0; struct netmap_vp_adapter *vpna, *hostna = NULL; struct nm_bridge *b; - int i, j, cand = -1, cand2 = -1; + uint32_t i, j; + uint32_t cand = NM_BDG_NOPORT, cand2 = NM_BDG_NOPORT; int needed; *na = NULL; /* default return value */ /* first try to see if this is a bridge port. */ NMG_LOCK_ASSERT(); if (strncmp(nr_name, NM_BDG_NAME, sizeof(NM_BDG_NAME) - 1)) { return 0; /* no error, but no VALE prefix */ } b = nm_find_bridge(nr_name, create); if (b == NULL) { D("no bridges available for '%s'", nr_name); return (create ? ENOMEM : ENXIO); } if (strlen(nr_name) < b->bdg_namelen) /* impossible */ panic("x"); /* Now we are sure that name starts with the bridge's name, * lookup the port in the bridge. We need to scan the entire * list. It is not important to hold a WLOCK on the bridge * during the search because NMG_LOCK already guarantees * that there are no other possible writers. */ /* lookup in the local list of ports */ for (j = 0; j < b->bdg_active_ports; j++) { i = b->bdg_port_index[j]; vpna = b->bdg_ports[i]; ND("checking %s", vpna->up.name); if (!strcmp(vpna->up.name, nr_name)) { netmap_adapter_get(&vpna->up); ND("found existing if %s refs %d", nr_name) *na = &vpna->up; return 0; } } /* not found, should we create it? */ if (!create) return ENXIO; /* yes we should, see if we have space to attach entries */ needed = 2; /* in some cases we only need 1 */ if (b->bdg_active_ports + needed >= NM_BDG_MAXPORTS) { D("bridge full %d, cannot create new port", b->bdg_active_ports); return ENOMEM; } /* record the next two ports available, but do not allocate yet */ cand = b->bdg_port_index[b->bdg_active_ports]; cand2 = b->bdg_port_index[b->bdg_active_ports + 1]; ND("+++ bridge %s port %s used %d avail %d %d", b->bdg_basename, ifname, b->bdg_active_ports, cand, cand2); /* * try see if there is a matching NIC with this name * (after the bridge's name) */ ifname = nr_name + b->bdg_namelen + 1; ifp = ifunit_ref(ifname); if (!ifp) { - /* Create an ephemeral virtual port - * This block contains all the ephemeral-specific logics + /* Create an ephemeral virtual port. + * This block contains all the ephemeral-specific logic. 
*/ - if (nmr->nr_cmd) { - /* nr_cmd must be 0 for a virtual port */ + + if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { error = EINVAL; goto out; } /* bdg_netmap_attach creates a struct netmap_adapter */ - error = netmap_vp_create(nmr, NULL, nmd, &vpna); + error = netmap_vp_create(hdr, NULL, nmd, &vpna); if (error) { D("error %d", error); goto out; } /* shortcut - we can skip get_hw_na(), * ownership check and nm_bdg_attach() */ + } else { struct netmap_adapter *hw; /* the vale:nic syntax is only valid for some commands */ - switch (nmr->nr_cmd) { - case NETMAP_BDG_ATTACH: - case NETMAP_BDG_DETACH: - case NETMAP_BDG_POLLING_ON: - case NETMAP_BDG_POLLING_OFF: + switch (hdr->nr_reqtype) { + case NETMAP_REQ_VALE_ATTACH: + case NETMAP_REQ_VALE_DETACH: + case NETMAP_REQ_VALE_POLLING_ENABLE: + case NETMAP_REQ_VALE_POLLING_DISABLE: break; /* ok */ default: error = EINVAL; goto out; } error = netmap_get_hw_na(ifp, nmd, &hw); if (error || hw == NULL) goto out; /* host adapter might not be created */ error = hw->nm_bdg_attach(nr_name, hw); if (error) goto out; vpna = hw->na_vp; hostna = hw->na_hostvp; - if (nmr->nr_arg1 != NETMAP_BDG_HOST) - hostna = NULL; + if (hdr->nr_reqtype == NETMAP_REQ_VALE_ATTACH) { + /* Check if we need to skip the host rings. */ + struct nmreq_vale_attach *areq = + (struct nmreq_vale_attach *)hdr->nr_body; + if (areq->reg.nr_mode != NR_REG_NIC_SW) { + hostna = NULL; + } + } } BDG_WLOCK(b); vpna->bdg_port = cand; ND("NIC %p to bridge port %d", vpna, cand); /* bind the port to the bridge (virtual ports are not active) */ b->bdg_ports[cand] = vpna; vpna->na_bdg = b; b->bdg_active_ports++; if (hostna != NULL) { /* also bind the host stack to the bridge */ b->bdg_ports[cand2] = hostna; hostna->bdg_port = cand2; hostna->na_bdg = b; b->bdg_active_ports++; ND("host %p to bridge port %d", hostna, cand2); } ND("if %s refs %d", ifname, vpna->up.na_refcount); BDG_WUNLOCK(b); *na = &vpna->up; netmap_adapter_get(*na); out: if (ifp) if_rele(ifp); return error; } - -/* Process NETMAP_BDG_ATTACH */ -static int -nm_bdg_ctl_attach(struct nmreq *nmr) +/* Process NETMAP_REQ_VALE_ATTACH. + */ +int +nm_bdg_ctl_attach(struct nmreq_header *hdr, void *auth_token) { + struct nmreq_vale_attach *req = + (struct nmreq_vale_attach *)hdr->nr_body; + struct netmap_vp_adapter * vpna; struct netmap_adapter *na; struct netmap_mem_d *nmd = NULL; + struct nm_bridge *b = NULL; int error; NMG_LOCK(); + /* permission check for modified bridges */ + b = nm_find_bridge(hdr->nr_name, 0 /* don't create */); + if (b && !nm_bdg_valid_auth_token(b, auth_token)) { + error = EACCES; + goto unlock_exit; + } - if (nmr->nr_arg2) { - nmd = netmap_mem_find(nmr->nr_arg2); + if (req->reg.nr_mem_id) { + nmd = netmap_mem_find(req->reg.nr_mem_id); if (nmd == NULL) { error = EINVAL; goto unlock_exit; } } - /* XXX check existing one */ - error = netmap_get_bdg_na(nmr, &na, nmd, 0); + /* check for existing one */ + error = netmap_get_bdg_na(hdr, &na, nmd, 0); if (!error) { error = EBUSY; goto unref_exit; } - error = netmap_get_bdg_na(nmr, &na, nmd, 1 /* create if not exists */); - if (error) /* no device */ + error = netmap_get_bdg_na(hdr, &na, + nmd, 1 /* create if not exists */); + if (error) { /* no device */ goto unlock_exit; + } if (na == NULL) { /* VALE prefix missing */ error = EINVAL; goto unlock_exit; } if (NETMAP_OWNED_BY_ANY(na)) { error = EBUSY; goto unref_exit; } if (na->nm_bdg_ctl) { /* nop for VALE ports. 
The bwrap needs to put the hwna * in netmap mode (see netmap_bwrap_bdg_ctl) */ - error = na->nm_bdg_ctl(na, nmr, 1); + error = na->nm_bdg_ctl(hdr, na); if (error) goto unref_exit; ND("registered %s to netmap-mode", na->name); } + vpna = (struct netmap_vp_adapter *)na; + req->port_index = vpna->bdg_port; NMG_UNLOCK(); return 0; unref_exit: netmap_adapter_put(na); unlock_exit: NMG_UNLOCK(); return error; } static inline int nm_is_bwrap(struct netmap_adapter *na) { return na->nm_register == netmap_bwrap_reg; } -/* process NETMAP_BDG_DETACH */ -static int -nm_bdg_ctl_detach(struct nmreq *nmr) +/* Process NETMAP_REQ_VALE_DETACH. + */ +int +nm_bdg_ctl_detach(struct nmreq_header *hdr, void *auth_token) { + struct nmreq_vale_detach *nmreq_det = (void *)hdr->nr_body; + struct netmap_vp_adapter *vpna; struct netmap_adapter *na; + struct nm_bridge *b = NULL; int error; NMG_LOCK(); - error = netmap_get_bdg_na(nmr, &na, NULL, 0 /* don't create */); + /* permission check for modified bridges */ + b = nm_find_bridge(hdr->nr_name, 0 /* don't create */); + if (b && !nm_bdg_valid_auth_token(b, auth_token)) { + error = EACCES; + goto unlock_exit; + } + + error = netmap_get_bdg_na(hdr, &na, NULL, 0 /* don't create */); if (error) { /* no device, or another bridge or user owns the device */ goto unlock_exit; } if (na == NULL) { /* VALE prefix missing */ error = EINVAL; goto unlock_exit; } else if (nm_is_bwrap(na) && ((struct netmap_bwrap_adapter *)na)->na_polling_state) { /* Don't detach a NIC with polling */ error = EBUSY; - netmap_adapter_put(na); - goto unlock_exit; + goto unref_exit; } + + vpna = (struct netmap_vp_adapter *)na; + if (na->na_vp != vpna) { + /* trying to detach first attach of VALE persistent port attached + * to 2 bridges + */ + error = EBUSY; + goto unref_exit; + } + nmreq_det->port_index = vpna->bdg_port; + if (na->nm_bdg_ctl) { /* remove the port from bridge. The bwrap * also needs to put the hwna in normal mode */ - error = na->nm_bdg_ctl(na, nmr, 0); + error = na->nm_bdg_ctl(hdr, na); } +unref_exit: netmap_adapter_put(na); unlock_exit: NMG_UNLOCK(); return error; } struct nm_bdg_polling_state; struct nm_bdg_kthread { struct nm_kctx *nmk; u_int qfirst; u_int qlast; struct nm_bdg_polling_state *bps; }; struct nm_bdg_polling_state { bool configured; bool stopped; struct netmap_bwrap_adapter *bna; - u_int reg; + uint32_t mode; u_int qfirst; u_int qlast; u_int cpu_from; u_int ncpus; struct nm_bdg_kthread *kthreads; }; static void netmap_bwrap_polling(void *data, int is_kthread) { struct nm_bdg_kthread *nbk = data; struct netmap_bwrap_adapter *bna; u_int qfirst, qlast, i; - struct netmap_kring *kring0, *kring; + struct netmap_kring **kring0, *kring; if (!nbk) return; qfirst = nbk->qfirst; qlast = nbk->qlast; bna = nbk->bps->bna; kring0 = NMR(bna->hwna, NR_RX); for (i = qfirst; i < qlast; i++) { - kring = kring0 + i; + kring = kring0[i]; kring->nm_notify(kring, 0); } } static int nm_bdg_create_kthreads(struct nm_bdg_polling_state *bps) { struct nm_kctx_cfg kcfg; int i, j; bps->kthreads = nm_os_malloc(sizeof(struct nm_bdg_kthread) * bps->ncpus); if (bps->kthreads == NULL) return ENOMEM; bzero(&kcfg, sizeof(kcfg)); kcfg.worker_fn = netmap_bwrap_polling; kcfg.use_kthread = 1; for (i = 0; i < bps->ncpus; i++) { struct nm_bdg_kthread *t = bps->kthreads + i; - int all = (bps->ncpus == 1 && bps->reg == NR_REG_ALL_NIC); + int all = (bps->ncpus == 1 && + bps->mode == NETMAP_POLLING_MODE_SINGLE_CPU); int affinity = bps->cpu_from + i; t->bps = bps; t->qfirst = all ? 
bps->qfirst /* must be 0 */: affinity; t->qlast = all ? bps->qlast : t->qfirst + 1; D("kthread %d a:%u qf:%u ql:%u", i, affinity, t->qfirst, t->qlast); kcfg.type = i; kcfg.worker_private = t; - t->nmk = nm_os_kctx_create(&kcfg, 0, NULL); + t->nmk = nm_os_kctx_create(&kcfg, NULL); if (t->nmk == NULL) { goto cleanup; } nm_os_kctx_worker_setaff(t->nmk, affinity); } return 0; cleanup: for (j = 0; j < i; j++) { struct nm_bdg_kthread *t = bps->kthreads + i; nm_os_kctx_destroy(t->nmk); } nm_os_free(bps->kthreads); return EFAULT; } /* A variant of ptnetmap_start_kthreads() */ static int nm_bdg_polling_start_kthreads(struct nm_bdg_polling_state *bps) { int error, i, j; if (!bps) { D("polling is not configured"); return EFAULT; } bps->stopped = false; for (i = 0; i < bps->ncpus; i++) { struct nm_bdg_kthread *t = bps->kthreads + i; error = nm_os_kctx_worker_start(t->nmk); if (error) { D("error in nm_kthread_start()"); goto cleanup; } } return 0; cleanup: for (j = 0; j < i; j++) { struct nm_bdg_kthread *t = bps->kthreads + i; nm_os_kctx_worker_stop(t->nmk); } bps->stopped = true; return error; } static void nm_bdg_polling_stop_delete_kthreads(struct nm_bdg_polling_state *bps) { int i; if (!bps) return; for (i = 0; i < bps->ncpus; i++) { struct nm_bdg_kthread *t = bps->kthreads + i; nm_os_kctx_worker_stop(t->nmk); nm_os_kctx_destroy(t->nmk); } bps->stopped = true; } static int -get_polling_cfg(struct nmreq *nmr, struct netmap_adapter *na, - struct nm_bdg_polling_state *bps) +get_polling_cfg(struct nmreq_vale_polling *req, struct netmap_adapter *na, + struct nm_bdg_polling_state *bps) { - int req_cpus, avail_cpus, core_from; - u_int reg, i, qfirst, qlast; + unsigned int avail_cpus, core_from; + unsigned int qfirst, qlast; + uint32_t i = req->nr_first_cpu_id; + uint32_t req_cpus = req->nr_num_polling_cpus; avail_cpus = nm_os_ncpus(); - req_cpus = nmr->nr_arg1; if (req_cpus == 0) { D("req_cpus must be > 0"); return EINVAL; } else if (req_cpus >= avail_cpus) { - D("for safety, we need at least one core left in the system"); + D("Cannot use all the CPUs in the system"); return EINVAL; } - reg = nmr->nr_flags & NR_REG_MASK; - i = nmr->nr_ringid & NETMAP_RING_MASK; - /* - * ONE_NIC: dedicate one core to one ring. If multiple cores - * are specified, consecutive rings are also polled. - * For example, if ringid=2 and 2 cores are given, - * ring 2 and 3 are polled by core 2 and 3, respectively. - * ALL_NIC: poll all the rings using a core specified by ringid. - * the number of cores must be 1. - */ - if (reg == NR_REG_ONE_NIC) { + + if (req->nr_mode == NETMAP_POLLING_MODE_MULTI_CPU) { + /* Use a separate core for each ring. If nr_num_polling_cpus>1 + * more consecutive rings are polled. + * For example, if nr_first_cpu_id=2 and nr_num_polling_cpus=2, + * ring 2 and 3 are polled by core 2 and 3, respectively. */ if (i + req_cpus > nma_get_nrings(na, NR_RX)) { - D("only %d rings exist (ring %u-%u is given)", - nma_get_nrings(na, NR_RX), i, i+req_cpus); + D("Rings %u-%u not in range (have %d rings)", + i, i + req_cpus, nma_get_nrings(na, NR_RX)); return EINVAL; } qfirst = i; qlast = qfirst + req_cpus; core_from = qfirst; - } else if (reg == NR_REG_ALL_NIC) { + + } else if (req->nr_mode == NETMAP_POLLING_MODE_SINGLE_CPU) { + /* Poll all the rings using a core specified by nr_first_cpu_id. + * the number of cores must be 1. 
*/ if (req_cpus != 1) { - D("ncpus must be 1 not %d for REG_ALL_NIC", req_cpus); + D("ncpus must be 1 for NETMAP_POLLING_MODE_SINGLE_CPU " + "(was %d)", req_cpus); return EINVAL; } qfirst = 0; qlast = nma_get_nrings(na, NR_RX); core_from = i; } else { - D("reg must be ALL_NIC or ONE_NIC"); + D("Invalid polling mode"); return EINVAL; } - bps->reg = reg; + bps->mode = req->nr_mode; bps->qfirst = qfirst; bps->qlast = qlast; bps->cpu_from = core_from; bps->ncpus = req_cpus; D("%s qfirst %u qlast %u cpu_from %u ncpus %u", - reg == NR_REG_ALL_NIC ? "REG_ALL_NIC" : "REG_ONE_NIC", + req->nr_mode == NETMAP_POLLING_MODE_MULTI_CPU ? + "MULTI" : "SINGLE", qfirst, qlast, core_from, req_cpus); return 0; } static int -nm_bdg_ctl_polling_start(struct nmreq *nmr, struct netmap_adapter *na) +nm_bdg_ctl_polling_start(struct nmreq_vale_polling *req, struct netmap_adapter *na) { struct nm_bdg_polling_state *bps; struct netmap_bwrap_adapter *bna; int error; bna = (struct netmap_bwrap_adapter *)na; if (bna->na_polling_state) { D("ERROR adapter already in polling mode"); return EFAULT; } bps = nm_os_malloc(sizeof(*bps)); if (!bps) return ENOMEM; bps->configured = false; bps->stopped = true; - if (get_polling_cfg(nmr, na, bps)) { + if (get_polling_cfg(req, na, bps)) { nm_os_free(bps); return EINVAL; } if (nm_bdg_create_kthreads(bps)) { nm_os_free(bps); return EFAULT; } bps->configured = true; bna->na_polling_state = bps; bps->bna = bna; /* disable interrupts if possible */ nma_intr_enable(bna->hwna, 0); /* start kthread now */ error = nm_bdg_polling_start_kthreads(bps); if (error) { D("ERROR nm_bdg_polling_start_kthread()"); nm_os_free(bps->kthreads); nm_os_free(bps); bna->na_polling_state = NULL; nma_intr_enable(bna->hwna, 1); } return error; } static int -nm_bdg_ctl_polling_stop(struct nmreq *nmr, struct netmap_adapter *na) +nm_bdg_ctl_polling_stop(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct nm_bdg_polling_state *bps; if (!bna->na_polling_state) { D("ERROR adapter is not in polling mode"); return EFAULT; } bps = bna->na_polling_state; nm_bdg_polling_stop_delete_kthreads(bna->na_polling_state); bps->configured = false; nm_os_free(bps); bna->na_polling_state = NULL; /* reenable interrupts */ nma_intr_enable(bna->hwna, 1); return 0; } -/* Called by either user's context (netmap_ioctl()) - * or external kernel modules (e.g., Openvswitch). - * Operation is indicated in nmr->nr_cmd. - * NETMAP_BDG_OPS that sets configure/lookup/dtor functions to the bridge - * requires bdg_ops argument; the other commands ignore this argument. - * - * Called without NMG_LOCK. - */ int -netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops) +nm_bdg_polling(struct nmreq_header *hdr) { + struct nmreq_vale_polling *req = + (struct nmreq_vale_polling *)hdr->nr_body; + struct netmap_adapter *na = NULL; + int error = 0; + + NMG_LOCK(); + error = netmap_get_bdg_na(hdr, &na, NULL, /*create=*/0); + if (na && !error) { + if (!nm_is_bwrap(na)) { + error = EOPNOTSUPP; + } else if (hdr->nr_reqtype == NETMAP_BDG_POLLING_ON) { + error = nm_bdg_ctl_polling_start(req, na); + if (!error) + netmap_adapter_get(na); + } else { + error = nm_bdg_ctl_polling_stop(na); + if (!error) + netmap_adapter_put(na); + } + netmap_adapter_put(na); + } else if (!na && !error) { + /* Not VALE port. */ + error = EINVAL; + } + NMG_UNLOCK(); + + return error; +} + +/* Process NETMAP_REQ_VALE_LIST. 
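/*
 * Editor's sketch (not part of the commit): the ring/CPU mapping computed by
 * get_polling_cfg() above, reduced to a standalone program so the two modes
 * can be compared. With NETMAP_POLLING_MODE_MULTI_CPU, nr_first_cpu_id=2 and
 * nr_num_polling_cpus=2, rings 2 and 3 are polled by cores 2 and 3; with
 * NETMAP_POLLING_MODE_SINGLE_CPU one core polls every ring. The names below
 * are illustrative only.
 */
#include <stdio.h>

#define MODE_MULTI_CPU	1
#define MODE_SINGLE_CPU	2

struct poll_range { unsigned qfirst, qlast, cpu_from; };

static int
poll_cfg_sketch(int mode, unsigned first_cpu, unsigned ncpus,
    unsigned nrings, struct poll_range *pr)
{
	if (mode == MODE_MULTI_CPU) {
		if (first_cpu + ncpus > nrings)
			return (-1);		/* requested rings out of range */
		pr->qfirst = first_cpu;		/* one ring per core */
		pr->qlast = first_cpu + ncpus;
		pr->cpu_from = first_cpu;
	} else if (mode == MODE_SINGLE_CPU) {
		if (ncpus != 1)
			return (-1);		/* exactly one core allowed */
		pr->qfirst = 0;			/* all rings on that core */
		pr->qlast = nrings;
		pr->cpu_from = first_cpu;
	} else {
		return (-1);
	}
	return (0);
}

int
main(void)
{
	struct poll_range pr;

	if (poll_cfg_sketch(MODE_MULTI_CPU, 2, 2, 4, &pr) == 0)
		printf("multi:  rings %u-%u, cores from %u\n",
		    pr.qfirst, pr.qlast - 1, pr.cpu_from);
	if (poll_cfg_sketch(MODE_SINGLE_CPU, 1, 1, 4, &pr) == 0)
		printf("single: rings %u-%u, core %u\n",
		    pr.qfirst, pr.qlast - 1, pr.cpu_from);
	return (0);
}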
*/ +int +netmap_bdg_list(struct nmreq_header *hdr) +{ + struct nmreq_vale_list *req = + (struct nmreq_vale_list *)hdr->nr_body; + int namelen = strlen(hdr->nr_name); struct nm_bridge *b, *bridges; - struct netmap_adapter *na; struct netmap_vp_adapter *vpna; - char *name = nmr->nr_name; - int cmd = nmr->nr_cmd, namelen = strlen(name); int error = 0, i, j; u_int num_bridges; netmap_bns_getbridges(&bridges, &num_bridges); - switch (cmd) { - case NETMAP_BDG_NEWIF: - error = netmap_vi_create(nmr, 0 /* no autodelete */); - break; - - case NETMAP_BDG_DELIF: - error = nm_vi_destroy(nmr->nr_name); - break; - - case NETMAP_BDG_ATTACH: - error = nm_bdg_ctl_attach(nmr); - break; - - case NETMAP_BDG_DETACH: - error = nm_bdg_ctl_detach(nmr); - break; - - case NETMAP_BDG_LIST: - /* this is used to enumerate bridges and ports */ - if (namelen) { /* look up indexes of bridge and port */ - if (strncmp(name, NM_BDG_NAME, strlen(NM_BDG_NAME))) { - error = EINVAL; - break; - } - NMG_LOCK(); - b = nm_find_bridge(name, 0 /* don't create */); - if (!b) { - error = ENOENT; - NMG_UNLOCK(); - break; - } - - error = 0; - nmr->nr_arg1 = b - bridges; /* bridge index */ - nmr->nr_arg2 = NM_BDG_NOPORT; - for (j = 0; j < b->bdg_active_ports; j++) { - i = b->bdg_port_index[j]; - vpna = b->bdg_ports[i]; - if (vpna == NULL) { - D("---AAAAAAAAARGH-------"); - continue; - } - /* the former and the latter identify a - * virtual port and a NIC, respectively - */ - if (!strcmp(vpna->up.name, name)) { - nmr->nr_arg2 = i; /* port index */ - break; - } - } - NMG_UNLOCK(); - } else { - /* return the first non-empty entry starting from - * bridge nr_arg1 and port nr_arg2. - * - * Users can detect the end of the same bridge by - * seeing the new and old value of nr_arg1, and can - * detect the end of all the bridge by error != 0 - */ - i = nmr->nr_arg1; - j = nmr->nr_arg2; - - NMG_LOCK(); - for (error = ENOENT; i < NM_BRIDGES; i++) { - b = bridges + i; - for ( ; j < NM_BDG_MAXPORTS; j++) { - if (b->bdg_ports[j] == NULL) - continue; - vpna = b->bdg_ports[j]; - strncpy(name, vpna->up.name, (size_t)IFNAMSIZ); - error = 0; - goto out; - } - j = 0; /* following bridges scan from 0 */ - } - out: - nmr->nr_arg1 = i; - nmr->nr_arg2 = j; - NMG_UNLOCK(); + /* this is used to enumerate bridges and ports */ + if (namelen) { /* look up indexes of bridge and port */ + if (strncmp(hdr->nr_name, NM_BDG_NAME, + strlen(NM_BDG_NAME))) { + return EINVAL; } - break; - - case NETMAP_BDG_REGOPS: /* XXX this should not be available from userspace */ - /* register callbacks to the given bridge. - * nmr->nr_name may be just bridge's name (including ':' - * if it is not just NM_NAME). - */ - if (!bdg_ops) { - error = EINVAL; - break; - } NMG_LOCK(); - b = nm_find_bridge(name, 0 /* don't create */); + b = nm_find_bridge(hdr->nr_name, 0 /* don't create */); if (!b) { - error = EINVAL; - } else { - b->bdg_ops = *bdg_ops; + NMG_UNLOCK(); + return ENOENT; } - NMG_UNLOCK(); - break; - case NETMAP_BDG_VNET_HDR: - /* Valid lengths for the virtio-net header are 0 (no header), - 10 and 12. 
*/ - if (nmr->nr_arg1 != 0 && - nmr->nr_arg1 != sizeof(struct nm_vnet_hdr) && - nmr->nr_arg1 != 12) { - error = EINVAL; - break; - } - NMG_LOCK(); - error = netmap_get_bdg_na(nmr, &na, NULL, 0); - if (na && !error) { - vpna = (struct netmap_vp_adapter *)na; - na->virt_hdr_len = nmr->nr_arg1; - if (na->virt_hdr_len) { - vpna->mfs = NETMAP_BUF_SIZE(na); + req->nr_bridge_idx = b - bridges; /* bridge index */ + req->nr_port_idx = NM_BDG_NOPORT; + for (j = 0; j < b->bdg_active_ports; j++) { + i = b->bdg_port_index[j]; + vpna = b->bdg_ports[i]; + if (vpna == NULL) { + D("This should not happen"); + continue; } - D("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na); - netmap_adapter_put(na); - } else if (!na) { - error = ENXIO; + /* the former and the latter identify a + * virtual port and a NIC, respectively + */ + if (!strcmp(vpna->up.name, hdr->nr_name)) { + req->nr_port_idx = i; /* port index */ + break; + } } NMG_UNLOCK(); - break; + } else { + /* return the first non-empty entry starting from + * bridge nr_arg1 and port nr_arg2. + * + * Users can detect the end of the same bridge by + * seeing the new and old value of nr_arg1, and can + * detect the end of all the bridge by error != 0 + */ + i = req->nr_bridge_idx; + j = req->nr_port_idx; - case NETMAP_BDG_POLLING_ON: - case NETMAP_BDG_POLLING_OFF: NMG_LOCK(); - error = netmap_get_bdg_na(nmr, &na, NULL, 0); - if (na && !error) { - if (!nm_is_bwrap(na)) { - error = EOPNOTSUPP; - } else if (cmd == NETMAP_BDG_POLLING_ON) { - error = nm_bdg_ctl_polling_start(nmr, na); - if (!error) - netmap_adapter_get(na); - } else { - error = nm_bdg_ctl_polling_stop(nmr, na); - if (!error) - netmap_adapter_put(na); + for (error = ENOENT; i < NM_BRIDGES; i++) { + b = bridges + i; + for ( ; j < NM_BDG_MAXPORTS; j++) { + if (b->bdg_ports[j] == NULL) + continue; + vpna = b->bdg_ports[j]; + /* write back the VALE switch name */ + strncpy(hdr->nr_name, vpna->up.name, + (size_t)IFNAMSIZ); + error = 0; + goto out; } - netmap_adapter_put(na); + j = 0; /* following bridges scan from 0 */ } + out: + req->nr_bridge_idx = i; + req->nr_port_idx = j; NMG_UNLOCK(); - break; + } - default: - D("invalid cmd (nmr->nr_cmd) (0x%x)", cmd); + return error; +} + +/* Called by external kernel modules (e.g., Openvswitch). + * to set configure/lookup/dtor functions of a VALE instance. + * Register callbacks to the given bridge. 'name' may be just + * bridge's name (including ':' if it is not just NM_BDG_NAME). + * + * Called without NMG_LOCK. + */ + +int +netmap_bdg_regops(const char *name, struct netmap_bdg_ops *bdg_ops, void *private_data, void *auth_token) +{ + struct nm_bridge *b; + int error = 0; + + NMG_LOCK(); + b = nm_find_bridge(name, 0 /* don't create */); + if (!b) { + error = ENXIO; + goto unlock_regops; + } + if (!nm_bdg_valid_auth_token(b, auth_token)) { + error = EACCES; + goto unlock_regops; + } + + BDG_WLOCK(b); + if (!bdg_ops) { + /* resetting the bridge */ + bzero(b->ht, sizeof(struct nm_hash_ent) * NM_BDG_HASH); + b->bdg_ops = &default_bdg_ops; + b->private_data = b->ht; + } else { + /* modifying the bridge */ + b->private_data = private_data; + b->bdg_ops = bdg_ops; + } + BDG_WUNLOCK(b); + +unlock_regops: + NMG_UNLOCK(); + return error; +} + +/* Called by external kernel modules (e.g., Openvswitch). + * to modify the private data previously given to regops(). + * 'name' may be just bridge's name (including ':' if it + * is not just NM_BDG_NAME). + * Called without NMG_LOCK. 
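/*
 * Editor's sketch (not part of the commit): the shape of a lookup callback an
 * external module (e.g. Open vSwitch) could hand to netmap_bdg_regops() above,
 * matching the four-argument signature used by b->bdg_ops->lookup() in
 * nm_bdg_flush() further below. This toy version steers everything arriving on
 * bridge port 0 to port 1 and floods the rest. Kernel context is assumed
 * (dev/netmap/netmap_kern.h); the function name and the forwarding policy are
 * illustrative only.
 */
static uint32_t
example_lookup(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
    struct netmap_vp_adapter *na, void *private_data)
{
	(void)ft;
	(void)private_data;		/* state previously passed to regops() */

	*dst_ring = 0;			/* always deliver on ring 0 */
	if (na->bdg_port == 0)
		return (1);		/* traffic from port 0 goes to port 1 */
	return (NM_BDG_BROADCAST);	/* everything else: flood */
}
/*
 * Such a callback would be installed roughly as
 *   struct netmap_bdg_ops ops = { .lookup = example_lookup, ... };
 *   netmap_bdg_regops("valeX:", &ops, my_state, auth_token);
 * with the other members and the auth token handling as described above
 * (editor's paraphrase, not verified against the header).
 */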
+ */ +int +nm_bdg_update_private_data(const char *name, bdg_update_private_data_fn_t callback, + void *callback_data, void *auth_token) +{ + void *private_data = NULL; + struct nm_bridge *b; + int error = 0; + + NMG_LOCK(); + b = nm_find_bridge(name, 0 /* don't create */); + if (!b) { error = EINVAL; - break; + goto unlock_update_priv; } + if (!nm_bdg_valid_auth_token(b, auth_token)) { + error = EACCES; + goto unlock_update_priv; + } + BDG_WLOCK(b); + private_data = callback(b->private_data, callback_data, &error); + b->private_data = private_data; + BDG_WUNLOCK(b); + +unlock_update_priv: + NMG_UNLOCK(); return error; } int -netmap_bdg_config(struct nmreq *nmr) +netmap_bdg_config(struct nm_ifreq *nr) { struct nm_bridge *b; int error = EINVAL; NMG_LOCK(); - b = nm_find_bridge(nmr->nr_name, 0); + b = nm_find_bridge(nr->nifr_name, 0); if (!b) { NMG_UNLOCK(); return error; } NMG_UNLOCK(); /* Don't call config() with NMG_LOCK() held */ BDG_RLOCK(b); - if (b->bdg_ops.config != NULL) - error = b->bdg_ops.config((struct nm_ifreq *)nmr); + if (b->bdg_ops->config != NULL) + error = b->bdg_ops->config(nr); BDG_RUNLOCK(b); return error; } /* nm_krings_create callback for VALE ports. * Calls the standard netmap_krings_create, then adds leases on rx * rings and bdgfwd on tx rings. */ static int netmap_vp_krings_create(struct netmap_adapter *na) { u_int tailroom; int error, i; uint32_t *leases; u_int nrx = netmap_real_rings(na, NR_RX); /* * Leases are attached to RX rings on vale ports */ tailroom = sizeof(uint32_t) * na->num_rx_desc * nrx; error = netmap_krings_create(na, tailroom); if (error) return error; leases = na->tailroom; for (i = 0; i < nrx; i++) { /* Receive rings */ - na->rx_rings[i].nkr_leases = leases; + na->rx_rings[i]->nkr_leases = leases; leases += na->num_rx_desc; } error = nm_alloc_bdgfwd(na); if (error) { netmap_krings_delete(na); return error; } return 0; } /* nm_krings_delete callback for VALE ports. */ static void netmap_vp_krings_delete(struct netmap_adapter *na) { nm_free_bdgfwd(na); netmap_krings_delete(na); } static int nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na, u_int ring_nr); /* * main dispatch routine for the bridge. * Grab packets from a kring, move them into the ft structure * associated to the tx (input) port. Max one instance per port, * filtered on input (ioctl, poll or XXX). * Returns the next position in the ring. */ static int nm_bdg_preflush(struct netmap_kring *kring, u_int end) { struct netmap_vp_adapter *na = (struct netmap_vp_adapter*)kring->na; struct netmap_ring *ring = kring->ring; struct nm_bdg_fwd *ft; u_int ring_nr = kring->ring_id; u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1; u_int ft_i = 0; /* start from 0 */ u_int frags = 1; /* how many frags ? */ struct nm_bridge *b = na->na_bdg; /* To protect against modifications to the bridge we acquire a * shared lock, waiting if we can sleep (if the source port is * attached to a user process) or with a trylock otherwise (NICs). */ ND("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j); if (na->up.na_flags & NAF_BDG_MAYSLEEP) BDG_RLOCK(b); else if (!BDG_RTRYLOCK(b)) return j; ND(5, "rlock acquired for %d packets", ((j > end ? 
lim+1 : 0) + end) - j); ft = kring->nkr_ft; for (; likely(j != end); j = nm_next(j, lim)) { struct netmap_slot *slot = &ring->slot[j]; char *buf; ft[ft_i].ft_len = slot->len; ft[ft_i].ft_flags = slot->flags; + ft[ft_i].ft_offset = 0; ND("flags is 0x%x", slot->flags); /* we do not use the buf changed flag, but we still need to reset it */ slot->flags &= ~NS_BUF_CHANGED; /* this slot goes into a list so initialize the link field */ ft[ft_i].ft_next = NM_FT_NULL; buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ? (void *)(uintptr_t)slot->ptr : NMB(&na->up, slot); if (unlikely(buf == NULL)) { RD(5, "NULL %s buffer pointer from %s slot %d len %d", (slot->flags & NS_INDIRECT) ? "INDIRECT" : "DIRECT", kring->name, j, ft[ft_i].ft_len); buf = ft[ft_i].ft_buf = NETMAP_BUF_BASE(&na->up); ft[ft_i].ft_len = 0; ft[ft_i].ft_flags = 0; } __builtin_prefetch(buf); ++ft_i; if (slot->flags & NS_MOREFRAG) { frags++; continue; } if (unlikely(netmap_verbose && frags > 1)) RD(5, "%d frags at %d", frags, ft_i - frags); ft[ft_i - frags].ft_frags = frags; frags = 1; if (unlikely((int)ft_i >= bridge_batch)) ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr); } if (frags > 1) { /* Here ft_i > 0, ft[ft_i-1].flags has NS_MOREFRAG, and we * have to fix frags count. */ frags--; ft[ft_i - 1].ft_flags &= ~NS_MOREFRAG; ft[ft_i - frags].ft_frags = frags; D("Truncate incomplete fragment at %d (%d frags)", ft_i, frags); } if (ft_i) ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr); BDG_RUNLOCK(b); return j; } /* ----- FreeBSD if_bridge hash function ------- */ /* * The following hash function is adapted from "Hash Functions" by Bob Jenkins * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). * * http://www.burtleburtle.net/bob/hash/spooky.html */ #define mix(a, b, c) \ do { \ a -= b; a -= c; a ^= (c >> 13); \ b -= c; b -= a; b ^= (a << 8); \ c -= a; c -= b; c ^= (b >> 13); \ a -= b; a -= c; a ^= (c >> 12); \ b -= c; b -= a; b ^= (a << 16); \ c -= a; c -= b; c ^= (b >> 5); \ a -= b; a -= c; a ^= (c >> 3); \ b -= c; b -= a; b ^= (a << 10); \ c -= a; c -= b; c ^= (b >> 15); \ } while (/*CONSTCOND*/0) static __inline uint32_t nm_bridge_rthash(const uint8_t *addr) { uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hask key b += addr[5] << 8; b += addr[4]; a += addr[3] << 24; a += addr[2] << 16; a += addr[1] << 8; a += addr[0]; mix(a, b, c); #define BRIDGE_RTHASH_MASK (NM_BDG_HASH-1) return (c & BRIDGE_RTHASH_MASK); } #undef mix /* nm_register callback for VALE ports */ static int netmap_vp_reg(struct netmap_adapter *na, int onoff) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na; enum txrx t; int i; /* persistent ports may be put in netmap mode * before being attached to a bridge */ if (vpna->na_bdg) BDG_WLOCK(vpna->na_bdg); if (onoff) { for_rx_tx(t) { for (i = 0; i < netmap_real_rings(na, t); i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; if (nm_kring_pending_on(kring)) kring->nr_mode = NKR_NETMAP_ON; } } if (na->active_fds == 0) na->na_flags |= NAF_NETMAP_ON; /* XXX on FreeBSD, persistent VALE ports should also * toggle IFCAP_NETMAP in na->ifp (2014-03-16) */ } else { if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; for_rx_tx(t) { for (i = 0; i < netmap_real_rings(na, t); i++) { - struct netmap_kring *kring = &NMR(na, t)[i]; + struct netmap_kring *kring = NMR(na, t)[i]; if (nm_kring_pending_off(kring)) kring->nr_mode = NKR_NETMAP_OFF; } } } if (vpna->na_bdg) BDG_WUNLOCK(vpna->na_bdg); return 0; } /* * Lookup function for a learning bridge. 
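/*
 * Editor's sketch (not part of the commit): the if_bridge-derived Jenkins hash
 * above, pulled out as a user-space program so the bucket computation can be
 * checked in isolation. The table size used here is a placeholder; the kernel
 * defines its own NM_BDG_HASH, which (as the mask implies) must be a power of
 * two.
 */
#include <stdint.h>
#include <stdio.h>

#define HASH_BUCKETS	1024	/* stand-in for NM_BDG_HASH, power of 2 */

#define mix(a, b, c)						\
do {								\
	a -= b; a -= c; a ^= (c >> 13);				\
	b -= c; b -= a; b ^= (a << 8);				\
	c -= a; c -= b; c ^= (b >> 13);				\
	a -= b; a -= c; a ^= (c >> 12);				\
	b -= c; b -= a; b ^= (a << 16);				\
	c -= a; c -= b; c ^= (b >> 5);				\
	a -= b; a -= c; a ^= (c >> 3);				\
	b -= c; b -= a; b ^= (a << 10);				\
	c -= a; c -= b; c ^= (b >> 15);				\
} while (0)

static uint32_t
bridge_rthash(const uint8_t *addr)	/* 6-byte MAC, same mixing as above */
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0;	/* hash key */

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];
	mix(a, b, c);
	return (c & (HASH_BUCKETS - 1));
}

int
main(void)
{
	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };

	printf("MAC hashes to bucket %u of %u\n",
	    bridge_rthash(mac), HASH_BUCKETS);
	return (0);
}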
* Update the hash table with the source address, * and then returns the destination port index, and the * ring in *dst_ring (at the moment, always use ring 0) */ -u_int +uint32_t netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring, - struct netmap_vp_adapter *na) + struct netmap_vp_adapter *na, void *private_data) { - uint8_t *buf = ft->ft_buf; - u_int buf_len = ft->ft_len; - struct nm_hash_ent *ht = na->na_bdg->ht; + uint8_t *buf = ((uint8_t *)ft->ft_buf) + ft->ft_offset; + u_int buf_len = ft->ft_len - ft->ft_offset; + struct nm_hash_ent *ht = private_data; uint32_t sh, dh; u_int dst, mysrc = na->bdg_port; uint64_t smac, dmac; uint8_t indbuf[12]; - /* safety check, unfortunately we have many cases */ - if (buf_len >= 14 + na->up.virt_hdr_len) { - /* virthdr + mac_hdr in the same slot */ - buf += na->up.virt_hdr_len; - buf_len -= na->up.virt_hdr_len; - } else if (buf_len == na->up.virt_hdr_len && ft->ft_flags & NS_MOREFRAG) { - /* only header in first fragment */ - ft++; - buf = ft->ft_buf; - buf_len = ft->ft_len; - } else { - RD(5, "invalid buf format, length %d", buf_len); + if (buf_len < 14) { return NM_BDG_NOPORT; } if (ft->ft_flags & NS_INDIRECT) { if (copyin(buf, indbuf, sizeof(indbuf))) { return NM_BDG_NOPORT; } buf = indbuf; } dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff; smac = le64toh(*(uint64_t *)(buf + 4)); smac >>= 16; /* * The hash is somewhat expensive, there might be some * worthwhile optimizations here. */ if (((buf[6] & 1) == 0) && (na->last_smac != smac)) { /* valid src */ uint8_t *s = buf+6; sh = nm_bridge_rthash(s); /* hash of source */ /* update source port forwarding entry */ na->last_smac = ht[sh].mac = smac; /* XXX expire ? */ ht[sh].ports = mysrc; if (netmap_verbose) D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d", s[0], s[1], s[2], s[3], s[4], s[5], mysrc); } dst = NM_BDG_BROADCAST; if ((buf[0] & 1) == 0) { /* unicast */ dh = nm_bridge_rthash(buf); /* hash of dst */ if (ht[dh].mac == dmac) { /* found dst */ dst = ht[dh].ports; } } return dst; } /* * Available space in the ring. Only used in VALE code * and only with is_rx = 1 */ static inline uint32_t nm_kr_space(struct netmap_kring *k, int is_rx) { int space; if (is_rx) { int busy = k->nkr_hwlease - k->nr_hwcur; if (busy < 0) busy += k->nkr_num_slots; space = k->nkr_num_slots - 1 - busy; } else { /* XXX never used in this branch */ space = k->nr_hwtail - k->nkr_hwlease; if (space < 0) space += k->nkr_num_slots; } #if 0 // sanity check if (k->nkr_hwlease >= k->nkr_num_slots || k->nr_hwcur >= k->nkr_num_slots || k->nr_tail >= k->nkr_num_slots || busy < 0 || busy >= k->nkr_num_slots) { D("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease, k->nkr_lease_idx, k->nkr_num_slots); } #endif return space; } /* make a lease on the kring for N positions. 
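/*
 * Editor's sketch (not part of the commit): the circular-buffer arithmetic
 * behind nm_kr_space(..., is_rx=1) above. 'busy' is the distance from hwcur
 * to hwlease modulo the ring size, and one slot is always kept free so a full
 * ring is never confused with an empty one. Standalone, illustrative names.
 */
#include <assert.h>
#include <stdio.h>

static unsigned
rx_space(unsigned num_slots, unsigned hwcur, unsigned hwlease)
{
	int busy = (int)hwlease - (int)hwcur;

	if (busy < 0)
		busy += num_slots;		/* lease pointer wrapped around */
	return (num_slots - 1 - busy);		/* keep one slot unused */
}

int
main(void)
{
	assert(rx_space(512, 0, 0) == 511);	/* empty ring */
	assert(rx_space(512, 100, 99) == 0);	/* leases have filled the ring */
	assert(rx_space(512, 500, 10) == 489);	/* wrapped lease pointer */
	printf("space computations ok\n");
	return (0);
}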
return the * lease index * XXX only used in VALE code and with is_rx = 1 */ static inline uint32_t nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx) { uint32_t lim = k->nkr_num_slots - 1; uint32_t lease_idx = k->nkr_lease_idx; k->nkr_leases[lease_idx] = NR_NOSLOT; k->nkr_lease_idx = nm_next(lease_idx, lim); if (n > nm_kr_space(k, is_rx)) { D("invalid request for %d slots", n); panic("x"); } /* XXX verify that there are n slots */ k->nkr_hwlease += n; if (k->nkr_hwlease > lim) k->nkr_hwlease -= lim + 1; if (k->nkr_hwlease >= k->nkr_num_slots || k->nr_hwcur >= k->nkr_num_slots || k->nr_hwtail >= k->nkr_num_slots || k->nkr_lease_idx >= k->nkr_num_slots) { D("invalid kring %s, cur %d tail %d lease %d lease_idx %d lim %d", k->na->name, k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease, k->nkr_lease_idx, k->nkr_num_slots); } return lease_idx; } /* * * This flush routine supports only unicast and broadcast but a large * number of ports, and lets us replace the learn and dispatch functions. */ int nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na, u_int ring_nr) { struct nm_bdg_q *dst_ents, *brddst; uint16_t num_dsts = 0, *dsts; struct nm_bridge *b = na->na_bdg; u_int i, me = na->bdg_port; /* * The work area (pointed by ft) is followed by an array of * pointers to queues , dst_ents; there are NM_BDG_MAXRINGS * queues per port plus one for the broadcast traffic. * Then we have an array of destination indexes. */ dst_ents = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX); dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1); /* first pass: find a destination for each packet in the batch */ for (i = 0; likely(i < n); i += ft[i].ft_frags) { uint8_t dst_ring = ring_nr; /* default, same ring as origin */ uint16_t dst_port, d_i; struct nm_bdg_q *d; + struct nm_bdg_fwd *start_ft = NULL; ND("slot %d frags %d", i, ft[i].ft_frags); - /* Drop the packet if the virtio-net header is not into the first - fragment nor at the very beginning of the second. */ - if (unlikely(na->up.virt_hdr_len > ft[i].ft_len)) + + if (na->up.virt_hdr_len < ft[i].ft_len) { + ft[i].ft_offset = na->up.virt_hdr_len; + start_ft = &ft[i]; + } else if (na->up.virt_hdr_len == ft[i].ft_len && ft[i].ft_flags & NS_MOREFRAG) { + ft[i].ft_offset = ft[i].ft_len; + start_ft = &ft[i+1]; + } else { + /* Drop the packet if the virtio-net header is not into the first + * fragment nor at the very beginning of the second. + */ continue; - dst_port = b->bdg_ops.lookup(&ft[i], &dst_ring, na); + } + dst_port = b->bdg_ops->lookup(start_ft, &dst_ring, na, b->private_data); if (netmap_verbose > 255) RD(5, "slot %d port %d -> %d", i, me, dst_port); if (dst_port >= NM_BDG_NOPORT) continue; /* this packet is identified to be dropped */ else if (dst_port == NM_BDG_BROADCAST) dst_ring = 0; /* broadcasts always go to ring 0 */ else if (unlikely(dst_port == me || !b->bdg_ports[dst_port])) continue; /* get a position in the scratch pad */ d_i = dst_port * NM_BDG_MAXRINGS + dst_ring; d = dst_ents + d_i; /* append the first fragment to the list */ if (d->bq_head == NM_FT_NULL) { /* new destination */ d->bq_head = d->bq_tail = i; /* remember this position to be scanned later */ if (dst_port != NM_BDG_BROADCAST) dsts[num_dsts++] = d_i; } else { ft[d->bq_tail].ft_next = i; d->bq_tail = i; } d->bq_len += ft[i].ft_frags; } /* * Broadcast traffic goes to ring 0 on all destinations. * So we need to add these rings to the list of ports to scan. * XXX at the moment we scan all NM_BDG_MAXPORTS ports, which is * expensive. 
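/*
 * Editor's sketch (not part of the commit): the per-destination batching used
 * by the first pass of nm_bdg_flush() below. Each (port, ring) pair owns one
 * queue in the scratch pad, indexed as dst_port * NM_BDG_MAXRINGS + dst_ring;
 * packets are chained through ft_next, so appending is O(1) and the second
 * pass can walk each destination list independently. Names and sizes here are
 * illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define MAXRINGS	8		/* stand-in for NM_BDG_MAXRINGS */
#define FT_NULL		0xffff		/* stand-in for NM_FT_NULL */

struct fwd_slot { uint16_t next; };	/* only the ft_next link matters here */
struct dst_q { uint16_t head, tail, len; };

static void
enqueue(struct fwd_slot *ft, struct dst_q *q, uint16_t i)
{
	ft[i].next = FT_NULL;
	if (q->head == FT_NULL) {	/* first packet for this destination */
		q->head = q->tail = i;
	} else {			/* append after the current tail */
		ft[q->tail].next = i;
		q->tail = i;
	}
	q->len++;
}

int
main(void)
{
	struct fwd_slot ft[4];
	struct dst_q q[2 * MAXRINGS];	/* two ports, MAXRINGS rings each */
	unsigned dst_port = 1, dst_ring = 0;
	unsigned d_i = dst_port * MAXRINGS + dst_ring;	/* same indexing as d_i */
	uint16_t i;

	for (i = 0; i < 2 * MAXRINGS; i++) {
		q[i].head = q[i].tail = FT_NULL;
		q[i].len = 0;
	}
	for (i = 0; i < 4; i++)
		enqueue(ft, &q[d_i], i);	/* batch 4 packets for port 1, ring 0 */
	for (i = q[d_i].head; i != FT_NULL; i = ft[i].next)
		printf("packet %u -> port %u ring %u\n", i, dst_port, dst_ring);
	return (0);
}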
We should keep a compact list of active destinations * so we could shorten this loop. */ brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS; if (brddst->bq_head != NM_FT_NULL) { u_int j; for (j = 0; likely(j < b->bdg_active_ports); j++) { uint16_t d_i; i = b->bdg_port_index[j]; if (unlikely(i == me)) continue; d_i = i * NM_BDG_MAXRINGS; if (dst_ents[d_i].bq_head == NM_FT_NULL) dsts[num_dsts++] = d_i; } } ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts); /* second pass: scan destinations */ for (i = 0; i < num_dsts; i++) { struct netmap_vp_adapter *dst_na; struct netmap_kring *kring; struct netmap_ring *ring; u_int dst_nr, lim, j, d_i, next, brd_next; u_int needed, howmany; int retry = netmap_txsync_retry; struct nm_bdg_q *d; uint32_t my_start = 0, lease_idx = 0; int nrings; int virt_hdr_mismatch = 0; d_i = dsts[i]; ND("second pass %d port %d", i, d_i); d = dst_ents + d_i; // XXX fix the division dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS]; /* protect from the lookup function returning an inactive * destination port */ if (unlikely(dst_na == NULL)) goto cleanup; if (dst_na->up.na_flags & NAF_SW_ONLY) goto cleanup; /* * The interface may be in !netmap mode in two cases: * - when na is attached but not activated yet; * - when na is being deactivated but is still attached. */ if (unlikely(!nm_netmap_on(&dst_na->up))) { ND("not in netmap mode!"); goto cleanup; } /* there is at least one either unicast or broadcast packet */ brd_next = brddst->bq_head; next = d->bq_head; /* we need to reserve this many slots. If fewer are * available, some packets will be dropped. * Packets may have multiple fragments, so we may not use * there is a chance that we may not use all of the slots * we have claimed, so we will need to handle the leftover * ones when we regain the lock. */ needed = d->bq_len + brddst->bq_len; if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) { if (netmap_verbose) { RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len, dst_na->up.virt_hdr_len); } /* There is a virtio-net header/offloadings mismatch between * source and destination. The slower mismatch datapath will * be used to cope with all the mismatches. */ virt_hdr_mismatch = 1; if (dst_na->mfs < na->mfs) { /* We may need to do segmentation offloadings, and so * we may need a number of destination slots greater * than the number of input slots ('needed'). * We look for the smallest integer 'x' which satisfies: * needed * na->mfs + x * H <= x * na->mfs * where 'H' is the length of the longest header that may * be replicated in the segmentation process (e.g. for * TCPv4 we must account for ethernet header, IP header * and TCPv4 header). */ KASSERT(dst_na->mfs > 0, ("vpna->mfs is 0")); needed = (needed * na->mfs) / (dst_na->mfs - WORST_CASE_GSO_HEADER) + 1; ND(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed); } } ND(5, "pass 2 dst %d is %x %s", i, d_i, is_vp ? "virtual" : "nic/host"); dst_nr = d_i & (NM_BDG_MAXRINGS-1); nrings = dst_na->up.num_rx_rings; if (dst_nr >= nrings) dst_nr = dst_nr % nrings; - kring = &dst_na->up.rx_rings[dst_nr]; + kring = dst_na->up.rx_rings[dst_nr]; ring = kring->ring; /* the destination ring may have not been opened for RX */ if (unlikely(ring == NULL || kring->nr_mode != NKR_NETMAP_ON)) goto cleanup; lim = kring->nkr_num_slots - 1; retry: if (dst_na->retry && retry) { /* try to get some free slot from the previous run */ kring->nm_notify(kring, 0); /* actually useful only for bwraps, since there * the notify will trigger a txsync on the hwna. 
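/*
 * Editor's sketch (not part of the commit): the worst-case slot estimate used
 * above when the destination MFS is smaller than the source one. Each
 * destination slot carries at most dst_mfs bytes, of which up to 'hdr' bytes
 * may be a replicated header, so x slots move at least x * (dst_mfs - hdr)
 * bytes of payload; the +1 absorbs the flooring of the integer division. The
 * concrete numbers below are made up for illustration.
 */
#include <stdio.h>

static unsigned
gso_slots(unsigned needed, unsigned src_mfs, unsigned dst_mfs, unsigned hdr)
{
	return ((needed * src_mfs) / (dst_mfs - hdr) + 1);
}

int
main(void)
{
	/* e.g. 8 source slots of 9000 bytes into 1500-byte destination slots,
	 * with up to 54 bytes (Ethernet + IPv4 + TCP) repeated per segment */
	printf("%u destination slots reserved\n", gso_slots(8, 9000, 1500, 54));
	return (0);
}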
VALE ports * have dst_na->retry == 0 */ } /* reserve the buffers in the queue and an entry * to report completion, and drop lock. * XXX this might become a helper function. */ mtx_lock(&kring->q_lock); if (kring->nkr_stopped) { mtx_unlock(&kring->q_lock); goto cleanup; } my_start = j = kring->nkr_hwlease; howmany = nm_kr_space(kring, 1); if (needed < howmany) howmany = needed; lease_idx = nm_kr_lease(kring, howmany, 1); mtx_unlock(&kring->q_lock); /* only retry if we need more than available slots */ if (retry && needed <= howmany) retry = 0; /* copy to the destination queue */ while (howmany > 0) { struct netmap_slot *slot; struct nm_bdg_fwd *ft_p, *ft_end; u_int cnt; /* find the queue from which we pick next packet. * NM_FT_NULL is always higher than valid indexes * so we never dereference it if the other list * has packets (and if both are empty we never * get here). */ if (next < brd_next) { ft_p = ft + next; next = ft_p->ft_next; } else { /* insert broadcast */ ft_p = ft + brd_next; brd_next = ft_p->ft_next; } cnt = ft_p->ft_frags; // cnt > 0 if (unlikely(cnt > howmany)) break; /* no more space */ if (netmap_verbose && cnt > 1) RD(5, "rx %d frags to %d", cnt, j); ft_end = ft_p + cnt; if (unlikely(virt_hdr_mismatch)) { bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany); } else { howmany -= cnt; do { char *dst, *src = ft_p->ft_buf; size_t copy_len = ft_p->ft_len, dst_len = copy_len; slot = &ring->slot[j]; dst = NMB(&dst_na->up, slot); ND("send [%d] %d(%d) bytes at %s:%d", i, (int)copy_len, (int)dst_len, NM_IFPNAME(dst_ifp), j); /* round to a multiple of 64 */ copy_len = (copy_len + 63) & ~63; if (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) || copy_len > NETMAP_BUF_SIZE(&na->up))) { RD(5, "invalid len %d, down to 64", (int)copy_len); copy_len = dst_len = 64; // XXX } if (ft_p->ft_flags & NS_INDIRECT) { if (copyin(src, dst, copy_len)) { // invalid user pointer, pretend len is 0 dst_len = 0; } } else { //memcpy(dst, src, copy_len); pkt_copy(src, dst, (int)copy_len); } slot->len = dst_len; slot->flags = (cnt << 8)| NS_MOREFRAG; j = nm_next(j, lim); needed--; ft_p++; } while (ft_p != ft_end); slot->flags = (cnt << 8); /* clear flag on last entry */ } /* are we done ? */ if (next == NM_FT_NULL && brd_next == NM_FT_NULL) break; } { /* current position */ uint32_t *p = kring->nkr_leases; /* shorthand */ uint32_t update_pos; int still_locked = 1; mtx_lock(&kring->q_lock); if (unlikely(howmany > 0)) { /* not used all bufs. If i am the last one * i can recover the slots, otherwise must * fill them with 0 to mark empty packets. */ ND("leftover %d bufs", howmany); if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) { /* yes i am the last one */ ND("roll back nkr_hwlease to %d", j); kring->nkr_hwlease = j; } else { while (howmany-- > 0) { ring->slot[j].len = 0; ring->slot[j].flags = 0; j = nm_next(j, lim); } } } p[lease_idx] = j; /* report I am done */ update_pos = kring->nr_hwtail; if (my_start == update_pos) { /* all slots before my_start have been reported, * so scan subsequent leases to see if other ranges * have been completed, and to a selwakeup or txsync. */ while (lease_idx != kring->nkr_lease_idx && p[lease_idx] != NR_NOSLOT) { j = p[lease_idx]; p[lease_idx] = NR_NOSLOT; lease_idx = nm_next(lease_idx, lim); } /* j is the new 'write' position. 
j != my_start * means there are new buffers to report */ if (likely(j != my_start)) { kring->nr_hwtail = j; still_locked = 0; mtx_unlock(&kring->q_lock); kring->nm_notify(kring, 0); /* this is netmap_notify for VALE ports and * netmap_bwrap_notify for bwrap. The latter will * trigger a txsync on the underlying hwna */ if (dst_na->retry && retry--) { /* XXX this is going to call nm_notify again. * Only useful for bwrap in virtual machines */ goto retry; } } } if (still_locked) mtx_unlock(&kring->q_lock); } cleanup: d->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */ d->bq_len = 0; } brddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */ brddst->bq_len = 0; return 0; } /* nm_txsync callback for VALE ports */ static int netmap_vp_txsync(struct netmap_kring *kring, int flags) { struct netmap_vp_adapter *na = (struct netmap_vp_adapter *)kring->na; u_int done; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; if (bridge_batch <= 0) { /* testing only */ done = head; // used all goto done; } if (!na->na_bdg) { done = head; goto done; } if (bridge_batch > NM_BDG_BATCH) bridge_batch = NM_BDG_BATCH; done = nm_bdg_preflush(kring, head); done: if (done != head) D("early break at %d/ %d, tail %d", done, head, kring->nr_hwtail); /* * packets between 'done' and 'cur' are left unsent. */ kring->nr_hwcur = done; kring->nr_hwtail = nm_prev(done, lim); if (netmap_verbose) D("%s ring %d flags %d", na->up.name, kring->ring_id, flags); return 0; } /* rxsync code used by VALE ports nm_rxsync callback and also * internally by the brwap */ static int netmap_vp_rxsync_locked(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; u_int nm_i, lim = kring->nkr_num_slots - 1; u_int head = kring->rhead; int n; if (head > lim) { D("ouch dangerous reset!!!"); n = netmap_ring_reinit(kring); goto done; } /* First part, import newly received packets. */ /* actually nothing to do here, they are already in the kring */ /* Second part, skip past packets that userspace has released. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* consistency check, but nothing really important here */ for (n = 0; likely(nm_i != head); n++) { struct netmap_slot *slot = &ring->slot[nm_i]; void *addr = NMB(na, slot); if (addr == NETMAP_BUF_BASE(kring->na)) { /* bad buf */ D("bad buffer index %d, ignore ?", slot->buf_idx); } slot->flags &= ~NS_BUF_CHANGED; nm_i = nm_next(nm_i, lim); } kring->nr_hwcur = head; } n = 0; done: return n; } /* * nm_rxsync callback for VALE ports * user process reading from a VALE switch. * Already protected against concurrent calls from userspace, * but we must acquire the queue's lock to protect against * writers on the same queue. */ static int netmap_vp_rxsync(struct netmap_kring *kring, int flags) { int n; mtx_lock(&kring->q_lock); n = netmap_vp_rxsync_locked(kring, flags); mtx_unlock(&kring->q_lock); return n; } /* nm_bdg_attach callback for VALE ports * The na_vp port is this same netmap_adapter. There is no host port. */ static int netmap_vp_bdg_attach(const char *name, struct netmap_adapter *na) { struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na; - if (vpna->na_bdg) + if (vpna->na_bdg) { return netmap_bwrap_attach(name, na); + } na->na_vp = vpna; strncpy(na->name, name, sizeof(na->name)); na->na_hostvp = NULL; return 0; } /* create a netmap_vp_adapter that describes a VALE port. * Only persistent VALE ports have a non-null ifp. 
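/*
 * Editor's sketch (not part of the commit): the clamping pattern that
 * netmap_vp_create() below applies to the requested ring and slot counts via
 * nm_bound_var(). The helper here is an illustrative reimplementation of that
 * pattern (out-of-range low values fall back to a default, high values are
 * clamped), not the kernel's nm_bound_var(); it assumes the default already
 * lies within [lo, hi].
 */
#include <stdio.h>

static unsigned
bound_var_sketch(unsigned v, unsigned dflt, unsigned lo, unsigned hi)
{
	if (v < lo)
		v = dflt;	/* too small (e.g. zero request): use the default */
	else if (v > hi)
		v = hi;		/* too large: clamp to the upper bound */
	return (v);
}

int
main(void)
{
	/* e.g. slot count: default 1024, bounded to at most 4096 (made-up limits) */
	printf("slots: %u\n", bound_var_sketch(0, 1024, 1, 4096));	/* -> 1024 */
	printf("slots: %u\n", bound_var_sketch(100000, 1024, 1, 4096));	/* -> 4096 */
	return (0);
}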
*/ static int -netmap_vp_create(struct nmreq *nmr, struct ifnet *ifp, - struct netmap_mem_d *nmd, - struct netmap_vp_adapter **ret) +netmap_vp_create(struct nmreq_header *hdr, struct ifnet *ifp, + struct netmap_mem_d *nmd, struct netmap_vp_adapter **ret) { + struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body; struct netmap_vp_adapter *vpna; struct netmap_adapter *na; int error = 0; u_int npipes = 0; + u_int extrabufs = 0; + if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) { + return EINVAL; + } + vpna = nm_os_malloc(sizeof(*vpna)); if (vpna == NULL) return ENOMEM; na = &vpna->up; na->ifp = ifp; - strncpy(na->name, nmr->nr_name, sizeof(na->name)); + strncpy(na->name, hdr->nr_name, sizeof(na->name)); /* bound checking */ - na->num_tx_rings = nmr->nr_tx_rings; + na->num_tx_rings = req->nr_tx_rings; nm_bound_var(&na->num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL); - nmr->nr_tx_rings = na->num_tx_rings; // write back - na->num_rx_rings = nmr->nr_rx_rings; + req->nr_tx_rings = na->num_tx_rings; /* write back */ + na->num_rx_rings = req->nr_rx_rings; nm_bound_var(&na->num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL); - nmr->nr_rx_rings = na->num_rx_rings; // write back - nm_bound_var(&nmr->nr_tx_slots, NM_BRIDGE_RINGSIZE, + req->nr_rx_rings = na->num_rx_rings; /* write back */ + nm_bound_var(&req->nr_tx_slots, NM_BRIDGE_RINGSIZE, 1, NM_BDG_MAXSLOTS, NULL); - na->num_tx_desc = nmr->nr_tx_slots; - nm_bound_var(&nmr->nr_rx_slots, NM_BRIDGE_RINGSIZE, + na->num_tx_desc = req->nr_tx_slots; + nm_bound_var(&req->nr_rx_slots, NM_BRIDGE_RINGSIZE, 1, NM_BDG_MAXSLOTS, NULL); /* validate number of pipes. We want at least 1, * but probably can do with some more. * So let's use 2 as default (when 0 is supplied) */ - npipes = nmr->nr_arg1; nm_bound_var(&npipes, 2, 1, NM_MAXPIPES, NULL); - nmr->nr_arg1 = npipes; /* write back */ /* validate extra bufs */ - nm_bound_var(&nmr->nr_arg3, 0, 0, + nm_bound_var(&extrabufs, 0, 0, 128*NM_BDG_MAXSLOTS, NULL); - na->num_rx_desc = nmr->nr_rx_slots; + req->nr_extra_bufs = extrabufs; /* write back */ + na->num_rx_desc = req->nr_rx_slots; /* Set the mfs to a default value, as it is needed on the VALE * mismatch datapath. XXX We should set it according to the MTU * known to the kernel. */ vpna->mfs = NM_BDG_MFS_DEFAULT; vpna->last_smac = ~0llu; /*if (vpna->mfs > netmap_buf_size) TODO netmap_buf_size is zero?? vpna->mfs = netmap_buf_size; */ if (netmap_verbose) D("max frame size %u", vpna->mfs); na->na_flags |= NAF_BDG_MAYSLEEP; /* persistent VALE ports look like hw devices * with a native netmap adapter */ if (ifp) na->na_flags |= NAF_NATIVE; na->nm_txsync = netmap_vp_txsync; na->nm_rxsync = netmap_vp_rxsync; na->nm_register = netmap_vp_reg; na->nm_krings_create = netmap_vp_krings_create; na->nm_krings_delete = netmap_vp_krings_delete; na->nm_dtor = netmap_vp_dtor; - D("nr_arg2 %d", nmr->nr_arg2); + ND("nr_mem_id %d", req->nr_mem_id); na->nm_mem = nmd ? netmap_mem_get(nmd): netmap_mem_private_new( na->num_tx_rings, na->num_tx_desc, na->num_rx_rings, na->num_rx_desc, - nmr->nr_arg3, npipes, &error); + req->nr_extra_bufs, npipes, &error); if (na->nm_mem == NULL) goto err; na->nm_bdg_attach = netmap_vp_bdg_attach; /* other nmd fields are set in the common routine */ error = netmap_attach_common(na); if (error) goto err; *ret = vpna; return 0; err: if (na->nm_mem != NULL) netmap_mem_put(na->nm_mem); nm_os_free(vpna); return error; } /* Bridge wrapper code (bwrap). * This is used to connect a non-VALE-port netmap_adapter (hwna) to a * VALE switch. 
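/*
 * Editor's sketch (not part of the commit): the tx/rx swap described in the
 * bwrap comment that begins above. Frames the NIC receives (hwna RX) are what
 * the bridge port pushes into the switch, and frames the switch delivers to
 * the port end up on the NIC's TX rings, so the bwrap mirrors the hwna ring
 * counts with the two directions exchanged, as netmap_bwrap_attach() and
 * netmap_bwrap_config() do. Types and names below are illustrative only.
 */
#include <stdio.h>

enum txrx { TX = 0, RX = 1 };

static enum txrx
txrx_swap(enum txrx t)			/* same idea as nm_txrx_swap() */
{
	return (t == TX ? RX : TX);
}

int
main(void)
{
	unsigned hw_rings[2] = { 4, 2 };	/* hwna: 4 TX rings, 2 RX rings */
	unsigned bwrap_rings[2];
	int t;

	for (t = TX; t <= RX; t++)
		bwrap_rings[t] = hw_rings[txrx_swap(t)];
	printf("bwrap: %u TX rings (from hwna RX), %u RX rings (from hwna TX)\n",
	    bwrap_rings[TX], bwrap_rings[RX]);
	return (0);
}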
* The main task is to swap the meaning of tx and rx rings to match the * expectations of the VALE switch code (see nm_bdg_flush). * * The bwrap works by interposing a netmap_bwrap_adapter between the * rest of the system and the hwna. The netmap_bwrap_adapter looks like * a netmap_vp_adapter to the rest the system, but, internally, it * translates all callbacks to what the hwna expects. * * Note that we have to intercept callbacks coming from two sides: * * - callbacks coming from the netmap module are intercepted by * passing around the netmap_bwrap_adapter instead of the hwna * * - callbacks coming from outside of the netmap module only know * about the hwna. This, however, only happens in interrupt * handlers, where only the hwna->nm_notify callback is called. * What the bwrap does is to overwrite the hwna->nm_notify callback * with its own netmap_bwrap_intr_notify. * XXX This assumes that the hwna->nm_notify callback was the * standard netmap_notify(), as it is the case for nic adapters. * Any additional action performed by hwna->nm_notify will not be * performed by netmap_bwrap_intr_notify. * * Additionally, the bwrap can optionally attach the host rings pair * of the wrapped adapter to a different port of the switch. */ static void netmap_bwrap_dtor(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na; struct netmap_adapter *hwna = bna->hwna; struct nm_bridge *b = bna->up.na_bdg, *bh = bna->host.na_bdg; if (bna->host.up.nm_mem) netmap_mem_put(bna->host.up.nm_mem); if (b) { netmap_bdg_detach_common(b, bna->up.bdg_port, (bh ? bna->host.bdg_port : -1)); } ND("na %p", na); na->ifp = NULL; bna->host.up.ifp = NULL; + hwna->na_vp = bna->saved_na_vp; + hwna->na_hostvp = NULL; hwna->na_private = NULL; - hwna->na_vp = hwna->na_hostvp = NULL; hwna->na_flags &= ~NAF_BUSY; netmap_adapter_put(hwna); } /* * Intr callback for NICs connected to a bridge. * Simply ignore tx interrupts (maybe we could try to recover space ?) * and pass received packets from nic to the bridge. * * XXX TODO check locking: this is called from the interrupt * handler so we should make sure that the interface is not * disconnected while passing down an interrupt. * * Note, no user process can access this NIC or the host stack. * The only part of the ring that is significant are the slots, * and head/cur/tail are set from the kring as needed * (part as a receive ring, part as a transmit ring). * * callback that overwrites the hwna notify callback. * Packets come from the outside or from the host stack and are put on an * hwna rx ring. * The bridge wrapper then sends the packets through the bridge. */ static int netmap_bwrap_intr_notify(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_bwrap_adapter *bna = na->na_private; struct netmap_kring *bkring; struct netmap_vp_adapter *vpna = &bna->up; u_int ring_nr = kring->ring_id; int ret = NM_IRQ_COMPLETED; int error; if (netmap_verbose) D("%s %s 0x%x", na->name, kring->name, flags); - bkring = &vpna->up.tx_rings[ring_nr]; + bkring = vpna->up.tx_rings[ring_nr]; /* make sure the ring is not disabled */ if (nm_kr_tryget(kring, 0 /* can't sleep */, NULL)) { return EIO; } if (netmap_verbose) D("%s head %d cur %d tail %d", na->name, kring->rhead, kring->rcur, kring->rtail); /* simulate a user wakeup on the rx ring * fetch packets that have arrived. 
*/ error = kring->nm_sync(kring, 0); if (error) goto put_out; if (kring->nr_hwcur == kring->nr_hwtail) { if (netmap_verbose) D("how strange, interrupt with no packets on %s", na->name); goto put_out; } /* new packets are kring->rcur to kring->nr_hwtail, and the bkring * had hwcur == bkring->rhead. So advance bkring->rhead to kring->nr_hwtail * to push all packets out. */ bkring->rhead = bkring->rcur = kring->nr_hwtail; netmap_vp_txsync(bkring, flags); /* mark all buffers as released on this ring */ kring->rhead = kring->rcur = kring->rtail = kring->nr_hwtail; /* another call to actually release the buffers */ error = kring->nm_sync(kring, 0); /* The second rxsync may have further advanced hwtail. If this happens, * return NM_IRQ_RESCHED, otherwise just return NM_IRQ_COMPLETED. */ if (kring->rcur != kring->nr_hwtail) { ret = NM_IRQ_RESCHED; } put_out: nm_kr_put(kring); return error ? error : ret; } /* nm_register callback for bwrap */ static int netmap_bwrap_reg(struct netmap_adapter *na, int onoff) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; struct netmap_vp_adapter *hostna = &bna->host; int error, i; enum txrx t; ND("%s %s", na->name, onoff ? "on" : "off"); if (onoff) { /* netmap_do_regif has been called on the bwrap na. * We need to pass the information about the * memory allocator down to the hwna before * putting it in netmap mode */ hwna->na_lut = na->na_lut; if (hostna->na_bdg) { /* if the host rings have been attached to switch, * we need to copy the memory allocator information * in the hostna also */ hostna->up.na_lut = na->na_lut; } } /* pass down the pending ring state information */ for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t) + 1; i++) - NMR(hwna, t)[i].nr_pending_mode = - NMR(na, t)[i].nr_pending_mode; + NMR(hwna, t)[i]->nr_pending_mode = + NMR(na, t)[i]->nr_pending_mode; } /* forward the request to the hwna */ error = hwna->nm_register(hwna, onoff); if (error) return error; /* copy up the current ring state information */ for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { - struct netmap_kring *kring = &NMR(hwna, t)[i]; - NMR(na, t)[i].nr_mode = kring->nr_mode; + struct netmap_kring *kring = NMR(hwna, t)[i]; + NMR(na, t)[i]->nr_mode = kring->nr_mode; } } /* impersonate a netmap_vp_adapter */ netmap_vp_reg(na, onoff); if (hostna->na_bdg) netmap_vp_reg(&hostna->up, onoff); if (onoff) { u_int i; /* intercept the hwna nm_nofify callback on the hw rings */ for (i = 0; i < hwna->num_rx_rings; i++) { - hwna->rx_rings[i].save_notify = hwna->rx_rings[i].nm_notify; - hwna->rx_rings[i].nm_notify = netmap_bwrap_intr_notify; + hwna->rx_rings[i]->save_notify = hwna->rx_rings[i]->nm_notify; + hwna->rx_rings[i]->nm_notify = netmap_bwrap_intr_notify; } i = hwna->num_rx_rings; /* for safety */ /* save the host ring notify unconditionally */ - hwna->rx_rings[i].save_notify = hwna->rx_rings[i].nm_notify; + hwna->rx_rings[i]->save_notify = hwna->rx_rings[i]->nm_notify; if (hostna->na_bdg) { /* also intercept the host ring notify */ - hwna->rx_rings[i].nm_notify = netmap_bwrap_intr_notify; + hwna->rx_rings[i]->nm_notify = netmap_bwrap_intr_notify; } if (na->active_fds == 0) na->na_flags |= NAF_NETMAP_ON; } else { u_int i; if (na->active_fds == 0) na->na_flags &= ~NAF_NETMAP_ON; /* reset all notify callbacks (including host ring) */ for (i = 0; i <= hwna->num_rx_rings; i++) { - hwna->rx_rings[i].nm_notify = hwna->rx_rings[i].save_notify; - hwna->rx_rings[i].save_notify = NULL; + hwna->rx_rings[i]->nm_notify 
= hwna->rx_rings[i]->save_notify; + hwna->rx_rings[i]->save_notify = NULL; } hwna->na_lut.lut = NULL; + hwna->na_lut.plut = NULL; hwna->na_lut.objtotal = 0; hwna->na_lut.objsize = 0; /* pass ownership of the netmap rings to the hwna */ for_rx_tx(t) { for (i = 0; i < nma_get_nrings(na, t) + 1; i++) { - NMR(na, t)[i].ring = NULL; + NMR(na, t)[i]->ring = NULL; } } } return 0; } /* nm_config callback for bwrap */ static int -netmap_bwrap_config(struct netmap_adapter *na, u_int *txr, u_int *txd, - u_int *rxr, u_int *rxd) +netmap_bwrap_config(struct netmap_adapter *na, struct nm_config_info *info) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; /* forward the request */ netmap_update_config(hwna); - /* swap the results */ - *txr = hwna->num_rx_rings; - *txd = hwna->num_rx_desc; - *rxr = hwna->num_tx_rings; - *rxd = hwna->num_rx_desc; + /* swap the results and propagate */ + info->num_tx_rings = hwna->num_rx_rings; + info->num_tx_descs = hwna->num_rx_desc; + info->num_rx_rings = hwna->num_tx_rings; + info->num_rx_descs = hwna->num_tx_desc; + info->rx_buf_maxsize = hwna->rx_buf_maxsize; return 0; } /* nm_krings_create callback for bwrap */ static int netmap_bwrap_krings_create(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; struct netmap_adapter *hostna = &bna->host.up; int i, error = 0; enum txrx t; ND("%s", na->name); /* impersonate a netmap_vp_adapter */ error = netmap_vp_krings_create(na); if (error) return error; /* also create the hwna krings */ error = hwna->nm_krings_create(hwna); if (error) { goto err_del_vp_rings; } /* increment the usage counter for all the hwna krings */ for_rx_tx(t) { for (i = 0; i < nma_get_nrings(hwna, t) + 1; i++) { - NMR(hwna, t)[i].users++; + NMR(hwna, t)[i]->users++; } } /* now create the actual rings */ error = netmap_mem_rings_create(hwna); if (error) { goto err_dec_users; } /* cross-link the netmap rings * The original number of rings comes from hwna, * rx rings on one side equals tx rings on the other. */ for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ for (i = 0; i < nma_get_nrings(hwna, r) + 1; i++) { - NMR(na, t)[i].nkr_num_slots = NMR(hwna, r)[i].nkr_num_slots; - NMR(na, t)[i].ring = NMR(hwna, r)[i].ring; + NMR(na, t)[i]->nkr_num_slots = NMR(hwna, r)[i]->nkr_num_slots; + NMR(na, t)[i]->ring = NMR(hwna, r)[i]->ring; } } if (na->na_flags & NAF_HOST_RINGS) { /* the hostna rings are the host rings of the bwrap. 
* The corresponding krings must point back to the * hostna */ hostna->tx_rings = &na->tx_rings[na->num_tx_rings]; - hostna->tx_rings[0].na = hostna; + hostna->tx_rings[0]->na = hostna; hostna->rx_rings = &na->rx_rings[na->num_rx_rings]; - hostna->rx_rings[0].na = hostna; + hostna->rx_rings[0]->na = hostna; } return 0; err_dec_users: for_rx_tx(t) { - NMR(hwna, t)[i].users--; + NMR(hwna, t)[i]->users--; } hwna->nm_krings_delete(hwna); err_del_vp_rings: netmap_vp_krings_delete(na); return error; } static void netmap_bwrap_krings_delete(struct netmap_adapter *na) { struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter *)na; struct netmap_adapter *hwna = bna->hwna; enum txrx t; int i; ND("%s", na->name); /* decrement the usage counter for all the hwna krings */ for_rx_tx(t) { for (i = 0; i < nma_get_nrings(hwna, t) + 1; i++) { - NMR(hwna, t)[i].users--; + NMR(hwna, t)[i]->users--; } } /* delete any netmap rings that are no longer needed */ netmap_mem_rings_delete(hwna); hwna->nm_krings_delete(hwna); netmap_vp_krings_delete(na); } /* notify method for the bridge-->hwna direction */ static int netmap_bwrap_notify(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_bwrap_adapter *bna = na->na_private; struct netmap_adapter *hwna = bna->hwna; u_int ring_n = kring->ring_id; u_int lim = kring->nkr_num_slots - 1; struct netmap_kring *hw_kring; int error; ND("%s: na %s hwna %s", (kring ? kring->name : "NULL!"), (na ? na->name : "NULL!"), (hwna ? hwna->name : "NULL!")); - hw_kring = &hwna->tx_rings[ring_n]; + hw_kring = hwna->tx_rings[ring_n]; if (nm_kr_tryget(hw_kring, 0, NULL)) { return ENXIO; } /* first step: simulate a user wakeup on the rx ring */ netmap_vp_rxsync(kring, flags); ND("%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)", na->name, ring_n, kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease, ring->head, ring->cur, ring->tail, hw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_ring->rtail); /* second step: the new packets are sent on the tx ring * (which is actually the same ring) */ hw_kring->rhead = hw_kring->rcur = kring->nr_hwtail; error = hw_kring->nm_sync(hw_kring, flags); if (error) goto put_out; /* third step: now we are back the rx ring */ /* claim ownership on all hw owned bufs */ kring->rhead = kring->rcur = nm_next(hw_kring->nr_hwtail, lim); /* skip past reserved slot */ /* fourth step: the user goes to sleep again, causing another rxsync */ netmap_vp_rxsync(kring, flags); ND("%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)", na->name, ring_n, kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease, ring->head, ring->cur, ring->tail, hw_kring->nr_hwcur, hw_kring->nr_hwtail, hw_kring->rtail); put_out: nm_kr_put(hw_kring); return error ? error : NM_IRQ_COMPLETED; } /* nm_bdg_ctl callback for the bwrap. * Called on bridge-attach and detach, as an effect of vale-ctl -[ahd]. * On attach, it needs to provide a fake netmap_priv_d structure and * perform a netmap_do_regif() on the bwrap. This will put both the * bwrap and the hwna in netmap mode, with the netmap rings shared * and cross linked. Moroever, it will start intercepting interrupts * directed to hwna. 
*/ static int -netmap_bwrap_bdg_ctl(struct netmap_adapter *na, struct nmreq *nmr, int attach) +netmap_bwrap_bdg_ctl(struct nmreq_header *hdr, struct netmap_adapter *na) { struct netmap_priv_d *npriv; struct netmap_bwrap_adapter *bna = (struct netmap_bwrap_adapter*)na; int error = 0; - if (attach) { + if (hdr->nr_reqtype == NETMAP_REQ_VALE_ATTACH) { + struct nmreq_vale_attach *req = + (struct nmreq_vale_attach *)hdr->nr_body; + if (req->reg.nr_ringid != 0 || + (req->reg.nr_mode != NR_REG_ALL_NIC && + req->reg.nr_mode != NR_REG_NIC_SW)) { + /* We only support attaching all the NIC rings + * and/or the host stack. */ + return EINVAL; + } if (NETMAP_OWNED_BY_ANY(na)) { return EBUSY; } if (bna->na_kpriv) { /* nothing to do */ return 0; } npriv = netmap_priv_new(); if (npriv == NULL) return ENOMEM; npriv->np_ifp = na->ifp; /* let the priv destructor release the ref */ - error = netmap_do_regif(npriv, na, nmr->nr_ringid, nmr->nr_flags); + error = netmap_do_regif(npriv, na, req->reg.nr_mode, + req->reg.nr_ringid, req->reg.nr_flags); if (error) { netmap_priv_delete(npriv); return error; } bna->na_kpriv = npriv; na->na_flags |= NAF_BUSY; } else { if (na->active_fds == 0) /* not registered */ return EINVAL; netmap_priv_delete(bna->na_kpriv); bna->na_kpriv = NULL; na->na_flags &= ~NAF_BUSY; } - return error; + return error; } /* attach a bridge wrapper to the 'real' device */ int netmap_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna) { struct netmap_bwrap_adapter *bna; struct netmap_adapter *na = NULL; struct netmap_adapter *hostna = NULL; int error = 0; enum txrx t; /* make sure the NIC is not already in use */ if (NETMAP_OWNED_BY_ANY(hwna)) { D("NIC %s busy, cannot attach to bridge", hwna->name); return EBUSY; } bna = nm_os_malloc(sizeof(*bna)); if (bna == NULL) { return ENOMEM; } na = &bna->up.up; /* make bwrap ifp point to the real ifp */ na->ifp = hwna->ifp; if_ref(na->ifp); na->na_private = bna; strncpy(na->name, nr_name, sizeof(na->name)); /* fill the ring data for the bwrap adapter with rx/tx meanings * swapped. The real cross-linking will be done during register, * when all the krings will have been created. */ for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */ nma_set_nrings(na, t, nma_get_nrings(hwna, r)); nma_set_ndesc(na, t, nma_get_ndesc(hwna, r)); } na->nm_dtor = netmap_bwrap_dtor; na->nm_register = netmap_bwrap_reg; // na->nm_txsync = netmap_bwrap_txsync; // na->nm_rxsync = netmap_bwrap_rxsync; na->nm_config = netmap_bwrap_config; na->nm_krings_create = netmap_bwrap_krings_create; na->nm_krings_delete = netmap_bwrap_krings_delete; na->nm_notify = netmap_bwrap_notify; na->nm_bdg_ctl = netmap_bwrap_bdg_ctl; na->pdev = hwna->pdev; na->nm_mem = netmap_mem_get(hwna->nm_mem); na->virt_hdr_len = hwna->virt_hdr_len; bna->up.retry = 1; /* XXX maybe this should depend on the hwna */ /* Set the mfs, needed on the VALE mismatch datapath. 
*/ bna->up.mfs = NM_BDG_MFS_DEFAULT; bna->hwna = hwna; netmap_adapter_get(hwna); hwna->na_private = bna; /* weak reference */ + bna->saved_na_vp = hwna->na_vp; hwna->na_vp = &bna->up; + bna->up.up.na_vp = &(bna->up); if (hwna->na_flags & NAF_HOST_RINGS) { if (hwna->na_flags & NAF_SW_ONLY) na->na_flags |= NAF_SW_ONLY; na->na_flags |= NAF_HOST_RINGS; hostna = &bna->host.up; snprintf(hostna->name, sizeof(hostna->name), "%s^", nr_name); hostna->ifp = hwna->ifp; for_rx_tx(t) { enum txrx r = nm_txrx_swap(t); nma_set_nrings(hostna, t, 1); nma_set_ndesc(hostna, t, nma_get_ndesc(hwna, r)); } // hostna->nm_txsync = netmap_bwrap_host_txsync; // hostna->nm_rxsync = netmap_bwrap_host_rxsync; hostna->nm_notify = netmap_bwrap_notify; hostna->nm_mem = netmap_mem_get(na->nm_mem); hostna->na_private = bna; hostna->na_vp = &bna->up; na->na_hostvp = hwna->na_hostvp = hostna->na_hostvp = &bna->host; hostna->na_flags = NAF_BUSY; /* prevent NIOCREGIF */ bna->host.mfs = NM_BDG_MFS_DEFAULT; } ND("%s<->%s txr %d txd %d rxr %d rxd %d", na->name, ifp->if_xname, na->num_tx_rings, na->num_tx_desc, na->num_rx_rings, na->num_rx_desc); error = netmap_attach_common(na); if (error) { goto err_free; } hwna->na_flags |= NAF_BUSY; return 0; err_free: hwna->na_vp = hwna->na_hostvp = NULL; netmap_adapter_put(hwna); nm_os_free(bna); return error; } struct nm_bridge * netmap_init_bridges2(u_int n) { int i; struct nm_bridge *b; b = nm_os_malloc(sizeof(struct nm_bridge) * n); if (b == NULL) return NULL; for (i = 0; i < n; i++) BDG_RWINIT(&b[i]); return b; } void netmap_uninit_bridges2(struct nm_bridge *b, u_int n) { int i; if (b == NULL) return; for (i = 0; i < n; i++) BDG_RWDESTROY(&b[i]); nm_os_free(b); } int netmap_init_bridges(void) { #ifdef CONFIG_NET_NS return netmap_bns_register(); #else nm_bridges = netmap_init_bridges2(NM_BRIDGES); if (nm_bridges == NULL) return ENOMEM; return 0; #endif } void netmap_uninit_bridges(void) { #ifdef CONFIG_NET_NS netmap_bns_unregister(); #else netmap_uninit_bridges2(nm_bridges, NM_BRIDGES); #endif } #endif /* WITH_VALE */ Index: head/sys/dev/re/if_re.c =================================================================== --- head/sys/dev/re/if_re.c (revision 332422) +++ head/sys/dev/re/if_re.c (revision 332423) @@ -1,4080 +1,4080 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997, 1998-2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver * * Written by Bill Paul * Senior Networking Software Engineer * Wind River Systems */ /* * This driver is designed to support RealTek's next generation of * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. * * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible * with the older 8139 family, however it also supports a special * C+ mode of operation that provides several new performance enhancing * features. These include: * * o Descriptor based DMA mechanism. Each descriptor represents * a single packet fragment. Data buffers may be aligned on * any byte boundary. * * o 64-bit DMA * * o TCP/IP checksum offload for both RX and TX * * o High and normal priority transmit DMA rings * * o VLAN tag insertion and extraction * * o TCP large send (segmentation offload) * * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ * programming API is fairly straightforward. The RX filtering, EEPROM * access and PHY access is the same as it is on the older 8139 series * chips. * * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the * same programming API and feature set as the 8139C+ with the following * differences and additions: * * o 1000Mbps mode * * o Jumbo frames * * o GMII and TBI ports/registers for interfacing with copper * or fiber PHYs * * o RX and TX DMA rings can have up to 1024 descriptors * (the 8139C+ allows a maximum of 64) * * o Slight differences in register layout from the 8139C+ * * The TX start and timer interrupt registers are at different locations * on the 8169 than they are on the 8139C+. Also, the status word in the * RX descriptor has a slightly different bit layout. The 8169 does not * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' * copper gigE PHY. * * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs * (the 'S' stands for 'single-chip'). These devices have the same * programming API as the older 8169, but also have some vendor-specific * registers for the on-board PHY. The 8110S is a LAN-on-motherboard * part designed to be pin-compatible with the RealTek 8100 10/100 chip. * * This driver takes advantage of the RX and TX checksum offload and * VLAN tag insertion/extraction features. It also implements TX * interrupt moderation using the timer interrupt registers, which * significantly reduces TX interrupt load. There is also support * for jumbo frames, however the 8169/8169S/8110S can not transmit * jumbo frames larger than 7440, so the max MTU possible with this * driver is 7422 bytes. 
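 *
 * As a rough check of that figure (assuming the 7440-byte limit has to
 * cover the usual 14-byte Ethernet header and 4-byte CRC as well as the
 * payload):
 *
 *	7440 max frame - 14 header - 4 CRC = 7422 bytes of MTU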
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(re, pci, 1, 1, 1); MODULE_DEPEND(re, ether, 1, 1, 1); MODULE_DEPEND(re, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* Tunables. */ static int intr_filter = 0; TUNABLE_INT("hw.re.intr_filter", &intr_filter); static int msi_disable = 0; TUNABLE_INT("hw.re.msi_disable", &msi_disable); static int msix_disable = 0; TUNABLE_INT("hw.re.msix_disable", &msix_disable); static int prefer_iomap = 0; TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap); #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. */ static const struct rl_type re_devs[] = { { DLINK_VENDORID, DLINK_DEVICEID_528T, 0, "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, { DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0, "D-Link DGE-530(T) Gigabit Ethernet Adapter" }, { RT_VENDORID, RT_DEVICEID_8139, 0, "RealTek 8139C+ 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8101E, 0, "RealTek 810xE PCIe 10/100baseTX" }, { RT_VENDORID, RT_DEVICEID_8168, 0, "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" }, { NCUBE_VENDORID, RT_DEVICEID_8168, 0, "TP-Link TG-3468 v2 (RTL8168) Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, 0, "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169SC, 0, "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0, "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0, "Linksys EG1032 (RTL8169S) Gigabit Ethernet" }, { USR_VENDORID, USR_DEVICEID_997902, 0, "US Robotics 997902 (RTL8169S) Gigabit Ethernet" } }; static const struct rl_hwrev re_hwrevs[] = { { RL_HWREV_8139, RL_8139, "", RL_MTU }, { RL_HWREV_8139A, RL_8139, "A", RL_MTU }, { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU }, { RL_HWREV_8139B, RL_8139, "B", RL_MTU }, { RL_HWREV_8130, RL_8139, "8130", RL_MTU }, { RL_HWREV_8139C, RL_8139, "C", RL_MTU }, { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU }, { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU }, { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU }, { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU }, { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, { RL_HWREV_8100, RL_8139, "8100", RL_MTU }, { RL_HWREV_8101, RL_8139, "8101", RL_MTU }, { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU }, { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU }, { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU }, { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU }, { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU }, { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU }, { RL_HWREV_8401E, RL_8169, "8401E", RL_MTU }, { RL_HWREV_8402, RL_8169, "8402", RL_MTU }, { RL_HWREV_8105E, RL_8169, "8105E", RL_MTU }, { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU }, { RL_HWREV_8106E, RL_8169, "8106E", RL_MTU }, { 
RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K }, { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K }, { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K }, { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K}, { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K}, { RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K}, { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K}, { RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K}, { RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K}, { RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K}, { RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K}, { RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K}, { 0, 0, NULL, 0 } }; static int re_probe (device_t); static int re_attach (device_t); static int re_detach (device_t); static int re_encap (struct rl_softc *, struct mbuf **); static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); static int re_allocmem (device_t, struct rl_softc *); static __inline void re_discard_rxbuf (struct rl_softc *, int); static int re_newbuf (struct rl_softc *, int); static int re_jumbo_newbuf (struct rl_softc *, int); static int re_rx_list_init (struct rl_softc *); static int re_jrx_list_init (struct rl_softc *); static int re_tx_list_init (struct rl_softc *); #ifdef RE_FIXUP_RX static __inline void re_fixup_rx (struct mbuf *); #endif static int re_rxeof (struct rl_softc *, int *); static void re_txeof (struct rl_softc *); #ifdef DEVICE_POLLING static int re_poll (struct ifnet *, enum poll_cmd, int); static int re_poll_locked (struct ifnet *, enum poll_cmd, int); #endif static int re_intr (void *); static void re_intr_msi (void *); static void re_tick (void *); static void re_int_task (void *, int); static void re_start (struct ifnet *); static void re_start_locked (struct ifnet *); static int re_ioctl (struct ifnet *, u_long, caddr_t); static void re_init (void *); static void re_init_locked (struct rl_softc *); static void re_stop (struct rl_softc *); static void re_watchdog (struct rl_softc *); static int re_suspend (device_t); static int re_resume (device_t); static int re_shutdown (device_t); static int re_ifmedia_upd (struct ifnet *); static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); static void re_eeprom_putbyte (struct rl_softc *, int); static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); static int re_gmii_readreg (device_t, int, int); static int re_gmii_writereg (device_t, int, int, int); static int re_miibus_readreg (device_t, int, int); static int re_miibus_writereg (device_t, int, int, int); static void re_miibus_statchg (device_t); static void re_set_jumbo (struct rl_softc *, int); static void re_set_rxmode (struct rl_softc *); static void re_reset (struct rl_softc *); static void re_setwol (struct rl_softc *); static void re_clrwol (struct rl_softc *); static void re_set_linkspeed (struct rl_softc *); #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include MODULE_DEPEND(re, netmap, 1, 1, 1); #endif /* !DEV_NETMAP */ #ifdef RE_DIAG static int re_diag (struct rl_softc *); #endif static void re_add_sysctls (struct rl_softc *); static int re_sysctl_stats (SYSCTL_HANDLER_ARGS); static int 
sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS); static device_method_t re_methods[] = { /* Device interface */ DEVMETHOD(device_probe, re_probe), DEVMETHOD(device_attach, re_attach), DEVMETHOD(device_detach, re_detach), DEVMETHOD(device_suspend, re_suspend), DEVMETHOD(device_resume, re_resume), DEVMETHOD(device_shutdown, re_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, re_miibus_readreg), DEVMETHOD(miibus_writereg, re_miibus_writereg), DEVMETHOD(miibus_statchg, re_miibus_statchg), DEVMETHOD_END }; static driver_t re_driver = { "re", re_methods, sizeof(struct rl_softc) }; static devclass_t re_devclass; DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. */ static void re_eeprom_putbyte(struct rl_softc *sc, int addr) { int d, i; d = addr | (RL_9346_READ << sc->rl_eewidth); /* * Feed in each bit and strobe the clock. */ for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) { int i; u_int16_t word = 0; /* * Send address of word we want to read. */ re_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) { int i; u_int16_t word = 0, *ptr; CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); DELAY(100); for (i = 0; i < cnt; i++) { CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); re_eeprom_getword(sc, off + i, &word); CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); ptr = (u_int16_t *)(dest + (i * 2)); *ptr = word; } CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); } static int re_gmii_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); /* Let the rgephy driver read the GMEDIASTAT register */ if (reg == RL_GMEDIASTAT) { rval = CSR_READ_1(sc, RL_GMEDIASTAT); return (rval); } CSR_WRITE_4(sc, RL_PHYAR, reg << 16); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (rval & RL_PHYAR_BUSY) break; DELAY(25); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY read failed\n"); return (0); } /* * Controller requires a 20us delay to process next MDIO request. */ DELAY(20); return (rval & RL_PHYAR_PHYDATA); } static int re_gmii_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (!(rval & RL_PHYAR_BUSY)) break; DELAY(25); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY write failed\n"); return (0); } /* * Controller requires a 20us delay to process next MDIO request. 
*/ DELAY(20); return (0); } static int re_miibus_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int16_t rval = 0; u_int16_t re8139_reg = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_readreg(dev, phy, reg); return (rval); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); /* * Allow the rlphy driver to read the media status * register. If we have a link partner which does not * support NWAY, this is the register which will tell * us the results of parallel detection. */ case RL_MEDIASTAT: rval = CSR_READ_1(sc, RL_MEDIASTAT); return (rval); default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } rval = CSR_READ_2(sc, re8139_reg); if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { /* 8139C+ has different bit layout. */ rval &= ~(BMCR_LOOP | BMCR_ISO); } return (rval); } static int re_miibus_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int16_t re8139_reg = 0; int rval = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_writereg(dev, phy, reg, data); return (rval); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; if (sc->rl_type == RL_8139CPLUS) { /* 8139C+ has different bit layout. */ data &= ~(BMCR_LOOP | BMCR_ISO); } break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } CSR_WRITE_2(sc, re8139_reg, data); return (0); } static void re_miibus_statchg(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->rl_miibus); ifp = sc->rl_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->rl_flags &= ~RL_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->rl_flags |= RL_FLAG_LINK; break; case IFM_1000_T: if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) break; sc->rl_flags |= RL_FLAG_LINK; break; default: break; } } /* * RealTek controllers do not provide any interface to the RX/TX * MACs for resolved speed, duplex and flow-control parameters. */ } /* * Set the RX configuration and 64-bit multicast hash filter. */ static void re_set_rxmode(struct rl_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; uint32_t hashes[2] = { 0, 0 }; uint32_t h, rxfilt; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0) rxfilt |= RL_RXCFG_EARLYOFF; else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) rxfilt |= RL_RXCFG_EARLYOFFV2; if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { if (ifp->if_flags & IFF_PROMISC) rxfilt |= RL_RXCFG_RX_ALLPHYS; /* * Unlike other hardwares, we have to explicitly set * RL_RXCFG_RX_MULTI to receive multicast frames in * promiscuous mode. 
*/ rxfilt |= RL_RXCFG_RX_MULTI; hashes[0] = hashes[1] = 0xffffffff; goto done; } if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } if_maddr_runlock(ifp); if (hashes[0] != 0 || hashes[1] != 0) { /* * For some unfathomable reason, RealTek decided to * reverse the order of the multicast hash registers * in the PCI Express parts. This means we have to * write the hash pattern in reverse order for those * devices. */ if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { h = bswap32(hashes[0]); hashes[0] = bswap32(hashes[1]); hashes[1] = h; } rxfilt |= RL_RXCFG_RX_MULTI; } if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) { /* Disable multicast filtering due to silicon bug. */ hashes[0] = 0xffffffff; hashes[1] = 0xffffffff; } done: CSR_WRITE_4(sc, RL_MAR0, hashes[0]); CSR_WRITE_4(sc, RL_MAR4, hashes[1]); CSR_WRITE_4(sc, RL_RXCFG, rxfilt); } static void re_reset(struct rl_softc *sc) { int i; RL_LOCK_ASSERT(sc); CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) break; } if (i == RL_TIMEOUT) device_printf(sc->rl_dev, "reset never completed!\n"); if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) CSR_WRITE_1(sc, 0x82, 1); if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S) re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); } #ifdef RE_DIAG /* * The following routine is designed to test for a defect on some * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# * lines connected to the bus, however for a 32-bit only card, they * should be pulled high. The result of this defect is that the * NIC will not work right if you plug it into a 64-bit slot: DMA * operations will be done with 64-bit transfers, which will fail * because the 64-bit data lines aren't connected. * * There's no way to work around this (short of talking a soldering * iron to the board), however we can detect it. The method we use * here is to put the NIC into digital loopback mode, set the receiver * to promiscuous mode, and then try to send a frame. We then compare * the frame data we sent to what was received. If the data matches, * then the NIC is working correctly, otherwise we know the user has * a defective NIC which has been mistakenly plugged into a 64-bit PCI * slot. In the latter case, there's no way the NIC can work correctly, * so we print out a message on the console and abort the device attach. */ static int re_diag(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mbuf *m0; struct ether_header *eh; struct rl_desc *cur_rx; u_int16_t status; u_int32_t rxstat; int total_len, i, error = 0, phyaddr; u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; /* Allocate a single mbuf */ MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 == NULL) return (ENOBUFS); RL_LOCK(sc); /* * Initialize the NIC in test mode. 
This sets the chip up * so that it can send and receive frames, but performs the * following special functions: * - Puts receiver in promiscuous mode * - Enables digital loopback mode * - Leaves interrupts turned off */ ifp->if_flags |= IFF_PROMISC; sc->rl_testmode = 1; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); sc->rl_flags |= RL_FLAG_LINK; if (sc->rl_type == RL_8169) phyaddr = 1; else phyaddr = 0; re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); for (i = 0; i < RL_TIMEOUT; i++) { status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); if (!(status & BMCR_RESET)) break; } re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); CSR_WRITE_2(sc, RL_ISR, RL_INTRS); DELAY(100000); /* Put some data in the mbuf */ eh = mtod(m0, struct ether_header *); bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); eh->ether_type = htons(ETHERTYPE_IP); m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; /* * Queue the packet, start transmission. * Note: IF_HANDOFF() ultimately calls re_start() for us. */ CSR_WRITE_2(sc, RL_ISR, 0xFFFF); RL_UNLOCK(sc); /* XXX: re_diag must not be called when in ALTQ mode */ IF_HANDOFF(&ifp->if_snd, m0, ifp); RL_LOCK(sc); m0 = NULL; /* Wait for it to propagate through the chip */ DELAY(100000); for (i = 0; i < RL_TIMEOUT; i++) { status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) break; DELAY(10); } if (i == RL_TIMEOUT) { device_printf(sc->rl_dev, "diagnostic failed, failed to receive packet in" " loopback mode\n"); error = EIO; goto done; } /* * The packet should have been dumped into the first * entry in the RX DMA ring. Grab it from there. */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap); m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; eh = mtod(m0, struct ether_header *); cur_rx = &sc->rl_ldata.rl_rx_list[0]; total_len = RL_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->rl_cmdstat); if (total_len != ETHER_MIN_LEN) { device_printf(sc->rl_dev, "diagnostic failed, received short packet\n"); error = EIO; goto done; } /* Test that the received packet data matches what we sent. */ if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || ntohs(eh->ether_type) != ETHERTYPE_IP) { device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", dst, ":", src, ":", ETHERTYPE_IP); device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", eh->ether_dhost, ":", eh->ether_shost, ":", ntohs(eh->ether_type)); device_printf(sc->rl_dev, "You may have a defective 32-bit " "NIC plugged into a 64-bit PCI slot.\n"); device_printf(sc->rl_dev, "Please re-install the NIC in a " "32-bit slot for proper operation.\n"); device_printf(sc->rl_dev, "Read the re(4) man page for more " "details.\n"); error = EIO; } done: /* Turn interface off, release resources */ sc->rl_testmode = 0; sc->rl_flags &= ~RL_FLAG_LINK; ifp->if_flags &= ~IFF_PROMISC; re_stop(sc); if (m0 != NULL) m_freem(m0); RL_UNLOCK(sc); return (error); } #endif /* * Probe for a RealTek 8139C+/8169/8110 chip. 
Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int re_probe(device_t dev) { const struct rl_type *t; uint16_t devid, vendor; uint16_t revid, sdevid; int i; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); revid = pci_get_revid(dev); sdevid = pci_get_subdevice(dev); if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) { if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) { /* * Only attach to rev. 3 of the Linksys EG1032 adapter. * Rev. 2 is supported by sk(4). */ return (ENXIO); } } if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { if (revid != 0x20) { /* 8139, let rl(4) take care of this device. */ return (ENXIO); } } t = re_devs; for (i = 0; i < nitems(re_devs); i++, t++) { if (vendor == t->rl_vid && devid == t->rl_did) { device_set_desc(dev, t->rl_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } /* * Map a single buffer address. */ static void re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int re_allocmem(device_t dev, struct rl_softc *sc) { bus_addr_t lowaddr; bus_size_t rx_list_size, tx_list_size; int error; int i; rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); /* * Allocate the parent bus DMA tag appropriate for PCI. * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD * register should be set. However some RealTek chips are known * to be buggy on DAC handling, therefore disable DAC by limiting * DMA address space to 32bit. PCIe variants of RealTek chips * may not have the limitation. */ lowaddr = BUS_SPACE_MAXADDR; if ((sc->rl_flags & RL_FLAG_PCIE) == 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->rl_parent_tag); if (error) { device_printf(dev, "could not allocate parent DMA tag\n"); return (error); } /* * Allocate map for TX mbufs. */ error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, NULL, NULL, &sc->rl_ldata.rl_tx_mtag); if (error) { device_printf(dev, "could not allocate TX DMA tag\n"); return (error); } /* * Allocate map for RX mbufs. */ if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL, &sc->rl_ldata.rl_jrx_mtag); if (error) { device_printf(dev, "could not allocate jumbo RX DMA tag\n"); return (error); } } error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); if (error) { device_printf(dev, "could not allocate RX DMA tag\n"); return (error); } /* * Allocate map for TX descriptor list. 
*/ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, tx_list_size, 1, tx_list_size, 0, NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); if (error) { device_printf(dev, "could not allocate TX DMA ring tag\n"); return (error); } /* Allocate DMA'able memory for the TX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_tx_list_map); if (error) { device_printf(dev, "could not allocate TX DMA ring\n"); return (error); } /* Load the map for the TX ring. */ sc->rl_ldata.rl_tx_list_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, tx_list_size, re_dma_map_addr, &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { device_printf(dev, "could not load TX DMA ring\n"); return (ENOMEM); } /* Create DMA maps for TX buffers */ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); if (error) { device_printf(dev, "could not create DMA map for TX\n"); return (error); } } /* * Allocate map for RX descriptor list. */ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, rx_list_size, 1, rx_list_size, 0, NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); if (error) { device_printf(dev, "could not create RX DMA ring tag\n"); return (error); } /* Allocate DMA'able memory for the RX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_rx_list_map); if (error) { device_printf(dev, "could not allocate RX DMA ring\n"); return (error); } /* Load the map for the RX ring. */ sc->rl_ldata.rl_rx_list_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, rx_list_size, re_dma_map_addr, &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { device_printf(dev, "could not load RX DMA ring\n"); return (ENOMEM); } /* Create DMA maps for RX buffers */ if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, &sc->rl_ldata.rl_jrx_sparemap); if (error) { device_printf(dev, "could not create spare DMA map for jumbo RX\n"); return (error); } for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); if (error) { device_printf(dev, "could not create DMA map for jumbo RX\n"); return (error); } } } error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, &sc->rl_ldata.rl_rx_sparemap); if (error) { device_printf(dev, "could not create spare DMA map for RX\n"); return (error); } for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); if (error) { device_printf(dev, "could not create DMA map for RX\n"); return (error); } } /* Create DMA map for statistics. 
*/ error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL, &sc->rl_ldata.rl_stag); if (error) { device_printf(dev, "could not create statistics DMA tag\n"); return (error); } /* Allocate DMA'able memory for statistics. */ error = bus_dmamem_alloc(sc->rl_ldata.rl_stag, (void **)&sc->rl_ldata.rl_stats, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_smap); if (error) { device_printf(dev, "could not allocate statistics DMA memory\n"); return (error); } /* Load the map for statistics. */ sc->rl_ldata.rl_stats_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr, &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) { device_printf(dev, "could not load statistics DMA memory\n"); return (ENOMEM); } return (0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int re_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; u_int16_t as[ETHER_ADDR_LEN / 2]; struct rl_softc *sc; struct ifnet *ifp; const struct rl_hwrev *hw_rev; int capmask, error = 0, hwrev, i, msic, msixc, phy, reg, rid; u_int32_t cap, ctl; u_int16_t devid, re_did = 0; uint8_t cfg; sc = device_get_softc(dev); sc->rl_dev = dev; mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); /* * Map control/status registers. */ pci_enable_busmaster(dev); devid = pci_get_device(dev); /* * Prefer memory space register mapping over IO space. * Because RTL8169SC does not seem to work when memory mapping * is used always activate io mapping. */ if (devid == RT_DEVICEID_8169SC) prefer_iomap = 1; if (prefer_iomap == 0) { sc->rl_res_id = PCIR_BAR(1); sc->rl_res_type = SYS_RES_MEMORY; /* RTL8168/8101E seems to use different BARs. */ if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E) sc->rl_res_id = PCIR_BAR(2); } else { sc->rl_res_id = PCIR_BAR(0); sc->rl_res_type = SYS_RES_IOPORT; } sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); if (sc->rl_res == NULL && prefer_iomap == 0) { sc->rl_res_id = PCIR_BAR(0); sc->rl_res_type = SYS_RES_IOPORT; sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); } if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); msic = pci_msi_count(dev); msixc = pci_msix_count(dev); if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { sc->rl_flags |= RL_FLAG_PCIE; sc->rl_expcap = reg; } if (bootverbose) { device_printf(dev, "MSI count : %d\n", msic); device_printf(dev, "MSI-X count : %d\n", msixc); } if (msix_disable > 0) msixc = 0; if (msi_disable > 0) msic = 0; /* Prefer MSI-X to MSI. 
*/ if (msixc > 0) { msixc = RL_MSI_MESSAGES; rid = PCIR_BAR(4); sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->rl_res_pba == NULL) { device_printf(sc->rl_dev, "could not allocate MSI-X PBA resource\n"); } if (sc->rl_res_pba != NULL && pci_alloc_msix(dev, &msixc) == 0) { if (msixc == RL_MSI_MESSAGES) { device_printf(dev, "Using %d MSI-X message\n", msixc); sc->rl_flags |= RL_FLAG_MSIX; } else pci_release_msi(dev); } if ((sc->rl_flags & RL_FLAG_MSIX) == 0) { if (sc->rl_res_pba != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); sc->rl_res_pba = NULL; msixc = 0; } } /* Prefer MSI to INTx. */ if (msixc == 0 && msic > 0) { msic = RL_MSI_MESSAGES; if (pci_alloc_msi(dev, &msic) == 0) { if (msic == RL_MSI_MESSAGES) { device_printf(dev, "Using %d MSI message\n", msic); sc->rl_flags |= RL_FLAG_MSI; /* Explicitly set MSI enable bit. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, RL_CFG2); cfg |= RL_CFG2_MSI; CSR_WRITE_1(sc, RL_CFG2, cfg); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); } else pci_release_msi(dev); } if ((sc->rl_flags & RL_FLAG_MSI) == 0) msic = 0; } /* Allocate interrupt */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) { rid = 0; sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq[0] == NULL) { device_printf(dev, "couldn't allocate IRQ resources\n"); error = ENXIO; goto fail; } } else { for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { sc->rl_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->rl_irq[i] == NULL) { device_printf(dev, "couldn't allocate IRQ resources for " "message %d\n", rid); error = ENXIO; goto fail; } } } if ((sc->rl_flags & RL_FLAG_MSI) == 0) { CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, RL_CFG2); if ((cfg & RL_CFG2_MSI) != 0) { device_printf(dev, "turning off MSI enable bit.\n"); cfg &= ~RL_CFG2_MSI; CSR_WRITE_1(sc, RL_CFG2, cfg); } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); } /* Disable ASPM L0S/L1 and CLKREQ. */ if (sc->rl_expcap != 0) { cap = pci_read_config(dev, sc->rl_expcap + PCIER_LINK_CAP, 2); if ((cap & PCIEM_LINK_CAP_ASPM) != 0) { ctl = pci_read_config(dev, sc->rl_expcap + PCIER_LINK_CTL, 2); if ((ctl & (PCIEM_LINK_CTL_ECPM | PCIEM_LINK_CTL_ASPMC))!= 0) { ctl &= ~(PCIEM_LINK_CTL_ECPM | PCIEM_LINK_CTL_ASPMC); pci_write_config(dev, sc->rl_expcap + PCIER_LINK_CTL, ctl, 2); device_printf(dev, "ASPM disabled\n"); } } else device_printf(dev, "no ASPM capability\n"); } hw_rev = re_hwrevs; hwrev = CSR_READ_4(sc, RL_TXCFG); switch (hwrev & 0x70000000) { case 0x00000000: case 0x10000000: device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000); hwrev &= (RL_TXCFG_HWREV | 0x80000000); break; default: device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000); sc->rl_macrev = hwrev & 0x00700000; hwrev &= RL_TXCFG_HWREV; break; } device_printf(dev, "MAC rev. 
0x%08x\n", sc->rl_macrev); while (hw_rev->rl_desc != NULL) { if (hw_rev->rl_rev == hwrev) { sc->rl_type = hw_rev->rl_type; sc->rl_hwrev = hw_rev; break; } hw_rev++; } if (hw_rev->rl_desc == NULL) { device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev); error = ENXIO; goto fail; } switch (hw_rev->rl_rev) { case RL_HWREV_8139CPLUS: sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; break; case RL_HWREV_8100E: case RL_HWREV_8101E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; break; case RL_HWREV_8102E: case RL_HWREV_8102EL: case RL_HWREV_8102EL_SPIN1: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; break; case RL_HWREV_8103E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP; break; case RL_HWREV_8401E: case RL_HWREV_8105E: case RL_HWREV_8105E_SPIN1: case RL_HWREV_8106E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; break; case RL_HWREV_8402: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ; break; case RL_HWREV_8168B_SPIN1: case RL_HWREV_8168B_SPIN2: sc->rl_flags |= RL_FLAG_WOLRXENB; /* FALLTHROUGH */ case RL_HWREV_8168B_SPIN3: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; break; case RL_HWREV_8168C_SPIN2: sc->rl_flags |= RL_FLAG_MACSLEEP; /* FALLTHROUGH */ case RL_HWREV_8168C: if (sc->rl_macrev == 0x00200000) sc->rl_flags |= RL_FLAG_MACSLEEP; /* FALLTHROUGH */ case RL_HWREV_8168CP: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168D: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168DP: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168E_VL: case RL_HWREV_8168F: sc->rl_flags |= RL_FLAG_EARLYOFF; /* FALLTHROUGH */ case RL_HWREV_8411: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168EP: case RL_HWREV_8168G: case RL_HWREV_8411B: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK | RL_FLAG_8168G_PLUS; break; case RL_HWREV_8168GU: case RL_HWREV_8168H: if (pci_get_device(dev) == RT_DEVICEID_8101E) { /* RTL8106E(US), RTL8107E */ sc->rl_flags |= RL_FLAG_FASTETHER; } else sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_8168G_PLUS; break; case RL_HWREV_8169_8110SB: case 
RL_HWREV_8169_8110SBL: case RL_HWREV_8169_8110SC: case RL_HWREV_8169_8110SCE: sc->rl_flags |= RL_FLAG_PHYWAKE; /* FALLTHROUGH */ case RL_HWREV_8169: case RL_HWREV_8169S: case RL_HWREV_8110S: sc->rl_flags |= RL_FLAG_MACRESET; break; default: break; } if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) { sc->rl_cfg0 = RL_8139_CFG0; sc->rl_cfg1 = RL_8139_CFG1; sc->rl_cfg2 = 0; sc->rl_cfg3 = RL_8139_CFG3; sc->rl_cfg4 = RL_8139_CFG4; sc->rl_cfg5 = RL_8139_CFG5; } else { sc->rl_cfg0 = RL_CFG0; sc->rl_cfg1 = RL_CFG1; sc->rl_cfg2 = RL_CFG2; sc->rl_cfg3 = RL_CFG3; sc->rl_cfg4 = RL_CFG4; sc->rl_cfg5 = RL_CFG5; } /* Reset the adapter. */ RL_LOCK(sc); re_reset(sc); RL_UNLOCK(sc); /* Enable PME. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, sc->rl_cfg1); cfg |= RL_CFG1_PME; CSR_WRITE_1(sc, sc->rl_cfg1, cfg); cfg = CSR_READ_1(sc, sc->rl_cfg5); cfg &= RL_CFG5_PME_STS; CSR_WRITE_1(sc, sc->rl_cfg5, cfg); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); if ((sc->rl_flags & RL_FLAG_PAR) != 0) { /* * XXX Should have a better way to extract station * address from EEPROM. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); } else { sc->rl_eewidth = RL_9356_ADDR_LEN; re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); if (re_did != 0x8129) sc->rl_eewidth = RL_9346_ADDR_LEN; /* * Get station address from the EEPROM. */ re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); for (i = 0; i < ETHER_ADDR_LEN / 2; i++) as[i] = le16toh(as[i]); bcopy(as, eaddr, ETHER_ADDR_LEN); } if (sc->rl_type == RL_8169) { /* Set RX length mask and number of descriptors. */ sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; sc->rl_txstart = RL_GTXSTART; sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; } else { /* Set RX length mask and number of descriptors. */ sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; sc->rl_txstart = RL_TXSTART; sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; } error = re_allocmem(dev, sc); if (error) goto fail; re_add_sysctls(sc); ifp = sc->rl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* Take controller out of deep sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) | 0x01); else CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) & ~0x01); } /* Take PHY out of power down mode. */ if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) { CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); if (hw_rev->rl_rev == RL_HWREV_8401E) CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); } if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) { re_gmii_writereg(dev, 1, 0x1f, 0); re_gmii_writereg(dev, 1, 0x0e, 0); } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = re_ioctl; ifp->if_start = re_start; /* * RTL8168/8111C generates wrong IP checksummed frame if the * packet has IP options so disable TX checksum offloading. 
*/ if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C || sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 || sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) { ifp->if_hwassist = 0; ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TSO4; } else { ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; } ifp->if_hwassist |= CSUM_TSO; ifp->if_capenable = ifp->if_capabilities; ifp->if_init = re_init; IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); #define RE_PHYAD_INTERNAL 0 /* Do MII setup. */ phy = RE_PHYAD_INTERNAL; if (sc->rl_type == RL_8169) phy = 1; capmask = BMSR_DEFCAPMASK; if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) capmask &= ~BMSR_EXTSTAT; error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd, re_ifmedia_sts, capmask, phy, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* VLAN capability setup */ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; if (ifp->if_capabilities & IFCAP_HWCSUM) ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; /* Enable WOL if PM is supported. */ if (pci_find_cap(sc->rl_dev, PCIY_PMG, ®) == 0) ifp->if_capabilities |= IFCAP_WOL; ifp->if_capenable = ifp->if_capabilities; ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST); /* * Don't enable TSO by default. It is known to generate * corrupted TCP segments(bad TCP options) under certain * circumstances. */ ifp->if_hwassist &= ~CSUM_TSO; ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); #ifdef DEV_NETMAP re_netmap_attach(sc); #endif /* DEV_NETMAP */ #ifdef RE_DIAG /* * Perform hardware diagnostic on the original RTL8169. * Some 32-bit cards were incorrectly wired and would * malfunction if plugged into a 64-bit slot. */ if (hwrev == RL_HWREV_8169) { error = re_diag(sc); if (error) { device_printf(dev, "attach aborted due to hardware diag failure\n"); ether_ifdetach(ifp); goto fail; } } #endif #ifdef RE_TX_MODERATION intr_filter = 1; #endif /* Hook interrupt last to avoid having to lock softc */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && intr_filter == 0) { error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc, &sc->rl_intrhand[0]); } else { error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, &sc->rl_intrhand[0]); } if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); } fail: if (error) re_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
*/ static int re_detach(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; int i, rid; sc = device_get_softc(dev); ifp = sc->rl_ifp; KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif RL_LOCK(sc); #if 0 sc->suspended = 1; #endif re_stop(sc); RL_UNLOCK(sc); callout_drain(&sc->rl_stat_callout); taskqueue_drain(taskqueue_fast, &sc->rl_inttask); /* * Force off the IFF_UP flag here, in case someone * still had a BPF descriptor attached to this * interface. If they do, ether_ifdetach() will cause * the BPF code to try and clear the promisc mode * flag, which will bubble down to re_ioctl(), * which will try to call re_init() again. This will * turn the NIC back on and restart the MII ticker, * which will panic the system when the kernel tries * to invoke the re_tick() function that isn't there * anymore. */ ifp->if_flags &= ~IFF_UP; ether_ifdetach(ifp); } if (sc->rl_miibus) device_delete_child(dev, sc->rl_miibus); bus_generic_detach(dev); /* * The rest is resource deallocation, so we should already be * stopped here. */ if (sc->rl_intrhand[0] != NULL) { bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); sc->rl_intrhand[0] = NULL; } if (ifp != NULL) { #ifdef DEV_NETMAP netmap_detach(ifp); #endif /* DEV_NETMAP */ if_free(ifp); } if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) rid = 0; else rid = 1; if (sc->rl_irq[0] != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]); sc->rl_irq[0] = NULL; } if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0) pci_release_msi(dev); if (sc->rl_res_pba) { rid = PCIR_BAR(4); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); } if (sc->rl_res) bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, sc->rl_res); /* Unload and free the RX DMA ring memory and map */ if (sc->rl_ldata.rl_rx_list_tag) { if (sc->rl_ldata.rl_rx_list_addr) bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map); if (sc->rl_ldata.rl_rx_list) bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); } /* Unload and free the TX DMA ring memory and map */ if (sc->rl_ldata.rl_tx_list_tag) { if (sc->rl_ldata.rl_tx_list_addr) bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map); if (sc->rl_ldata.rl_tx_list) bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); } /* Destroy all the RX and TX buffer maps */ if (sc->rl_ldata.rl_tx_mtag) { for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap) bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, sc->rl_ldata.rl_tx_desc[i].tx_dmamap); } bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); } if (sc->rl_ldata.rl_rx_mtag) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap) bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[i].rx_dmamap); } if (sc->rl_ldata.rl_rx_sparemap) bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_sparemap); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); } if (sc->rl_ldata.rl_jrx_mtag) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap) bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 
} if (sc->rl_ldata.rl_jrx_sparemap) bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, sc->rl_ldata.rl_jrx_sparemap); bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag); } /* Unload and free the stats buffer and map */ if (sc->rl_ldata.rl_stag) { if (sc->rl_ldata.rl_stats_addr) bus_dmamap_unload(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap); if (sc->rl_ldata.rl_stats) bus_dmamem_free(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); bus_dma_tag_destroy(sc->rl_ldata.rl_stag); } if (sc->rl_parent_tag) bus_dma_tag_destroy(sc->rl_parent_tag); mtx_destroy(&sc->rl_mtx); return (0); } static __inline void re_discard_rxbuf(struct rl_softc *sc, int idx) { struct rl_desc *desc; struct rl_rxdesc *rxd; uint32_t cmdstat; if (sc->rl_ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) rxd = &sc->rl_ldata.rl_jrx_desc[idx]; else rxd = &sc->rl_ldata.rl_rx_desc[idx]; desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; cmdstat = rxd->rx_size; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); } static int re_newbuf(struct rl_softc *sc, int idx) { struct mbuf *m; struct rl_rxdesc *rxd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct rl_desc *desc; uint32_t cmdstat; int error, nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; #ifdef RE_FIXUP_RX /* * This is part of an evil trick to deal with non-x86 platforms. * The RealTek chip requires RX buffers to be aligned on 64-bit * boundaries, but that will hose non-x86 machines. To get around * this, we leave some empty space at the start of each buffer * and for non-x86 hosts, we copy the buffer back six bytes * to achieve word alignment. This is slightly more efficient * than allocating a new buffer, copying the contents, and * discarding the old buffer. 
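 *
 * A sketch of the arithmetic, assuming RE_ETHER_ALIGN is 8 and the
 * generic ETHER_ALIGN is 2 (which is what the "six bytes" above
 * implies): m_adj(m, RE_ETHER_ALIGN) leaves the frame starting at
 * offset 8, satisfying the chip's 64-bit DMA alignment; re_fixup_rx()
 * then slides the data back by 8 - 2 = 6 bytes, so the frame starts at
 * offset 2 and the IP header that follows the 14-byte Ethernet header
 * lands on a 4-byte boundary (2 + 14 = 16).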
*/ m_adj(m, RE_ETHER_ALIGN); #endif error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); rxd = &sc->rl_ldata.rl_rx_desc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); } rxd->rx_m = m; map = rxd->rx_dmamap; rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; rxd->rx_size = segs[0].ds_len; sc->rl_ldata.rl_rx_sparemap = map; bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); cmdstat = segs[0].ds_len; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); return (0); } static int re_jumbo_newbuf(struct rl_softc *sc, int idx) { struct mbuf *m; struct rl_rxdesc *rxd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct rl_desc *desc; uint32_t cmdstat; int error, nsegs; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MJUM9BYTES; #ifdef RE_FIXUP_RX m_adj(m, RE_ETHER_ALIGN); #endif error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag, sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); rxd = &sc->rl_ldata.rl_jrx_desc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); } rxd->rx_m = m; map = rxd->rx_dmamap; rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap; rxd->rx_size = segs[0].ds_len; sc->rl_ldata.rl_jrx_sparemap = map; bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); cmdstat = segs[0].ds_len; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); return (0); } #ifdef RE_FIXUP_RX static __inline void re_fixup_rx(struct mbuf *m) { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; } #endif static int re_tx_list_init(struct rl_softc *sc) { struct rl_desc *desc; int i; RL_LOCK_ASSERT(sc); bzero(sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; #ifdef DEV_NETMAP re_netmap_tx_init(sc); #endif /* DEV_NETMAP */ /* Set EOR. 
*/ desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->rl_ldata.rl_tx_prodidx = 0; sc->rl_ldata.rl_tx_considx = 0; sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; return (0); } static int re_rx_list_init(struct rl_softc *sc) { int error, i; bzero(sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; if ((error = re_newbuf(sc, i)) != 0) return (error); } #ifdef DEV_NETMAP re_netmap_rx_init(sc); #endif /* DEV_NETMAP */ /* Flush the RX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = 0; sc->rl_head = sc->rl_tail = NULL; sc->rl_int_rx_act = 0; return (0); } static int re_jrx_list_init(struct rl_softc *sc) { int error, i; bzero(sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL; if ((error = re_jumbo_newbuf(sc, i)) != 0) return (error); } bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = 0; sc->rl_head = sc->rl_tail = NULL; sc->rl_int_rx_act = 0; return (0); } /* * RX handler for C+ and 8169. For the gigE chips, we support * the reception of jumbo frames that have been fragmented * across multiple 2K mbuf cluster buffers. */ static int re_rxeof(struct rl_softc *sc, int *rx_npktsp) { struct mbuf *m; struct ifnet *ifp; int i, rxerr, total_len; struct rl_desc *cur_rx; u_int32_t rxstat, rxvlan; int jumbo, maxpkt = 16, rx_npkts = 0; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, 0, &rx_npkts)) return 0; #endif /* DEV_NETMAP */ if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) jumbo = 1; else jumbo = 0; /* Invalidate the descriptor memory */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; i = RL_RX_DESC_NXT(sc, i)) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; cur_rx = &sc->rl_ldata.rl_rx_list[i]; rxstat = le32toh(cur_rx->rl_cmdstat); if ((rxstat & RL_RDESC_STAT_OWN) != 0) break; total_len = rxstat & sc->rl_rxlenmask; rxvlan = le32toh(cur_rx->rl_vlanctl); if (jumbo != 0) m = sc->rl_ldata.rl_jrx_desc[i].rx_m; else m = sc->rl_ldata.rl_rx_desc[i].rx_m; if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) != (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) { /* * RTL8168C or later controllers do not * support multi-fragment packet. */ re_discard_rxbuf(sc, i); continue; } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) { if (re_newbuf(sc, i) != 0) { /* * If this is part of a multi-fragment packet, * discard all the pieces. 
*/ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } m->m_len = RE_RX_DESC_BUFLEN; if (sc->rl_head == NULL) sc->rl_head = sc->rl_tail = m; else { m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; sc->rl_tail = m; } continue; } /* * NOTE: for the 8139C+, the frame length field * is always 12 bits in size, but for the gigE chips, * it is 13 bits (since the max RX frame length is 16K). * Unfortunately, all 32 bits in the status word * were already used, so to make room for the extra * length bit, RealTek took out the 'frame alignment * error' bit and shifted the other status bits * over one slot. The OWN, EOR, FS and LS bits are * still in the same places. We have already extracted * the frame length and checked the OWN bit, so rather * than using an alternate bit mapping, we shift the * status bits one space to the right so we can evaluate * them using the 8169 status as though it was in the * same format as that of the 8139C+. */ if (sc->rl_type == RL_8169) rxstat >>= 1; /* * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be * set, but if CRC is clear, it will still be a valid frame. */ if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) { rxerr = 1; if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 && total_len > 8191 && (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT) rxerr = 0; if (rxerr != 0) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); /* * If this is part of a multi-fragment packet, * discard all the pieces. */ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } } /* * If allocating a replacement mbuf fails, * reload the current one. */ if (jumbo != 0) rxerr = re_jumbo_newbuf(sc, i); else rxerr = re_newbuf(sc, i); if (rxerr != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } if (sc->rl_head != NULL) { if (jumbo != 0) m->m_len = total_len; else { m->m_len = total_len % RE_RX_DESC_BUFLEN; if (m->m_len == 0) m->m_len = RE_RX_DESC_BUFLEN; } /* * Special case: if there's 4 bytes or less * in this buffer, the mbuf can be discarded: * the last 4 bytes is the CRC, which we don't * care about anyway. 
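 * When the final buffer holds fewer than ETHER_CRC_LEN bytes, the
 * remainder of the CRC sits at the end of the previous mbuf, so rl_tail
 * is trimmed by the difference below.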
*/ if (m->m_len <= ETHER_CRC_LEN) { sc->rl_tail->m_len -= (ETHER_CRC_LEN - m->m_len); m_freem(m); } else { m->m_len -= ETHER_CRC_LEN; m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; } m = sc->rl_head; sc->rl_head = sc->rl_tail = NULL; m->m_pkthdr.len = total_len - ETHER_CRC_LEN; } else m->m_pkthdr.len = m->m_len = (total_len - ETHER_CRC_LEN); #ifdef RE_FIXUP_RX re_fixup_rx(m); #endif if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; /* Do RX checksumming if enabled */ if (ifp->if_capenable & IFCAP_RXCSUM) { if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { /* Check IP header checksum */ if (rxstat & RL_RDESC_STAT_PROTOID) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; /* Check TCP/UDP checksum */ if ((RL_TCPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || (RL_UDPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } else { /* * RTL8168C/RTL816CP/RTL8111C/RTL8111CP */ if ((rxstat & RL_RDESC_STAT_PROTOID) && (rxvlan & RL_RDESC_IPV4)) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && (rxvlan & RL_RDESC_IPV4)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (((rxstat & RL_RDESC_STAT_TCP) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || ((rxstat & RL_RDESC_STAT_UDP) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } maxpkt--; if (rxvlan & RL_RDESC_VLANCTL_TAG) { m->m_pkthdr.ether_vtag = bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); m->m_flags |= M_VLANTAG; } RL_UNLOCK(sc); (*ifp->if_input)(ifp, m); RL_LOCK(sc); rx_npkts++; } /* Flush the RX DMA ring */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = i; if (rx_npktsp != NULL) *rx_npktsp = rx_npkts; if (maxpkt) return (EAGAIN); return (0); } static void re_txeof(struct rl_softc *sc) { struct ifnet *ifp; struct rl_txdesc *txd; u_int32_t txstat; int cons; cons = sc->rl_ldata.rl_tx_considx; if (cons == sc->rl_ldata.rl_tx_prodidx) return; ifp = sc->rl_ifp; #ifdef DEV_NETMAP if (netmap_tx_irq(ifp, 0)) return; #endif /* DEV_NETMAP */ /* Invalidate the TX descriptor list */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (; cons != sc->rl_ldata.rl_tx_prodidx; cons = RL_TX_DESC_NXT(sc, cons)) { txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); if (txstat & RL_TDESC_STAT_OWN) break; /* * We only stash mbufs in the last descriptor * in a fragment chain, which also happens to * be the only place where the TX status bits * are valid. 
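 * (re_encap() swaps the dmamaps so that the map covering the whole frame
 * also lives at the EOF descriptor's index, which is why the unload and
 * m_freem() below only happen when RL_TDESC_CMD_EOF is set.)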
*/ if (txstat & RL_TDESC_CMD_EOF) { txd = &sc->rl_ldata.rl_tx_desc[cons]; bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbufs!", __func__)); m_freem(txd->tx_m); txd->tx_m = NULL; if (txstat & (RL_TDESC_STAT_EXCESSCOL| RL_TDESC_STAT_COLCNT)) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if (txstat & RL_TDESC_STAT_TXERRSUM) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } sc->rl_ldata.rl_tx_free++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->rl_ldata.rl_tx_considx = cons; /* No changes made to the TX ring, so no flush needed */ if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { #ifdef RE_TX_MODERATION /* * If not all descriptors have been reaped yet, reload * the timer so that we will eventually get another * interrupt that will cause us to re-enter this routine. * This is done in case the transmitter has gone idle. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif } else sc->rl_watchdog_timer = 0; } static void re_tick(void *xsc) { struct rl_softc *sc; struct mii_data *mii; sc = xsc; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); mii_tick(mii); if ((sc->rl_flags & RL_FLAG_LINK) == 0) re_miibus_statchg(sc->rl_dev); /* * Reclaim transmitted frames here. Technically it is not * necessary to do here but it ensures periodic reclamation * regardless of Tx completion interrupt which seems to be * lost on PCIe based controllers under certain situations. */ re_txeof(sc); re_watchdog(sc); callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); } #ifdef DEVICE_POLLING static int re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; int rx_npkts = 0; RL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rx_npkts = re_poll_locked(ifp, cmd, count); RL_UNLOCK(sc); return (rx_npkts); } static int re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; int rx_npkts; RL_LOCK_ASSERT(sc); sc->rxcycles = count; re_rxeof(sc, &rx_npkts); re_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ u_int16_t status; status = CSR_READ_2(sc, RL_ISR); if (status == 0xffff) return (rx_npkts); if (status) CSR_WRITE_2(sc, RL_ISR, status); if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); /* * XXX check behaviour on receiver stalls. 
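 * A fatal error reported via RL_ISR_SYSTEM_ERR is not recoverable in
 * place; the handler below clears IFF_DRV_RUNNING and reinitializes the
 * chip from scratch.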
*/ if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } } return (rx_npkts); } #endif /* DEVICE_POLLING */ static int re_intr(void *arg) { struct rl_softc *sc; uint16_t status; sc = arg; status = CSR_READ_2(sc, RL_ISR); if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) return (FILTER_STRAY); CSR_WRITE_2(sc, RL_IMR, 0); taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask); return (FILTER_HANDLED); } static void re_int_task(void *arg, int npending) { struct rl_softc *sc; struct ifnet *ifp; u_int16_t status; int rval = 0; sc = arg; ifp = sc->rl_ifp; RL_LOCK(sc); status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if (sc->suspended || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); return; } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { RL_UNLOCK(sc); return; } #endif if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) rval = re_rxeof(sc, NULL); /* * Some chips will ignore a second TX request issued * while an existing transmission is in progress. If * the transmitter goes idle but there are still * packets waiting to be sent, we need to restart the * channel here to flush them out. This only seems to * be required with the PCIe devices. */ if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); if (status & ( #ifdef RE_TX_MODERATION RL_ISR_TIMEOUT_EXPIRED| #else RL_ISR_TX_OK| #endif RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) re_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); RL_UNLOCK(sc); if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask); return; } CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); } static void re_intr_msi(void *xsc) { struct rl_softc *sc; struct ifnet *ifp; uint16_t intrs, status; sc = xsc; RL_LOCK(sc); ifp = sc->rl_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { RL_UNLOCK(sc); return; } #endif /* Disable interrupts. */ CSR_WRITE_2(sc, RL_IMR, 0); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); return; } intrs = RL_INTRS_CPLUS; status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if (sc->rl_int_rx_act > 0) { intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); } if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) { re_rxeof(sc, NULL); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (sc->rl_int_rx_mod != 0 && (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) { /* Rearm one-shot timer. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); sc->rl_int_rx_act = 1; } else { intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN; sc->rl_int_rx_act = 0; } } } /* * Some chips will ignore a second TX request issued * while an existing transmission is in progress. If * the transmitter goes idle but there are still * packets waiting to be sent, we need to restart the * channel here to flush them out. This only seems to * be required with the PCIe devices. 
*/ if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL)) re_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); CSR_WRITE_2(sc, RL_IMR, intrs); } RL_UNLOCK(sc); } static int re_encap(struct rl_softc *sc, struct mbuf **m_head) { struct rl_txdesc *txd, *txd_last; bus_dma_segment_t segs[RL_NTXSEGS]; bus_dmamap_t map; struct mbuf *m_new; struct rl_desc *desc; int nsegs, prod; int i, error, ei, si; int padlen; uint32_t cmdstat, csum_flags, vlanctl; RL_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); /* * With some of the RealTek chips, using the checksum offload * support in conjunction with the autopadding feature results * in the transmission of corrupt frames. For example, if we * need to send a really small IP fragment that's less than 60 * bytes in size, and IP header checksumming is enabled, the * resulting ethernet frame that appears on the wire will * have garbled payload. To work around this, if TX IP checksum * offload is enabled, we always manually pad short frames out * to the minimum ethernet frame size. */ if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; if (M_WRITABLE(*m_head) == 0) { /* Get a writable copy. */ m_new = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); if (m_new == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m_new; } if ((*m_head)->m_next != NULL || M_TRAILINGSPACE(*m_head) < padlen) { m_new = m_defrag(*m_head, M_NOWAIT); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } else m_new = *m_head; /* * Manually pad short frames, and zero the pad space * to avoid leaking data. */ bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); m_new->m_pkthdr.len += padlen; m_new->m_len = m_new->m_pkthdr.len; *m_head = m_new; } prod = sc->rl_ldata.rl_tx_prodidx; txd = &sc->rl_ldata.rl_tx_desc[prod]; error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m_new; error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check for number of available descriptors. */ if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); return (ENOBUFS); } bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); /* * Set up checksum offload. Note: checksum offload bits must * appear in all descriptors of a multi-descriptor transmit * attempt. This is according to testing done with an 8169 * chip. This is a requirement. 
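 * The same holds for the TSO MSS and VLAN tag fields: the segment loop
 * below writes the complete rl_vlanctl and csum_flags values into every
 * descriptor of the chain, not just the first one.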
*/ vlanctl = 0; csum_flags = 0; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) { csum_flags |= RL_TDESC_CMD_LGSEND; vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << RL_TDESC_CMD_MSSVALV2_SHIFT); } else { csum_flags |= RL_TDESC_CMD_LGSEND | ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << RL_TDESC_CMD_MSSVAL_SHIFT); } } else { /* * Unconditionally enable IP checksum if TCP or UDP * checksum is required. Otherwise, TCP/UDP checksum * doesn't make effects. */ if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { csum_flags |= RL_TDESC_CMD_IPCSUM; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) != 0) csum_flags |= RL_TDESC_CMD_TCPCSUM; if (((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) != 0) csum_flags |= RL_TDESC_CMD_UDPCSUM; } else { vlanctl |= RL_TDESC_CMD_IPCSUMV2; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) != 0) vlanctl |= RL_TDESC_CMD_TCPCSUMV2; if (((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) != 0) vlanctl |= RL_TDESC_CMD_UDPCSUMV2; } } } /* * Set up hardware VLAN tagging. Note: vlan tag info must * appear in all descriptors of a multi-descriptor * transmission attempt. */ if ((*m_head)->m_flags & M_VLANTAG) vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | RL_TDESC_VLANCTL_TAG; si = prod; for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { desc = &sc->rl_ldata.rl_tx_list[prod]; desc->rl_vlanctl = htole32(vlanctl); desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); cmdstat = segs[i].ds_len; if (i != 0) cmdstat |= RL_TDESC_CMD_OWN; if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) cmdstat |= RL_TDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | csum_flags); sc->rl_ldata.rl_tx_free--; } /* Update producer index. */ sc->rl_ldata.rl_tx_prodidx = prod; /* Set EOF on the last descriptor. */ ei = RL_TX_DESC_PRV(sc, prod); desc = &sc->rl_ldata.rl_tx_list[ei]; desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); desc = &sc->rl_ldata.rl_tx_list[si]; /* Set SOF and transfer ownership of packet to the chip. */ desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. (Swap last and first dmamaps.) */ txd_last = &sc->rl_ldata.rl_tx_desc[ei]; map = txd->tx_dmamap; txd->tx_dmamap = txd_last->tx_dmamap; txd_last->tx_dmamap = map; txd_last->tx_m = *m_head; return (0); } static void re_start(struct ifnet *ifp) { struct rl_softc *sc; sc = ifp->if_softc; RL_LOCK(sc); re_start_locked(ifp); RL_UNLOCK(sc); } /* * Main transmit routine for C+ and gigE NICs. */ static void re_start_locked(struct ifnet *ifp) { struct rl_softc *sc; struct mbuf *m_head; int queued; sc = ifp->if_softc; #ifdef DEV_NETMAP /* XXX is this necessary ? 
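 * In netmap mode the rings are owned by netmap; all that is done here is
 * to kick the TX unit whenever the netmap ring's current pointer
 * (nr_hwcur) has moved past the driver's last producer index.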
*/ if (ifp->if_capenable & IFCAP_NETMAP) { - struct netmap_kring *kring = &NA(ifp)->tx_rings[0]; + struct netmap_kring *kring = NA(ifp)->tx_rings[0]; if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) { /* kick the tx unit */ CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); #ifdef RE_TX_MODERATION CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif sc->rl_watchdog_timer = 5; } return; } #endif /* DEV_NETMAP */ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) return; for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->rl_ldata.rl_tx_free > 1;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (re_encap(sc, &m_head) != 0) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); queued++; } if (queued == 0) { #ifdef RE_TX_MODERATION if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif return; } /* Flush the TX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); #ifdef RE_TX_MODERATION /* * Use the countdown timer for interrupt moderation. * 'TX done' interrupts are disabled. Instead, we reset the * countdown timer, which will begin counting until it hits * the value in the TIMERINT register, and then trigger an * interrupt. Each time we write to the TIMERCNT register, * the timer count is reset to 0. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif /* * Set a timeout in case the chip goes out to lunch. */ sc->rl_watchdog_timer = 5; } static void re_set_jumbo(struct rl_softc *sc, int jumbo) { if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) { pci_set_max_read_req(sc->rl_dev, 4096); return; } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); if (jumbo != 0) { CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) | RL_CFG3_JUMBO_EN0); switch (sc->rl_hwrev->rl_rev) { case RL_HWREV_8168DP: break; case RL_HWREV_8168E: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) | 0x01); break; default: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1); } } else { CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) & ~RL_CFG3_JUMBO_EN0); switch (sc->rl_hwrev->rl_rev) { case RL_HWREV_8168DP: break; case RL_HWREV_8168E: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) & ~0x01); break; default: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1); } } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); switch (sc->rl_hwrev->rl_rev) { case RL_HWREV_8168DP: pci_set_max_read_req(sc->rl_dev, 4096); break; default: if (jumbo != 0) pci_set_max_read_req(sc->rl_dev, 512); else pci_set_max_read_req(sc->rl_dev, 4096); } } static void re_init(void *xsc) { struct rl_softc *sc = xsc; RL_LOCK(sc); re_init_locked(sc); RL_UNLOCK(sc); } static void re_init_locked(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mii_data *mii; uint32_t reg; uint16_t cfg; union { uint32_t align_dummy; u_char eaddr[ETHER_ADDR_LEN]; } eaddr; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel pending I/O and free all RX/TX buffers. */ re_stop(sc); /* Put controller into known state. */ re_reset(sc); /* * For C+ mode, initialize the RX descriptors and mbufs. 
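 * On RL_FLAG_JUMBOV2 controllers an MTU above RL_MTU switches the RX
 * ring to 9k jumbo clusters; hardware checksum offload and TSO are
 * disabled in that case since these controllers cannot offload
 * checksums on jumbo frames.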
*/ if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { if (ifp->if_mtu > RL_MTU) { if (re_jrx_list_init(sc) != 0) { device_printf(sc->rl_dev, "no memory for jumbo RX buffers\n"); re_stop(sc); return; } /* Disable checksum offloading for jumbo frames. */ ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4); ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO); } else { if (re_rx_list_init(sc) != 0) { device_printf(sc->rl_dev, "no memory for RX buffers\n"); re_stop(sc); return; } } re_set_jumbo(sc, ifp->if_mtu > RL_MTU); } else { if (re_rx_list_init(sc) != 0) { device_printf(sc->rl_dev, "no memory for RX buffers\n"); re_stop(sc); return; } if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) { if (ifp->if_mtu > RL_MTU) pci_set_max_read_req(sc->rl_dev, 512); else pci_set_max_read_req(sc->rl_dev, 4096); } } re_tx_list_init(sc); /* * Enable C+ RX and TX mode, as well as VLAN stripping and * RX checksum offload. We must configure the C+ register * before all others. */ cfg = RL_CPLUSCMD_PCI_MRW; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) cfg |= RL_CPLUSCMD_RXCSUM_ENB; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) cfg |= RL_CPLUSCMD_VLANSTRIP; if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { cfg |= RL_CPLUSCMD_MACSTAT_DIS; /* XXX magic. */ cfg |= 0x0001; } else cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC || sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) { reg = 0x000fff00; if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0) reg |= 0x000000ff; if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) reg |= 0x00f00000; CSR_WRITE_4(sc, 0x7c, reg); /* Disable interrupt mitigation. */ CSR_WRITE_2(sc, 0xe2, 0); } /* * Disable TSO if interface MTU size is greater than MSS * allowed in controller. */ if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } /* * Init our MAC address. Even though the chipset * documentation doesn't mention it, we need to enter "Config * register write enable" mode to modify the ID registers. */ /* Copy MAC address on stack to align. */ bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); CSR_WRITE_4(sc, RL_IDR0, htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); CSR_WRITE_4(sc, RL_IDR4, htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* * Load the addresses of the RX and TX lists into the chip. */ CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { /* Disable RXDV gate. */ CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & ~0x00080000); } /* * Enable transmit and receive for pre-RTL8168G controllers. * RX/TX MACs should be enabled before RX/TX configuration. */ if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB); /* * Set the initial TX configuration. 
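 * When rl_testmode is set the transmitter is additionally placed in
 * internal loopback (RL_LOOPTEST_ON / RL_LOOPTEST_ON_CPLUS), which the
 * attach-time loopback diagnostic relies on.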
*/ if (sc->rl_testmode) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON); else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); } else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); /* * Set the initial RX configuration. */ re_set_rxmode(sc); /* Configure interrupt moderation. */ if (sc->rl_type == RL_8169) { /* Magic from vendor. */ CSR_WRITE_2(sc, RL_INTRMOD, 0x5100); } /* * Enable transmit and receive for RTL8168G and later controllers. * RX/TX MACs should be enabled after RX/TX configuration. */ if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else /* otherwise ... */ #endif /* * Enable interrupts. */ if (sc->rl_testmode) CSR_WRITE_2(sc, RL_IMR, 0); else CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); /* * Initialize the timer interrupt register so that * a timer interrupt will be generated once the timer * reaches a certain number of ticks. The timer is * reloaded on each transmit. */ #ifdef RE_TX_MODERATION /* * Use timer interrupt register to moderate TX interrupt * moderation, which dramatically improves TX frame rate. */ if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); else CSR_WRITE_4(sc, RL_TIMERINT, 0x400); #else /* * Use timer interrupt register to moderate RX interrupt * moderation. */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && intr_filter == 0) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(sc->rl_int_rx_mod)); } else { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0)); } #endif /* * For 8169 gigE NICs, set the max allowed RX packet * size so we can receive jumbo frames. */ if (sc->rl_type == RL_8169) { if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { /* * For controllers that use new jumbo frame scheme, * set maximum size of jumbo frame depending on * controller revisions. */ if (ifp->if_mtu > RL_MTU) CSR_WRITE_2(sc, RL_MAXRXPKTLEN, sc->rl_hwrev->rl_max_mtu + ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN + ETHER_CRC_LEN); else CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && sc->rl_hwrev->rl_max_mtu == RL_MTU) { /* RTL810x has no jumbo frame support. */ CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); } else CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); } if (sc->rl_testmode) return; CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | RL_CFG1_DRVLOAD); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->rl_flags &= ~RL_FLAG_LINK; mii_mediachg(mii); sc->rl_watchdog_timer = 0; callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); } /* * Set media options. */ static int re_ifmedia_upd(struct ifnet *ifp) { struct rl_softc *sc; struct mii_data *mii; int error; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); error = mii_mediachg(mii); RL_UNLOCK(sc); return (error); } /* * Report current media status. 
*/ static void re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct rl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; RL_UNLOCK(sc); } static int re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct rl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu || ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 && ifr->ifr_mtu > RL_MTU)) { error = EINVAL; break; } RL_LOCK(sc); if (ifp->if_mtu != ifr->ifr_mtu) { ifp->if_mtu = ifr->ifr_mtu; if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); ifp->if_hwassist &= ~CSUM_TSO; } VLAN_CAPABILITIES(ifp); } RL_UNLOCK(sc); break; case SIOCSIFFLAGS: RL_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->rl_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) re_set_rxmode(sc); } else re_init_locked(sc); } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) re_stop(sc); } sc->rl_if_flags = ifp->if_flags; RL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: RL_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) re_set_rxmode(sc); RL_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: { int mask, reinit; mask = ifr->ifr_reqcap ^ ifp->if_capenable; reinit = 0; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(re_poll, ifp); if (error) return (error); RL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, RL_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; RL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. */ RL_LOCK(sc); CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); ifp->if_capenable &= ~IFCAP_POLLING; RL_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ RL_LOCK(sc); if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= RE_CSUM_FEATURES; else ifp->if_hwassist &= ~RE_CSUM_FEATURES; reinit = 1; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; reinit = 1; } if ((mask & IFCAP_TSO4) != 0 && (ifp->if_capabilities & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((IFCAP_TSO4 & ifp->if_capenable) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } } if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; /* TSO over VLAN requires VLAN hardware tagging. 
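 * Clearing IFCAP_VLAN_HWTAGGING therefore also clears IFCAP_VLAN_HWTSO
 * below, and any tagging change forces a reinit so that the VLAN
 * stripping bit in the C+ command register is reprogrammed.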
*/ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; reinit = 1; } if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (mask & (IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWTSO)) != 0) reinit = 1; if ((mask & IFCAP_WOL) != 0 && (ifp->if_capabilities & IFCAP_WOL) != 0) { if ((mask & IFCAP_WOL_UCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_UCAST; if ((mask & IFCAP_WOL_MCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_MCAST; if ((mask & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; } if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } RL_UNLOCK(sc); VLAN_CAPABILITIES(ifp); } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void re_watchdog(struct rl_softc *sc) { struct ifnet *ifp; RL_LOCK_ASSERT(sc); if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) return; ifp = sc->rl_ifp; re_txeof(sc); if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { if_printf(ifp, "watchdog timeout (missed Tx interrupts) " "-- recovering\n"); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); return; } if_printf(ifp, "watchdog timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); re_rxeof(sc, NULL); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void re_stop(struct rl_softc *sc) { int i; struct ifnet *ifp; struct rl_txdesc *txd; struct rl_rxdesc *rxd; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; sc->rl_watchdog_timer = 0; callout_stop(&sc->rl_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* * Disable accepting frames to put RX MAC into idle state. * Otherwise it's possible to get frames while stop command * execution is in progress and controller can DMA the frame * to already freed RX buffer during that period. */ CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI | RL_RXCFG_RX_BROAD)); if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { /* Enable RXDV gate. */ CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) | 0x00080000); } if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) { for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_1(sc, sc->rl_txstart) & RL_TXSTART_START) == 0) break; DELAY(20); } if (i == 0) device_printf(sc->rl_dev, "stopping TX poll timed out!\n"); CSR_WRITE_1(sc, RL_COMMAND, 0x00); } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) { CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | RL_CMD_RX_ENB); if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) { for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_QUEUE_EMPTY) != 0) break; DELAY(100); } if (i == 0) device_printf(sc->rl_dev, "stopping TXQ timed out!\n"); } } else CSR_WRITE_1(sc, RL_COMMAND, 0x00); DELAY(1000); CSR_WRITE_2(sc, RL_IMR, 0x0000); CSR_WRITE_2(sc, RL_ISR, 0xFFFF); if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } /* Free the TX list buffers. */ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { txd = &sc->rl_ldata.rl_tx_desc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } /* Free the RX list buffers. 
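 * The standard and jumbo RX rings are torn down separately below because
 * they use distinct DMA tags and maps (rl_rx_mtag vs. rl_jrx_mtag).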
*/ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { rxd = &sc->rl_ldata.rl_rx_desc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { rxd = &sc->rl_ldata.rl_jrx_desc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } } } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int re_suspend(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); re_setwol(sc); sc->suspended = 1; RL_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int re_resume(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); RL_LOCK(sc); ifp = sc->rl_ifp; /* Take controller out of sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) | 0x01); } /* * Clear WOL matching such that normal Rx filtering * wouldn't interfere with WOL patterns. */ re_clrwol(sc); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) re_init_locked(sc); sc->suspended = 0; RL_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int re_shutdown(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); /* * Mark interface as down since otherwise we will panic if * interrupt comes in later on, which can happen in some * cases. */ sc->rl_ifp->if_flags &= ~IFF_UP; re_setwol(sc); RL_UNLOCK(sc); return (0); } static void re_set_linkspeed(struct rl_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; int aneg, i, phyno; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch IFM_SUBTYPE(mii->mii_media_active) { case IFM_10_T: case IFM_100_TX: return; case IFM_1000_T: aneg++; break; default: break; } } miisc = LIST_FIRST(&mii->mii_phys); phyno = miisc->mii_phy; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0); re_miibus_writereg(sc->rl_dev, phyno, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); re_miibus_writereg(sc->rl_dev, phyno, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* * Poll link state until re(4) get a 10/100Mbps link. */ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: return; default: break; } } RL_UNLOCK(sc); pause("relnk", hz); RL_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->rl_dev, "establishing a link failed, WOL may not work!"); } /* * No link, force MAC to have 100Mbps, full-duplex link. 
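 * (This path is used when forcing the link for wake-on-LAN,
 * RL_FLAG_WOL_MANLINK, since a gigabit link cannot be maintained while
 * the chip is powered down.)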
* MAC does not require reprogramming on resolved speed/duplex, * so this is just for completeness. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; } static void re_setwol(struct rl_softc *sc) { struct ifnet *ifp; int pmc; uint16_t pmstat; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->rl_ifp; /* Put controller into sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) & ~0x01); } if ((ifp->if_capenable & IFCAP_WOL) != 0) { if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { /* Disable RXDV gate. */ CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & ~0x00080000); } re_set_rxmode(sc); if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0) re_set_linkspeed(sc); if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); } /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); /* Enable PME. */ v = CSR_READ_1(sc, sc->rl_cfg1); v &= ~RL_CFG1_PME; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG1_PME; CSR_WRITE_1(sc, sc->rl_cfg1, v); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) v |= RL_CFG3_WOL_MAGIC; CSR_WRITE_1(sc, sc->rl_cfg3, v); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST | RL_CFG5_WOL_LANWAKE); if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) v |= RL_CFG5_WOL_UCAST; if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); /* Config register write done. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); if ((ifp->if_capenable & IFCAP_WOL) == 0 && (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80); /* * It seems that hardware resets its link speed to 100Mbps in * power down mode so switching to 100Mbps in driver is not * needed. */ /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static void re_clrwol(struct rl_softc *sc) { int pmc; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); CSR_WRITE_1(sc, sc->rl_cfg3, v); /* Config register write done. 
*/ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); v &= ~RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); } static void re_add_sysctls(struct rl_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; int error; ctx = device_get_sysctl_ctx(sc->rl_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I", "Statistics Information"); if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) return; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I", "re RX interrupt moderation"); /* Pull in device tunables. */ sc->rl_int_rx_mod = RL_TIMER_DEFAULT; error = resource_int_value(device_get_name(sc->rl_dev), device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod); if (error == 0) { if (sc->rl_int_rx_mod < RL_TIMER_MIN || sc->rl_int_rx_mod > RL_TIMER_MAX) { device_printf(sc->rl_dev, "int_rx_mod value out of " "range; using default: %d\n", RL_TIMER_DEFAULT); sc->rl_int_rx_mod = RL_TIMER_DEFAULT; } } } static int re_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct rl_softc *sc; struct rl_stats *stats; int error, i, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || req->newptr == NULL) return (error); if (result == 1) { sc = (struct rl_softc *)arg1; RL_LOCK(sc); if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); goto done; } bus_dmamap_sync(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD); CSR_WRITE_4(sc, RL_DUMPSTATS_HI, RL_ADDR_HI(sc->rl_ldata.rl_stats_addr)); CSR_WRITE_4(sc, RL_DUMPSTATS_LO, RL_ADDR_LO(sc->rl_ldata.rl_stats_addr)); CSR_WRITE_4(sc, RL_DUMPSTATS_LO, RL_ADDR_LO(sc->rl_ldata.rl_stats_addr | RL_DUMPSTATS_START)); for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) & RL_DUMPSTATS_START) == 0) break; DELAY(1000); } bus_dmamap_sync(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD); RL_UNLOCK(sc); if (i == 0) { device_printf(sc->rl_dev, "DUMP statistics request timed out\n"); return (ETIMEDOUT); } done: stats = sc->rl_ldata.rl_stats; printf("%s statistics:\n", device_get_nameunit(sc->rl_dev)); printf("Tx frames : %ju\n", (uintmax_t)le64toh(stats->rl_tx_pkts)); printf("Rx frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_pkts)); printf("Tx errors : %ju\n", (uintmax_t)le64toh(stats->rl_tx_errs)); printf("Rx errors : %u\n", le32toh(stats->rl_rx_errs)); printf("Rx missed frames : %u\n", (uint32_t)le16toh(stats->rl_missed_pkts)); printf("Rx frame alignment errs : %u\n", (uint32_t)le16toh(stats->rl_rx_framealign_errs)); printf("Tx single collisions : %u\n", le32toh(stats->rl_tx_onecoll)); printf("Tx multiple collisions : %u\n", le32toh(stats->rl_tx_multicolls)); printf("Rx unicast frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_ucasts)); printf("Rx broadcast frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_bcasts)); printf("Rx multicast frames : %u\n", le32toh(stats->rl_rx_mcasts)); printf("Tx aborts : %u\n", (uint32_t)le16toh(stats->rl_tx_aborts)); printf("Tx underruns : %u\n", (uint32_t)le16toh(stats->rl_rx_underruns)); } return (error); } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (arg1 == NULL) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); if 
(value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN, RL_TIMER_MAX)); } Index: head/sys/modules/netmap/Makefile =================================================================== --- head/sys/modules/netmap/Makefile (revision 332422) +++ head/sys/modules/netmap/Makefile (revision 332423) @@ -1,27 +1,28 @@ # $FreeBSD$ # # Compile netmap as a module, useful if you want a netmap bridge # or loadable drivers. SYSDIR?=${SRCTOP}/sys .include "${SYSDIR}/conf/kern.opts.mk" .PATH: ${SYSDIR}/dev/netmap .PATH.h: ${SYSDIR}/net CFLAGS += -I${SYSDIR}/ -D INET KMOD = netmap SRCS = device_if.h bus_if.h pci_if.h opt_netmap.h SRCS += netmap.c netmap.h netmap_kern.h SRCS += netmap_mem2.c netmap_mem2.h SRCS += netmap_generic.c SRCS += netmap_mbq.c netmap_mbq.h SRCS += netmap_vale.c SRCS += netmap_freebsd.c SRCS += netmap_offloadings.c SRCS += netmap_pipe.c SRCS += netmap_monitor.c SRCS += netmap_pt.c +SRCS += netmap_legacy.c SRCS += if_ptnet.c SRCS += opt_inet.h opt_inet6.h .include Index: head/sys/net/iflib.c =================================================================== --- head/sys/net/iflib.c (revision 332422) +++ head/sys/net/iflib.c (revision 332423) @@ -1,5969 +1,5969 @@ /*- * Copyright (c) 2014-2017, Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Neither the name of Matthew Macy nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_acpi.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #if defined(__i386__) || defined(__amd64__) #include #include #include #include #include #include #endif #include /* * enable accounting of every mbuf as it comes in to and goes out of * iflib's software descriptor references */ #define MEMORY_LOGGING 0 /* * Enable mbuf vectors for compressing long mbuf chains */ /* * NB: * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead * we prefetch needs to be determined by the time spent in m_free vis a vis * the cost of a prefetch. This will of course vary based on the workload: * - NFLX's m_free path is dominated by vm-based M_EXT manipulation which * is quite expensive, thus suggesting very little prefetch. * - small packet forwarding which is just returning a single mbuf to * UMA will typically be very fast vis a vis the cost of a memory * access. */ /* * File organization: * - private structures * - iflib private utility functions * - ifnet functions * - vlan registry and other exported functions * - iflib public core functions * * */ static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library"); struct iflib_txq; typedef struct iflib_txq *iflib_txq_t; struct iflib_rxq; typedef struct iflib_rxq *iflib_rxq_t; struct iflib_fl; typedef struct iflib_fl *iflib_fl_t; struct iflib_ctx; static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid); typedef struct iflib_filter_info { driver_filter_t *ifi_filter; void *ifi_filter_arg; struct grouptask *ifi_task; void *ifi_ctx; } *iflib_filter_info_t; struct iflib_ctx { KOBJ_FIELDS; /* * Pointer to hardware driver's softc */ void *ifc_softc; device_t ifc_dev; if_t ifc_ifp; cpuset_t ifc_cpus; if_shared_ctx_t ifc_sctx; struct if_softc_ctx ifc_softc_ctx; struct mtx ifc_mtx; uint16_t ifc_nhwtxqs; iflib_txq_t ifc_txqs; iflib_rxq_t ifc_rxqs; uint32_t ifc_if_flags; uint32_t ifc_flags; uint32_t ifc_max_fl_buf_size; int ifc_in_detach; int ifc_link_state; int ifc_link_irq; int ifc_watchdog_events; struct cdev *ifc_led_dev; struct resource *ifc_msix_mem; struct if_irq ifc_legacy_irq; struct grouptask ifc_admin_task; struct grouptask ifc_vflr_task; struct iflib_filter_info ifc_filter_info; struct ifmedia ifc_media; struct sysctl_oid *ifc_sysctl_node; uint16_t ifc_sysctl_ntxqs; uint16_t ifc_sysctl_nrxqs; uint16_t ifc_sysctl_qs_eq_override; uint16_t ifc_sysctl_rx_budget; qidx_t ifc_sysctl_ntxds[8]; qidx_t ifc_sysctl_nrxds[8]; struct if_txrx ifc_txrx; #define isc_txd_encap ifc_txrx.ift_txd_encap #define isc_txd_flush ifc_txrx.ift_txd_flush #define isc_txd_credits_update ifc_txrx.ift_txd_credits_update #define isc_rxd_available ifc_txrx.ift_rxd_available #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_rxd_flush ifc_txrx.ift_rxd_flush #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_legacy_intr ifc_txrx.ift_legacy_intr eventhandler_tag ifc_vlan_attach_event; eventhandler_tag ifc_vlan_detach_event; uint8_t ifc_mac[ETHER_ADDR_LEN]; char 
ifc_mtx_name[16]; }; void * iflib_get_softc(if_ctx_t ctx) { return (ctx->ifc_softc); } device_t iflib_get_dev(if_ctx_t ctx) { return (ctx->ifc_dev); } if_t iflib_get_ifp(if_ctx_t ctx) { return (ctx->ifc_ifp); } struct ifmedia * iflib_get_media(if_ctx_t ctx) { return (&ctx->ifc_media); } void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]) { bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN); } if_softc_ctx_t iflib_get_softc_ctx(if_ctx_t ctx) { return (&ctx->ifc_softc_ctx); } if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx) { return (ctx->ifc_sctx); } #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2) #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*)) #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & (CACHE_LINE_SIZE-1))) #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP) #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF) #define RX_SW_DESC_MAP_CREATED (1 << 0) #define TX_SW_DESC_MAP_CREATED (1 << 1) #define RX_SW_DESC_INUSE (1 << 3) #define TX_SW_DESC_MAPPED (1 << 4) #define M_TOOBIG M_PROTO1 typedef struct iflib_sw_rx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ caddr_t *ifsd_cl; /* direct cluster pointer for rx */ uint8_t *ifsd_flags; } iflib_rxsd_array_t; typedef struct iflib_sw_tx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ uint8_t *ifsd_flags; } if_txsd_vec_t; /* magic number that should be high enough for any hardware */ #define IFLIB_MAX_TX_SEGS 128 /* bnxt supports 64 with hardware LRO enabled */ #define IFLIB_MAX_RX_SEGS 64 #define IFLIB_RX_COPY_THRESH 128 #define IFLIB_MAX_RX_REFRESH 32 /* The minimum descriptors per second before we start coalescing */ #define IFLIB_MIN_DESC_SEC 16384 #define IFLIB_DEFAULT_TX_UPDATE_FREQ 16 #define IFLIB_QUEUE_IDLE 0 #define IFLIB_QUEUE_HUNG 1 #define IFLIB_QUEUE_WORKING 2 /* maximum number of txqs that can share an rx interrupt */ #define IFLIB_MAX_TX_SHARED_INTR 4 /* this should really scale with ring size - this is a fairly arbitrary value */ #define TX_BATCH_SIZE 32 #define IFLIB_RESTART_BUDGET 8 #define IFC_LEGACY 0x001 #define IFC_QFLUSH 0x002 #define IFC_MULTISEG 0x004 #define IFC_DMAR 0x008 #define IFC_SC_ALLOCATED 0x010 #define IFC_INIT_DONE 0x020 #define IFC_PREFETCH 0x040 #define IFC_DO_RESET 0x080 #define IFC_CHECK_HUNG 0x100 #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) struct iflib_txq { qidx_t ift_in_use; qidx_t ift_cidx; qidx_t ift_cidx_processed; qidx_t ift_pidx; uint8_t ift_gen; uint8_t ift_br_offset; uint16_t ift_npending; uint16_t ift_db_pending; uint16_t ift_rs_pending; /* implicit pad */ uint8_t ift_txd_size[8]; uint64_t ift_processed; uint64_t ift_cleaned; uint64_t ift_cleaned_prev; #if MEMORY_LOGGING uint64_t ift_enqueued; uint64_t ift_dequeued; #endif uint64_t ift_no_tx_dma_setup; uint64_t ift_no_desc_avail; uint64_t ift_mbuf_defrag_failed; uint64_t ift_mbuf_defrag; uint64_t ift_map_failed; uint64_t ift_txd_encap_efbig; uint64_t ift_pullups; struct mtx ift_mtx; struct mtx ift_db_mtx; /* constant values */ if_ctx_t ift_ctx; struct ifmp_ring *ift_br; struct grouptask ift_task; qidx_t ift_size; uint16_t ift_id; struct callout ift_timer; if_txsd_vec_t ift_sds; uint8_t ift_qstatus; uint8_t ift_closed; uint8_t ift_update_freq; struct iflib_filter_info ift_filter_info; bus_dma_tag_t ift_desc_tag; bus_dma_tag_t ift_tso_desc_tag; 
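/* DMA'd hardware descriptor ring(s) backing this queue */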
iflib_dma_info_t ift_ifdi; #define MTX_NAME_LEN 16 char ift_mtx_name[MTX_NAME_LEN]; char ift_db_mtx_name[MTX_NAME_LEN]; bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ift_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); struct iflib_fl { qidx_t ifl_cidx; qidx_t ifl_pidx; qidx_t ifl_credits; uint8_t ifl_gen; uint8_t ifl_rxd_size; #if MEMORY_LOGGING uint64_t ifl_m_enqueued; uint64_t ifl_m_dequeued; uint64_t ifl_cl_enqueued; uint64_t ifl_cl_dequeued; #endif /* implicit pad */ bitstr_t *ifl_rx_bitmap; qidx_t ifl_fragidx; /* constant */ qidx_t ifl_size; uint16_t ifl_buf_size; uint16_t ifl_cltype; uma_zone_t ifl_zone; iflib_rxsd_array_t ifl_sds; iflib_rxq_t ifl_rxq; uint8_t ifl_id; bus_dma_tag_t ifl_desc_tag; iflib_dma_info_t ifl_ifdi; uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE); caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH]; qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH]; } __aligned(CACHE_LINE_SIZE); static inline qidx_t get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen) { qidx_t used; if (pidx > cidx) used = pidx - cidx; else if (pidx < cidx) used = size - cidx + pidx; else if (gen == 0 && pidx == cidx) used = 0; else if (gen == 1 && pidx == cidx) used = size; else panic("bad state"); return (used); } #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen)) #define IDXDIFF(head, tail, wrap) \ ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head)) struct iflib_rxq { /* If there is a separate completion queue - * these are the cq cidx and pidx. Otherwise * these are unused. */ qidx_t ifr_size; qidx_t ifr_cq_cidx; qidx_t ifr_cq_pidx; uint8_t ifr_cq_gen; uint8_t ifr_fl_offset; if_ctx_t ifr_ctx; iflib_fl_t ifr_fl; uint64_t ifr_rx_irq; uint16_t ifr_id; uint8_t ifr_lro_enabled; uint8_t ifr_nfl; uint8_t ifr_ntxqirq; uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR]; struct lro_ctrl ifr_lc; struct grouptask ifr_task; struct iflib_filter_info ifr_filter_info; iflib_dma_info_t ifr_ifdi; /* dynamically allocate if any drivers need a value substantially larger than this */ struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ifr_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); typedef struct if_rxsd { caddr_t *ifsd_cl; struct mbuf **ifsd_m; iflib_fl_t ifsd_fl; qidx_t ifsd_cidx; } *if_rxsd_t; /* multiple of word size */ #ifdef __LP64__ #define PKT_INFO_SIZE 6 #define RXD_INFO_SIZE 5 #define PKT_TYPE uint64_t #else #define PKT_INFO_SIZE 11 #define RXD_INFO_SIZE 8 #define PKT_TYPE uint32_t #endif #define PKT_LOOP_BOUND ((PKT_INFO_SIZE/3)*3) #define RXD_LOOP_BOUND ((RXD_INFO_SIZE/4)*4) typedef struct if_pkt_info_pad { PKT_TYPE pkt_val[PKT_INFO_SIZE]; } *if_pkt_info_pad_t; typedef struct if_rxd_info_pad { PKT_TYPE rxd_val[RXD_INFO_SIZE]; } *if_rxd_info_pad_t; CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info)); CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info)); static inline void pkt_info_zero(if_pkt_info_t pi) { if_pkt_info_pad_t pi_pad; pi_pad = (if_pkt_info_pad_t)pi; pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0; pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0; #ifndef __LP64__ pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0; pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0; #endif } static inline void rxd_info_zero(if_rxd_info_t ri) { if_rxd_info_pad_t ri_pad; int i; ri_pad = 
(if_rxd_info_pad_t)ri; for (i = 0; i < RXD_LOOP_BOUND; i += 4) { ri_pad->rxd_val[i] = 0; ri_pad->rxd_val[i+1] = 0; ri_pad->rxd_val[i+2] = 0; ri_pad->rxd_val[i+3] = 0; } #ifdef __LP64__ ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0; #endif } /* * Only allow a single packet to take up most 1/nth of the tx ring */ #define MAX_SINGLE_PACKET_FRACTION 12 #define IF_BAD_DMA (bus_addr_t)-1 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING)) #define CTX_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_mtx, _name, "iflib ctx lock", MTX_DEF) #define CTX_LOCK(ctx) mtx_lock(&(ctx)->ifc_mtx) #define CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_mtx) #define CTX_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_mtx) #define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx) #define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx) /* Our boot-time initialization hook */ static int iflib_module_event_handler(module_t, int, void *); static moduledata_t iflib_moduledata = { "iflib", iflib_module_event_handler, NULL }; DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY); MODULE_VERSION(iflib, 1); MODULE_DEPEND(iflib, pci, 1, 1, 1); MODULE_DEPEND(iflib, ether, 1, 1, 1); TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1); TASKQGROUP_DEFINE(if_config_tqg, 1, 1); #ifndef IFLIB_DEBUG_COUNTERS #ifdef INVARIANTS #define IFLIB_DEBUG_COUNTERS 1 #else #define IFLIB_DEBUG_COUNTERS 0 #endif /* !INVARIANTS */ #endif static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0, "iflib driver parameters"); /* * XXX need to ensure that this can't accidentally cause the head to be moved backwards */ static int iflib_min_tx_latency = 0; SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW, &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput"); static int iflib_no_tx_batch = 0; SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW, &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput"); #if IFLIB_DEBUG_COUNTERS static int iflib_tx_seen; static int iflib_tx_sent; static int iflib_tx_encap; static int iflib_rx_allocs; static int iflib_fl_refills; static int iflib_fl_refills_large; static int iflib_tx_frees; SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD, &iflib_tx_seen, 0, "# tx mbufs seen"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD, &iflib_tx_sent, 0, "# tx mbufs sent"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD, &iflib_tx_encap, 0, "# tx mbufs encapped"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD, &iflib_tx_frees, 0, "# tx frees"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD, &iflib_rx_allocs, 0, "# rx allocations"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD, &iflib_fl_refills, 0, "# refills"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD, &iflib_fl_refills_large, 0, "# large refills"); static int iflib_txq_drain_flushing; static int iflib_txq_drain_oactive; static int iflib_txq_drain_notready; static int iflib_txq_drain_encapfail; SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD, &iflib_txq_drain_flushing, 0, "# drain flushes"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD, &iflib_txq_drain_oactive, 0, "# drain oactives"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD, &iflib_txq_drain_notready, 0, "# drain notready"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD, &iflib_txq_drain_encapfail, 0, "# drain encap fails"); static int iflib_encap_load_mbuf_fail; static int iflib_encap_pad_mbuf_fail; static int 
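
pkt_info_zero() and rxd_info_zero() avoid a full bzero() by overlaying the structure with an array of word-sized values and unrolling the stores; the CTASSERTs above guarantee the overlay covers the structure exactly (6 and 5 64-bit words on __LP64__, 11 and 8 32-bit words otherwise). A minimal standalone sketch of the same trick, using hypothetical names:

	#include <stdint.h>

	struct exm_info {			/* stand-in for if_pkt_info */
		uintptr_t addr;
		uintptr_t len;
		uintptr_t pidx;
		uintptr_t flags;
	};

	struct exm_info_pad {			/* word-sized overlay */
		uintptr_t val[4];
	};

	/* the CTASSERT equivalent: overlay and structure must match exactly */
	_Static_assert(sizeof(struct exm_info_pad) == sizeof(struct exm_info),
	    "overlay size mismatch");

	static inline void
	exm_info_zero(struct exm_info *p)
	{
		struct exm_info_pad *pad = (struct exm_info_pad *)p;

		pad->val[0] = 0;
		pad->val[1] = 0;
		pad->val[2] = 0;
		pad->val[3] = 0;
	}
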
iflib_encap_txq_avail_fail; static int iflib_encap_txd_encap_fail; SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD, &iflib_encap_load_mbuf_fail, 0, "# busdma load failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD, &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD, &iflib_encap_txq_avail_fail, 0, "# txq avail failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD, &iflib_encap_txd_encap_fail, 0, "# driver encap failures"); static int iflib_task_fn_rxs; static int iflib_rx_intr_enables; static int iflib_fast_intrs; static int iflib_intr_link; static int iflib_intr_msix; static int iflib_rx_unavail; static int iflib_rx_ctx_inactive; static int iflib_rx_zero_len; static int iflib_rx_if_input; static int iflib_rx_mbuf_null; static int iflib_rxd_flush; static int iflib_verbose_debug; SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD, &iflib_intr_link, 0, "# intr link calls"); SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD, &iflib_intr_msix, 0, "# intr msix calls"); SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, &iflib_task_fn_rxs, 0, "# task_fn_rx calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD, &iflib_rx_intr_enables, 0, "# rx intr enables"); SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, &iflib_fast_intrs, 0, "# fast_intr calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, &iflib_rx_unavail, 0, "# times rxeof called with no available data"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD, &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD, &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, &iflib_rx_if_input, 0, "# times rxeof called if_input"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD, &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf"); SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, &iflib_rxd_flush, 0, "# times rxd_flush called"); SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW, &iflib_verbose_debug, 0, "enable verbose debugging"); #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1) static void iflib_debug_reset(void) { iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs = iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees = iflib_txq_drain_flushing = iflib_txq_drain_oactive = iflib_txq_drain_notready = iflib_txq_drain_encapfail = iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail = iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail = iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs = iflib_intr_link = iflib_intr_msix = iflib_rx_unavail = iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input = iflib_rx_mbuf_null = iflib_rxd_flush = 0; } #else #define DBG_COUNTER_INC(name) static void iflib_debug_reset(void) {} #endif #define IFLIB_DEBUG 0 static void iflib_tx_structures_free(if_ctx_t ctx); static void iflib_rx_structures_free(if_ctx_t ctx); static int iflib_queues_alloc(if_ctx_t ctx); static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq); static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget); static int iflib_qset_structures_setup(if_ctx_t ctx); static int iflib_msix_init(if_ctx_t ctx); static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int 
*rid, char *str); static void iflib_txq_check_drain(iflib_txq_t txq, int budget); static uint32_t iflib_txq_can_drain(struct ifmp_ring *); static int iflib_register(if_ctx_t); static void iflib_init_locked(if_ctx_t ctx); static void iflib_add_device_sysctl_pre(if_ctx_t ctx); static void iflib_add_device_sysctl_post(if_ctx_t ctx); static void iflib_ifmp_purge(iflib_txq_t txq); static void _iflib_pre_assert(if_softc_ctx_t scctx); static void iflib_stop(if_ctx_t ctx); static void iflib_if_init_locked(if_ctx_t ctx); #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m); #endif #ifdef DEV_NETMAP #include #include #include MODULE_DEPEND(iflib, netmap, 1, 1, 1); static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init); /* * device-specific sysctl variables: * * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. * During regular operations the CRC is stripped, but on some * hardware reception of frames not multiple of 64 is slower, * so using crcstrip=0 helps in benchmarks. * * iflib_rx_miss, iflib_rx_miss_bufs: * count packets that might be missed due to lost interrupts. */ SYSCTL_DECL(_dev_netmap); /* * The xl driver by default strips CRCs and we do not override it. */ int iflib_crcstrip = 1; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip, CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames"); int iflib_rx_miss, iflib_rx_miss_bufs; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss, CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr"); SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs, CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs"); /* * Register/unregister. We are already under netmap lock. * Only called on the first register or the last unregister. */ static int iflib_netmap_register(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; int status; CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (!CTX_IS_VF(ctx)) IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* enable or disable flags and callbacks in na and ifp */ if (onoff) { nm_set_native_flags(na); } else { nm_clear_native_flags(na); } iflib_stop(ctx); iflib_init_locked(ctx); IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ? status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 
0 : 1; if (status) nm_clear_native_flags(na); CTX_UNLOCK(ctx); return (status); } static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init) { struct netmap_adapter *na = kring->na; u_int const lim = kring->nkr_num_slots - 1; u_int head = kring->rhead; struct netmap_ring *ring = kring->ring; bus_dmamap_t *map; struct if_rxd_update iru; if_ctx_t ctx = rxq->ifr_ctx; iflib_fl_t fl = &rxq->ifr_fl[0]; uint32_t refill_pidx, nic_i; if (nm_i == head && __predict_true(!init)) return 0; iru_init(&iru, rxq, 0 /* flid */); map = fl->ifl_sds.ifsd_map; refill_pidx = netmap_idx_k2n(kring, nm_i); /* * IMPORTANT: we must leave one free slot in the ring, * so move head back by one unit */ head = nm_prev(head, lim); while (nm_i != head) { for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) { struct netmap_slot *slot = &ring->slot[nm_i]; void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]); uint32_t nic_i_dma = refill_pidx; nic_i = netmap_idx_k2n(kring, nm_i); MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH); if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ return netmap_ring_reinit(kring); fl->ifl_vm_addrs[tmp_pidx] = addr; if (__predict_false(init) && map) { netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr); } else if (map && (slot->flags & NS_BUF_CHANGED)) { /* buffer has changed, reload map */ netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr); } slot->flags &= ~NS_BUF_CHANGED; nm_i = nm_next(nm_i, lim); fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim); if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1) continue; iru.iru_pidx = refill_pidx; iru.iru_count = tmp_pidx+1; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); refill_pidx = nic_i; if (map == NULL) continue; for (int n = 0; n < iru.iru_count; n++) { bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma], BUS_DMASYNC_PREREAD); /* XXX - change this to not use the netmap func*/ nic_i_dma = nm_next(nic_i_dma, lim); } } } kring->nr_hwcur = head; if (map) bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i); return (0); } /* * Reconcile kernel and user view of the transmit ring. * * All information is in the kring. * Userspace wants to send packets up to the one before kring->rhead, * kernel knows kring->nr_hwcur is the first unsent packet. * * Here we push packets out (as many as possible), and possibly * reclaim buffers from previously completed transmission. * * The caller (netmap) guarantees that there is only one instance * running at any time. Any interference with other driver * methods should be handled by the individual drivers. 
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	if (txq->ift_sds.ifsd_map)
		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap ring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so do it only
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */
	nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);
	pkt_info_zero(&pi);
	pi.ipi_segs = txq->ift_segs;
	pi.ipi_qsidx = kring->ring_id;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		if (txq->ift_sds.ifsd_map)
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring. */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			if (txq->ift_sds.ifsd_map) {
				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

				NM_CHECK_ADDR_LEN(na, addr, len);

				if (slot->flags & NS_BUF_CHANGED) {
					/* buffer has changed, reload map */
					netmap_reload_map(na, txq->ift_desc_tag,
					    txq->ift_sds.ifsd_map[nic_i], addr);
				}
				/* make sure changes to the buffer are synced */
				bus_dmamap_sync(txq->ift_ifdi->idi_tag,
				    txq->ift_sds.ifsd_map[nic_i],
				    BUS_DMASYNC_PREWRITE);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		if (txq->ift_sds.ifsd_map)
			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
*/ if (iflib_tx_credits_update(ctx, txq)) { /* some tx completed, increment avail */ nic_i = txq->ift_cidx_processed; kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim); } return (0); } /* * Reconcile kernel and user view of the receive ring. * Same as for the txsync, this routine must be efficient. * The caller guarantees a single invocations, but races against * the rest of the driver should be handled here. * * On call, kring->rhead is the first packet that userspace wants * to keep, and kring->rcur is the wakeup point. * The kernel has previously reported packets up to kring->rtail. * * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective * of whether or not we received an interrupt. */ static int iflib_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; uint32_t nm_i; /* index into the netmap ring */ uint32_t nic_i; /* index into the NIC ring */ u_int i, n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = netmap_idx_n2k(kring, kring->rhead); int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; struct if_rxd_info ri; struct ifnet *ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id]; iflib_fl_t fl = rxq->ifr_fl; if (head > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) { if (fl->ifl_sds.ifsd_map == NULL) continue; bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); } /* * First part: import newly received packets. * * nm_i is the index of the next free slot in the netmap ring, * nic_i is the index of the next received packet in the NIC ring, * and they may differ in case if_init() has been called while * in netmap mode. For the receive ring we have * * nic_i = rxr->next_check; * nm_i = kring->nr_hwtail (previous) * and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size * * rxr->next_check is set to 0 on a ring reinit */ if (netmap_no_pendintr || force_update) { int crclen = iflib_crcstrip ? 0 : 4; int error, avail; for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; nic_i = fl->ifl_cidx; nm_i = netmap_idx_n2k(kring, nic_i); avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX); for (n = 0; avail > 0; n++, avail--) { rxd_info_zero(&ri); ri.iri_frags = rxq->ifr_frags; ri.iri_qsidx = kring->ring_id; ri.iri_ifp = ctx->ifc_ifp; ri.iri_cidx = nic_i; error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen; ring->slot[nm_i].flags = 0; if (fl->ifl_sds.ifsd_map) bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } if (n) { /* update the state variables */ if (netmap_no_pendintr && !force_update) { /* diagnostics */ iflib_rx_miss ++; iflib_rx_miss_bufs += n; } fl->ifl_cidx = nic_i; kring->nr_hwtail = netmap_idx_k2n(kring, nm_i); } kring->nr_kflags &= ~NKR_PENDINTR; } } /* * Second part: skip past packets that userspace has released. * (kring->nr_hwcur to head excluded), * and make the buffers available for reception. 
* As usual nm_i is the index in the netmap ring, * nic_i is the index in the NIC ring, and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size */ /* XXX not sure how this will work with multiple free lists */ nm_i = netmap_idx_n2k(kring, kring->nr_hwcur); return (netmap_fl_refill(rxq, kring, nm_i, false)); } static void iflib_netmap_intr(struct netmap_adapter *na, int onoff) { struct ifnet *ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; CTX_LOCK(ctx); if (onoff) { IFDI_INTR_ENABLE(ctx); } else { IFDI_INTR_DISABLE(ctx); } CTX_UNLOCK(ctx); } static int iflib_netmap_attach(if_ctx_t ctx) { struct netmap_adapter na; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; bzero(&na, sizeof(na)); na.ifp = ctx->ifc_ifp; na.na_flags = NAF_BDG_MAYSLEEP; MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); na.num_tx_desc = scctx->isc_ntxd[0]; na.num_rx_desc = scctx->isc_nrxd[0]; na.nm_txsync = iflib_netmap_txsync; na.nm_rxsync = iflib_netmap_rxsync; na.nm_register = iflib_netmap_register; na.nm_intr = iflib_netmap_intr; na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; return (netmap_attach(&na)); } static void iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_slot *slot; slot = netmap_reset(na, NR_TX, txq->ift_id, 0); if (slot == NULL) return; if (txq->ift_sds.ifsd_map == NULL) return; for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { /* * In netmap mode, set the map for the packet buffer. * NOTE: Some drivers (not this one) also need to set * the physical buffer address in the NIC ring. * netmap_idx_n2k() maps a nic index, i, into the corresponding * netmap slot index, si */ - int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i); + int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i); netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); } } static void iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); - struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id]; + struct netmap_kring *kring = na->rx_rings[rxq->ifr_id]; struct netmap_slot *slot; uint32_t nm_i; slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); if (slot == NULL) return; nm_i = netmap_idx_n2k(kring, 0); netmap_fl_refill(rxq, kring, nm_i, true); } #define iflib_netmap_detach(ifp) netmap_detach(ifp) #else #define iflib_netmap_txq_init(ctx, txq) #define iflib_netmap_rxq_init(ctx, rxq) #define iflib_netmap_detach(ifp) #define iflib_netmap_attach(ctx) (0) #define netmap_rx_irq(ifp, qid, budget) (0) #define netmap_tx_irq(ifp, qid) do {} while (0) #endif #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } static __inline void prefetch2cachelines(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); #if (CACHE_LINE_SIZE < 128) __asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long))))); #endif } #else #define prefetch(x) #define prefetch2cachelines(x) #endif static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid) { iflib_fl_t fl; fl = &rxq->ifr_fl[flid]; iru->iru_paddrs = fl->ifl_bus_addrs; iru->iru_vaddrs = &fl->ifl_vm_addrs[0]; iru->iru_idxs = fl->ifl_rxd_idxs; iru->iru_qsidx = rxq->ifr_id; iru->iru_buf_size = fl->ifl_buf_size; iru->iru_flidx = fl->ifl_id; } static void _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { if (err) 
return; *(bus_addr_t *) arg = segs[0].ds_addr; } int iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags) { int err; if_shared_ctx_t sctx = ctx->ifc_sctx; device_t dev = ctx->ifc_dev; KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ sctx->isc_q_align, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->idi_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed: %d\n", __func__, err); goto fail_0; } err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map); if (err) { device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, err); goto fail_1; } dma->idi_paddr = IF_BAD_DMA; err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr, size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT); if (err || dma->idi_paddr == IF_BAD_DMA) { device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err); goto fail_2; } dma->idi_size = size; return (0); fail_2: bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); fail_1: bus_dma_tag_destroy(dma->idi_tag); fail_0: dma->idi_tag = NULL; return (err); } int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count) { int i, err; iflib_dma_info_t *dmaiter; dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) { if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0) break; } if (err) iflib_dma_free_multi(dmalist, i); return (err); } void iflib_dma_free(iflib_dma_info_t dma) { if (dma->idi_tag == NULL) return; if (dma->idi_paddr != IF_BAD_DMA) { bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->idi_tag, dma->idi_map); dma->idi_paddr = IF_BAD_DMA; } if (dma->idi_vaddr != NULL) { bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); dma->idi_vaddr = NULL; } bus_dma_tag_destroy(dma->idi_tag); dma->idi_tag = NULL; } void iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count) { int i; iflib_dma_info_t *dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) iflib_dma_free(*dmaiter); } #ifdef EARLY_AP_STARTUP static const int iflib_started = 1; #else /* * We used to abuse the smp_started flag to decide if the queues have been * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()). * That gave bad races, since the SYSINIT() runs strictly after smp_started * is set. Run a SYSINIT() strictly after that to just set a usable * completion flag. 
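
iflib_dma_alloc() follows the staged-unwind pattern: each step that can fail gets a matching fail_N label, so a late failure releases only what was already acquired, and iflib_dma_alloc_multi() extends the idea by freeing the first i entries when allocation stops partway. A compact userland sketch of the pattern, with hypothetical resources standing in for the busdma calls:

	#include <stdlib.h>

	struct exm_dma {
		void *tag;		/* stands in for the bus_dma tag */
		void *vaddr;		/* stands in for the DMA memory */
	};

	static int
	exm_dma_alloc(struct exm_dma *d, size_t size)
	{
		if ((d->tag = malloc(64)) == NULL)
			goto fail_0;
		if ((d->vaddr = malloc(size)) == NULL)
			goto fail_1;
		return (0);
	fail_1:				/* undo only what already succeeded */
		free(d->tag);
	fail_0:
		d->tag = NULL;
		return (-1);
	}
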
*/ static int iflib_started; static void iflib_record_started(void *arg) { iflib_started = 1; } SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST, iflib_record_started, NULL); #endif static int iflib_fast_intr(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int iflib_fast_intr_rxtx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx; if_ctx_t ctx; int i, cidx; if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); for (i = 0; i < rxq->ifr_ntxqirq; i++) { qidx_t txqid = rxq->ifr_txqid[i]; ctx = rxq->ifr_ctx; if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) { IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid); continue; } GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); } if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ) cidx = rxq->ifr_cq_cidx; else cidx = rxq->ifr_fl[0].ifl_cidx; if (iflib_rxd_avail(ctx, rxq, cidx, 1)) GROUPTASK_ENQUEUE(gtask); else IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); return (FILTER_HANDLED); } static int iflib_fast_intr_ctx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; if (!iflib_started) return (FILTER_HANDLED); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED) return (FILTER_HANDLED); GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, driver_intr_t handler, void *arg, char *name) { int rc, flags; struct resource *res; void *tag = NULL; device_t dev = ctx->ifc_dev; flags = RF_ACTIVE; if (ctx->ifc_flags & IFC_LEGACY) flags |= RF_SHAREABLE; MPASS(rid < 512); irq->ii_rid = rid; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags); if (res == NULL) { device_printf(dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } irq->ii_res = res; KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL")); rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET, filter, handler, arg, &tag); if (rc != 0) { device_printf(dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name ? name : "unknown", rc); return (rc); } else if (name) bus_describe_intr(dev, res, tag, "%s", name); irq->ii_tag = tag; return (0); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. This is * called only once at attach, setup is done every reset. 
* **********************************************************************/ static int iflib_txsd_alloc(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; int err, nsegments, ntsosegments; nsegments = scctx->isc_tx_nsegments; ntsosegments = scctx->isc_tx_tso_segments_max; MPASS(scctx->isc_ntxd[0] > 0); MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0); MPASS(nsegments > 0); MPASS(ntsosegments > 0); /* * Setup DMA descriptor areas. */ if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_tx_maxsize, /* maxsize */ nsegments, /* nsegments */ sctx->isc_tx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_desc_tag))) { device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err); device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n", (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize); goto fail; } if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ scctx->isc_tx_tso_size_max, /* maxsize */ ntsosegments, /* nsegments */ scctx->isc_tx_tso_segsize_max, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_tso_desc_tag))) { device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err); goto fail; } if (!(txq->ift_sds.ifsd_flags = (uint8_t *) malloc(sizeof(uint8_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } if (!(txq->ift_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ #if defined(ACPI_DMAR) || (! 
(defined(__i386__) || defined(__amd64__))) if ((ctx->ifc_flags & IFC_DMAR) == 0) return (0); if (!(txq->ift_sds.ifsd_map = (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) { err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } } #endif return (0); fail: /* We free all, it handles case where we are in the middle */ iflib_tx_structures_free(ctx); return (err); } static void iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i) { bus_dmamap_t map; map = NULL; if (txq->ift_sds.ifsd_map != NULL) map = txq->ift_sds.ifsd_map[i]; if (map != NULL) { bus_dmamap_unload(txq->ift_desc_tag, map); bus_dmamap_destroy(txq->ift_desc_tag, map); txq->ift_sds.ifsd_map[i] = NULL; } } static void iflib_txq_destroy(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; for (int i = 0; i < txq->ift_size; i++) iflib_txsd_destroy(ctx, txq, i); if (txq->ift_sds.ifsd_map != NULL) { free(txq->ift_sds.ifsd_map, M_IFLIB); txq->ift_sds.ifsd_map = NULL; } if (txq->ift_sds.ifsd_m != NULL) { free(txq->ift_sds.ifsd_m, M_IFLIB); txq->ift_sds.ifsd_m = NULL; } if (txq->ift_sds.ifsd_flags != NULL) { free(txq->ift_sds.ifsd_flags, M_IFLIB); txq->ift_sds.ifsd_flags = NULL; } if (txq->ift_desc_tag != NULL) { bus_dma_tag_destroy(txq->ift_desc_tag); txq->ift_desc_tag = NULL; } if (txq->ift_tso_desc_tag != NULL) { bus_dma_tag_destroy(txq->ift_tso_desc_tag); txq->ift_tso_desc_tag = NULL; } } static void iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i) { struct mbuf **mp; mp = &txq->ift_sds.ifsd_m[i]; if (*mp == NULL) return; if (txq->ift_sds.ifsd_map != NULL) { bus_dmamap_sync(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i]); } m_free(*mp); DBG_COUNTER_INC(tx_frees); *mp = NULL; } static int iflib_txq_setup(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; iflib_dma_info_t di; int i; /* Set number of descriptors available */ txq->ift_qstatus = IFLIB_QUEUE_IDLE; /* XXX make configurable */ txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ; /* Reset indices */ txq->ift_cidx_processed = 0; txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0; txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset]; for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) bzero((void *)di->idi_vaddr, di->idi_size); IFDI_TXQ_SETUP(ctx, txq->ift_id); for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++) bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
* **********************************************************************/ static int iflib_rxsd_alloc(iflib_rxq_t rxq) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; iflib_fl_t fl; int err; MPASS(scctx->isc_nrxd[0] > 0); MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0); fl = rxq->ifr_fl; for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_rx_maxsize, /* maxsize */ sctx->isc_rx_nsegments, /* nsegments */ sctx->isc_rx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &fl->ifl_desc_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed %d\n", __func__, err); goto fail; } if (!(fl->ifl_sds.ifsd_flags = (uint8_t *) malloc(sizeof(uint8_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } if (!(fl->ifl_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } if (!(fl->ifl_sds.ifsd_cl = (caddr_t *) malloc(sizeof(caddr_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); err = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ #if defined(ACPI_DMAR) || (! (defined(__i386__) || defined(__amd64__))) if ((ctx->ifc_flags & IFC_DMAR) == 0) continue; if (!(fl->ifl_sds.ifsd_map = (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create RX buffer DMA map\n"); goto fail; } } #endif } return (0); fail: iflib_rx_structures_free(ctx); return (err); } /* * Internal service routines */ struct rxq_refill_cb_arg { int error; bus_dma_segment_t seg; int nseg; }; static void _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct rxq_refill_cb_arg *cb_arg = arg; cb_arg->error = error; cb_arg->seg = segs[0]; cb_arg->nseg = nseg; } #ifdef ACPI_DMAR #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR) #else #define IS_DMAR(ctx) (0) #endif /** * rxq_refill - refill an rxq free-buffer list * @ctx: the iflib context * @rxq: the free-list to refill * @n: the number of new buffers to allocate * * (Re)populate an rxq free-buffer list with up to @n new packet buffers. * The caller must assure that @n does not exceed the queue's capacity. 
*/ static void _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) { struct mbuf *m; int idx, frag_idx = fl->ifl_fragidx; int pidx = fl->ifl_pidx; caddr_t cl, *sd_cl; struct mbuf **sd_m; uint8_t *sd_flags; struct if_rxd_update iru; bus_dmamap_t *sd_map; int n, i = 0; uint64_t bus_addr; int err; qidx_t credits; sd_m = fl->ifl_sds.ifsd_m; sd_map = fl->ifl_sds.ifsd_map; sd_cl = fl->ifl_sds.ifsd_cl; sd_flags = fl->ifl_sds.ifsd_flags; idx = pidx; credits = fl->ifl_credits; n = count; MPASS(n > 0); MPASS(credits + n <= fl->ifl_size); if (pidx < fl->ifl_cidx) MPASS(pidx + n <= fl->ifl_cidx); if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) MPASS(fl->ifl_gen == 0); if (pidx > fl->ifl_cidx) MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); DBG_COUNTER_INC(fl_refills); if (n > 8) DBG_COUNTER_INC(fl_refills_large); iru_init(&iru, fl->ifl_rxq, fl->ifl_id); while (n--) { /* * We allocate an uninitialized mbuf + cluster, mbuf is * initialized after rx. * * If the cluster is still set then we know a minimum sized packet was received */ bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); if ((frag_idx < 0) || (frag_idx >= fl->ifl_size)) bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); if ((cl = sd_cl[frag_idx]) == NULL) { if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) break; #if MEMORY_LOGGING fl->ifl_cl_enqueued++; #endif } if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { break; } #if MEMORY_LOGGING fl->ifl_m_enqueued++; #endif DBG_COUNTER_INC(rx_allocs); #if defined(__i386__) || defined(__amd64__) if (!IS_DMAR(ctx)) { bus_addr = pmap_kextract((vm_offset_t)cl); } else #endif { struct rxq_refill_cb_arg cb_arg; iflib_rxq_t q; cb_arg.error = 0; q = fl->ifl_rxq; MPASS(sd_map != NULL); MPASS(sd_map[frag_idx] != NULL); err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx], cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0); bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx], BUS_DMASYNC_PREREAD); if (err != 0 || cb_arg.error) { /* * !zone_pack ? 
*/ if (fl->ifl_zone == zone_pack) uma_zfree(fl->ifl_zone, cl); m_free(m); n = 0; goto done; } bus_addr = cb_arg.seg.ds_addr; } bit_set(fl->ifl_rx_bitmap, frag_idx); sd_flags[frag_idx] |= RX_SW_DESC_INUSE; MPASS(sd_m[frag_idx] == NULL); sd_cl[frag_idx] = cl; sd_m[frag_idx] = m; fl->ifl_rxd_idxs[i] = frag_idx; fl->ifl_bus_addrs[i] = bus_addr; fl->ifl_vm_addrs[i] = cl; credits++; i++; MPASS(credits <= fl->ifl_size); if (++idx == fl->ifl_size) { fl->ifl_gen = 1; idx = 0; } if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { iru.iru_pidx = pidx; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); i = 0; pidx = idx; fl->ifl_pidx = idx; fl->ifl_credits = credits; } } done: if (i) { iru.iru_pidx = pidx; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); fl->ifl_pidx = idx; fl->ifl_credits = credits; } DBG_COUNTER_INC(rxd_flush); if (fl->ifl_pidx == 0) pidx = fl->ifl_size - 1; else pidx = fl->ifl_pidx - 1; if (sd_map) bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); fl->ifl_fragidx = frag_idx; } static __inline void __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) { /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; #ifdef INVARIANTS int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; #endif MPASS(fl->ifl_credits <= fl->ifl_size); MPASS(reclaimable == delta); if (reclaimable > 0) _iflib_fl_refill(ctx, fl, min(max, reclaimable)); } static void iflib_fl_bufs_free(iflib_fl_t fl) { iflib_dma_info_t idi = fl->ifl_ifdi; uint32_t i; for (i = 0; i < fl->ifl_size; i++) { struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i]; caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; if (*sd_flags & RX_SW_DESC_INUSE) { if (fl->ifl_sds.ifsd_map != NULL) { bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i]; bus_dmamap_unload(fl->ifl_desc_tag, sd_map); if (fl->ifl_rxq->ifr_ctx->ifc_in_detach) bus_dmamap_destroy(fl->ifl_desc_tag, sd_map); } if (*sd_m != NULL) { m_init(*sd_m, M_NOWAIT, MT_DATA, 0); uma_zfree(zone_mbuf, *sd_m); } if (*sd_cl != NULL) uma_zfree(fl->ifl_zone, *sd_cl); *sd_flags = 0; } else { MPASS(*sd_cl == NULL); MPASS(*sd_m == NULL); } #if MEMORY_LOGGING fl->ifl_m_dequeued++; fl->ifl_cl_dequeued++; #endif *sd_cl = NULL; *sd_m = NULL; } #ifdef INVARIANTS for (i = 0; i < fl->ifl_size; i++) { MPASS(fl->ifl_sds.ifsd_flags[i] == 0); MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); MPASS(fl->ifl_sds.ifsd_m[i] == NULL); } #endif /* * Reset free list values */ fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; bzero(idi->idi_vaddr, idi->idi_size); } /********************************************************************* * * Initialize a receive ring and its buffers. 
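
_iflib_fl_refill() hands new buffers to the driver in bounded batches: descriptor indexes and bus/virtual addresses accumulate in ifl_rxd_idxs/ifl_bus_addrs/ifl_vm_addrs and isc_rxd_refill() is invoked every IFLIB_MAX_RX_REFRESH entries or when the requested count runs out; __iflib_fl_refill_lt() caps the request at size - credits - 1 so the producer index never catches the consumer. A standalone sketch of the batching loop, with a hypothetical exm_flush() standing in for isc_rxd_refill():

	#include <stdio.h>
	#include <stdint.h>

	#define	EXM_MAX_REFRESH	32		/* mirrors IFLIB_MAX_RX_REFRESH */

	static void
	exm_flush(uint16_t pidx, int count)	/* stands in for isc_rxd_refill() */
	{
		printf("refill: pidx=%d count=%d\n", pidx, count);
	}

	static void
	exm_refill(uint16_t *pidx, uint16_t size, int n)
	{
		uint16_t idx = *pidx;
		int i = 0;

		while (n-- > 0) {
			/* ...allocate cluster + mbuf, record its bus address... */
			i++;
			if (++idx == size)
				idx = 0;		/* producer index wraps */
			if (n == 0 || i == EXM_MAX_REFRESH) {
				exm_flush(*pidx, i);	/* hand a batch to the NIC */
				*pidx = idx;
				i = 0;
			}
		}
	}

	int
	main(void)
	{
		uint16_t pidx = 1020;

		exm_refill(&pidx, 1024, 70);	/* batches of 32, 32 and 6 */
		return (0);
	}
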
* **********************************************************************/ static int iflib_fl_setup(iflib_fl_t fl) { iflib_rxq_t rxq = fl->ifl_rxq; if_ctx_t ctx = rxq->ifr_ctx; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); /* ** Free current RX buffer structs and their mbufs */ iflib_fl_bufs_free(fl); /* Now replenish the mbufs */ MPASS(fl->ifl_credits == 0); /* * XXX don't set the max_frame_size to larger * than the hardware can handle */ if (sctx->isc_max_frame_size <= 2048) fl->ifl_buf_size = MCLBYTES; #ifndef CONTIGMALLOC_WORKS else fl->ifl_buf_size = MJUMPAGESIZE; #else else if (sctx->isc_max_frame_size <= 4096) fl->ifl_buf_size = MJUMPAGESIZE; else if (sctx->isc_max_frame_size <= 9216) fl->ifl_buf_size = MJUM9BYTES; else fl->ifl_buf_size = MJUM16BYTES; #endif if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; fl->ifl_cltype = m_gettype(fl->ifl_buf_size); fl->ifl_zone = m_getzone(fl->ifl_buf_size); /* avoid pre-allocating zillions of clusters to an idle card * potentially speeding up attach */ _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); MPASS(min(128, fl->ifl_size) == fl->ifl_credits); if (min(128, fl->ifl_size) != fl->ifl_credits) return (ENOBUFS); /* * handle failure */ MPASS(rxq != NULL); MPASS(fl->ifl_ifdi != NULL); bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Free receive ring data structures * **********************************************************************/ static void iflib_rx_sds_free(iflib_rxq_t rxq) { iflib_fl_t fl; int i; if (rxq->ifr_fl != NULL) { for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; if (fl->ifl_desc_tag != NULL) { bus_dma_tag_destroy(fl->ifl_desc_tag); fl->ifl_desc_tag = NULL; } free(fl->ifl_sds.ifsd_m, M_IFLIB); free(fl->ifl_sds.ifsd_cl, M_IFLIB); /* XXX destroy maps first */ free(fl->ifl_sds.ifsd_map, M_IFLIB); fl->ifl_sds.ifsd_m = NULL; fl->ifl_sds.ifsd_cl = NULL; fl->ifl_sds.ifsd_map = NULL; } free(rxq->ifr_fl, M_IFLIB); rxq->ifr_fl = NULL; rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; } } /* * MI independent logic * */ static void iflib_timer(void *arg) { iflib_txq_t txq = arg; if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; /* ** Check on the state of the TX queue(s), this ** can be done without the lock because its RO ** and the HUNG state will be static if set. 
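
iflib_fl_setup() above picks the receive cluster size from isc_max_frame_size, stepping through the standard mbuf cluster zones (and falling back to page-sized clusters for everything over 2KB when CONTIGMALLOC_WORKS is not defined). The full ladder as a standalone sketch with assumed constants:

	#include <assert.h>

	#define	EXM_MCLBYTES		2048
	#define	EXM_MJUMPAGESIZE	4096	/* PAGE_SIZE on most platforms */
	#define	EXM_MJUM9BYTES		(9 * 1024)
	#define	EXM_MJUM16BYTES		(16 * 1024)

	static int
	exm_rx_buf_size(int max_frame_size)
	{
		if (max_frame_size <= 2048)
			return (EXM_MCLBYTES);
		if (max_frame_size <= 4096)
			return (EXM_MJUMPAGESIZE);
		if (max_frame_size <= 9216)
			return (EXM_MJUM9BYTES);
		return (EXM_MJUM16BYTES);
	}

	int
	main(void)
	{
		assert(exm_rx_buf_size(1518) == EXM_MCLBYTES);	/* standard MTU */
		assert(exm_rx_buf_size(9018) == EXM_MJUM9BYTES);	/* 9k jumbo */
		assert(exm_rx_buf_size(9600) == EXM_MJUM16BYTES);
		return (0);
	}
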
*/ IFDI_TIMER(ctx, txq->ift_id); if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && ((txq->ift_cleaned_prev == txq->ift_cleaned) || (sctx->isc_pause_frames == 0))) goto hung; if (ifmp_ring_is_stalled(txq->ift_br)) txq->ift_qstatus = IFLIB_QUEUE_HUNG; txq->ift_cleaned_prev = txq->ift_cleaned; /* handle any laggards */ if (txq->ift_db_pending) GROUPTASK_ENQUEUE(&txq->ift_task); sctx->isc_pause_frames = 0; if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); return; hung: CTX_LOCK(ctx); if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n", txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); IFDI_WATCHDOG_RESET(ctx); ctx->ifc_watchdog_events++; ctx->ifc_flags |= IFC_DO_RESET; iflib_admin_intr_deferred(ctx); CTX_UNLOCK(ctx); } static void iflib_init_locked(if_ctx_t ctx) { if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_t ifp = ctx->ifc_ifp; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); /* Set hardware offload abilities */ if_clearhwassist(ifp); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, tx_ip_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); iflib_netmap_txq_init(ctx, txq); } #ifdef INVARIANTS i = if_getdrvflags(ifp); #endif IFDI_INIT(ctx); MPASS(if_getdrvflags(ifp) == i); for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { /* XXX this should really be done on a per-queue basis */ if (if_getcapenable(ifp) & IFCAP_NETMAP) { MPASS(rxq->ifr_id == i); iflib_netmap_rxq_init(ctx, rxq); continue; } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { if (iflib_fl_setup(fl)) { device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); goto done; } } } done: if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); IFDI_INTR_ENABLE(ctx); txq = ctx->ifc_txqs; for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); } static int iflib_media_change(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); int err; CTX_LOCK(ctx); if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) iflib_init_locked(ctx); CTX_UNLOCK(ctx); return (err); } static void iflib_media_status(if_t ifp, struct ifmediareq *ifmr) { if_ctx_t ctx = if_getsoftc(ifp); CTX_LOCK(ctx); IFDI_UPDATE_ADMIN_STATUS(ctx); IFDI_MEDIA_STATUS(ctx, ifmr); CTX_UNLOCK(ctx); } static void iflib_stop(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; iflib_dma_info_t di; iflib_fl_t fl; int i, j; /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); DELAY(1000); IFDI_STOP(ctx); DELAY(1000); iflib_debug_reset(); 
/* Wait for current tx queue users to exit to disarm watchdog timer. */ for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { /* make sure all transmitters have completed before proceeding XXX */ CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); /* clean any enqueued buffers */ iflib_ifmp_purge(txq); /* Free any existing tx buffers. */ for (j = 0; j < txq->ift_size; j++) { iflib_txsd_free(ctx, txq, j); } txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; txq->ift_pullups = 0; ifmp_ring_reset_stats(txq->ift_br); for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); } for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { /* make sure all transmitters have completed before proceeding XXX */ for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); /* also resets the free lists pidx/cidx */ for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) iflib_fl_bufs_free(fl); } } static inline caddr_t calc_next_rxd(iflib_fl_t fl, int cidx) { qidx_t size; int nrxd; caddr_t start, end, cur, next; nrxd = fl->ifl_size; size = fl->ifl_rxd_size; start = fl->ifl_ifdi->idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*nrxd; next = CACHE_PTR_NEXT(cur); return (next < end ? next : start); } static inline void prefetch_pkts(iflib_fl_t fl, int cidx) { int nextptr; int nrxd = fl->ifl_size; caddr_t next_rxd; nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); prefetch(&fl->ifl_sds.ifsd_m[nextptr]); prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); next_rxd = calc_next_rxd(fl, cidx); prefetch(next_rxd); prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); } static void rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd) { int flid, cidx; bus_dmamap_t map; iflib_fl_t fl; iflib_dma_info_t di; int next; map = NULL; flid = irf->irf_flid; cidx = irf->irf_idx; fl = &rxq->ifr_fl[flid]; sd->ifsd_fl = fl; sd->ifsd_cidx = cidx; sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx]; sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; fl->ifl_credits--; #if MEMORY_LOGGING fl->ifl_m_dequeued++; #endif if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) prefetch_pkts(fl, cidx); if (fl->ifl_sds.ifsd_map != NULL) { next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); prefetch(&fl->ifl_sds.ifsd_map[next]); map = fl->ifl_sds.ifsd_map[cidx]; di = fl->ifl_ifdi; next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1); prefetch(&fl->ifl_sds.ifsd_flags[next]); bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* not valid assert if bxe really does SGE from non-contiguous elements */ MPASS(fl->ifl_cidx == cidx); if (unload) bus_dmamap_unload(fl->ifl_desc_tag, map); } fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); if (__predict_false(fl->ifl_cidx == 0)) fl->ifl_gen = 0; if (map != NULL) 
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bit_clear(fl->ifl_rx_bitmap, cidx); } static struct mbuf * assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd) { int i, padlen , flags; struct mbuf *m, *mh, *mt; caddr_t cl; i = 0; mh = NULL; do { rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd); MPASS(*sd->ifsd_cl != NULL); MPASS(*sd->ifsd_m != NULL); /* Don't include zero-length frags */ if (ri->iri_frags[i].irf_len == 0) { /* XXX we can save the cluster here, but not the mbuf */ m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0); m_free(*sd->ifsd_m); *sd->ifsd_m = NULL; continue; } m = *sd->ifsd_m; *sd->ifsd_m = NULL; if (mh == NULL) { flags = M_PKTHDR|M_EXT; mh = mt = m; padlen = ri->iri_pad; } else { flags = M_EXT; mt->m_next = m; mt = m; /* assuming padding is only on the first fragment */ padlen = 0; } cl = *sd->ifsd_cl; *sd->ifsd_cl = NULL; /* Can these two be made one ? */ m_init(m, M_NOWAIT, MT_DATA, flags); m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); /* * These must follow m_init and m_cljset */ m->m_data += padlen; ri->iri_len -= padlen; m->m_len = ri->iri_frags[i].irf_len; } while (++i < ri->iri_nfrags); return (mh); } /* * Process one software descriptor */ static struct mbuf * iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) { struct if_rxsd sd; struct mbuf *m; /* should I merge this back in now that the two paths are basically duplicated? */ if (ri->iri_nfrags == 1 && ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd); m = *sd.ifsd_m; *sd.ifsd_m = NULL; m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m)) m->m_data += 2; #endif memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); m->m_len = ri->iri_frags[0].irf_len; } else { m = assemble_segments(rxq, ri, &sd); } m->m_pkthdr.len = ri->iri_len; m->m_pkthdr.rcvif = ri->iri_ifp; m->m_flags |= ri->iri_flags; m->m_pkthdr.ether_vtag = ri->iri_vtag; m->m_pkthdr.flowid = ri->iri_flowid; M_HASHTYPE_SET(m, ri->iri_rsstype); m->m_pkthdr.csum_flags = ri->iri_csum_flags; m->m_pkthdr.csum_data = ri->iri_csum_data; return (m); } #if defined(INET6) || defined(INET) static void iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) { CURVNET_SET(lc->ifp->if_vnet); #if defined(INET6) *v6 = VNET(ip6_forwarding); #endif #if defined(INET) *v4 = VNET(ipforwarding); #endif CURVNET_RESTORE(); } /* * Returns true if it's possible this packet could be LROed. * if it returns false, it is guaranteed that tcp_lro_rx() * would not return zero. 
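
iflib_rxd_pkt_get() special-cases small frames: a single-fragment receive no longer than MIN(IFLIB_RX_COPY_THRESH, MHLEN) is copied into the mbuf header so the cluster stays on the free list, while anything larger has the cluster attached with m_cljset(). The decision as a standalone sketch (EXM_MHLEN is an assumed header-room value, not the real MHLEN):

	#include <assert.h>
	#include <stdbool.h>

	#define	EXM_COPY_THRESH	128	/* mirrors IFLIB_RX_COPY_THRESH */
	#define	EXM_MHLEN	168	/* assumed mbuf header room */

	static bool
	exm_rx_should_copy(int nfrags, int first_frag_len)
	{
		int thresh = EXM_COPY_THRESH < EXM_MHLEN ?
		    EXM_COPY_THRESH : EXM_MHLEN;

		return (nfrags == 1 && first_frag_len <= thresh);
	}

	int
	main(void)
	{
		assert(exm_rx_should_copy(1, 64));	/* small frame: copy */
		assert(!exm_rx_should_copy(1, 1500));	/* full frame: zero-copy */
		assert(!exm_rx_should_copy(3, 100));	/* multi-frag: assemble */
		return (0);
	}

Copying tiny packets avoids churning clusters through the free list and keeps the hot data in the mbuf that if_input() will touch anyway.
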
*/ static bool iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) { struct ether_header *eh; uint16_t eh_type; eh = mtod(m, struct ether_header *); eh_type = ntohs(eh->ether_type); switch (eh_type) { #if defined(INET6) case ETHERTYPE_IPV6: return !v6_forwarding; #endif #if defined (INET) case ETHERTYPE_IP: return !v4_forwarding; #endif } return false; } #else static void iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) { } #endif static bool iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int avail, i; qidx_t *cidxp; struct if_rxd_info ri; int err, budget_left, rx_bytes, rx_pkts; iflib_fl_t fl; struct ifnet *ifp; int lro_enabled; bool lro_possible = false; bool v4_forwarding, v6_forwarding; /* * XXX early demux data packets so that if_input processing only handles * acks in interrupt context */ struct mbuf *m, *mh, *mt, *mf; ifp = ctx->ifc_ifp; mh = mt = NULL; MPASS(budget > 0); rx_pkts = rx_bytes = 0; if (sctx->isc_flags & IFLIB_HAS_RXCQ) cidxp = &rxq->ifr_cq_cidx; else cidxp = &rxq->ifr_fl[0].ifl_cidx; if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); DBG_COUNTER_INC(rx_unavail); return (false); } for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) { if (__predict_false(!CTX_ACTIVE(ctx))) { DBG_COUNTER_INC(rx_ctx_inactive); break; } /* * Reset client set fields to their default values */ rxd_info_zero(&ri); ri.iri_qsidx = rxq->ifr_id; ri.iri_cidx = *cidxp; ri.iri_ifp = ifp; ri.iri_frags = rxq->ifr_frags; err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); if (err) goto err; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { *cidxp = ri.iri_cidx; /* Update our consumer index */ /* XXX NB: shurd - check if this is still safe */ while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; rxq->ifr_cq_gen = 0; } /* was this only a completion queue message? 
*/ if (__predict_false(ri.iri_nfrags == 0)) continue; } MPASS(ri.iri_nfrags != 0); MPASS(ri.iri_len != 0); /* will advance the cidx on the corresponding free lists */ m = iflib_rxd_pkt_get(rxq, &ri); if (avail == 0 && budget_left) avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); if (__predict_false(m == NULL)) { DBG_COUNTER_INC(rx_mbuf_null); continue; } /* imm_pkt: -- cxgb */ if (mh == NULL) mh = mt = m; else { mt->m_nextpkt = m; mt = m; } } /* make sure that we can refill faster than drain */ for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); if (lro_enabled) iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); mt = mf = NULL; while (mh != NULL) { m = mh; mh = mh->m_nextpkt; m->m_nextpkt = NULL; #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) continue; #endif rx_bytes += m->m_pkthdr.len; rx_pkts++; #if defined(INET6) || defined(INET) if (lro_enabled) { if (!lro_possible) { lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); if (lro_possible && mf != NULL) { ifp->if_input(ifp, mf); DBG_COUNTER_INC(rx_if_input); mt = mf = NULL; } } if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == (CSUM_L4_CALC|CSUM_L4_VALID)) { if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) continue; } } #endif if (lro_possible) { ifp->if_input(ifp, m); DBG_COUNTER_INC(rx_if_input); continue; } if (mf == NULL) mf = m; if (mt != NULL) mt->m_nextpkt = m; mt = m; } if (mf != NULL) { ifp->if_input(ifp, mf); DBG_COUNTER_INC(rx_if_input); } if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); /* * Flush any outstanding LRO work */ #if defined(INET6) || defined(INET) tcp_lro_flush_all(&rxq->ifr_lc); #endif if (avail) return true; return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); err: CTX_LOCK(ctx); ctx->ifc_flags |= IFC_DO_RESET; iflib_admin_intr_deferred(ctx); CTX_UNLOCK(ctx); return (false); } #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) static inline qidx_t txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (in_use > 4*minthresh) return (notify_count); if (in_use > 2*minthresh) return (notify_count >> 1); if (in_use > minthresh) return (notify_count >> 3); return (0); } static inline qidx_t txq_max_rs_deferred(iflib_txq_t txq) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (txq->ift_in_use > 4*minthresh) return (notify_count); if (txq->ift_in_use > 2*minthresh) return (notify_count >> 1); if (txq->ift_in_use > minthresh) return (notify_count >> 2); return (2); } #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) /* forward compatibility for cxgb */ #define FIRST_QSET(ctx) 0 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) /* XXX we should be setting this to something other than zero 
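
txq_max_db_deferred() scales doorbell batching with ring occupancy: the busier the ring, the more descriptor updates may be deferred (up to TXD_NOTIFY_COUNT) before iflib_txd_db_check() flushes them, while a nearly idle ring gets the doorbell immediately to keep latency low. With assumed ring parameters, a standalone sketch:

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t
	exm_max_db_deferred(uint16_t size, uint16_t update_freq, uint16_t in_use)
	{
		uint16_t notify = size / update_freq - 1;	/* TXD_NOTIFY_COUNT */
		uint16_t minthresh = size / 8;

		if (in_use > 4 * minthresh)	/* mostly full: defer longest */
			return (notify);
		if (in_use > 2 * minthresh)
			return (notify >> 1);
		if (in_use > minthresh)
			return (notify >> 3);
		return (0);			/* nearly idle: ring doorbell now */
	}

	int
	main(void)
	{
		/* 1024 descriptors, update_freq 16: thresholds 0, 7 and 63 */
		printf("%d %d %d\n",
		    exm_max_db_deferred(1024, 16, 100),
		    exm_max_db_deferred(1024, 16, 200),
		    exm_max_db_deferred(1024, 16, 900));
		return (0);
	}
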
*/ #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) #define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max) static inline bool iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) { qidx_t dbval, max; bool rang; rang = false; max = TXQ_MAX_DB_DEFERRED(txq, in_use); if (ring || txq->ift_db_pending >= max) { dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); txq->ift_db_pending = txq->ift_npending = 0; rang = true; } return (rang); } #ifdef PKT_DEBUG static void print_pkt(if_pkt_info_t pi) { printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); } #endif #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) static int iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) { if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; struct ether_vlan_header *eh; struct mbuf *m, *n; n = m = *mp; if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && M_WRITABLE(m) == 0) { if ((m = m_dup(m, M_NOWAIT)) == NULL) { return (ENOMEM); } else { m_freem(*mp); n = *mp = m; } } /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. */ if (__predict_false(m->m_len < sizeof(*eh))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) return (ENOMEM); } eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { pi->ipi_etype = ntohs(eh->evl_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { pi->ipi_etype = ntohs(eh->evl_encap_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN; } switch (pi->ipi_etype) { #ifdef INET case ETHERTYPE_IP: { struct ip *ip = NULL; struct tcphdr *th = NULL; int minthlen; minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); if (__predict_false(m->m_len < minthlen)) { /* * if this code bloat is causing too much of a hit * move it to a separate function and mark it noinline */ if (m->m_len == pi->ipi_ehdrlen) { n = m->m_next; MPASS(n); if (n->m_len >= sizeof(*ip)) { ip = (struct ip *)n->m_data; if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); } } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } } else { ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } pi->ipi_ip_hlen = ip->ip_hl << 2; pi->ipi_ipproto = ip->ip_p; pi->ipi_flags |= IPI_TX_IPV4; if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) ip->ip_sum = 0; if (IS_TSO4(pi)) { if (pi->ipi_ipproto == IPPROTO_TCP) { if (__predict_false(th == NULL)) { txq->ift_pullups++; 
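/* TSO requires the TCP header to be contiguous in the first mbuf; pull it up here so th_flags, th_off and th_seq can be read below and the pseudo-header checksum can be seeded. */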
if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) return (ENOMEM); th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; pi->ipi_tcp_seq = th->th_seq; } if (__predict_false(ip->ip_p != IPPROTO_TCP)) return (ENXIO); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { ip->ip_sum = 0; ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); } } break; } #endif #ifdef INET6 case ETHERTYPE_IPV6: { struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); struct tcphdr *th; pi->ipi_ip_hlen = sizeof(struct ip6_hdr); if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) return (ENOMEM); } th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); /* XXX-BZ this will go badly in case of ext hdrs. */ pi->ipi_ipproto = ip6->ip6_nxt; pi->ipi_flags |= IPI_TX_IPV6; if (IS_TSO6(pi)) { if (pi->ipi_ipproto == IPPROTO_TCP) { if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) return (ENOMEM); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; } if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) return (ENXIO); /* * The corresponding flag is set by the stack in the IPv4 * TSO case, but not in IPv6 (at least in FreeBSD 10.2). * So, set it here because the rest of the flow requires it. */ pi->ipi_csum_flags |= CSUM_TCP_IPV6; th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; } break; } #endif default: pi->ipi_csum_flags &= ~CSUM_OFFLOAD; pi->ipi_ip_hlen = 0; break; } *mp = m; return (0); } static __noinline struct mbuf * collapse_pkthdr(struct mbuf *m0) { struct mbuf *m, *m_next, *tmp; m = m0; m_next = m->m_next; while (m_next != NULL && m_next->m_len == 0) { m = m_next; m->m_next = NULL; m_free(m); m_next = m_next->m_next; } m = m0; m->m_next = m_next; if ((m_next->m_flags & M_EXT) == 0) { m = m_defrag(m, M_NOWAIT); } else { tmp = m_next->m_next; memcpy(m_next, m, MPKTHSIZE); m = m_next; m->m_next = tmp; } return (m); } /* * If dodgy hardware rejects the scatter gather chain we've handed it * we'll need to remove the mbuf chain from ifsg_m[] before we can add the * m_defrag'd mbufs */ static __noinline struct mbuf * iflib_remove_mbuf(iflib_txq_t txq) { int ntxd, i, pidx; struct mbuf *m, *mh, **ifsd_m; pidx = txq->ift_pidx; ifsd_m = txq->ift_sds.ifsd_m; ntxd = txq->ift_size; mh = m = ifsd_m[pidx]; ifsd_m[pidx] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif i = 1; while (m) { ifsd_m[(pidx + i) & (ntxd -1)] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif m = m->m_next; i++; } return (mh); } static int iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs, int max_segs, int flags) { if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; int i, next, pidx, err, ntxd, count; struct mbuf *m, *tmp, **ifsd_m; m = *m0; /* * Please don't ever do this */ if (__predict_false(m->m_len == 0)) *m0 = m = collapse_pkthdr(m); ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; ifsd_m = txq->ift_sds.ifsd_m; ntxd = txq->ift_size; pidx 
= txq->ift_pidx; if (map != NULL) { uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags; err = bus_dmamap_load_mbuf_sg(tag, map, *m0, segs, nsegs, BUS_DMA_NOWAIT); if (err) return (err); ifsd_flags[pidx] |= TX_SW_DESC_MAPPED; count = 0; m = *m0; do { if (__predict_false(m->m_len <= 0)) { tmp = m; m = m->m_next; tmp->m_next = NULL; m_free(tmp); continue; } m = m->m_next; count++; } while (m != NULL); if (count > *nsegs) { ifsd_m[pidx] = *m0; ifsd_m[pidx]->m_flags |= M_TOOBIG; return (0); } m = *m0; count = 0; do { next = (pidx + count) & (ntxd-1); MPASS(ifsd_m[next] == NULL); ifsd_m[next] = m; count++; tmp = m; m = m->m_next; } while (m != NULL); } else { int buflen, sgsize, maxsegsz, max_sgsize; vm_offset_t vaddr; vm_paddr_t curaddr; count = i = 0; m = *m0; if (m->m_pkthdr.csum_flags & CSUM_TSO) maxsegsz = scctx->isc_tx_tso_segsize_max; else maxsegsz = sctx->isc_tx_maxsegsize; do { if (__predict_false(m->m_len <= 0)) { tmp = m; m = m->m_next; tmp->m_next = NULL; m_free(tmp); continue; } buflen = m->m_len; vaddr = (vm_offset_t)m->m_data; /* * see if we can't be smarter about physically * contiguous mappings */ next = (pidx + count) & (ntxd-1); MPASS(ifsd_m[next] == NULL); #if MEMORY_LOGGING txq->ift_enqueued++; #endif ifsd_m[next] = m; while (buflen > 0) { if (i >= max_segs) goto err; max_sgsize = MIN(buflen, maxsegsz); curaddr = pmap_kextract(vaddr); sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); sgsize = MIN(sgsize, max_sgsize); segs[i].ds_addr = curaddr; segs[i].ds_len = sgsize; vaddr += sgsize; buflen -= sgsize; i++; } count++; tmp = m; m = m->m_next; } while (m != NULL); *nsegs = i; } return (0); err: *m0 = iflib_remove_mbuf(txq); return (EFBIG); } static inline caddr_t calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) { qidx_t size; int ntxd; caddr_t start, end, cur, next; ntxd = txq->ift_size; size = txq->ift_txd_size[qid]; start = txq->ift_ifdi[qid].idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*ntxd; next = CACHE_PTR_NEXT(cur); return (next < end ? next : start); } /* * Pad an mbuf to ensure a minimum ethernet frame size. 
* min_frame_size is the frame size (less CRC) to pad the mbuf to */ static __noinline int iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size) { /* * 18 is enough bytes to pad an ARP packet to 46 bytes, and * an ARP message is the smallest common payload I can think of */ static char pad[18]; /* just zeros */ int n; struct mbuf *new_head; if (!M_WRITABLE(*m_head)) { new_head = m_dup(*m_head, M_NOWAIT); if (new_head == NULL) { m_freem(*m_head); device_printf(dev, "cannot pad short frame, m_dup() failed\n"); DBG_COUNTER_INC(encap_pad_mbuf_fail); return ENOMEM; } m_freem(*m_head); *m_head = new_head; } for (n = min_frame_size - (*m_head)->m_pkthdr.len; n > 0; n -= sizeof(pad)) if (!m_append(*m_head, min(n, sizeof(pad)), pad)) break; if (n > 0) { m_freem(*m_head); device_printf(dev, "cannot pad short frame\n"); DBG_COUNTER_INC(encap_pad_mbuf_fail); return (ENOBUFS); } return 0; } static int iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) { if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; bus_dma_segment_t *segs; struct mbuf *m_head; void *next_txd; bus_dmamap_t map; struct if_pkt_info pi; int remap = 0; int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; bus_dma_tag_t desc_tag; segs = txq->ift_segs; ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; segs = txq->ift_segs; ntxd = txq->ift_size; m_head = *m_headp; map = NULL; /* * If we're doing TSO the next descriptor to clean may be quite far ahead */ cidx = txq->ift_cidx; pidx = txq->ift_pidx; if (ctx->ifc_flags & IFC_PREFETCH) { next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { next_txd = calc_next_txd(txq, cidx, 0); prefetch(next_txd); } /* prefetch the next cache line of mbuf pointers and flags */ prefetch(&txq->ift_sds.ifsd_m[next]); if (txq->ift_sds.ifsd_map != NULL) { prefetch(&txq->ift_sds.ifsd_map[next]); next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); prefetch(&txq->ift_sds.ifsd_flags[next]); } } else if (txq->ift_sds.ifsd_map != NULL) map = txq->ift_sds.ifsd_map[pidx]; if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { desc_tag = txq->ift_tso_desc_tag; max_segs = scctx->isc_tx_tso_segments_max; } else { desc_tag = txq->ift_desc_tag; max_segs = scctx->isc_tx_nsegments; } if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) && __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) { err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size); if (err) return err; } m_head = *m_headp; pkt_info_zero(&pi); pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); pi.ipi_pidx = pidx; pi.ipi_qsidx = txq->ift_id; pi.ipi_len = m_head->m_pkthdr.len; pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ?
m_head->m_pkthdr.ether_vtag : 0; /* deliberate bitwise OR to make one condition */ if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) return (err); m_head = *m_headp; } retry: err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT); defrag: if (__predict_false(err)) { switch (err) { case EFBIG: /* try collapse once and defrag once */ if (remap == 0) m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); if (remap == 1) m_head = m_defrag(*m_headp, M_NOWAIT); remap++; if (__predict_false(m_head == NULL)) goto defrag_failed; txq->ift_mbuf_defrag++; *m_headp = m_head; goto retry; break; case ENOMEM: txq->ift_no_tx_dma_setup++; break; default: txq->ift_no_tx_dma_setup++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; break; } txq->ift_map_failed++; DBG_COUNTER_INC(encap_load_mbuf_fail); return (err); } /* * XXX assumes a 1 to 1 relationship between segments and * descriptors - this does not hold true on all drivers, e.g. * cxgb */ if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { txq->ift_no_desc_avail++; if (map != NULL) bus_dmamap_unload(desc_tag, map); DBG_COUNTER_INC(encap_txq_avail_fail); if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) GROUPTASK_ENQUEUE(&txq->ift_task); return (ENOBUFS); } /* * On Intel cards we can greatly reduce the number of TX interrupts * we see by only setting report status on every Nth descriptor. * However, this also means that the driver will need to keep track * of the descriptors that RS was set on to check them for the DD bit. */ txq->ift_rs_pending += nsegs + 1; if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs - 1) <= MAX_TX_DESC(ctx)) { pi.ipi_flags |= IPI_TX_INTR; txq->ift_rs_pending = 0; } pi.ipi_segs = segs; pi.ipi_nsegs = nsegs; MPASS(pidx >= 0 && pidx < txq->ift_size); #ifdef PKT_DEBUG print_pkt(&pi); #endif if (map != NULL) bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE); if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { if (map != NULL) bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); DBG_COUNTER_INC(tx_encap); MPASS(pi.ipi_new_pidx < txq->ift_size); ndesc = pi.ipi_new_pidx - pi.ipi_pidx; if (pi.ipi_new_pidx < pi.ipi_pidx) { ndesc += txq->ift_size; txq->ift_gen = 1; } /* * drivers can need as many as * two sentinels */ MPASS(ndesc <= pi.ipi_nsegs + 2); MPASS(pi.ipi_new_pidx != pidx); MPASS(ndesc > 0); txq->ift_in_use += ndesc; /* * We update the last software descriptor again here because there may * be a sentinel and/or there may be more mbufs than segments */ txq->ift_pidx = pi.ipi_new_pidx; txq->ift_npending += pi.ipi_ndescs; } else if (__predict_false(err == EFBIG && remap < 2)) { *m_headp = m_head = iflib_remove_mbuf(txq); remap = 1; txq->ift_txd_encap_efbig++; goto defrag; } else DBG_COUNTER_INC(encap_txd_encap_fail); return (err); defrag_failed: txq->ift_mbuf_defrag_failed++; txq->ift_map_failed++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; return (ENOMEM); } static void iflib_tx_desc_free(iflib_txq_t txq, int n) { int hasmap; uint32_t qsize, cidx, mask, gen; struct mbuf *m, **ifsd_m; uint8_t *ifsd_flags; bus_dmamap_t *ifsd_map; bool do_prefetch; cidx = txq->ift_cidx; gen = txq->ift_gen; qsize = txq->ift_size; mask = qsize-1; hasmap = txq->ift_sds.ifsd_map != NULL; ifsd_flags = txq->ift_sds.ifsd_flags; ifsd_m = txq->ift_sds.ifsd_m; ifsd_map = 
txq->ift_sds.ifsd_map; do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); while (n--) { if (do_prefetch) { prefetch(ifsd_m[(cidx + 3) & mask]); prefetch(ifsd_m[(cidx + 4) & mask]); } if (ifsd_m[cidx] != NULL) { prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]); if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) { /* * does it matter if it's not the TSO tag? If so we'll * have to add the type to flags */ bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; } if ((m = ifsd_m[cidx]) != NULL) { /* XXX we don't support any drivers that batch packets yet */ MPASS(m->m_nextpkt == NULL); /* if the number of clusters exceeds the number of segments * there won't be space on the ring to save a pointer to each * cluster so we simply free the list here */ if (m->m_flags & M_TOOBIG) { m_freem(m); } else { m_free(m); } ifsd_m[cidx] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif DBG_COUNTER_INC(tx_frees); } } if (__predict_false(++cidx == qsize)) { cidx = 0; gen = 0; } } txq->ift_cidx = cidx; txq->ift_gen = gen; } static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) { int reclaim; if_ctx_t ctx = txq->ift_ctx; KASSERT(thresh >= 0, ("invalid threshold to reclaim")); MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); /* * Need a rate-limiting check so that this isn't called every time */ iflib_tx_credits_update(ctx, txq); reclaim = DESC_RECLAIMABLE(txq); if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { #ifdef INVARIANTS if (iflib_verbose_debug) { printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, reclaim, thresh); } #endif return (0); } iflib_tx_desc_free(txq, reclaim); txq->ift_cleaned += reclaim; txq->ift_in_use -= reclaim; return (reclaim); } static struct mbuf ** _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) { int next, size; struct mbuf **items; size = r->size; next = (cidx + CACHE_PTR_INCREMENT) & (size-1); items = __DEVOLATILE(struct mbuf **, &r->items[0]); prefetch(items[(cidx + offset) & (size-1)]); if (remaining > 1) { prefetch2cachelines(&items[next]); prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); } return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); } static void iflib_txq_check_drain(iflib_txq_t txq, int budget) { ifmp_ring_check_drainage(txq->ift_br, budget); } static uint32_t iflib_txq_can_drain(struct ifmp_ring *r) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) || ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); } static uint32_t iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; struct ifnet *ifp = ctx->ifc_ifp; struct mbuf **mp, *m; int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail; int reclaimed, err, in_use_prev, desc_used; bool do_prefetch, ring, rang; if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(txq_drain_notready); return (0); } reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use); avail = IDXDIFF(pidx, cidx, r->size); 
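/* avail is the number of mp_ring entries between cidx and pidx; the code below either frees them (on qflush) or encapsulates up to TX_BATCH_SIZE of them per pass. */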
if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { DBG_COUNTER_INC(txq_drain_flushing); for (i = 0; i < avail; i++) { m_free(r->items[(cidx + i) & (r->size-1)]); r->items[(cidx + i) & (r->size-1)] = NULL; } return (avail); } if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); DBG_COUNTER_INC(txq_drain_oactive); return (0); } if (reclaimed) txq->ift_qstatus = IFLIB_QUEUE_IDLE; consumed = mcast_sent = bytes_sent = pkt_sent = 0; count = MIN(avail, TX_BATCH_SIZE); #ifdef INVARIANTS if (iflib_verbose_debug) printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, avail, ctx->ifc_flags, TXQ_AVAIL(txq)); #endif do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); avail = TXQ_AVAIL(txq); for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) { int pidx_prev, rem = do_prefetch ? count - i : 0; mp = _ring_peek_one(r, cidx, i, rem); MPASS(mp != NULL && *mp != NULL); if (__predict_false(*mp == (struct mbuf *)txq)) { consumed++; reclaimed++; continue; } in_use_prev = txq->ift_in_use; pidx_prev = txq->ift_pidx; err = iflib_encap(txq, mp); if (__predict_false(err)) { DBG_COUNTER_INC(txq_drain_encapfail); /* no room - bail out */ if (err == ENOBUFS) break; consumed++; DBG_COUNTER_INC(txq_drain_encapfail); /* we can't send this packet - skip it */ continue; } consumed++; pkt_sent++; m = *mp; DBG_COUNTER_INC(tx_sent); bytes_sent += m->m_pkthdr.len; mcast_sent += !!(m->m_flags & M_MCAST); avail = TXQ_AVAIL(txq); txq->ift_db_pending += (txq->ift_in_use - in_use_prev); desc_used += (txq->ift_in_use - in_use_prev); ETHER_BPF_MTAP(ifp, m); if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) break; rang = iflib_txd_db_check(ctx, txq, false, in_use_prev); } /* deliberate use of bitwise or to avoid gratuitous short-circuit */ ring = rang ? 
false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)); iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use); if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); if (mcast_sent) if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); #ifdef INVARIANTS if (iflib_verbose_debug) printf("consumed=%d\n", consumed); #endif return (consumed); } static uint32_t iflib_txq_drain_always(struct ifmp_ring *r) { return (1); } static uint32_t iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { int i, avail; struct mbuf **mp; iflib_txq_t txq; txq = r->cookie; txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); avail = IDXDIFF(pidx, cidx, r->size); for (i = 0; i < avail; i++) { mp = _ring_peek_one(r, cidx, i, avail - i); if (__predict_false(*mp == (struct mbuf *)txq)) continue; m_freem(*mp); } MPASS(ifmp_ring_is_stalled(r) == 0); return (avail); } static void iflib_ifmp_purge(iflib_txq_t txq) { struct ifmp_ring *r; r = txq->ift_br; r->drain = iflib_txq_drain_free; r->can_drain = iflib_txq_drain_always; ifmp_ring_check_drainage(r, r->size); r->drain = iflib_txq_drain; r->can_drain = iflib_txq_can_drain; } static void _task_fn_tx(void *context) { iflib_txq_t txq = context; if_ctx_t ctx = txq->ift_ctx; struct ifnet *ifp = ctx->ifc_ifp; int rc; #ifdef IFLIB_DIAGNOSTICS txq->ift_cpu_exec_count[curcpu]++; #endif if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; if (if_getcapenable(ifp) & IFCAP_NETMAP) { if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)) netmap_tx_irq(ifp, txq->ift_id); IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); return; } if (txq->ift_db_pending) ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE); ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else { rc = IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); } } static void _task_fn_rx(void *context) { iflib_rxq_t rxq = context; if_ctx_t ctx = rxq->ifr_ctx; bool more; int rc; uint16_t budget; #ifdef IFLIB_DIAGNOSTICS rxq->ifr_cpu_exec_count[curcpu]++; #endif DBG_COUNTER_INC(task_fn_rxs); if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; more = true; #ifdef DEV_NETMAP if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) { u_int work = 0; if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) { more = false; } } #endif budget = ctx->ifc_sysctl_rx_budget; if (budget == 0) budget = 16; /* XXX */ if (more == false || (more = iflib_rxeof(rxq, budget)) == false) { if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else { DBG_COUNTER_INC(rx_intr_enables); rc = IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); } } if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; if (more) GROUPTASK_ENQUEUE(&rxq->ifr_task); } static void _task_fn_admin(void *context) { if_ctx_t ctx = context; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; iflib_txq_t txq; int i; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) { if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { return; } } CTX_LOCK(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); } IFDI_UPDATE_ADMIN_STATUS(ctx); for (txq = 
ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); IFDI_LINK_INTR_ENABLE(ctx); if (ctx->ifc_flags & IFC_DO_RESET) { ctx->ifc_flags &= ~IFC_DO_RESET; iflib_if_init_locked(ctx); } CTX_UNLOCK(ctx); if (LINK_ACTIVE(ctx) == 0) return; for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); } static void _task_fn_iov(void *context) { if_ctx_t ctx = context; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; CTX_LOCK(ctx); IFDI_VFLR_HANDLE(ctx); CTX_UNLOCK(ctx); } static int iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { int err; if_int_delay_info_t info; if_ctx_t ctx; info = (if_int_delay_info_t)arg1; ctx = info->iidi_ctx; info->iidi_req = req; info->iidi_oidp = oidp; CTX_LOCK(ctx); err = IFDI_SYSCTL_INT_DELAY(ctx, info); CTX_UNLOCK(ctx); return (err); } /********************************************************************* * * IFNET FUNCTIONS * **********************************************************************/ static void iflib_if_init_locked(if_ctx_t ctx) { iflib_stop(ctx); iflib_init_locked(ctx); } static void iflib_if_init(void *arg) { if_ctx_t ctx = arg; CTX_LOCK(ctx); iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static int iflib_if_transmit(if_t ifp, struct mbuf *m) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq; int err, qidx; if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(tx_frees); m_freem(m); return (ENOBUFS); } MPASS(m->m_nextpkt == NULL); qidx = 0; if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m)) qidx = QIDX(ctx, m); /* * XXX calculate buf_ring based on flowid (divvy up bits?) */ txq = &ctx->ifc_txqs[qidx]; #ifdef DRIVER_BACKPRESSURE if (txq->ift_closed) { while (m != NULL) { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); m = next; } return (ENOBUFS); } #endif #ifdef notyet qidx = count = 0; mp = marr; next = m; do { count++; next = next->m_nextpkt; } while (next != NULL); if (count > nitems(marr)) if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { /* XXX check nextpkt */ m_freem(m); /* XXX simplify for now */ DBG_COUNTER_INC(tx_frees); return (ENOBUFS); } for (next = m, i = 0; next != NULL; i++) { mp[i] = next; next = next->m_nextpkt; mp[i]->m_nextpkt = NULL; } #endif DBG_COUNTER_INC(tx_seen); err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE); GROUPTASK_ENQUEUE(&txq->ift_task); if (err) { /* support forthcoming later */ #ifdef DRIVER_BACKPRESSURE txq->ift_closed = TRUE; #endif ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); m_freem(m); } return (err); } static void iflib_if_qflush(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq = ctx->ifc_txqs; int i; CTX_LOCK(ctx); ctx->ifc_flags |= IFC_QFLUSH; CTX_UNLOCK(ctx); for (i = 0; i < NTXQSETS(ctx); i++, txq++) while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) iflib_txq_check_drain(txq, 0); CTX_LOCK(ctx); ctx->ifc_flags &= ~IFC_QFLUSH; CTX_UNLOCK(ctx); if_qflush(ifp); } #define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO) static int iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) { if_ctx_t ctx = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool 
avoid_reset = FALSE; int err = 0, reinit = 0, bits; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { if_setflagbits(ifp, IFF_UP,0); if (!(if_getdrvflags(ifp)& IFF_DRV_RUNNING)) reinit = 1; #ifdef INET if (!(if_getflags(ifp) & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else err = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: CTX_LOCK(ctx); if (ifr->ifr_mtu == if_getmtu(ifp)) { CTX_UNLOCK(ctx); break; } bits = if_getdrvflags(ifp); /* stop the driver and free any clusters before proceeding */ iflib_stop(ctx); if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) ctx->ifc_flags |= IFC_MULTISEG; else ctx->ifc_flags &= ~IFC_MULTISEG; err = if_setmtu(ifp, ifr->ifr_mtu); } iflib_init_locked(ctx); if_setdrvflags(ifp, bits); CTX_UNLOCK(ctx); break; case SIOCSIFFLAGS: CTX_LOCK(ctx); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); } } else reinit = 1; } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { iflib_stop(ctx); } ctx->ifc_if_flags = if_getflags(ifp); CTX_UNLOCK(ctx); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); IFDI_MULTI_SET(ctx); IFDI_INTR_ENABLE(ctx); CTX_UNLOCK(ctx); } break; case SIOCSIFMEDIA: CTX_LOCK(ctx); IFDI_MEDIA_SET(ctx); CTX_UNLOCK(ctx); /* falls thru */ case SIOCGIFMEDIA: case SIOCGIFXMEDIA: err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command); break; case SIOCGI2C: { struct ifi2creq i2c; err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (err != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { err = EINVAL; break; } if (i2c.len > sizeof(i2c.data)) { err = EINVAL; break; } if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) err = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } case SIOCSIFCAP: { int mask, setmask; mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); setmask = 0; #ifdef TCP_OFFLOAD setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); #endif setmask |= (mask & IFCAP_FLAGS); if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); if ((mask & IFCAP_WOL) && (if_getcapabilities(ifp) & IFCAP_WOL) != 0) setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC)); if_vlancap(ifp); /* * want to ensure that traffic has stopped before we change any of the flags */ if (setmask) { CTX_LOCK(ctx); bits = if_getdrvflags(ifp); if (bits & IFF_DRV_RUNNING) iflib_stop(ctx); if_togglecapenable(ifp, setmask); if (bits & IFF_DRV_RUNNING) iflib_init_locked(ctx); if_setdrvflags(ifp, bits); CTX_UNLOCK(ctx); } break; } case SIOCGPRIVATE_0: case SIOCSDRVSPEC: case SIOCGDRVSPEC: CTX_LOCK(ctx); err = IFDI_PRIV_IOCTL(ctx, command, data); CTX_UNLOCK(ctx); break; default: err = ether_ioctl(ifp, command, data); break; } if (reinit) iflib_if_init(ctx); return (err); } static uint64_t iflib_if_get_counter(if_t ifp, ift_counter cnt) { if_ctx_t ctx = if_getsoftc(ifp); return (IFDI_GET_COUNTER(ctx, cnt)); } /********************************************************************* * * OTHER FUNCTIONS EXPORTED TO THE STACK * **********************************************************************/ static void 
iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; CTX_LOCK(ctx); IFDI_VLAN_REGISTER(ctx, vtag); /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; CTX_LOCK(ctx); IFDI_VLAN_UNREGISTER(ctx, vtag); /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_led_func(void *arg, int onoff) { if_ctx_t ctx = arg; CTX_LOCK(ctx); IFDI_LED_FUNC(ctx, onoff); CTX_UNLOCK(ctx); } /********************************************************************* * * BUS FUNCTION DEFINITIONS * **********************************************************************/ int iflib_device_probe(device_t dev) { pci_vendor_info_t *ent; uint16_t pci_vendor_id, pci_device_id; uint16_t pci_subvendor_id, pci_subdevice_id; uint16_t pci_rev_id; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); pci_rev_id = pci_get_revid(dev); if (sctx->isc_parse_devinfo != NULL) sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); ent = sctx->isc_vendor_info; while (ent->pvi_vendor_id != 0) { if (pci_vendor_id != ent->pvi_vendor_id) { ent++; continue; } if ((pci_device_id == ent->pvi_device_id) && ((pci_subvendor_id == ent->pvi_subvendor_id) || (ent->pvi_subvendor_id == 0)) && ((pci_subdevice_id == ent->pvi_subdevice_id) || (ent->pvi_subdevice_id == 0)) && ((pci_rev_id == ent->pvi_rev_id) || (ent->pvi_rev_id == 0))) { device_set_desc_copy(dev, ent->pvi_name); /* this needs to be changed to zero if the bus probing code * ever stops re-probing on best match because the sctx * may have its values over written by register calls * in subsequent probes */ return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } int iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) { int err, rid, msix, msix_bar; if_ctx_t ctx; if_t ifp; if_softc_ctx_t scctx; int i; uint16_t main_txq; uint16_t main_rxq; ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); if (sc == NULL) { sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); device_set_softc(dev, ctx); ctx->ifc_flags |= IFC_SC_ALLOCATED; } ctx->ifc_sctx = sctx; ctx->ifc_dev = dev; ctx->ifc_softc = sc; if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "iflib_register failed %d\n", err); return (err); } iflib_add_device_sysctl_pre(ctx); scctx = &ctx->ifc_softc_ctx; ifp = ctx->ifc_ifp; ctx->ifc_nhwtxqs = sctx->isc_ntxqs; /* * XXX sanity check that ntxd & nrxd are a power of 2 */ if (ctx->ifc_sysctl_ntxqs != 0) scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; if (ctx->ifc_sysctl_nrxqs != 0) scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; for (i = 0; i < sctx->isc_ntxqs; i++) { if (ctx->ifc_sysctl_ntxds[i] != 0) scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; else scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (ctx->ifc_sysctl_nrxds[i] != 0) scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; else scctx->isc_nrxd[i] = 
sctx->isc_nrxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; } if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; } if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; } } if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); return (err); } _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; #ifdef INVARIANTS MPASS(scctx->isc_capenable); if (scctx->isc_capenable & IFCAP_TXCSUM) MPASS(scctx->isc_tx_csum_flags); #endif if_setcapabilities(ifp, scctx->isc_capenable | IFCAP_HWSTATS); if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS); if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; #ifdef ACPI_DMAR if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL) ctx->ifc_flags |= IFC_DMAR; #elif !(defined(__i386__) || defined(__amd64__)) /* set unconditionally for !x86 */ ctx->ifc_flags |= IFC_DMAR; #endif msix_bar = scctx->isc_msix_bar; main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0; /* XXX change for per-queue sizes */ device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); for (i = 0; i < sctx->isc_nrxqs; i++) { if (!powerof2(scctx->isc_nrxd[i])) { /* round down instead? 
*/ device_printf(dev, "# rx descriptors must be a power of 2\n"); err = EINVAL; goto fail; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (!powerof2(scctx->isc_ntxd[i])) { device_printf(dev, "# tx descriptors must be a power of 2"); err = EINVAL; goto fail; } } if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_tso_segments_max = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); /* * Protect the stack against modern hardware */ if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX) scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX; /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max; ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max; ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max; if (scctx->isc_rss_table_size == 0) scctx->isc_rss_table_size = 64; scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); /* XXX format name */ taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin"); /* Set up cpu set. If it fails, use the set of all CPUs. */ if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) { device_printf(dev, "Unable to fetch CPU list\n"); CPU_COPY(&all_cpus, &ctx->ifc_cpus); } MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0); /* ** Now setup MSI or MSI/X, should ** return us the number of supported ** vectors. (Will be 1 for MSI) */ if (sctx->isc_flags & IFLIB_SKIP_MSIX) { msix = scctx->isc_vectors; } else if (scctx->isc_msix_bar != 0) /* * The simple fact that isc_msix_bar is not 0 does not mean we * we have a good value there that is known to work. */ msix = iflib_msix_init(ctx); else { scctx->isc_vectors = 1; scctx->isc_ntxqsets = 1; scctx->isc_nrxqsets = 1; scctx->isc_intr = IFLIB_INTR_LEGACY; msix = 0; } /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); goto fail; } if ((err = iflib_qset_structures_setup(ctx))) { device_printf(dev, "qset structure setup failed %d\n", err); goto fail_queues; } /* * Group taskqueues aren't properly set up until SMP is started, * so we disable interrupts until we can handle them post * SI_SUB_SMP. * * XXX: disabling interrupts doesn't actually work, at least for * the non-MSI case. When they occur before SI_SUB_SMP completes, * we do null handling and depend on this not causing too large an * interrupt storm. 
*/ IFDI_INTR_DISABLE(ctx); if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) { device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err); goto fail_intr_free; } if (msix <= 1) { rid = 0; if (scctx->isc_intr == IFLIB_INTR_MSI) { MPASS(msix == 1); rid = 1; } if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { device_printf(dev, "iflib_legacy_setup failed %d\n", err); goto fail_intr_free; } } ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } if ((err = iflib_netmap_attach(ctx))) { device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); goto fail_detach; } *ctxp = ctx; if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; return (0); fail_detach: ether_ifdetach(ctx->ifc_ifp); fail_intr_free: if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI) pci_release_msi(ctx->ifc_dev); fail_queues: /* XXX free queues */ fail: IFDI_DETACH(ctx); return (err); } int iflib_device_attach(device_t dev) { if_ctx_t ctx; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_enable_busmaster(dev); return (iflib_device_register(dev, NULL, sctx, &ctx)); } int iflib_device_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq; iflib_rxq_t rxq; device_t dev = ctx->ifc_dev; int i, j; struct taskqgroup *tqg; iflib_fl_t fl; /* Make sure VLANS are not using driver */ if (if_vlantrunkinuse(ifp)) { device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } CTX_LOCK(ctx); ctx->ifc_in_detach = 1; iflib_stop(ctx); CTX_UNLOCK(ctx); /* Unregister VLAN events */ if (ctx->ifc_vlan_attach_event != NULL) EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); if (ctx->ifc_vlan_detach_event != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); iflib_netmap_detach(ifp); ether_ifdetach(ifp); /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/ CTX_LOCK_DESTROY(ctx); if (ctx->ifc_led_dev != NULL) led_destroy(ctx->ifc_led_dev); /* XXX drain any dependent tasks */ tqg = qgroup_if_io_tqg; for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { callout_drain(&txq->ift_timer); if (txq->ift_task.gt_uniq != NULL) taskqgroup_detach(tqg, &txq->ift_task); } for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { if (rxq->ifr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &rxq->ifr_task); for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) free(fl->ifl_rx_bitmap, M_IFLIB); } tqg = qgroup_if_config_tqg; if (ctx->ifc_admin_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_admin_task); if (ctx->ifc_vflr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_vflr_task); IFDI_DETACH(ctx); device_set_softc(ctx->ifc_dev, NULL); if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { pci_release_msi(dev); } if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { iflib_irq_free(ctx, &ctx->ifc_legacy_irq); } if (ctx->ifc_msix_mem != NULL) { bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } bus_generic_detach(dev); if_free(ifp); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (0); } int iflib_device_detach(device_t dev) { if_ctx_t ctx 
= device_get_softc(dev); return (iflib_device_deregister(ctx)); } int iflib_device_suspend(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SUSPEND(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_shutdown(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SHUTDOWN(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_resume(device_t dev) { if_ctx_t ctx = device_get_softc(dev); iflib_txq_t txq = ctx->ifc_txqs; CTX_LOCK(ctx); IFDI_RESUME(ctx); iflib_init_locked(ctx); CTX_UNLOCK(ctx); for (int i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); return (bus_generic_resume(dev)); } int iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_INIT(ctx, num_vfs, params); CTX_UNLOCK(ctx); return (error); } void iflib_device_iov_uninit(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_IOV_UNINIT(ctx); CTX_UNLOCK(ctx); } int iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_VF_ADD(ctx, vfnum, params); CTX_UNLOCK(ctx); return (error); } /********************************************************************* * * MODULE FUNCTION DEFINITIONS * **********************************************************************/ /* * - Start a fast taskqueue thread for each core * - Start a taskqueue for control operations */ static int iflib_module_init(void) { return (0); } static int iflib_module_event_handler(module_t mod, int what, void *arg) { int err; switch (what) { case MOD_LOAD: if ((err = iflib_module_init()) != 0) return (err); break; case MOD_UNLOAD: return (EBUSY); default: return (EOPNOTSUPP); } return (0); } /********************************************************************* * * PUBLIC FUNCTION DEFINITIONS * ordered as in iflib.h * **********************************************************************/ static void _iflib_assert(if_shared_ctx_t sctx) { MPASS(sctx->isc_tx_maxsize); MPASS(sctx->isc_tx_maxsegsize); MPASS(sctx->isc_rx_maxsize); MPASS(sctx->isc_rx_nsegments); MPASS(sctx->isc_rx_maxsegsize); MPASS(sctx->isc_nrxd_min[0]); MPASS(sctx->isc_nrxd_max[0]); MPASS(sctx->isc_nrxd_default[0]); MPASS(sctx->isc_ntxd_min[0]); MPASS(sctx->isc_ntxd_max[0]); MPASS(sctx->isc_ntxd_default[0]); } static void _iflib_pre_assert(if_softc_ctx_t scctx) { MPASS(scctx->isc_txrx->ift_txd_encap); MPASS(scctx->isc_txrx->ift_txd_flush); MPASS(scctx->isc_txrx->ift_txd_credits_update); MPASS(scctx->isc_txrx->ift_rxd_available); MPASS(scctx->isc_txrx->ift_rxd_pkt_get); MPASS(scctx->isc_txrx->ift_rxd_refill); MPASS(scctx->isc_txrx->ift_rxd_flush); } static int iflib_register(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; driver_t *driver = sctx->isc_driver; device_t dev = ctx->ifc_dev; if_t ifp; _iflib_assert(sctx); CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (ENOMEM); } /* * Initialize our context's device specific methods */ kobj_init((kobj_t) ctx, (kobj_class_t) driver); kobj_class_compile((kobj_class_t) driver); driver->refs++; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setsoftc(ifp, ctx); if_setdev(ifp, dev); if_setinitfn(ifp, iflib_if_init); if_setioctlfn(ifp, iflib_if_ioctl); 
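/* The transmit and qflush handlers set below route the stack's output path through the per-txq mp_ring (ift_br); see iflib_if_transmit and iflib_if_qflush above. */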
if_settransmitfn(ifp, iflib_if_transmit); if_setqflushfn(ifp, iflib_if_qflush); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); ctx->ifc_vlan_attach_event = EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, EVENTHANDLER_PRI_FIRST); ctx->ifc_vlan_detach_event = EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, EVENTHANDLER_PRI_FIRST); ifmedia_init(&ctx->ifc_media, IFM_IMASK, iflib_media_change, iflib_media_status); return (0); } static int iflib_queues_alloc(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; int nrxqsets = scctx->isc_nrxqsets; int ntxqsets = scctx->isc_ntxqsets; iflib_txq_t txq; iflib_rxq_t rxq; iflib_fl_t fl = NULL; int i, j, cpu, err, txconf, rxconf; iflib_dma_info_t ifdip; uint32_t *rxqsizes = scctx->isc_rxqsizes; uint32_t *txqsizes = scctx->isc_txqsizes; uint8_t nrxqs = sctx->isc_nrxqs; uint8_t ntxqs = sctx->isc_ntxqs; int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1; caddr_t *vaddrs; uint64_t *paddrs; struct ifmp_ring **brscp; KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); brscp = NULL; txq = NULL; rxq = NULL; /* Allocate the TX ring struct memory */ if (!(txq = (iflib_txq_t) malloc(sizeof(struct iflib_txq) * ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); err = ENOMEM; goto fail; } /* Now allocate the RX */ if (!(rxq = (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); err = ENOMEM; goto rx_fail; } ctx->ifc_txqs = txq; ctx->ifc_rxqs = rxq; /* * XXX handle allocation failure */ for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { /* Set up some basics */ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { device_printf(dev, "failed to allocate iflib_dma_info\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_ifdi = ifdip; for (j = 0; j < ntxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate Descriptor memory\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_txd_size[j] = scctx->isc_txd_size[j]; bzero((void *)ifdip->idi_vaddr, txqsizes[j]); } txq->ift_ctx = ctx; txq->ift_id = i; if (sctx->isc_flags & IFLIB_HAS_TXCQ) { txq->ift_br_offset = 1; } else { txq->ift_br_offset = 0; } /* XXX fix this */ txq->ift_timer.c_cpu = cpu; if (iflib_txsd_alloc(txq)) { device_printf(dev, "Critical Failure setting up TX buffers\n"); err = ENOMEM; goto err_tx_desc; } /* Initialize the TX lock */ snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout", device_get_nameunit(dev), txq->ift_id); mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db", device_get_nameunit(dev), txq->ift_id); err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, iflib_txq_can_drain, M_IFLIB, M_WAITOK); if (err) { /* XXX free any allocated rings */ device_printf(dev, "Unable to allocate buf_ring\n"); goto err_tx_desc; } } for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { /* Set up some basics */ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { device_printf(dev, "failed to allocate iflib_dma_info\n"); err = ENOMEM; 
goto err_tx_desc; } rxq->ifr_ifdi = ifdip; /* XXX this needs to be changed if #rx queues != #tx queues */ rxq->ifr_ntxqirq = 1; rxq->ifr_txqid[0] = i; for (j = 0; j < nrxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate Descriptor memory\n"); err = ENOMEM; goto err_tx_desc; } bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); } rxq->ifr_ctx = ctx; rxq->ifr_id = i; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { rxq->ifr_fl_offset = 1; } else { rxq->ifr_fl_offset = 0; } rxq->ifr_nfl = nfree_lists; if (!(fl = (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate free list memory\n"); err = ENOMEM; goto err_tx_desc; } rxq->ifr_fl = fl; for (j = 0; j < nfree_lists; j++) { fl[j].ifl_rxq = rxq; fl[j].ifl_id = j; fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; } /* Allocate receive buffers for the ring*/ if (iflib_rxsd_alloc(rxq)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); err = ENOMEM; goto err_rx_desc; } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO); } /* TXQs */ vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); for (i = 0; i < ntxqsets; i++) { iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; for (j = 0; j < ntxqs; j++, di++) { vaddrs[i*ntxqs + j] = di->idi_vaddr; paddrs[i*ntxqs + j] = di->idi_paddr; } } if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { device_printf(ctx->ifc_dev, "device queue allocation failed\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); /* RXQs */ vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); for (i = 0; i < nrxqsets; i++) { iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; for (j = 0; j < nrxqs; j++, di++) { vaddrs[i*nrxqs + j] = di->idi_vaddr; paddrs[i*nrxqs + j] = di->idi_paddr; } } if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { device_printf(ctx->ifc_dev, "device queue allocation failed\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); return (0); /* XXX handle allocation failure changes */ err_rx_desc: err_tx_desc: if (ctx->ifc_rxqs != NULL) free(ctx->ifc_rxqs, M_IFLIB); ctx->ifc_rxqs = NULL; if (ctx->ifc_txqs != NULL) free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; rx_fail: if (brscp != NULL) free(brscp, M_IFLIB); if (rxq != NULL) free(rxq, M_IFLIB); if (txq != NULL) free(txq, M_IFLIB); fail: return (err); } static int iflib_tx_structures_setup(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; int i; for (i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_setup(txq); return (0); } static void iflib_tx_structures_free(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; int i, j; for (i = 0; i < NTXQSETS(ctx); i++, txq++) { iflib_txq_destroy(txq); for (j = 0; j < ctx->ifc_nhwtxqs; j++) iflib_dma_free(&txq->ift_ifdi[j]); } free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; IFDI_QUEUES_FREE(ctx); } /********************************************************************* * * Initialize all receive rings. 
* **********************************************************************/ static int iflib_rx_structures_setup(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; int q; #if defined(INET6) || defined(INET) int i, err; #endif for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { #if defined(INET6) || defined(INET) tcp_lro_free(&rxq->ifr_lc); if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp, TCP_LRO_ENTRIES, min(1024, ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) { device_printf(ctx->ifc_dev, "LRO Initialization failed!\n"); goto fail; } rxq->ifr_lro_enabled = TRUE; #endif IFDI_RXQ_SETUP(ctx, rxq->ifr_id); } return (0); #if defined(INET6) || defined(INET) fail: /* * Free RX software descriptors allocated so far, we will only handle * the rings that completed, the failing case will have * cleaned up for itself. 'q' failed, so its the terminus. */ rxq = ctx->ifc_rxqs; for (i = 0; i < q; ++i, rxq++) { iflib_rx_sds_free(rxq); rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; } return (err); #endif } /********************************************************************* * * Free all receive rings. * **********************************************************************/ static void iflib_rx_structures_free(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) { iflib_rx_sds_free(rxq); } } static int iflib_qset_structures_setup(if_ctx_t ctx) { int err; if ((err = iflib_tx_structures_setup(ctx)) != 0) return (err); if ((err = iflib_rx_structures_setup(ctx)) != 0) { device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); } return (err); } int iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, char *name) { return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name)); } #ifdef SMP static int find_nth(if_ctx_t ctx, int qid) { cpuset_t cpus; int i, cpuid, eqid, count; CPU_COPY(&ctx->ifc_cpus, &cpus); count = CPU_COUNT(&cpus); eqid = qid % count; /* clear up to the qid'th bit */ for (i = 0; i < eqid; i++) { cpuid = CPU_FFS(&cpus); MPASS(cpuid != 0); CPU_CLR(cpuid-1, &cpus); } cpuid = CPU_FFS(&cpus); MPASS(cpuid != 0); return (cpuid-1); } #ifdef SCHED_ULE extern struct cpu_group *cpu_top; /* CPU topology */ static int find_child_with_core(int cpu, struct cpu_group *grp) { int i; if (grp->cg_children == 0) return -1; MPASS(grp->cg_child); for (i = 0; i < grp->cg_children; i++) { if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask)) return i; } return -1; } /* * Find the nth thread on the specified core */ static int find_thread(int cpu, int thread_num) { struct cpu_group *grp; int i; cpuset_t cs; grp = cpu_top; if (grp == NULL) return cpu; i = 0; while ((i = find_child_with_core(cpu, grp)) != -1) { /* If the child only has one cpu, don't descend */ if (grp->cg_child[i].cg_count <= 1) break; grp = &grp->cg_child[i]; } /* If they don't share at least an L2 cache, use the same CPU */ if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE) return cpu; /* Now pick one */ CPU_COPY(&grp->cg_mask, &cs); for (i = thread_num % grp->cg_count; i > 0; i--) { MPASS(CPU_FFS(&cs)); CPU_CLR(CPU_FFS(&cs) - 1, &cs); } MPASS(CPU_FFS(&cs)); return CPU_FFS(&cs) - 1; } #else static int find_thread(int cpu, int thread_num __unused) { return cpu; } #endif static int get_thread_num(if_ctx_t ctx, iflib_intr_type_t type, int qid) { switch (type) { case IFLIB_INTR_TX: /* TX 
queues get threads on the same core as the corresponding RX queue */ /* XXX handle multiple RX threads per core and more than two threads per core */ return qid / CPU_COUNT(&ctx->ifc_cpus) + 1; case IFLIB_INTR_RX: case IFLIB_INTR_RXTX: /* RX queues get the first thread on their core */ return qid / CPU_COUNT(&ctx->ifc_cpus); default: return -1; } } #else #define get_thread_num(ctx, type, qid) CPU_FIRST() #define find_thread(cpuid, tid) CPU_FIRST() #define find_nth(ctx, gid) CPU_FIRST() #endif /* Just to avoid copy/paste */ static inline int iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, char *name) { int cpuid; int err, tid; cpuid = find_nth(ctx, qid); tid = get_thread_num(ctx, type, qid); MPASS(tid >= 0); cpuid = find_thread(cpuid, tid); err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name); if (err) { device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err); return (err); } #ifdef notyet if (cpuid > ctx->ifc_cpuid_highest) ctx->ifc_cpuid_highest = cpuid; #endif return 0; } int iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, iflib_intr_type_t type, driver_filter_t *filter, void *filter_arg, int qid, char *name) { struct grouptask *gtask; struct taskqgroup *tqg; iflib_filter_info_t info; gtask_fn_t *fn; int tqrid, err; driver_filter_t *intr_fast; void *q; info = &ctx->ifc_filter_info; tqrid = rid; switch (type) { /* XXX merge tx/rx for netmap? */ case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; info = &ctx->ifc_txqs[qid].ift_filter_info; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; intr_fast = iflib_fast_intr; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_RXTX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr_rxtx; GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_ADMIN: q = ctx; tqrid = -1; info = &ctx->ifc_filter_info; gtask = &ctx->ifc_admin_task; tqg = qgroup_if_config_tqg; fn = _task_fn_admin; intr_fast = iflib_fast_intr_ctx; break; default: panic("unknown net intr type"); } info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = q; err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); if (err != 0) { device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err); return (err); } if (type == IFLIB_INTR_ADMIN) return (0); if (tqrid != -1) { err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name); if (err) return (err); } else { taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); } return (0); } void iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, char *name) { struct grouptask *gtask; struct taskqgroup *tqg; gtask_fn_t *fn; void *q; int irq_num = -1; int err; switch (type) { case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; if (irq != NULL) irq_num = rman_get_start(irq->ii_res); break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; if 
(irq != NULL) irq_num = rman_get_start(irq->ii_res); break; case IFLIB_INTR_IOV: q = ctx; gtask = &ctx->ifc_vflr_task; tqg = qgroup_if_config_tqg; fn = _task_fn_iov; break; default: panic("unknown net intr type"); } GROUPTASK_INIT(gtask, 0, fn, q); if (irq_num != -1) { err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name); if (err) taskqgroup_attach(tqg, gtask, q, irq_num, name); } else { taskqgroup_attach(tqg, gtask, q, irq_num, name); } } void iflib_irq_free(if_ctx_t ctx, if_irq_t irq) { if (irq->ii_tag) bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); if (irq->ii_res) bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res); } static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, char *name) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_irq_t irq = &ctx->ifc_legacy_irq; iflib_filter_info_t info; struct grouptask *gtask; struct taskqgroup *tqg; gtask_fn_t *fn; int tqrid; void *q; int err; q = &ctx->ifc_rxqs[0]; info = &rxq[0].ifr_filter_info; gtask = &rxq[0].ifr_task; tqg = qgroup_if_io_tqg; tqrid = irq->ii_rid = *rid; fn = _task_fn_rx; ctx->ifc_flags |= IFC_LEGACY; info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = ctx; /* We allocate a single interrupt resource */ if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0) return (err); GROUPTASK_INIT(gtask, 0, fn, q); taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq); taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx"); return (0); } void iflib_led_create(if_ctx_t ctx) { ctx->ifc_led_dev = led_create(iflib_led_func, ctx, device_get_nameunit(ctx->ifc_dev)); } void iflib_tx_intr_deferred(if_ctx_t ctx, int txqid) { GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); } void iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid) { GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); } void iflib_admin_intr_deferred(if_ctx_t ctx) { #ifdef INVARIANTS struct grouptask *gtask; gtask = &ctx->ifc_admin_task; MPASS(gtask != NULL && gtask->gt_taskqueue != NULL); #endif GROUPTASK_ENQUEUE(&ctx->ifc_admin_task); } void iflib_iov_intr_deferred(if_ctx_t ctx) { GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task); } void iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name) { taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name); } void iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn, char *name) { GROUPTASK_INIT(gtask, 0, fn, ctx); taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name); } void iflib_config_gtask_deinit(struct grouptask *gtask) { taskqgroup_detach(qgroup_if_config_tqg, gtask); } void iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq = ctx->ifc_txqs; if_setbaudrate(ifp, baudrate); if (baudrate >= IF_Gbps(10)) ctx->ifc_flags |= IFC_PREFETCH; /* If link down, disable watchdog */ if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) txq->ift_qstatus = IFLIB_QUEUE_IDLE; } ctx->ifc_link_state = link_state; if_link_state_change(ifp, link_state); } static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) { int credits; #ifdef INVARIANTS int credits_pre = txq->ift_cidx_processed; #endif if (ctx->isc_txd_credits_update == NULL) return 
(0); if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0) return (0); txq->ift_processed += credits; txq->ift_cidx_processed += credits; MPASS(credits_pre + credits == txq->ift_cidx_processed); if (txq->ift_cidx_processed >= txq->ift_size) txq->ift_cidx_processed -= txq->ift_size; return (credits); } static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget) { return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, budget)); } void iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, const char *description, if_int_delay_info_t info, int offset, int value) { info->iidi_ctx = ctx; info->iidi_offset = offset; info->iidi_value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, iflib_sysctl_int_delay, "I", description); } struct mtx * iflib_ctx_lock_get(if_ctx_t ctx) { return (&ctx->ifc_mtx); } static int iflib_msix_init(if_ctx_t ctx) { device_t dev = ctx->ifc_dev; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs; int iflib_num_tx_queues, iflib_num_rx_queues; int err, admincnt, bar; iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs; iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs; device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets)); bar = ctx->ifc_softc_ctx.isc_msix_bar; admincnt = sctx->isc_admin_intrcnt; /* Override by global tuneable */ { int i; size_t len = sizeof(i); err = kernel_sysctlbyname(curthread, "hw.pci.enable_msix", &i, &len, NULL, 0, NULL, 0); if (err == 0) { if (i == 0) goto msi; } else { device_printf(dev, "unable to read hw.pci.enable_msix."); } } /* Override by tuneable */ if (scctx->isc_disable_msix) goto msi; /* ** When used in a virtualized environment ** PCI BUSMASTER capability may not be set ** so explicity set it here and rewrite ** the ENABLE in the MSIX control register ** at this point to cause the host to ** successfully initialize us. */ { int msix_ctrl, rid; pci_enable_busmaster(dev); rid = 0; if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0 && rid != 0) { rid += PCIR_MSIX_CTRL; msix_ctrl = pci_read_config(dev, rid, 2); msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(dev, rid, msix_ctrl, 2); } else { device_printf(dev, "PCIY_MSIX capability not found; " "or rid %d == 0.\n", rid); goto msi; } } /* * bar == -1 => "trust me I know what I'm doing" * Some drivers are for hardware that is so shoddily * documented that no one knows which bars are which * so the developer has to map all bars. This hack * allows shoddy garbage to use msix in this framework. 
*/ if (bar != -1) { ctx->ifc_msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE); if (ctx->ifc_msix_mem == NULL) { /* May not be enabled */ device_printf(dev, "Unable to map MSIX table \n"); goto msi; } } /* First try MSI/X */ if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */ device_printf(dev, "System has MSIX disabled \n"); bus_release_resource(dev, SYS_RES_MEMORY, bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; goto msi; } #if IFLIB_DEBUG /* use only 1 qset in debug mode */ queuemsgs = min(msgs - admincnt, 1); #else queuemsgs = msgs - admincnt; #endif #ifdef RSS queues = imin(queuemsgs, rss_getnumbuckets()); #else queues = queuemsgs; #endif queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n", CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) rx_queues = iflib_num_rx_queues; else rx_queues = queues; if (rx_queues > scctx->isc_nrxqsets) rx_queues = scctx->isc_nrxqsets; /* * We want this to be all logical CPUs by default */ if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) tx_queues = iflib_num_tx_queues; else tx_queues = mp_ncpus; if (tx_queues > scctx->isc_ntxqsets) tx_queues = scctx->isc_ntxqsets; if (ctx->ifc_sysctl_qs_eq_override == 0) { #ifdef INVARIANTS if (tx_queues != rx_queues) device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", min(rx_queues, tx_queues), min(rx_queues, tx_queues)); #endif tx_queues = min(rx_queues, tx_queues); rx_queues = min(rx_queues, tx_queues); } device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues); vectors = rx_queues + admincnt; if ((err = pci_alloc_msix(dev, &vectors)) == 0) { device_printf(dev, "Using MSIX interrupts with %d vectors\n", vectors); scctx->isc_vectors = vectors; scctx->isc_nrxqsets = rx_queues; scctx->isc_ntxqsets = tx_queues; scctx->isc_intr = IFLIB_INTR_MSIX; return (vectors); } else { device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err); } msi: vectors = pci_msi_count(dev); scctx->isc_nrxqsets = 1; scctx->isc_ntxqsets = 1; scctx->isc_vectors = vectors; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { device_printf(dev,"Using an MSI interrupt\n"); scctx->isc_intr = IFLIB_INTR_MSI; } else { device_printf(dev,"Using a Legacy interrupt\n"); scctx->isc_intr = IFLIB_INTR_LEGACY; } return (vectors); } char * ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" }; static int mp_ring_state_handler(SYSCTL_HANDLER_ARGS) { int rc; uint16_t *state = ((uint16_t *)oidp->oid_arg1); struct sbuf *sb; char *ring_state = "UNKNOWN"; /* XXX needed ? 
*/ rc = sysctl_wire_old_buffer(req, 0); MPASS(rc == 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 80, req); MPASS(sb != NULL); if (sb == NULL) return (ENOMEM); if (state[3] <= 3) ring_state = ring_states[state[3]]; sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", state[0], state[1], state[2], ring_state); rc = sbuf_finish(sb); sbuf_delete(sb); return(rc); } enum iflib_ndesc_handler { IFLIB_NTXD_HANDLER, IFLIB_NRXD_HANDLER, }; static int mp_ndesc_handler(SYSCTL_HANDLER_ARGS) { if_ctx_t ctx = (void *)arg1; enum iflib_ndesc_handler type = arg2; char buf[256] = {0}; qidx_t *ndesc; char *p, *next; int nqs, rc, i; MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER); nqs = 8; switch(type) { case IFLIB_NTXD_HANDLER: ndesc = ctx->ifc_sysctl_ntxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_ntxqs; break; case IFLIB_NRXD_HANDLER: ndesc = ctx->ifc_sysctl_nrxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_nrxqs; break; } if (nqs == 0) nqs = 8; for (i=0; i<8; i++) { if (i >= nqs) break; if (i) strcat(buf, ","); sprintf(strchr(buf, 0), "%d", ndesc[i]); } rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (rc || req->newptr == NULL) return rc; for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p; i++, p = strsep(&next, " ,")) { ndesc[i] = strtoul(p, NULL, 10); } return(rc); } #define NAME_BUFLEN 32 static void iflib_add_device_sysctl_pre(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child, *oid_list; struct sysctl_ctx_list *ctx_list; struct sysctl_oid *node; ctx_list = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields"); oid_list = SYSCTL_CHILDREN(node); SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version", CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0, "driver version"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, "# of txqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, "# of rxqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable", CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, "permit #txq != #rxq"); SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix", CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0, "disable MSIX (default 0)"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget", CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the rx budget"); /* XXX change for per-queue sizes */ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A", "list of # of tx descriptors to use, 0 = use default #"); SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A", "list of # of rx descriptors to use, 0 = use default #"); } static void iflib_add_device_sysctl_post(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx_list; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j; char namebuf[NAME_BUFLEN]; char *qfmt; struct sysctl_oid *queue_node, *fl_node, *node; struct sysctl_oid_list *queue_list, *fl_list; ctx_list = 
device_get_sysctl_ctx(dev); node = ctx->ifc_sysctl_node; child = SYSCTL_CHILDREN(node); if (scctx->isc_ntxqsets > 100) qfmt = "txq%03d"; else if (scctx->isc_ntxqsets > 10) qfmt = "txq%02d"; else qfmt = "txq%d"; for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued", CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued", CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued"); #endif SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag", CTLFLAG_RD, &txq->ift_mbuf_defrag, "# of times m_defrag was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups", CTLFLAG_RD, &txq->ift_pullups, "# of times m_pullup was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail, "# of times no descriptors were available"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed, "# of times dma map failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx", CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx", CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use", CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed", CTLFLAG_RD, &txq->ift_processed, "descriptors procesed for clean"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned", CTLFLAG_RD, &txq->ift_cleaned, "total cleaned"); SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state", CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0, mp_ring_state_handler, "A", "soft ring state"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues, "# of enqueues to the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops", CTLFLAG_RD, &txq->ift_br->drops, "# of drops in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts", CTLFLAG_RD, &txq->ift_br->starts, "# of normal consumer starts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls, "# of consumer stalls in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts", CTLFLAG_RD, &txq->ift_br->restarts, "# of consumer restarts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications, "# of consumer abdications in the 
mp_ring for this queue"); } if (scctx->isc_nrxqsets > 100) qfmt = "rxq%03d"; else if (scctx->isc_nrxqsets > 10) qfmt = "rxq%02d"; else qfmt = "rxq%d"; for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); if (sctx->isc_flags & IFLIB_HAS_RXCQ) { SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx", CTLFLAG_RD, &rxq->ifr_cq_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1, "Consumer Index"); } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j); fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "freelist Name"); fl_list = SYSCTL_CHILDREN(fl_node); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx", CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx", CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits", CTLFLAG_RD, &fl->ifl_credits, 1, "credits available"); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued, "mbufs allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued, "mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued, "clusters allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued, "clusters freed"); #endif } } } #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m) { struct mbuf *n; if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); m->m_data += ETHER_HDR_LEN; n = m; } else { MGETHDR(n, M_NOWAIT, MT_DATA); if (n == NULL) { m_freem(m); return (NULL); } bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; } return (n); } #endif Index: head/sys/net/netmap.h =================================================================== --- head/sys/net/netmap.h (revision 332422) +++ head/sys/net/netmap.h (revision 332423) @@ -1,652 +1,705 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``S IS''AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * Definitions of constants and the structures used by the netmap * framework, for the part visible to both kernel and userspace. * Detailed info on netmap is available with "man netmap" or at * * http://info.iet.unipi.it/~luigi/netmap/ * * This API is also used to communicate with the VALE software switch */ #ifndef _NET_NETMAP_H_ #define _NET_NETMAP_H_ -#define NETMAP_API 11 /* current API version */ +#define NETMAP_API 12 /* current API version */ #define NETMAP_MIN_API 11 /* min and max versions accepted */ #define NETMAP_MAX_API 15 /* * Some fields should be cache-aligned to reduce contention. * The alignment is architecture and OS dependent, but rather than * digging into OS headers to find the exact value we use an estimate * that should cover most architectures. */ #define NM_CACHE_ALIGN 128 /* * --- Netmap data structures --- * * The userspace data structures used by netmap are shown below. * They are allocated by the kernel and mmap()ed by userspace threads. * Pointers are implemented as memory offsets or indexes, * so that they can be easily dereferenced in kernel and userspace. KERNEL (opaque, obviously) ==================================================================== | USERSPACE | struct netmap_ring +---->+---------------+ / | head,cur,tail | struct netmap_if (nifp, 1 per fd) / | buf_ofs | +---------------+ / | other fields | | ni_tx_rings | / +===============+ | ni_rx_rings | / | buf_idx, len | slot[0] | | / | flags, ptr | | | / +---------------+ +===============+ / | buf_idx, len | slot[1] | txring_ofs[0] | (rel.to nifp)--' | flags, ptr | | txring_ofs[1] | +---------------+ (tx+1 entries) (num_slots entries) | txring_ofs[t] | | buf_idx, len | slot[n-1] +---------------+ | flags, ptr | | rxring_ofs[0] | +---------------+ | rxring_ofs[1] | (rx+1 entries) | rxring_ofs[r] | +---------------+ * For each "interface" (NIC, host stack, PIPE, VALE switch port) bound to * a file descriptor, the mmap()ed region contains a (logically readonly) * struct netmap_if pointing to struct netmap_ring's. * * There is one netmap_ring per physical NIC ring, plus one tx/rx ring * pair attached to the host stack (this pair is unused for non-NIC ports). * * All physical/host stack ports share the same memory region, * so that zero-copy can be implemented between them. * VALE switch ports instead have separate memory regions. * * The netmap_ring is the userspace-visible replica of the NIC ring. * Each slot has the index of a buffer (MTU-sized and residing in the * mmapped region), its length and some flags. An extra 64-bit pointer * is provided for user-supplied buffers in the tx path. * * In user space, the buffer address is computed as * (char *)ring + buf_ofs + index * NETMAP_BUF_SIZE * * Added in NETMAP_API 11: * * + NIOCREGIF can request the allocation of extra spare buffers from * the same memory pool. The desired number of buffers must be in * nr_arg3. The ioctl may return fewer buffers, depending on memory * availability. 
nr_arg3 will return the actual value, and, once * mapped, nifp->ni_bufs_head will be the index of the first buffer. * * The buffers are linked to each other using the first uint32_t * as the index. On close, ni_bufs_head must point to the list of * buffers to be released. * * + NIOCREGIF can request space for extra rings (and buffers) * allocated in the same memory space. The number of extra rings * is in nr_arg1, and is advisory. This is a no-op on NICs where * the size of the memory space is fixed. * * + NIOCREGIF can attach to PIPE rings sharing the same memory * space with a parent device. The ifname indicates the parent device, * which must already exist. Flags in nr_flags indicate if we want to * bind the master or slave side, the index (from nr_ringid) * is just a cookie and does not need to be sequential. * * + NIOCREGIF can also attach to 'monitor' rings that replicate * the content of specific rings, also from the same memory space. * * Extra flags in nr_flags support the above functions. * Application libraries may use the following naming scheme: * netmap:foo all NIC ring pairs * netmap:foo^ only host ring pair * netmap:foo+ all NIC ring + host ring pairs * netmap:foo-k the k-th NIC ring pair * netmap:foo{k PIPE ring pair k, master side * netmap:foo}k PIPE ring pair k, slave side * * Some notes about host rings: * * + The RX host ring is used to store those packets that the host network * stack is trying to transmit through a NIC queue, but only if that queue * is currently in netmap mode. Netmap will not intercept host stack mbufs * designated to NIC queues that are not in netmap mode. As a consequence, * registering a netmap port with netmap:foo^ is not enough to intercept * mbufs in the RX host ring; the netmap port should be registered with * netmap:foo*, or another registration should be done to open at least a * NIC TX queue in netmap mode. * * + Netmap is not currently able to deal with intercepted trasmit mbufs which * require offloadings like TSO, UFO, checksumming offloadings, etc. It is * responsibility of the user to disable those offloadings (e.g. using * ifconfig on FreeBSD or ethtool -K on Linux) for an interface that is being * used in netmap mode. If the offloadings are not disabled, GSO and/or * unchecksummed packets may be dropped immediately or end up in the host RX * ring, and will be dropped as soon as the packet reaches another netmap * adapter. */ /* * struct netmap_slot is a buffer descriptor */ struct netmap_slot { uint32_t buf_idx; /* buffer index */ uint16_t len; /* length for this slot */ uint16_t flags; /* buf changed, etc. */ uint64_t ptr; /* pointer for indirect buffers */ }; /* * The following flags control how the slot is used */ #define NS_BUF_CHANGED 0x0001 /* buf_idx changed */ /* * must be set whenever buf_idx is changed (as it might be * necessary to recompute the physical address and mapping) * * It is also set by the kernel whenever the buf_idx is * changed internally (e.g., by pipes). Applications may * use this information to know when they can reuse the * contents of previously prepared buffers. */ #define NS_REPORT 0x0002 /* ask the hardware to report results */ /* * Request notification when slot is used by the hardware. * Normally transmit completions are handled lazily and * may be unreported. This flag lets us know when a slot * has been sent (e.g. to terminate the sender). */ #define NS_FORWARD 0x0004 /* pass packet 'forward' */ /* * (Only for physical ports, rx rings with NR_FORWARD set). * Slot released to the kernel (i.e. 
before ring->head) with * this flag set are passed to the peer ring (host/NIC), * thus restoring the host-NIC connection for these slots. * This supports efficient traffic monitoring or firewalling. */ #define NS_NO_LEARN 0x0008 /* disable bridge learning */ /* * On a VALE switch, do not 'learn' the source port for * this buffer. */ #define NS_INDIRECT 0x0010 /* userspace buffer */ /* * (VALE tx rings only) data is in a userspace buffer, * whose address is in the 'ptr' field in the slot. */ #define NS_MOREFRAG 0x0020 /* packet has more fragments */ /* * (VALE ports, ptnetmap ports and some NIC ports, e.g. * ixgbe and i40e on Linux) * Set on all but the last slot of a multi-segment packet. * The 'len' field refers to the individual fragment. */ #define NS_PORT_SHIFT 8 #define NS_PORT_MASK (0xff << NS_PORT_SHIFT) /* * The high 8 bits of the flag, if not zero, indicate the * destination port for the VALE switch, overriding * the lookup table. */ #define NS_RFRAGS(_slot) ( ((_slot)->flags >> 8) & 0xff) /* * (VALE rx rings only) the high 8 bits * are the number of fragments. */ /* * struct netmap_ring * * Netmap representation of a TX or RX ring (also known as "queue"). * This is a queue implemented as a fixed-size circular array. * At the software level the important fields are: head, cur, tail. * * In TX rings: * * head first slot available for transmission. * cur wakeup point. select() and poll() will unblock * when 'tail' moves past 'cur' * tail (readonly) first slot reserved to the kernel * * [head .. tail-1] can be used for new packets to send; * 'head' and 'cur' must be incremented as slots are filled * with new packets to be sent; * 'cur' can be moved further ahead if we need more space * for new transmissions. XXX todo (2014-03-12) * * In RX rings: * * head first valid received packet * cur wakeup point. select() and poll() will unblock * when 'tail' moves past 'cur' * tail (readonly) first slot reserved to the kernel * * [head .. tail-1] contain received packets; * 'head' and 'cur' must be incremented as slots are consumed * and can be returned to the kernel; * 'cur' can be moved further ahead if we want to wait for * new packets without returning the previous ones. * * DATA OWNERSHIP/LOCKING: * The netmap_ring, and all slots and buffers in the range * [head .. tail-1] are owned by the user program; * the kernel only accesses them during a netmap system call * and in the user thread context. * * Other slots and buffers are reserved for use by the kernel */ struct netmap_ring { /* * buf_ofs is meant to be used through macros. * It contains the offset of the buffer region from this * descriptor. */ const int64_t buf_ofs; const uint32_t num_slots; /* number of slots in the ring. */ const uint32_t nr_buf_size; const uint16_t ringid; const uint16_t dir; /* 0: tx, 1: rx */ uint32_t head; /* (u) first user slot */ uint32_t cur; /* (u) wakeup point */ uint32_t tail; /* (k) first kernel slot */ uint32_t flags; struct timeval ts; /* (k) time of last *sync() */ /* opaque room for a mutex or similar object */ #if !defined(_WIN32) || defined(__CYGWIN__) uint8_t __attribute__((__aligned__(NM_CACHE_ALIGN))) sem[128]; #else uint8_t __declspec(align(NM_CACHE_ALIGN)) sem[128]; #endif /* the slots follow. This struct has variable size */ struct netmap_slot slot[0]; /* array of slots. */ }; /* * RING FLAGS */ #define NR_TIMESTAMP 0x0002 /* set timestamp on *sync() */ /* * updates the 'ts' field on each netmap syscall. 
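As a concrete illustration of the head/cur/tail rules and of the buffer address computation described above, a minimal userspace RX drain loop might look like the sketch below. It is only a sketch, not taken from the patch: it assumes a ring that has already been bound and mmap()ed, uses only the struct netmap_ring fields defined in this header, and omits the poll()/NIOCRXSYNC synchronization and error handling a real program needs.

/*
 * Sketch: consume all received slots of one RX ring and return them
 * to the kernel. Requires <stdint.h> and <net/netmap.h>.
 */
static void
rx_drain(struct netmap_ring *ring)
{
	uint32_t i = ring->head;

	while (i != ring->tail) {	/* [head .. tail-1] holds packets */
		struct netmap_slot *slot = &ring->slot[i];
		const char *buf = (const char *)ring + ring->buf_ofs +
		    (int64_t)slot->buf_idx * ring->nr_buf_size;

		/* ... inspect slot->len bytes starting at buf ... */
		(void)buf;
		i = (i + 1 == ring->num_slots) ? 0 : i + 1;
	}
	ring->head = ring->cur = i;	/* hand the slots back to the kernel */
}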
This saves * saves a separate gettimeofday(), and is not much worse than * software timestamps generated in the interrupt handler. */ #define NR_FORWARD 0x0004 /* enable NS_FORWARD for ring */ /* * Enables the NS_FORWARD slot flag for the ring. */ +/* + * Helper functions for kernel and userspace + */ /* + * check if space is available in the ring. + */ +static inline int +nm_ring_empty(struct netmap_ring *ring) +{ + return (ring->cur == ring->tail); +} + +/* * Netmap representation of an interface and its queue(s). * This is initialized by the kernel when binding a file * descriptor to a port, and should be considered as readonly * by user programs. The kernel never uses it. * * There is one netmap_if for each file descriptor on which we want * to select/poll. * select/poll operates on one or all pairs depending on the value of * nmr_queueid passed on the ioctl. */ struct netmap_if { char ni_name[IFNAMSIZ]; /* name of the interface. */ const uint32_t ni_version; /* API version, currently unused */ const uint32_t ni_flags; /* properties */ #define NI_PRIV_MEM 0x1 /* private memory region */ /* * The number of packet rings available in netmap mode. * Physical NICs can have different numbers of tx and rx rings. * Physical NICs also have a 'host' ring pair. * Additionally, clients can request additional ring pairs to * be used for internal communication. */ const uint32_t ni_tx_rings; /* number of HW tx rings */ const uint32_t ni_rx_rings; /* number of HW rx rings */ uint32_t ni_bufs_head; /* head index for extra bufs */ uint32_t ni_spare1[5]; /* * The following array contains the offset of each netmap ring * from this structure, in the following order: * NIC tx rings (ni_tx_rings); host tx ring (1); extra tx rings; * NIC rx rings (ni_rx_rings); host tx ring (1); extra rx rings. * * The area is filled up by the kernel on NIOCREGIF, * and then only read by userspace code. */ const ssize_t ring_ofs[0]; }; +/* Legacy interface to interact with a netmap control device. + * Included for backward compatibility. The user should not include this + * file directly. */ +#include "netmap_legacy.h" -#ifndef NIOCREGIF /* - * ioctl names and related fields + * New API to control netmap control devices. New applications should only use + * nmreq_xyz structs with the NIOCCTRL ioctl() command. * - * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues, - * whose identity is set in NIOCREGIF through nr_ringid. - * These are non blocking and take no argument. + * NIOCCTRL takes a nmreq_header struct, which contains the required + * API version, the name of a netmap port, a command type, and pointers + * to request body and options. * - * NIOCGINFO takes a struct ifreq, the interface name is the input, - * the outputs are number of queues and number of descriptor - * for each queue (useful to set number of threads etc.). - * The info returned is only advisory and may change before - * the interface is bound to a file descriptor. + * nr_name (in) + * The name of the port (em0, valeXXX:YYY, eth0{pn1 etc.) * - * NIOCREGIF takes an interface name within a struct nmre, - * and activates netmap mode on the interface (if possible). + * nr_version (in/out) + * Must match NETMAP_API as used in the kernel, error otherwise. + * Always returns the desired value on output. * - * The argument to NIOCGINFO/NIOCREGIF overlays struct ifreq so we - * can pass it down to other NIC-related ioctls. 
+ * nr_reqtype (in) + * One of the NETMAP_REQ_* command types below * - * The actual argument (struct nmreq) has a number of options to request - * different functions. - * The following are used in NIOCREGIF when nr_cmd == 0: + * nr_body (in) + * Pointer to a command-specific struct, described by one + * of the struct nmreq_xyz below. * - * nr_name (in) - * The name of the port (em0, valeXXX:YYY, etc.) - * limited to IFNAMSIZ for backward compatibility. + * nr_options (in) + * Command specific options, if any. * - * nr_version (in/out) - * Must match NETMAP_API as used in the kernel, error otherwise. - * Always returns the desired value on output. + * A NETMAP_REQ_REGISTER command activates netmap mode on the netmap + * port (e.g. physical interface) specified by nmreq_header.nr_name. + * The request body (struct nmreq_register) has several arguments to + * specify how the port is to be registered. * - * nr_tx_slots, nr_tx_slots, nr_tx_rings, nr_rx_rings (in/out) - * On input, non-zero values may be used to reconfigure the port - * according to the requested values, but this is not guaranteed. - * On output the actual values in use are reported. + * nr_tx_slots, nr_tx_slots, nr_tx_rings, nr_rx_rings (in/out) + * On input, non-zero values may be used to reconfigure the port + * according to the requested values, but this is not guaranteed. + * On output the actual values in use are reported. * - * nr_ringid (in) - * Indicates how rings should be bound to the file descriptors. - * If nr_flags != 0, then the low bits (in NETMAP_RING_MASK) - * are used to indicate the ring number, and nr_flags specifies - * the actual rings to bind. NETMAP_NO_TX_POLL is unaffected. + * nr_mode (in) + * Indicate what set of rings must be bound to the netmap + * device (e.g. all NIC rings, host rings only, NIC and + * host rings, ...). Values are in NR_REG_*. * - * NOTE: THE FOLLOWING (nr_flags == 0) IS DEPRECATED: - * If nr_flags == 0, NETMAP_HW_RING and NETMAP_SW_RING control - * the binding as follows: - * 0 (default) binds all physical rings - * NETMAP_HW_RING | ring number binds a single ring pair - * NETMAP_SW_RING binds only the host tx/rx rings + * nr_ringid (in) + * If nr_mode == NR_REG_ONE_NIC (only a single couple of TX/RX + * rings), indicate which NIC TX and/or RX ring is to be bound + * (0..nr_*x_rings-1). * - * NETMAP_NO_TX_POLL can be OR-ed to make select()/poll() push - * packets on tx rings only if POLLOUT is set. - * The default is to push any pending packet. + * nr_flags (in) + * Indicate special options for how to open the port. * - * NETMAP_DO_RX_POLL can be OR-ed to make select()/poll() release - * packets on rx rings also when POLLIN is NOT set. - * The default is to touch the rx ring only with POLLIN. - * Note that this is the opposite of TX because it - * reflects the common usage. + * NR_NO_TX_POLL can be OR-ed to make select()/poll() push + * packets on tx rings only if POLLOUT is set. + * The default is to push any pending packet. * - * NOTE: NETMAP_PRIV_MEM IS DEPRECATED, use nr_arg2 instead. - * NETMAP_PRIV_MEM is set on return for ports that do not use - * the global memory allocator. - * This information is not significant and applications - * should look at the region id in nr_arg2 + * NR_DO_RX_POLL can be OR-ed to make select()/poll() release + * packets on rx rings also when POLLIN is NOT set. + * The default is to touch the rx ring only with POLLIN. + * Note that this is the opposite of TX because it + * reflects the common usage. 
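To make the registration flow concrete, the sketch below shows a minimal NETMAP_REQ_REGISTER request issued through the NIOCCTRL ioctl, using the nmreq_header and nmreq_register structures defined further down in this header. This is an illustrative sketch rather than part of the patch: the port name "em0" is only an example and error handling is kept to the bare minimum.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/netmap.h>

/* Sketch: bind all NIC ring pairs of "em0" with the new control API. */
static int
netmap_register_sketch(void)
{
	struct nmreq_header hdr;
	struct nmreq_register reg;
	int fd;

	fd = open("/dev/netmap", O_RDWR);
	if (fd < 0)
		return (-1);
	memset(&hdr, 0, sizeof(hdr));
	memset(&reg, 0, sizeof(reg));
	hdr.nr_version = NETMAP_API;		/* must match the kernel */
	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
	hdr.nr_body = (uintptr_t)&reg;		/* request body */
	hdr.nr_options = 0;			/* no option list */
	reg.nr_mode = NR_REG_ALL_NIC;		/* all NIC ring pairs */
	reg.nr_mem_id = 0;			/* let the system pick a region */
	if (ioctl(fd, NIOCCTRL, &hdr) < 0) {
		close(fd);
		return (-1);
	}
	/* on success reg.nr_offset/nr_memsize describe the region to mmap() */
	return (fd);
}

The same nmreq_header is reused for every NETMAP_REQ_* command; only nr_reqtype and the struct pointed to by nr_body change, which is how the single NIOCCTRL ioctl is demultiplexed.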
* - * nr_flags is the recommended mode to indicate which rings should - * be bound to a file descriptor. Values are NR_REG_* + * Other options are NR_MONITOR_TX, NR_MONITOR_RX, NR_ZCOPY_MON, + * NR_EXCLUSIVE, NR_RX_RINGS_ONLY, NR_TX_RINGS_ONLY and + * NR_ACCEPT_VNET_HDR. * - * nr_arg1 (in) The number of extra rings to be reserved. - * Especially when allocating a VALE port the system only - * allocates the amount of memory needed for the port. - * If more shared memory rings are desired (e.g. for pipes), - * the first invocation for the same basename/allocator - * should specify a suitable number. Memory cannot be - * extended after the first allocation without closing - * all ports on the same region. - * - * nr_arg2 (in/out) The identity of the memory region used. + * nr_mem_id (in/out) + * The identity of the memory region used. * On input, 0 means the system decides autonomously, * other values may try to select a specific region. * On return the actual value is reported. * Region '1' is the global allocator, normally shared * by all interfaces. Other values are private regions. * If two ports the same region zero-copy is possible. * - * nr_arg3 (in/out) number of extra buffers to be allocated. + * nr_extra_bufs (in/out) + * Number of extra buffers to be allocated. * + * The other NETMAP_REQ_* commands are described below. * - * - * nr_cmd (in) if non-zero indicates a special command: - * NETMAP_BDG_ATTACH and nr_name = vale*:ifname - * attaches the NIC to the switch; nr_ringid specifies - * which rings to use. Used by vale-ctl -a ... - * nr_arg1 = NETMAP_BDG_HOST also attaches the host port - * as in vale-ctl -h ... - * - * NETMAP_BDG_DETACH and nr_name = vale*:ifname - * disconnects a previously attached NIC. - * Used by vale-ctl -d ... - * - * NETMAP_BDG_LIST - * list the configuration of VALE switches. - * - * NETMAP_BDG_VNET_HDR - * Set the virtio-net header length used by the client - * of a VALE switch port. - * - * NETMAP_BDG_NEWIF - * create a persistent VALE port with name nr_name. - * Used by vale-ctl -n ... - * - * NETMAP_BDG_DELIF - * delete a persistent VALE port. Used by vale-ctl -d ... - * - * nr_arg1, nr_arg2, nr_arg3 (in/out) command specific - * - * - * */ +/* maximum size of a request, including all options */ +#define NETMAP_REQ_MAXSIZE 4096 +/* Header common to all request options. */ +struct nmreq_option { + /* Pointer ot the next option. */ + uint64_t nro_next; + /* Option type. */ + uint32_t nro_reqtype; + /* (out) status of the option: + * 0: recognized and processed + * !=0: errno value + */ + uint32_t nro_status; +}; + +/* Header common to all requests. Do not reorder these fields, as we need + * the second one (nr_reqtype) to know how much to copy from/to userspace. */ +struct nmreq_header { + uint16_t nr_version; /* API version */ + uint16_t nr_reqtype; /* nmreq type (NETMAP_REQ_*) */ + uint32_t nr_reserved; /* must be zero */ +#define NETMAP_REQ_IFNAMSIZ 64 + char nr_name[NETMAP_REQ_IFNAMSIZ]; /* port name */ + uint64_t nr_options; /* command-specific options */ + uint64_t nr_body; /* ptr to nmreq_xyz struct */ +}; + +enum { + /* Register a netmap port with the device. */ + NETMAP_REQ_REGISTER = 1, + /* Get information from a netmap port. */ + NETMAP_REQ_PORT_INFO_GET, + /* Attach a netmap port to a VALE switch. */ + NETMAP_REQ_VALE_ATTACH, + /* Detach a netmap port from a VALE switch. */ + NETMAP_REQ_VALE_DETACH, + /* List the ports attached to a VALE switch. */ + NETMAP_REQ_VALE_LIST, + /* Set the port header length (was virtio-net header length). 
*/ + NETMAP_REQ_PORT_HDR_SET, + /* Get the port header length (was virtio-net header length). */ + NETMAP_REQ_PORT_HDR_GET, + /* Create a new persistent VALE port. */ + NETMAP_REQ_VALE_NEWIF, + /* Delete a persistent VALE port. */ + NETMAP_REQ_VALE_DELIF, + /* Enable polling kernel thread(s) on an attached VALE port. */ + NETMAP_REQ_VALE_POLLING_ENABLE, + /* Disable polling kernel thread(s) on an attached VALE port. */ + NETMAP_REQ_VALE_POLLING_DISABLE, + /* Get info about the pools of a memory allocator. */ + NETMAP_REQ_POOLS_INFO_GET, +}; + +enum { + /* On NETMAP_REQ_REGISTER, ask netmap to use memory allocated + * from user-space allocated memory pools (e.g. hugepages). */ + NETMAP_REQ_OPT_EXTMEM = 1, +}; + /* - * struct nmreq overlays a struct ifreq (just the name) + * nr_reqtype: NETMAP_REQ_REGISTER + * Bind (register) a netmap port to this control device. */ -struct nmreq { - char nr_name[IFNAMSIZ]; - uint32_t nr_version; /* API version */ - uint32_t nr_offset; /* nifp offset in the shared region */ - uint32_t nr_memsize; /* size of the shared region */ +struct nmreq_register { + uint64_t nr_offset; /* nifp offset in the shared region */ + uint64_t nr_memsize; /* size of the shared region */ uint32_t nr_tx_slots; /* slots in tx rings */ uint32_t nr_rx_slots; /* slots in rx rings */ uint16_t nr_tx_rings; /* number of tx rings */ uint16_t nr_rx_rings; /* number of rx rings */ + uint16_t nr_mem_id; /* id of the memory allocator */ uint16_t nr_ringid; /* ring(s) we care about */ -#define NETMAP_HW_RING 0x4000 /* single NIC ring pair */ -#define NETMAP_SW_RING 0x2000 /* only host ring pair */ + uint32_t nr_mode; /* specify NR_REG_* modes */ -#define NETMAP_RING_MASK 0x0fff /* the ring number */ - -#define NETMAP_NO_TX_POLL 0x1000 /* no automatic txsync on poll */ - -#define NETMAP_DO_RX_POLL 0x8000 /* DO automatic rxsync on poll */ - - uint16_t nr_cmd; -#define NETMAP_BDG_ATTACH 1 /* attach the NIC */ -#define NETMAP_BDG_DETACH 2 /* detach the NIC */ -#define NETMAP_BDG_REGOPS 3 /* register bridge callbacks */ -#define NETMAP_BDG_LIST 4 /* get bridge's info */ -#define NETMAP_BDG_VNET_HDR 5 /* set the port virtio-net-hdr length */ -#define NETMAP_BDG_OFFSET NETMAP_BDG_VNET_HDR /* deprecated alias */ -#define NETMAP_BDG_NEWIF 6 /* create a virtual port */ -#define NETMAP_BDG_DELIF 7 /* destroy a virtual port */ -#define NETMAP_PT_HOST_CREATE 8 /* create ptnetmap kthreads */ -#define NETMAP_PT_HOST_DELETE 9 /* delete ptnetmap kthreads */ -#define NETMAP_BDG_POLLING_ON 10 /* delete polling kthread */ -#define NETMAP_BDG_POLLING_OFF 11 /* delete polling kthread */ -#define NETMAP_VNET_HDR_GET 12 /* get the port virtio-net-hdr length */ -#define NETMAP_POOLS_INFO_GET 13 /* get memory allocator pools info */ -#define NETMAP_POOLS_CREATE 14 /* create a new memory allocator */ - uint16_t nr_arg1; /* reserve extra rings in NIOCREGIF */ -#define NETMAP_BDG_HOST 1 /* attach the host stack on ATTACH */ - - uint16_t nr_arg2; - uint32_t nr_arg3; /* req. extra buffers in NIOCREGIF */ - uint32_t nr_flags; - /* various modes, extends nr_ringid */ - uint32_t spare2[1]; -}; - -#define NR_REG_MASK 0xf /* values for nr_flags */ -enum { NR_REG_DEFAULT = 0, /* backward compat, should not be used. 
*/ - NR_REG_ALL_NIC = 1, - NR_REG_SW = 2, - NR_REG_NIC_SW = 3, - NR_REG_ONE_NIC = 4, - NR_REG_PIPE_MASTER = 5, - NR_REG_PIPE_SLAVE = 6, -}; -/* monitor uses the NR_REG to select the rings to monitor */ + uint64_t nr_flags; /* additional flags (see below) */ +/* monitors use nr_ringid and nr_mode to select the rings to monitor */ #define NR_MONITOR_TX 0x100 #define NR_MONITOR_RX 0x200 #define NR_ZCOPY_MON 0x400 /* request exclusive access to the selected rings */ #define NR_EXCLUSIVE 0x800 /* request ptnetmap host support */ #define NR_PASSTHROUGH_HOST NR_PTNETMAP_HOST /* deprecated */ #define NR_PTNETMAP_HOST 0x1000 #define NR_RX_RINGS_ONLY 0x2000 #define NR_TX_RINGS_ONLY 0x4000 /* Applications set this flag if they are able to deal with virtio-net headers, * that is send/receive frames that start with a virtio-net header. * If not set, NIOCREGIF will fail with netmap ports that require applications * to use those headers. If the flag is set, the application can use the * NETMAP_VNET_HDR_GET command to figure out the header length. */ #define NR_ACCEPT_VNET_HDR 0x8000 +/* The following two have the same meaning of NETMAP_NO_TX_POLL and + * NETMAP_DO_RX_POLL. */ +#define NR_DO_RX_POLL 0x10000 +#define NR_NO_TX_POLL 0x20000 -#define NM_BDG_NAME "vale" /* prefix for bridge port name */ + uint32_t nr_extra_bufs; /* number of requested extra buffers */ +}; -#ifdef _WIN32 -/* - * Windows does not have _IOWR(). _IO(), _IOW() and _IOR() are defined - * in ws2def.h but not sure if they are in the form we need. - * We therefore redefine them in a convenient way to use for DeviceIoControl - * signatures. - */ -#undef _IO // ws2def.h -#define _WIN_NM_IOCTL_TYPE 40000 -#define _IO(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \ - METHOD_BUFFERED, FILE_ANY_ACCESS ) -#define _IO_direct(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \ - METHOD_OUT_DIRECT, FILE_ANY_ACCESS ) +/* Valid values for nmreq_register.nr_mode (see above). */ +enum { NR_REG_DEFAULT = 0, /* backward compat, should not be used. */ + NR_REG_ALL_NIC = 1, + NR_REG_SW = 2, + NR_REG_NIC_SW = 3, + NR_REG_ONE_NIC = 4, + NR_REG_PIPE_MASTER = 5, /* deprecated, use "x{y" port name syntax */ + NR_REG_PIPE_SLAVE = 6, /* deprecated, use "x}y" port name syntax */ +}; -#define _IOWR(_c, _n, _s) _IO(_c, _n) +/* A single ioctl number is shared by all the new API command. + * Demultiplexing is done using the nr_hdr.nr_reqtype field. + * FreeBSD uses the size value embedded in the _IOWR to determine + * how much to copy in/out, so we define the ioctl() command + * specifying only nmreq_header, and copyin/copyout the rest. */ +#define NIOCCTRL _IOWR('i', 151, struct nmreq_header) -/* We havesome internal sysctl in addition to the externally visible ones */ -#define NETMAP_MMAP _IO_direct('i', 160) // note METHOD_OUT_DIRECT -#define NETMAP_POLL _IO('i', 162) +/* The ioctl commands to sync TX/RX netmap rings. + * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues, + * whose identity is set in NIOCREGIF through nr_ringid. + * These are non blocking and take no argument. */ +#define NIOCTXSYNC _IO('i', 148) /* sync tx queues */ +#define NIOCRXSYNC _IO('i', 149) /* sync rx queues */ -/* and also two setsockopt for sysctl emulation */ -#define NETMAP_SETSOCKOPT _IO('i', 140) -#define NETMAP_GETSOCKOPT _IO('i', 141) +/* + * nr_reqtype: NETMAP_REQ_PORT_INFO_GET + * Get information about a netmap port, including number of rings. + * slots per ring, id of the memory allocator, etc. 
+ */ +struct nmreq_port_info_get { + uint64_t nr_offset; /* nifp offset in the shared region */ + uint64_t nr_memsize; /* size of the shared region */ + uint32_t nr_tx_slots; /* slots in tx rings */ + uint32_t nr_rx_slots; /* slots in rx rings */ + uint16_t nr_tx_rings; /* number of tx rings */ + uint16_t nr_rx_rings; /* number of rx rings */ + uint16_t nr_mem_id; /* id of the memory allocator */ +}; +#define NM_BDG_NAME "vale" /* prefix for bridge port name */ -//These linknames are for the Netmap Core Driver -#define NETMAP_NT_DEVICE_NAME L"\\Device\\NETMAP" -#define NETMAP_DOS_DEVICE_NAME L"\\DosDevices\\netmap" +/* + * nr_reqtype: NETMAP_REQ_VALE_ATTACH + * Attach a netmap port to a VALE switch. Both the name of the netmap + * port and the VALE switch are specified through the nr_name argument. + * The attach operation could need to register a port, so at least + * the same arguments are available. + * port_index will contain the index where the port has been attached. + */ +struct nmreq_vale_attach { + struct nmreq_register reg; + uint32_t port_index; +}; -//Definition of a structure used to pass a virtual address within an IOCTL -typedef struct _MEMORY_ENTRY { - PVOID pUsermodeVirtualAddress; -} MEMORY_ENTRY, *PMEMORY_ENTRY; +/* + * nr_reqtype: NETMAP_REQ_VALE_DETACH + * Detach a netmap port from a VALE switch. Both the name of the netmap + * port and the VALE switch are specified through the nr_name argument. + * port_index will contain the index where the port was attached. + */ +struct nmreq_vale_detach { + uint32_t port_index; +}; -typedef struct _POLL_REQUEST_DATA { - int events; - int timeout; - int revents; -} POLL_REQUEST_DATA; +/* + * nr_reqtype: NETMAP_REQ_VALE_LIST + * List the ports of a VALE switch. + */ +struct nmreq_vale_list { + /* Name of the VALE port (valeXXX:YYY) or empty. */ + uint16_t nr_bridge_idx; + uint32_t nr_port_idx; +}; -#endif /* _WIN32 */ +/* + * nr_reqtype: NETMAP_REQ_PORT_HDR_SET or NETMAP_REQ_PORT_HDR_GET + * Set the port header length. + */ +struct nmreq_port_hdr { + uint32_t nr_hdr_len; +}; /* - * FreeBSD uses the size value embedded in the _IOWR to determine - * how much to copy in/out. So we need it to match the actual - * data structure we pass. We put some spares in the structure - * to ease compatibility with other versions + * nr_reqtype: NETMAP_REQ_VALE_NEWIF + * Create a new persistent VALE port. */ -#define NIOCGINFO _IOWR('i', 145, struct nmreq) /* return IF info */ -#define NIOCREGIF _IOWR('i', 146, struct nmreq) /* interface register */ -#define NIOCTXSYNC _IO('i', 148) /* sync tx queues */ -#define NIOCRXSYNC _IO('i', 149) /* sync rx queues */ -#define NIOCCONFIG _IOWR('i',150, struct nm_ifreq) /* for ext. modules */ -#endif /* !NIOCREGIF */ +struct nmreq_vale_newif { + uint32_t nr_tx_slots; /* slots in tx rings */ + uint32_t nr_rx_slots; /* slots in rx rings */ + uint16_t nr_tx_rings; /* number of tx rings */ + uint16_t nr_rx_rings; /* number of rx rings */ + uint16_t nr_mem_id; /* id of the memory allocator */ +}; - /* - * Helper functions for kernel and userspace + * nr_reqtype: NETMAP_REQ_VALE_POLLING_ENABLE or NETMAP_REQ_VALE_POLLING_DISABLE + * Enable or disable polling kthreads on a VALE port. */ +struct nmreq_vale_polling { + uint32_t nr_mode; +#define NETMAP_POLLING_MODE_SINGLE_CPU 1 +#define NETMAP_POLLING_MODE_MULTI_CPU 2 + uint32_t nr_first_cpu_id; + uint32_t nr_num_polling_cpus; +}; /* - * check if space is available in the ring. 
+ * nr_reqtype: NETMAP_REQ_POOLS_INFO_GET + * Get info about the pools of the memory allocator of the port bound + * to a given netmap control device (used i.e. by a ptnetmap-enabled + * hypervisor). The nr_hdr.nr_name field is ignored. */ -static inline int -nm_ring_empty(struct netmap_ring *ring) -{ - return (ring->cur == ring->tail); -} +struct nmreq_pools_info { + uint64_t nr_memsize; + uint16_t nr_mem_id; + uint64_t nr_if_pool_offset; + uint32_t nr_if_pool_objtotal; + uint32_t nr_if_pool_objsize; + uint64_t nr_ring_pool_offset; + uint32_t nr_ring_pool_objtotal; + uint32_t nr_ring_pool_objsize; + uint64_t nr_buf_pool_offset; + uint32_t nr_buf_pool_objtotal; + uint32_t nr_buf_pool_objsize; +}; /* - * Opaque structure that is passed to an external kernel - * module via ioctl(fd, NIOCCONFIG, req) for a user-owned - * bridge port (at this point ephemeral VALE interface). + * data for NETMAP_REQ_OPT_* options */ -#define NM_IFRDATA_LEN 256 -struct nm_ifreq { - char nifr_name[IFNAMSIZ]; - char data[NM_IFRDATA_LEN]; + +struct nmreq_opt_extmem { + struct nmreq_option nro_opt; /* common header */ + uint64_t nro_usrptr; /* (in) ptr to usr memory */ + struct nmreq_pools_info nro_info; /* (in/out) */ }; #endif /* _NET_NETMAP_H_ */ Index: head/sys/net/netmap_legacy.h =================================================================== --- head/sys/net/netmap_legacy.h (nonexistent) +++ head/sys/net/netmap_legacy.h (revision 332423) @@ -0,0 +1,264 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``S IS''AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NET_NETMAP_LEGACY_H_ +#define _NET_NETMAP_LEGACY_H_ + +/* + * $FreeBSD$ + * + * ioctl names and related fields + * + * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues, + * whose identity is set in NIOCREGIF through nr_ringid. + * These are non blocking and take no argument. + * + * NIOCGINFO takes a struct ifreq, the interface name is the input, + * the outputs are number of queues and number of descriptor + * for each queue (useful to set number of threads etc.). + * The info returned is only advisory and may change before + * the interface is bound to a file descriptor. 
+ * + * NIOCREGIF takes an interface name within a struct nmre, + * and activates netmap mode on the interface (if possible). + * + * The argument to NIOCGINFO/NIOCREGIF overlays struct ifreq so we + * can pass it down to other NIC-related ioctls. + * + * The actual argument (struct nmreq) has a number of options to request + * different functions. + * The following are used in NIOCREGIF when nr_cmd == 0: + * + * nr_name (in) + * The name of the port (em0, valeXXX:YYY, etc.) + * limited to IFNAMSIZ for backward compatibility. + * + * nr_version (in/out) + * Must match NETMAP_API as used in the kernel, error otherwise. + * Always returns the desired value on output. + * + * nr_tx_slots, nr_tx_slots, nr_tx_rings, nr_rx_rings (in/out) + * On input, non-zero values may be used to reconfigure the port + * according to the requested values, but this is not guaranteed. + * On output the actual values in use are reported. + * + * nr_ringid (in) + * Indicates how rings should be bound to the file descriptors. + * If nr_flags != 0, then the low bits (in NETMAP_RING_MASK) + * are used to indicate the ring number, and nr_flags specifies + * the actual rings to bind. NETMAP_NO_TX_POLL is unaffected. + * + * NOTE: THE FOLLOWING (nr_flags == 0) IS DEPRECATED: + * If nr_flags == 0, NETMAP_HW_RING and NETMAP_SW_RING control + * the binding as follows: + * 0 (default) binds all physical rings + * NETMAP_HW_RING | ring number binds a single ring pair + * NETMAP_SW_RING binds only the host tx/rx rings + * + * NETMAP_NO_TX_POLL can be OR-ed to make select()/poll() push + * packets on tx rings only if POLLOUT is set. + * The default is to push any pending packet. + * + * NETMAP_DO_RX_POLL can be OR-ed to make select()/poll() release + * packets on rx rings also when POLLIN is NOT set. + * The default is to touch the rx ring only with POLLIN. + * Note that this is the opposite of TX because it + * reflects the common usage. + * + * NOTE: NETMAP_PRIV_MEM IS DEPRECATED, use nr_arg2 instead. + * NETMAP_PRIV_MEM is set on return for ports that do not use + * the global memory allocator. + * This information is not significant and applications + * should look at the region id in nr_arg2 + * + * nr_flags is the recommended mode to indicate which rings should + * be bound to a file descriptor. Values are NR_REG_* + * + * nr_arg1 (in) The number of extra rings to be reserved. + * Especially when allocating a VALE port the system only + * allocates the amount of memory needed for the port. + * If more shared memory rings are desired (e.g. for pipes), + * the first invocation for the same basename/allocator + * should specify a suitable number. Memory cannot be + * extended after the first allocation without closing + * all ports on the same region. + * + * nr_arg2 (in/out) The identity of the memory region used. + * On input, 0 means the system decides autonomously, + * other values may try to select a specific region. + * On return the actual value is reported. + * Region '1' is the global allocator, normally shared + * by all interfaces. Other values are private regions. + * If two ports the same region zero-copy is possible. + * + * nr_arg3 (in/out) number of extra buffers to be allocated. + * + * + * + * nr_cmd (in) if non-zero indicates a special command: + * NETMAP_BDG_ATTACH and nr_name = vale*:ifname + * attaches the NIC to the switch; nr_ringid specifies + * which rings to use. Used by vale-ctl -a ... + * nr_arg1 = NETMAP_BDG_HOST also attaches the host port + * as in vale-ctl -h ... 
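The NETMAP_BDG_ATTACH command just described is easier to follow with an example. A hedged sketch, not part of this change, attaching a NIC and its host rings to a VALE switch much as vale-ctl -h does (fd is already open on /dev/netmap, the names are placeholders):

struct nmreq req;

memset(&req, 0, sizeof(req));
req.nr_version = NETMAP_API;
req.nr_cmd = NETMAP_BDG_ATTACH;
req.nr_arg1 = NETMAP_BDG_HOST;	/* also attach the host ring pair */
strncpy(req.nr_name, "vale0:em1", sizeof(req.nr_name) - 1);
if (ioctl(fd, NIOCREGIF, &req) < 0)
	err(1, "NETMAP_BDG_ATTACH");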
+ * + * NETMAP_BDG_DETACH and nr_name = vale*:ifname + * disconnects a previously attached NIC. + * Used by vale-ctl -d ... + * + * NETMAP_BDG_LIST + * list the configuration of VALE switches. + * + * NETMAP_BDG_VNET_HDR + * Set the virtio-net header length used by the client + * of a VALE switch port. + * + * NETMAP_BDG_NEWIF + * create a persistent VALE port with name nr_name. + * Used by vale-ctl -n ... + * + * NETMAP_BDG_DELIF + * delete a persistent VALE port. Used by vale-ctl -d ... + * + * nr_arg1, nr_arg2, nr_arg3 (in/out) command specific + * + * + * + */ + + +/* + * struct nmreq overlays a struct ifreq (just the name) + */ +struct nmreq { + char nr_name[IFNAMSIZ]; + uint32_t nr_version; /* API version */ + uint32_t nr_offset; /* nifp offset in the shared region */ + uint32_t nr_memsize; /* size of the shared region */ + uint32_t nr_tx_slots; /* slots in tx rings */ + uint32_t nr_rx_slots; /* slots in rx rings */ + uint16_t nr_tx_rings; /* number of tx rings */ + uint16_t nr_rx_rings; /* number of rx rings */ + + uint16_t nr_ringid; /* ring(s) we care about */ +#define NETMAP_HW_RING 0x4000 /* single NIC ring pair */ +#define NETMAP_SW_RING 0x2000 /* only host ring pair */ + +#define NETMAP_RING_MASK 0x0fff /* the ring number */ + +#define NETMAP_NO_TX_POLL 0x1000 /* no automatic txsync on poll */ + +#define NETMAP_DO_RX_POLL 0x8000 /* DO automatic rxsync on poll */ + + uint16_t nr_cmd; +#define NETMAP_BDG_ATTACH 1 /* attach the NIC */ +#define NETMAP_BDG_DETACH 2 /* detach the NIC */ +#define NETMAP_BDG_REGOPS 3 /* register bridge callbacks */ +#define NETMAP_BDG_LIST 4 /* get bridge's info */ +#define NETMAP_BDG_VNET_HDR 5 /* set the port virtio-net-hdr length */ +#define NETMAP_BDG_NEWIF 6 /* create a virtual port */ +#define NETMAP_BDG_DELIF 7 /* destroy a virtual port */ +#define NETMAP_PT_HOST_CREATE 8 /* create ptnetmap kthreads */ +#define NETMAP_PT_HOST_DELETE 9 /* delete ptnetmap kthreads */ +#define NETMAP_BDG_POLLING_ON 10 /* delete polling kthread */ +#define NETMAP_BDG_POLLING_OFF 11 /* delete polling kthread */ +#define NETMAP_VNET_HDR_GET 12 /* get the port virtio-net-hdr length */ + uint16_t nr_arg1; /* reserve extra rings in NIOCREGIF */ +#define NETMAP_BDG_HOST 1 /* nr_arg1 value for NETMAP_BDG_ATTACH */ + + uint16_t nr_arg2; /* id of the memory allocator */ + uint32_t nr_arg3; /* req. extra buffers in NIOCREGIF */ + uint32_t nr_flags; /* specify NR_REG_* mode and other flags */ +#define NR_REG_MASK 0xf /* to extract NR_REG_* mode from nr_flags */ + /* various modes, extends nr_ringid */ + uint32_t spare2[1]; +}; + +#ifdef _WIN32 +/* + * Windows does not have _IOWR(). _IO(), _IOW() and _IOR() are defined + * in ws2def.h but not sure if they are in the form we need. + * We therefore redefine them in a convenient way to use for DeviceIoControl + * signatures. 
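One more sketch, not part of this change, showing the recommended nr_flags binding next to the deprecated nr_ringid-only form for a struct nmreq (req) about to be passed to NIOCREGIF:

/* Recommended: nr_flags selects the mode, low bits of nr_ringid the ring. */
req.nr_flags = NR_REG_ONE_NIC;
req.nr_ringid = 2 | NETMAP_NO_TX_POLL;	/* ring pair 2, tx only on POLLOUT */

/* Deprecated form kept for compatibility (nr_flags == 0): */
req.nr_ringid = NETMAP_HW_RING | 2;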
+ */ +#undef _IO // ws2def.h +#define _WIN_NM_IOCTL_TYPE 40000 +#define _IO(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \ + METHOD_BUFFERED, FILE_ANY_ACCESS ) +#define _IO_direct(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \ + METHOD_OUT_DIRECT, FILE_ANY_ACCESS ) + +#define _IOWR(_c, _n, _s) _IO(_c, _n) + +/* We havesome internal sysctl in addition to the externally visible ones */ +#define NETMAP_MMAP _IO_direct('i', 160) // note METHOD_OUT_DIRECT +#define NETMAP_POLL _IO('i', 162) + +/* and also two setsockopt for sysctl emulation */ +#define NETMAP_SETSOCKOPT _IO('i', 140) +#define NETMAP_GETSOCKOPT _IO('i', 141) + + +/* These linknames are for the Netmap Core Driver */ +#define NETMAP_NT_DEVICE_NAME L"\\Device\\NETMAP" +#define NETMAP_DOS_DEVICE_NAME L"\\DosDevices\\netmap" + +/* Definition of a structure used to pass a virtual address within an IOCTL */ +typedef struct _MEMORY_ENTRY { + PVOID pUsermodeVirtualAddress; +} MEMORY_ENTRY, *PMEMORY_ENTRY; + +typedef struct _POLL_REQUEST_DATA { + int events; + int timeout; + int revents; +} POLL_REQUEST_DATA; +#endif /* _WIN32 */ + +/* + * Opaque structure that is passed to an external kernel + * module via ioctl(fd, NIOCCONFIG, req) for a user-owned + * bridge port (at this point ephemeral VALE interface). + */ +#define NM_IFRDATA_LEN 256 +struct nm_ifreq { + char nifr_name[IFNAMSIZ]; + char data[NM_IFRDATA_LEN]; +}; + +/* + * FreeBSD uses the size value embedded in the _IOWR to determine + * how much to copy in/out. So we need it to match the actual + * data structure we pass. We put some spares in the structure + * to ease compatibility with other versions + */ +#define NIOCGINFO _IOWR('i', 145, struct nmreq) /* return IF info */ +#define NIOCREGIF _IOWR('i', 146, struct nmreq) /* interface register */ +#define NIOCCONFIG _IOWR('i',150, struct nm_ifreq) /* for ext. modules */ + +#endif /* _NET_NETMAP_LEGACY_H_ */ Property changes on: head/sys/net/netmap_legacy.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/net/netmap_user.h =================================================================== --- head/sys/net/netmap_user.h (revision 332422) +++ head/sys/net/netmap_user.h (revision 332423) @@ -1,1318 +1,1138 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2011-2016 Universita` di Pisa * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * Functions and macros to manipulate netmap structures and packets * in userspace. See netmap(4) for more information. * * The address of the struct netmap_if, say nifp, is computed from the * value returned from ioctl(.., NIOCREG, ...) and the mmap region: * ioctl(fd, NIOCREG, &req); * mem = mmap(0, ... ); * nifp = NETMAP_IF(mem, req.nr_nifp); * (so simple, we could just do it manually) * * From there: * struct netmap_ring *NETMAP_TXRING(nifp, index) * struct netmap_ring *NETMAP_RXRING(nifp, index) * we can access ring->cur, ring->head, ring->tail, etc. * * ring->slot[i] gives us the i-th slot (we can access * directly len, flags, buf_idx) * * char *buf = NETMAP_BUF(ring, x) returns a pointer to * the buffer numbered x * * All ring indexes (head, cur, tail) should always move forward. * To compute the next index in a circular ring you can use * i = nm_ring_next(ring, i); * * To ease porting apps from pcap to netmap we supply a few fuctions * that can be called to open, close, read and write on netmap in a way * similar to libpcap. Note that the read/write function depend on * an ioctl()/select()/poll() being issued to refill rings or push * packets out. * * In order to use these, include #define NETMAP_WITH_LIBS * in the source file that invokes these functions. */ #ifndef _NET_NETMAP_USER_H_ #define _NET_NETMAP_USER_H_ #define NETMAP_DEVICE_NAME "/dev/netmap" #ifdef __CYGWIN__ /* * we can compile userspace apps with either cygwin or msvc, * and we use _WIN32 to identify windows specific code */ #ifndef _WIN32 #define _WIN32 #endif /* _WIN32 */ #endif /* __CYGWIN__ */ #ifdef _WIN32 #undef NETMAP_DEVICE_NAME #define NETMAP_DEVICE_NAME "/proc/sys/DosDevices/Global/netmap" #include #include #include #endif /* _WIN32 */ #include #include /* apple needs sockaddr */ #include /* IFNAMSIZ */ #include #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif /* likely and unlikely */ #include -#include /* nmreq_pointer_get() */ /* helper macro */ #define _NETMAP_OFFSET(type, ptr, offset) \ ((type)(void *)((char *)(ptr) + (offset))) #define NETMAP_IF(_base, _ofs) _NETMAP_OFFSET(struct netmap_if *, _base, _ofs) #define NETMAP_TXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \ nifp, (nifp)->ring_ofs[index] ) #define NETMAP_RXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \ nifp, (nifp)->ring_ofs[index + (nifp)->ni_tx_rings + 1] ) #define NETMAP_BUF(ring, index) \ - ((char *)(ring) + (ring)->buf_ofs + ((long)(index)*(ring)->nr_buf_size)) + ((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size)) #define NETMAP_BUF_IDX(ring, buf) \ ( ((char *)(buf) - ((char *)(ring) + (ring)->buf_ofs) ) / \ (ring)->nr_buf_size ) static inline uint32_t nm_ring_next(struct netmap_ring *r, uint32_t i) { return ( unlikely(i + 1 == r->num_slots) ? 0 : i + 1); } /* * Return 1 if we have pending transmissions in the tx ring. 
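The open sequence sketched in the header comment above looks as follows when written out; a minimal sketch, not part of this change, using the legacy nmreq path with error handling omitted and "em0" as a placeholder name:

struct nmreq req;
struct netmap_if *nifp;
struct netmap_ring *txring;
void *mem;
int fd = open("/dev/netmap", O_RDWR);

memset(&req, 0, sizeof(req));
req.nr_version = NETMAP_API;
req.nr_flags = NR_REG_ALL_NIC;		/* all hardware rings */
strncpy(req.nr_name, "em0", sizeof(req.nr_name) - 1);
ioctl(fd, NIOCREGIF, &req);
mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
nifp = NETMAP_IF(mem, req.nr_offset);
txring = NETMAP_TXRING(nifp, 0);	/* first hardware tx ring */
/* txring->slot[i] and NETMAP_BUF(txring, slot->buf_idx) are now usable */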
* When everything is complete ring->head = ring->tail + 1 (modulo ring size) */ static inline int nm_tx_pending(struct netmap_ring *r) { return nm_ring_next(r, r->tail) != r->head; } static inline uint32_t nm_ring_space(struct netmap_ring *ring) { int ret = ring->tail - ring->cur; if (ret < 0) ret += ring->num_slots; return ret; } #ifdef NETMAP_WITH_LIBS /* * Support for simple I/O libraries. * Include other system headers required for compiling this. */ #ifndef HAVE_NETMAP_WITH_LIBS #define HAVE_NETMAP_WITH_LIBS #include #include #include #include /* memset */ #include #include /* EINVAL */ #include /* O_RDWR */ #include /* close() */ #include #include #ifndef ND /* debug macros */ /* debug support */ #define ND(_fmt, ...) do {} while(0) #define D(_fmt, ...) \ do { \ struct timeval _t0; \ gettimeofday(&_t0, NULL); \ fprintf(stderr, "%03d.%06d %s [%d] " _fmt "\n", \ (int)(_t0.tv_sec % 1000), (int)_t0.tv_usec, \ __FUNCTION__, __LINE__, ##__VA_ARGS__); \ } while (0) /* Rate limited version of "D", lps indicates how many per second */ #define RD(lps, format, ...) \ do { \ static int __t0, __cnt; \ struct timeval __xxts; \ gettimeofday(&__xxts, NULL); \ if (__t0 != __xxts.tv_sec) { \ __t0 = __xxts.tv_sec; \ __cnt = 0; \ } \ if (__cnt++ < lps) { \ D(format, ##__VA_ARGS__); \ } \ } while (0) #endif struct nm_pkthdr { /* first part is the same as pcap_pkthdr */ struct timeval ts; uint32_t caplen; uint32_t len; uint64_t flags; /* NM_MORE_PKTS etc */ #define NM_MORE_PKTS 1 struct nm_desc *d; struct netmap_slot *slot; uint8_t *buf; }; struct nm_stat { /* same as pcap_stat */ u_int ps_recv; u_int ps_drop; u_int ps_ifdrop; #ifdef WIN32 /* XXX or _WIN32 ? */ u_int bs_capt; #endif /* WIN32 */ }; #define NM_ERRBUF_SIZE 512 struct nm_desc { struct nm_desc *self; /* point to self if netmap. */ int fd; void *mem; - uint64_t memsize; + uint32_t memsize; int done_mmap; /* set if mem is the result of mmap */ struct netmap_if * const nifp; uint16_t first_tx_ring, last_tx_ring, cur_tx_ring; uint16_t first_rx_ring, last_rx_ring, cur_rx_ring; struct nmreq req; /* also contains the nr_name = ifname */ struct nm_pkthdr hdr; /* * The memory contains netmap_if, rings and then buffers. * Given a pointer (e.g. to nm_inject) we can compare with * mem/buf_start/buf_end to tell if it is a buffer or * some other descriptor in our region. * We also store a pointer to some ring as it helps in the * translation from buffer indexes to addresses. */ struct netmap_ring * const some_ring; void * const buf_start; void * const buf_end; /* parameters from pcap_open_live */ int snaplen; int promisc; int to_ms; char *errbuf; /* save flags so we can restore them on close */ uint32_t if_flags; uint32_t if_reqcap; uint32_t if_curcap; struct nm_stat st; char msg[NM_ERRBUF_SIZE]; }; /* * when the descriptor is open correctly, d->self == d * Eventually we should also use some magic number. */ #define P2NMD(p) ((struct nm_desc *)(p)) #define IS_NETMAP_DESC(d) ((d) && P2NMD(d)->self == P2NMD(d)) #define NETMAP_FD(d) (P2NMD(d)->fd) /* * this is a slightly optimized copy routine which rounds * to multiple of 64 bytes and is often faster than dealing * with other odd sizes. We assume there is enough room * in the source and destination buffers. 
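nm_ring_space() and nm_ring_next() are typically combined to fill a tx ring in batches. A minimal sketch, not part of this change: fill_frame() is a hypothetical helper that writes one frame into the supplied buffer and returns its length, and fd/nifp come from the open sequence above:

struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
u_int i = ring->cur;
u_int avail;

for (avail = nm_ring_space(ring); avail > 0; avail--) {
	struct netmap_slot *slot = &ring->slot[i];

	slot->len = fill_frame(NETMAP_BUF(ring, slot->buf_idx));
	i = nm_ring_next(ring, i);
}
ring->head = ring->cur = i;
ioctl(fd, NIOCTXSYNC, NULL);	/* or let poll()/select() push the frames out */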
*/ static inline void nm_pkt_copy(const void *_src, void *_dst, int l) { const uint64_t *src = (const uint64_t *)_src; uint64_t *dst = (uint64_t *)_dst; if (unlikely(l >= 1024 || l % 64)) { memcpy(dst, src, l); return; } for (; likely(l > 0); l-=64) { *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; } } /* * The callback, invoked on each received packet. Same as libpcap */ typedef void (*nm_cb_t)(u_char *, const struct nm_pkthdr *, const u_char *d); /* *--- the pcap-like API --- * * nm_open() opens a file descriptor, binds to a port and maps memory. * * ifname (netmap:foo or vale:foo) is the port name * a suffix can indicate the follwing: * ^ bind the host (sw) ring pair * * bind host and NIC ring pairs * -NN bind individual NIC ring pair * {NN bind master side of pipe NN * }NN bind slave side of pipe NN * a suffix starting with / and the following flags, * in any order: * x exclusive access * z zero copy monitor (both tx and rx) * t monitor tx side (copy monitor) * r monitor rx side (copy monitor) * R bind only RX ring(s) * T bind only TX ring(s) * * req provides the initial values of nmreq before parsing ifname. * Remember that the ifname parsing will override the ring * number in nm_ringid, and part of nm_flags; * flags special functions, normally 0 * indicates which fields of *arg are significant * arg special functions, normally NULL * if passed a netmap_desc with mem != NULL, * use that memory instead of mmap. */ static struct nm_desc *nm_open(const char *ifname, const struct nmreq *req, uint64_t flags, const struct nm_desc *arg); /* * nm_open can import some fields from the parent descriptor. * These flags control which ones. * Also in flags you can specify NETMAP_NO_TX_POLL and NETMAP_DO_RX_POLL, * which set the initial value for these flags. * Note that the 16 low bits of the flags are reserved for data * that may go into the nmreq. */ enum { NM_OPEN_NO_MMAP = 0x040000, /* reuse mmap from parent */ NM_OPEN_IFNAME = 0x080000, /* nr_name, nr_ringid, nr_flags */ NM_OPEN_ARG1 = 0x100000, NM_OPEN_ARG2 = 0x200000, NM_OPEN_ARG3 = 0x400000, NM_OPEN_RING_CFG = 0x800000, /* tx|rx rings|slots */ - NM_OPEN_EXTMEM = 0x1000000, }; /* * nm_close() closes and restores the port to its previous state */ static int nm_close(struct nm_desc *); /* * nm_mmap() do mmap or inherit from parent if the nr_arg2 * (memory block) matches. */ static int nm_mmap(struct nm_desc *, const struct nm_desc *); /* * nm_inject() is the same as pcap_inject() * nm_dispatch() is the same as pcap_dispatch() * nm_nextpkt() is the same as pcap_next() */ static int nm_inject(struct nm_desc *, const void *, size_t); static int nm_dispatch(struct nm_desc *, int, nm_cb_t, u_char *); static u_char *nm_nextpkt(struct nm_desc *, struct nm_pkthdr *); #ifdef _WIN32 intptr_t _get_osfhandle(int); /* defined in io.h in windows */ /* * In windows we do not have yet native poll support, so we keep track * of file descriptors associated to netmap ports to emulate poll on * them and fall back on regular poll on other file descriptors. 
*/ struct win_netmap_fd_list { struct win_netmap_fd_list *next; int win_netmap_fd; HANDLE win_netmap_handle; }; /* * list head containing all the netmap opened fd and their * windows HANDLE counterparts */ static struct win_netmap_fd_list *win_netmap_fd_list_head; static void win_insert_fd_record(int fd) { struct win_netmap_fd_list *curr; for (curr = win_netmap_fd_list_head; curr; curr = curr->next) { if (fd == curr->win_netmap_fd) { return; } } curr = calloc(1, sizeof(*curr)); curr->next = win_netmap_fd_list_head; curr->win_netmap_fd = fd; curr->win_netmap_handle = IntToPtr(_get_osfhandle(fd)); win_netmap_fd_list_head = curr; } void win_remove_fd_record(int fd) { struct win_netmap_fd_list *curr = win_netmap_fd_list_head; struct win_netmap_fd_list *prev = NULL; for (; curr ; prev = curr, curr = curr->next) { if (fd != curr->win_netmap_fd) continue; /* found the entry */ if (prev == NULL) { /* we are freeing the first entry */ win_netmap_fd_list_head = curr->next; } else { prev->next = curr->next; } free(curr); break; } } HANDLE win_get_netmap_handle(int fd) { struct win_netmap_fd_list *curr; for (curr = win_netmap_fd_list_head; curr; curr = curr->next) { if (fd == curr->win_netmap_fd) { return curr->win_netmap_handle; } } return NULL; } /* * we need to wrap ioctl and mmap, at least for the netmap file descriptors */ /* * use this function only from netmap_user.h internal functions * same as ioctl, returns 0 on success and -1 on error */ static int win_nm_ioctl_internal(HANDLE h, int32_t ctlCode, void *arg) { DWORD bReturn = 0, szIn, szOut; BOOL ioctlReturnStatus; void *inParam = arg, *outParam = arg; switch (ctlCode) { case NETMAP_POLL: szIn = sizeof(POLL_REQUEST_DATA); szOut = sizeof(POLL_REQUEST_DATA); break; case NETMAP_MMAP: szIn = 0; szOut = sizeof(void*); inParam = NULL; /* nothing on input */ break; case NIOCTXSYNC: case NIOCRXSYNC: szIn = 0; szOut = 0; break; case NIOCREGIF: szIn = sizeof(struct nmreq); szOut = sizeof(struct nmreq); break; case NIOCCONFIG: D("unsupported NIOCCONFIG!"); return -1; default: /* a regular ioctl */ D("invalid ioctl %x on netmap fd", ctlCode); return -1; } ioctlReturnStatus = DeviceIoControl(h, ctlCode, inParam, szIn, outParam, szOut, &bReturn, NULL); // XXX note windows returns 0 on error or async call, 1 on success // we could call GetLastError() to figure out what happened return ioctlReturnStatus ? 0 : -1; } /* * this function is what must be called from user-space programs * same as ioctl, returns 0 on success and -1 on error */ static int win_nm_ioctl(int fd, int32_t ctlCode, void *arg) { HANDLE h = win_get_netmap_handle(fd); if (h == NULL) { return ioctl(fd, ctlCode, arg); } else { return win_nm_ioctl_internal(h, ctlCode, arg); } } #define ioctl win_nm_ioctl /* from now on, within this file ... */ /* * We cannot use the native mmap on windows * The only parameter used is "fd", the other ones are just declared to * make this signature comparable to the FreeBSD/Linux one */ static void * win32_mmap_emulated(void *addr, size_t length, int prot, int flags, int fd, int32_t offset) { HANDLE h = win_get_netmap_handle(fd); if (h == NULL) { return mmap(addr, length, prot, flags, fd, offset); } else { MEMORY_ENTRY ret; return win_nm_ioctl_internal(h, NETMAP_MMAP, &ret) ? 
NULL : ret.pUsermodeVirtualAddress; } } #define mmap win32_mmap_emulated #include /* XXX needed to use the structure pollfd */ static int win_nm_poll(struct pollfd *fds, int nfds, int timeout) { HANDLE h; if (nfds != 1 || fds == NULL || (h = win_get_netmap_handle(fds->fd)) == NULL) {; return poll(fds, nfds, timeout); } else { POLL_REQUEST_DATA prd; prd.timeout = timeout; prd.events = fds->events; win_nm_ioctl_internal(h, NETMAP_POLL, &prd); if ((prd.revents == POLLERR) || (prd.revents == STATUS_TIMEOUT)) { return -1; } return 1; } } #define poll win_nm_poll static int win_nm_open(char* pathname, int flags) { if (strcmp(pathname, NETMAP_DEVICE_NAME) == 0) { int fd = open(NETMAP_DEVICE_NAME, O_RDWR); if (fd < 0) { return -1; } win_insert_fd_record(fd); return fd; } else { return open(pathname, flags); } } #define open win_nm_open static int win_nm_close(int fd) { if (fd != -1) { close(fd); if (win_get_netmap_handle(fd) != NULL) { win_remove_fd_record(fd); } } return 0; } #define close win_nm_close #endif /* _WIN32 */ static int nm_is_identifier(const char *s, const char *e) { for (; s != e; s++) { if (!isalnum(*s) && *s != '_') { return 0; } } return 1; } -static void -nm_init_offsets(struct nm_desc *d) -{ - struct netmap_if *nifp = NETMAP_IF(d->mem, d->req.nr_offset); - struct netmap_ring *r = NETMAP_RXRING(nifp, d->first_rx_ring); - if ((void *)r == (void *)nifp) { - /* the descriptor is open for TX only */ - r = NETMAP_TXRING(nifp, d->first_tx_ring); - } - - *(struct netmap_if **)(uintptr_t)&(d->nifp) = nifp; - *(struct netmap_ring **)(uintptr_t)&d->some_ring = r; - *(void **)(uintptr_t)&d->buf_start = NETMAP_BUF(r, 0); - *(void **)(uintptr_t)&d->buf_end = - (char *)d->mem + d->memsize; -} - #define MAXERRMSG 80 -#define NM_PARSE_OK 0 -#define NM_PARSE_MEMID 1 static int -nm_parse_one(const char *ifname, struct nmreq *d, char **out, int memid_allowed) +nm_parse(const char *ifname, struct nm_desc *d, char *err) { int is_vale; const char *port = NULL; const char *vpname = NULL; u_int namelen; uint32_t nr_ringid = 0, nr_flags; char errmsg[MAXERRMSG] = ""; long num; uint16_t nr_arg2 = 0; enum { P_START, P_RNGSFXOK, P_GETNUM, P_FLAGS, P_FLAGSOK, P_MEMID } p_state; errno = 0; - if (strncmp(ifname, "netmap:", 7) && - strncmp(ifname, NM_BDG_NAME, strlen(NM_BDG_NAME))) { - snprintf(errmsg, MAXERRMSG, "invalid port name: %s", ifname); - errno = EINVAL; - goto fail; - } - is_vale = (ifname[0] == 'v'); if (is_vale) { port = index(ifname, ':'); if (port == NULL) { snprintf(errmsg, MAXERRMSG, "missing ':' in vale name"); goto fail; } if (!nm_is_identifier(ifname + 4, port)) { snprintf(errmsg, MAXERRMSG, "invalid bridge name"); goto fail; } vpname = ++port; } else { ifname += 7; port = ifname; } /* scan for a separator */ for (; *port && !index("-*^{}/@", *port); port++) ; if (is_vale && !nm_is_identifier(vpname, port)) { snprintf(errmsg, MAXERRMSG, "invalid bridge port name"); goto fail; } namelen = port - ifname; - if (namelen >= sizeof(d->nr_name)) { + if (namelen >= sizeof(d->req.nr_name)) { snprintf(errmsg, MAXERRMSG, "name too long"); goto fail; } - memcpy(d->nr_name, ifname, namelen); - d->nr_name[namelen] = '\0'; - D("name %s", d->nr_name); + memcpy(d->req.nr_name, ifname, namelen); + d->req.nr_name[namelen] = '\0'; p_state = P_START; nr_flags = NR_REG_ALL_NIC; /* default for no suffix */ while (*port) { switch (p_state) { case P_START: switch (*port) { case '^': /* only SW ring */ nr_flags = NR_REG_SW; p_state = P_RNGSFXOK; break; case '*': /* NIC and SW */ nr_flags = NR_REG_NIC_SW; p_state = 
P_RNGSFXOK; break; case '-': /* one NIC ring pair */ nr_flags = NR_REG_ONE_NIC; p_state = P_GETNUM; break; case '{': /* pipe (master endpoint) */ nr_flags = NR_REG_PIPE_MASTER; p_state = P_GETNUM; break; case '}': /* pipe (slave endoint) */ nr_flags = NR_REG_PIPE_SLAVE; p_state = P_GETNUM; break; case '/': /* start of flags */ p_state = P_FLAGS; break; case '@': /* start of memid */ p_state = P_MEMID; break; default: snprintf(errmsg, MAXERRMSG, "unknown modifier: '%c'", *port); goto fail; } port++; break; case P_RNGSFXOK: switch (*port) { case '/': p_state = P_FLAGS; break; case '@': p_state = P_MEMID; break; default: snprintf(errmsg, MAXERRMSG, "unexpected character: '%c'", *port); goto fail; } port++; break; case P_GETNUM: num = strtol(port, (char **)&port, 10); if (num < 0 || num >= NETMAP_RING_MASK) { snprintf(errmsg, MAXERRMSG, "'%ld' out of range [0, %d)", num, NETMAP_RING_MASK); goto fail; } nr_ringid = num & NETMAP_RING_MASK; p_state = P_RNGSFXOK; break; case P_FLAGS: case P_FLAGSOK: if (*port == '@') { port++; p_state = P_MEMID; break; } switch (*port) { case 'x': nr_flags |= NR_EXCLUSIVE; break; case 'z': nr_flags |= NR_ZCOPY_MON; break; case 't': nr_flags |= NR_MONITOR_TX; break; case 'r': nr_flags |= NR_MONITOR_RX; break; case 'R': nr_flags |= NR_RX_RINGS_ONLY; break; case 'T': nr_flags |= NR_TX_RINGS_ONLY; break; default: snprintf(errmsg, MAXERRMSG, "unrecognized flag: '%c'", *port); goto fail; } port++; p_state = P_FLAGSOK; break; case P_MEMID: - if (!memid_allowed) { + if (nr_arg2 != 0) { snprintf(errmsg, MAXERRMSG, "double setting of memid"); goto fail; } num = strtol(port, (char **)&port, 10); if (num <= 0) { - ND("non-numeric memid %s (out = %p)", port, out); - if (out == NULL) - goto fail; - *out = (char *)port; - while (*port) - port++; - } else { - nr_arg2 = num; - memid_allowed = 0; - p_state = P_RNGSFXOK; + snprintf(errmsg, MAXERRMSG, "invalid memid %ld, must be >0", num); + goto fail; } + nr_arg2 = num; + p_state = P_RNGSFXOK; break; } } - if (p_state != P_START && p_state != P_RNGSFXOK && - p_state != P_FLAGSOK && p_state != P_MEMID) { + if (p_state != P_START && p_state != P_RNGSFXOK && p_state != P_FLAGSOK) { snprintf(errmsg, MAXERRMSG, "unexpected end of port name"); goto fail; } ND("flags: %s %s %s %s", (nr_flags & NR_EXCLUSIVE) ? "EXCLUSIVE" : "", (nr_flags & NR_ZCOPY_MON) ? "ZCOPY_MON" : "", (nr_flags & NR_MONITOR_TX) ? "MONITOR_TX" : "", (nr_flags & NR_MONITOR_RX) ? "MONITOR_RX" : ""); - d->nr_flags |= nr_flags; - d->nr_ringid |= nr_ringid; - d->nr_arg2 = nr_arg2; + d->req.nr_flags |= nr_flags; + d->req.nr_ringid |= nr_ringid; + d->req.nr_arg2 = nr_arg2; - return (p_state == P_MEMID) ? 
NM_PARSE_MEMID : NM_PARSE_OK; + d->self = d; + + return 0; fail: if (!errno) errno = EINVAL; - if (out) - *out = strdup(errmsg); + if (err) + strncpy(err, errmsg, MAXERRMSG); return -1; } -static int -nm_interp_memid(const char *memid, struct nmreq *req, char **err) -{ - int fd = -1; - char errmsg[MAXERRMSG] = ""; - struct nmreq greq; - off_t mapsize; - struct netmap_pools_info *pi; - - /* first, try to look for a netmap port with this name */ - fd = open("/dev/netmap", O_RDONLY); - if (fd < 0) { - snprintf(errmsg, MAXERRMSG, "cannot open /dev/netmap: %s", strerror(errno)); - goto fail; - } - memset(&greq, 0, sizeof(greq)); - if (nm_parse_one(memid, &greq, err, 0) == NM_PARSE_OK) { - greq.nr_version = NETMAP_API; - if (ioctl(fd, NIOCGINFO, &greq) < 0) { - if (errno == ENOENT || errno == ENXIO) - goto try_external; - snprintf(errmsg, MAXERRMSG, "cannot getinfo for %s: %s", memid, strerror(errno)); - goto fail; - } - req->nr_arg2 = greq.nr_arg2; - close(fd); - return 0; - } -try_external: - D("trying with external memory"); - close(fd); - fd = open(memid, O_RDWR); - if (fd < 0) { - snprintf(errmsg, MAXERRMSG, "cannot open %s: %s", memid, strerror(errno)); - goto fail; - } - mapsize = lseek(fd, 0, SEEK_END); - if (mapsize < 0) { - snprintf(errmsg, MAXERRMSG, "failed to obtain filesize of %s: %s", memid, strerror(errno)); - goto fail; - } - pi = mmap(0, mapsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); - if (pi == MAP_FAILED) { - snprintf(errmsg, MAXERRMSG, "cannot map %s: %s", memid, strerror(errno)); - goto fail; - } - req->nr_cmd = NETMAP_POOLS_CREATE; - pi->memsize = mapsize; - nmreq_pointer_put(req, pi); - D("mapped %zu bytes at %p from file %s", mapsize, pi, memid); - return 0; - -fail: - D("%s", errmsg); - close(fd); - if (err && !*err) - *err = strdup(errmsg); - return errno; -} - -static int -nm_parse(const char *ifname, struct nm_desc *d, char *errmsg) -{ - char *err; - switch (nm_parse_one(ifname, &d->req, &err, 1)) { - case NM_PARSE_OK: - D("parse OK"); - break; - case NM_PARSE_MEMID: - D("memid: %s", err); - errno = nm_interp_memid(err, &d->req, &err); - D("errno = %d", errno); - if (!errno) - break; - /* fallthrough */ - default: - D("error"); - strncpy(errmsg, err, MAXERRMSG); - errmsg[MAXERRMSG-1] = '\0'; - free(err); - return -1; - } - D("parsed name: %s", d->req.nr_name); - d->self = d; - return 0; -} - /* * Try to open, return descriptor if successful, NULL otherwise. * An invalid netmap name will return errno = 0; * You can pass a pointer to a pre-filled nm_desc to add special * parameters. Flags is used as follows - * NM_OPEN_NO_MMAP use the memory from arg, only + * NM_OPEN_NO_MMAP use the memory from arg, only XXX avoid mmap * if the nr_arg2 (memory block) matches. 
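Putting the pcap-like calls together, here is a minimal receive loop built on nm_open()/nm_dispatch()/nm_close(). This is an illustrative sketch, not part of this change: "netmap:em0" and the callback are placeholders, and suffixes such as "netmap:em0-0/R" (ring 0, rx only) follow the syntax listed earlier:

#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>
#include <poll.h>

static void
rx_handler(u_char *arg, const struct nm_pkthdr *h, const u_char *buf)
{
	(void)arg; (void)buf;
	D("got a %u byte packet", h->len);
}

int
main(void)
{
	struct nm_desc *d = nm_open("netmap:em0", NULL, 0, NULL);
	struct pollfd pfd;

	if (d == NULL)
		return 1;
	pfd.fd = NETMAP_FD(d);
	pfd.events = POLLIN;
	for (;;) {
		if (poll(&pfd, 1, 1000) > 0)
			nm_dispatch(d, 0, rx_handler, NULL);
	}
	nm_close(d);	/* not reached in this toy loop */
	return 0;
}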
- * Special case: if arg is NULL, skip the - * mmap entirely (maybe because you are going - * to do it by yourself, or you plan to call - * nm_mmap() only later) * NM_OPEN_ARG1 use req.nr_arg1 from arg * NM_OPEN_ARG2 use req.nr_arg2 from arg * NM_OPEN_RING_CFG user ring config from arg */ static struct nm_desc * nm_open(const char *ifname, const struct nmreq *req, uint64_t new_flags, const struct nm_desc *arg) { struct nm_desc *d = NULL; const struct nm_desc *parent = arg; char errmsg[MAXERRMSG] = ""; uint32_t nr_reg; - struct netmap_pools_info *pi = NULL; if (strncmp(ifname, "netmap:", 7) && strncmp(ifname, NM_BDG_NAME, strlen(NM_BDG_NAME))) { errno = 0; /* name not recognised, not an error */ return NULL; } d = (struct nm_desc *)calloc(1, sizeof(*d)); if (d == NULL) { snprintf(errmsg, MAXERRMSG, "nm_desc alloc failure"); errno = ENOMEM; return NULL; } d->self = d; /* set this early so nm_close() works */ d->fd = open(NETMAP_DEVICE_NAME, O_RDWR); if (d->fd < 0) { snprintf(errmsg, MAXERRMSG, "cannot open /dev/netmap: %s", strerror(errno)); goto fail; } - if (req) { + if (req) d->req = *req; - } else { - d->req.nr_arg1 = 4; - d->req.nr_arg2 = 0; - d->req.nr_arg3 = 0; - } if (!(new_flags & NM_OPEN_IFNAME)) { - char *err; - switch (nm_parse_one(ifname, &d->req, &err, 1)) { - case NM_PARSE_OK: - break; - case NM_PARSE_MEMID: - if ((new_flags & NM_OPEN_NO_MMAP) && - IS_NETMAP_DESC(parent)) { - /* ignore the memid setting, since we are - * going to use the parent's one - */ - break; - } - errno = nm_interp_memid(err, &d->req, &err); - if (!errno) - break; - /* fallthrough */ - default: - strncpy(errmsg, err, MAXERRMSG); - errmsg[MAXERRMSG-1] = '\0'; - free(err); + if (nm_parse(ifname, d, errmsg) < 0) goto fail; - } - d->self = d; } - /* compatibility checks for POOL_SCREATE and NM_OPEN flags - * the first check may be dropped once we have a larger nreq - */ - if (d->req.nr_cmd == NETMAP_POOLS_CREATE) { - if (IS_NETMAP_DESC(parent)) { - if (new_flags & (NM_OPEN_ARG1 | NM_OPEN_ARG2 | NM_OPEN_ARG3)) { - snprintf(errmsg, MAXERRMSG, - "POOLS_CREATE is incompatibile " - "with NM_OPEN_ARG? flags"); - errno = EINVAL; - goto fail; - } - if (new_flags & NM_OPEN_NO_MMAP) { - snprintf(errmsg, MAXERRMSG, - "POOLS_CREATE is incompatible " - "with NM_OPEN_NO_MMAP flag"); - errno = EINVAL; - goto fail; - } - } - } - d->req.nr_version = NETMAP_API; d->req.nr_ringid &= NETMAP_RING_MASK; /* optionally import info from parent */ if (IS_NETMAP_DESC(parent) && new_flags) { - if (new_flags & NM_OPEN_EXTMEM) { - if (parent->req.nr_cmd == NETMAP_POOLS_CREATE) { - d->req.nr_cmd = NETMAP_POOLS_CREATE; - nmreq_pointer_put(&d->req, nmreq_pointer_get(&parent->req)); - D("Warning: not overriding arg[1-3] since external memory is being used"); - new_flags &= ~(NM_OPEN_ARG1 | NM_OPEN_ARG2 | NM_OPEN_ARG3); - } - } - if (new_flags & NM_OPEN_ARG1) { + if (new_flags & NM_OPEN_ARG1) D("overriding ARG1 %d", parent->req.nr_arg1); - d->req.nr_arg1 = parent->req.nr_arg1; - } - if (new_flags & (NM_OPEN_ARG2 | NM_OPEN_NO_MMAP)) { + d->req.nr_arg1 = new_flags & NM_OPEN_ARG1 ? + parent->req.nr_arg1 : 4; + if (new_flags & NM_OPEN_ARG2) { D("overriding ARG2 %d", parent->req.nr_arg2); - d->req.nr_arg2 = parent->req.nr_arg2; + d->req.nr_arg2 = parent->req.nr_arg2; } - if (new_flags & NM_OPEN_ARG3) { + if (new_flags & NM_OPEN_ARG3) D("overriding ARG3 %d", parent->req.nr_arg3); - d->req.nr_arg3 = parent->req.nr_arg3; - } + d->req.nr_arg3 = new_flags & NM_OPEN_ARG3 ? 
+ parent->req.nr_arg3 : 0; if (new_flags & NM_OPEN_RING_CFG) { D("overriding RING_CFG"); d->req.nr_tx_slots = parent->req.nr_tx_slots; d->req.nr_rx_slots = parent->req.nr_rx_slots; d->req.nr_tx_rings = parent->req.nr_tx_rings; d->req.nr_rx_rings = parent->req.nr_rx_rings; } if (new_flags & NM_OPEN_IFNAME) { D("overriding ifname %s ringid 0x%x flags 0x%x", parent->req.nr_name, parent->req.nr_ringid, parent->req.nr_flags); memcpy(d->req.nr_name, parent->req.nr_name, sizeof(d->req.nr_name)); d->req.nr_ringid = parent->req.nr_ringid; d->req.nr_flags = parent->req.nr_flags; } } /* add the *XPOLL flags */ d->req.nr_ringid |= new_flags & (NETMAP_NO_TX_POLL | NETMAP_DO_RX_POLL); - if (d->req.nr_cmd == NETMAP_POOLS_CREATE) { - pi = nmreq_pointer_get(&d->req); - } - if (ioctl(d->fd, NIOCREGIF, &d->req)) { snprintf(errmsg, MAXERRMSG, "NIOCREGIF failed: %s", strerror(errno)); goto fail; } - if (pi != NULL) { - d->mem = pi; - d->memsize = pi->memsize; - nm_init_offsets(d); - } else if ((!(new_flags & NM_OPEN_NO_MMAP) || parent)) { - /* if parent is defined, do nm_mmap() even if NM_OPEN_NO_MMAP is set */ - errno = nm_mmap(d, parent); - if (errno) { - snprintf(errmsg, MAXERRMSG, "mmap failed: %s", strerror(errno)); - goto fail; - } - } - nr_reg = d->req.nr_flags & NR_REG_MASK; if (nr_reg == NR_REG_SW) { /* host stack */ d->first_tx_ring = d->last_tx_ring = d->req.nr_tx_rings; d->first_rx_ring = d->last_rx_ring = d->req.nr_rx_rings; } else if (nr_reg == NR_REG_ALL_NIC) { /* only nic */ d->first_tx_ring = 0; d->first_rx_ring = 0; d->last_tx_ring = d->req.nr_tx_rings - 1; d->last_rx_ring = d->req.nr_rx_rings - 1; } else if (nr_reg == NR_REG_NIC_SW) { d->first_tx_ring = 0; d->first_rx_ring = 0; d->last_tx_ring = d->req.nr_tx_rings; d->last_rx_ring = d->req.nr_rx_rings; } else if (nr_reg == NR_REG_ONE_NIC) { /* XXX check validity */ d->first_tx_ring = d->last_tx_ring = d->first_rx_ring = d->last_rx_ring = d->req.nr_ringid & NETMAP_RING_MASK; } else { /* pipes */ d->first_tx_ring = d->last_tx_ring = 0; d->first_rx_ring = d->last_rx_ring = 0; } + /* if parent is defined, do nm_mmap() even if NM_OPEN_NO_MMAP is set */ + if ((!(new_flags & NM_OPEN_NO_MMAP) || parent) && nm_mmap(d, parent)) { + snprintf(errmsg, MAXERRMSG, "mmap failed: %s", strerror(errno)); + goto fail; + } + + #ifdef DEBUG_NETMAP_USER { /* debugging code */ int i; D("%s tx %d .. %d %d rx %d .. 
%d %d", ifname, d->first_tx_ring, d->last_tx_ring, d->req.nr_tx_rings, d->first_rx_ring, d->last_rx_ring, d->req.nr_rx_rings); for (i = 0; i <= d->req.nr_tx_rings; i++) { struct netmap_ring *r = NETMAP_TXRING(d->nifp, i); D("TX%d %p h %d c %d t %d", i, r, r->head, r->cur, r->tail); } for (i = 0; i <= d->req.nr_rx_rings; i++) { struct netmap_ring *r = NETMAP_RXRING(d->nifp, i); D("RX%d %p h %d c %d t %d", i, r, r->head, r->cur, r->tail); } } #endif /* debugging */ d->cur_tx_ring = d->first_tx_ring; d->cur_rx_ring = d->first_rx_ring; return d; fail: nm_close(d); if (errmsg[0]) D("%s %s", errmsg, ifname); if (errno == 0) errno = EINVAL; return NULL; } static int nm_close(struct nm_desc *d) { /* * ugly trick to avoid unused warnings */ static void *__xxzt[] __attribute__ ((unused)) = { (void *)nm_open, (void *)nm_inject, - (void *)nm_dispatch, (void *)nm_nextpkt, - (void *)nm_parse } ; + (void *)nm_dispatch, (void *)nm_nextpkt } ; if (d == NULL || d->self != d) return EINVAL; if (d->done_mmap && d->mem) munmap(d->mem, d->memsize); if (d->fd != -1) { close(d->fd); } bzero(d, sizeof(*d)); free(d); return 0; } static int nm_mmap(struct nm_desc *d, const struct nm_desc *parent) { //XXX TODO: check if mmap is already done if (IS_NETMAP_DESC(parent) && parent->mem && parent->req.nr_arg2 == d->req.nr_arg2) { /* do not mmap, inherit from parent */ D("do not mmap, inherit from parent"); d->memsize = parent->memsize; d->mem = parent->mem; } else { /* XXX TODO: check if memsize is too large (or there is overflow) */ d->memsize = d->req.nr_memsize; d->mem = mmap(0, d->memsize, PROT_WRITE | PROT_READ, MAP_SHARED, d->fd, 0); if (d->mem == MAP_FAILED) { goto fail; } d->done_mmap = 1; } + { + struct netmap_if *nifp = NETMAP_IF(d->mem, d->req.nr_offset); + struct netmap_ring *r = NETMAP_RXRING(nifp, d->first_rx_ring); + if ((void *)r == (void *)nifp) { + /* the descriptor is open for TX only */ + r = NETMAP_TXRING(nifp, d->first_tx_ring); + } - nm_init_offsets(d); + *(struct netmap_if **)(uintptr_t)&(d->nifp) = nifp; + *(struct netmap_ring **)(uintptr_t)&d->some_ring = r; + *(void **)(uintptr_t)&d->buf_start = NETMAP_BUF(r, 0); + *(void **)(uintptr_t)&d->buf_end = + (char *)d->mem + d->memsize; + } + return 0; fail: return EINVAL; } /* * Same prototype as pcap_inject(), only need to cast. */ static int nm_inject(struct nm_desc *d, const void *buf, size_t size) { u_int c, n = d->last_tx_ring - d->first_tx_ring + 1, ri = d->cur_tx_ring; for (c = 0; c < n ; c++, ri++) { /* compute current ring to use */ struct netmap_ring *ring; uint32_t i, idx; if (ri > d->last_tx_ring) ri = d->first_tx_ring; ring = NETMAP_TXRING(d->nifp, ri); if (nm_ring_empty(ring)) { continue; } i = ring->cur; idx = ring->slot[i].buf_idx; ring->slot[i].len = size; nm_pkt_copy(buf, NETMAP_BUF(ring, idx), size); d->cur_tx_ring = ri; ring->head = ring->cur = nm_ring_next(ring, i); return size; } return 0; /* fail */ } /* * Same prototype as pcap_dispatch(), only need to cast. 
*/ static int nm_dispatch(struct nm_desc *d, int cnt, nm_cb_t cb, u_char *arg) { int n = d->last_rx_ring - d->first_rx_ring + 1; int c, got = 0, ri = d->cur_rx_ring; d->hdr.buf = NULL; d->hdr.flags = NM_MORE_PKTS; d->hdr.d = d; if (cnt == 0) cnt = -1; /* cnt == -1 means infinite, but rings have a finite amount * of buffers and the int is large enough that we never wrap, * so we can omit checking for -1 */ for (c=0; c < n && cnt != got; c++, ri++) { /* compute current ring to use */ struct netmap_ring *ring; if (ri > d->last_rx_ring) ri = d->first_rx_ring; ring = NETMAP_RXRING(d->nifp, ri); for ( ; !nm_ring_empty(ring) && cnt != got; got++) { u_int idx, i; if (d->hdr.buf) { /* from previous round */ cb(arg, &d->hdr, d->hdr.buf); } i = ring->cur; idx = ring->slot[i].buf_idx; /* d->cur_rx_ring doesn't change inside this loop, but * set it here, so it reflects d->hdr.buf's ring */ d->cur_rx_ring = ri; d->hdr.slot = &ring->slot[i]; d->hdr.buf = (u_char *)NETMAP_BUF(ring, idx); // __builtin_prefetch(buf); d->hdr.len = d->hdr.caplen = ring->slot[i].len; d->hdr.ts = ring->ts; ring->head = ring->cur = nm_ring_next(ring, i); } } if (d->hdr.buf) { /* from previous round */ d->hdr.flags = 0; cb(arg, &d->hdr, d->hdr.buf); } return got; } static u_char * nm_nextpkt(struct nm_desc *d, struct nm_pkthdr *hdr) { int ri = d->cur_rx_ring; do { /* compute current ring to use */ struct netmap_ring *ring = NETMAP_RXRING(d->nifp, ri); if (!nm_ring_empty(ring)) { u_int i = ring->cur; u_int idx = ring->slot[i].buf_idx; u_char *buf = (u_char *)NETMAP_BUF(ring, idx); // __builtin_prefetch(buf); hdr->ts = ring->ts; hdr->len = hdr->caplen = ring->slot[i].len; ring->cur = nm_ring_next(ring, i); /* we could postpone advancing head if we want * to hold the buffer. This can be supported in * the future. */ ring->head = ring->cur; d->cur_rx_ring = ri; return buf; } ri++; if (ri > d->last_rx_ring) ri = d->first_rx_ring; } while (ri != d->cur_rx_ring); return NULL; /* nothing found */ } #endif /* !HAVE_NETMAP_WITH_LIBS */ #endif /* NETMAP_WITH_LIBS */ #endif /* _NET_NETMAP_USER_H_ */ Index: head/sys/net/netmap_virt.h =================================================================== --- head/sys/net/netmap_virt.h (revision 332422) +++ head/sys/net/netmap_virt.h (revision 332423) @@ -1,310 +1,292 @@ /* * Copyright (C) 2013-2016 Luigi Rizzo * Copyright (C) 2013-2016 Giuseppe Lettieri * Copyright (C) 2013-2016 Vincenzo Maffione * Copyright (C) 2015 Stefano Garzarella * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef NETMAP_VIRT_H #define NETMAP_VIRT_H /* * ptnetmap_memdev: device used to expose memory into the guest VM * * These macros are used in the hypervisor frontend (QEMU, bhyve) and in the * guest device driver. */ /* PCI identifiers and PCI BARs for the ptnetmap memdev * and ptnetmap network interface. */ #define PTNETMAP_MEMDEV_NAME "ptnetmap-memdev" #define PTNETMAP_PCI_VENDOR_ID 0x1b36 /* QEMU virtual devices */ #define PTNETMAP_PCI_DEVICE_ID 0x000c /* memory device */ #define PTNETMAP_PCI_NETIF_ID 0x000d /* ptnet network interface */ #define PTNETMAP_IO_PCI_BAR 0 #define PTNETMAP_MEM_PCI_BAR 1 #define PTNETMAP_MSIX_PCI_BAR 2 /* Registers for the ptnetmap memdev */ #define PTNET_MDEV_IO_MEMSIZE_LO 0 /* netmap memory size (low) */ #define PTNET_MDEV_IO_MEMSIZE_HI 4 /* netmap_memory_size (high) */ #define PTNET_MDEV_IO_MEMID 8 /* memory allocator ID in the host */ #define PTNET_MDEV_IO_IF_POOL_OFS 64 #define PTNET_MDEV_IO_IF_POOL_OBJNUM 68 #define PTNET_MDEV_IO_IF_POOL_OBJSZ 72 #define PTNET_MDEV_IO_RING_POOL_OFS 76 #define PTNET_MDEV_IO_RING_POOL_OBJNUM 80 #define PTNET_MDEV_IO_RING_POOL_OBJSZ 84 #define PTNET_MDEV_IO_BUF_POOL_OFS 88 #define PTNET_MDEV_IO_BUF_POOL_OBJNUM 92 #define PTNET_MDEV_IO_BUF_POOL_OBJSZ 96 #define PTNET_MDEV_IO_END 100 /* * ptnetmap configuration * * The ptnet kthreads (running in host kernel-space) need to be configured * in order to know how to intercept guest kicks (I/O register writes) and * how to inject MSI-X interrupts to the guest. The configuration may vary * depending on the hypervisor. Currently, we support QEMU/KVM on Linux and * and bhyve on FreeBSD. * The configuration is passed by the hypervisor to the host netmap module * by means of an ioctl() with nr_cmd=NETMAP_PT_HOST_CREATE, and it is * specified by the ptnetmap_cfg struct. This struct contains an header * with general informations and an array of entries whose size depends * on the hypervisor. The NETMAP_PT_HOST_CREATE command is issued every * time the kthreads are started. */ struct ptnetmap_cfg { #define PTNETMAP_CFGTYPE_QEMU 0x1 #define PTNETMAP_CFGTYPE_BHYVE 0x2 uint16_t cfgtype; /* how to interpret the cfg entries */ uint16_t entry_size; /* size of a config entry */ uint32_t num_rings; /* number of config entries */ void *csb_gh; /* CSB for guest --> host communication */ void *csb_hg; /* CSB for host --> guest communication */ /* Configuration entries are allocated right after the struct. */ }; /* Configuration of a ptnetmap ring for QEMU. */ struct ptnetmap_cfgentry_qemu { uint32_t ioeventfd; /* to intercept guest register access */ uint32_t irqfd; /* to inject guest interrupts */ }; /* Configuration of a ptnetmap ring for bhyve. 
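Since the configuration entries are allocated right after struct ptnetmap_cfg, the hypervisor builds header and entries in one allocation. A hedged sketch, not part of this change, for the QEMU entry layout (num_rings is a placeholder variable):

struct ptnetmap_cfg *cfg;
size_t len = sizeof(*cfg) + num_rings * sizeof(struct ptnetmap_cfgentry_qemu);

cfg = calloc(1, len);
cfg->cfgtype = PTNETMAP_CFGTYPE_QEMU;
cfg->entry_size = sizeof(struct ptnetmap_cfgentry_qemu);
cfg->num_rings = num_rings;
/* set csb_gh/csb_hg, fill the trailing entries, then pass cfg to the
 * host netmap module with nr_cmd = NETMAP_PT_HOST_CREATE */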
*/ struct ptnetmap_cfgentry_bhyve { uint64_t wchan; /* tsleep() parameter, to wake up kthread */ uint32_t ioctl_fd; /* ioctl fd */ /* ioctl parameters to send irq */ uint32_t ioctl_cmd; /* vmm.ko MSIX parameters for IOCTL */ struct { uint64_t msg_data; uint64_t addr; } ioctl_data; }; /* - * Structure filled-in by the kernel when asked for allocator info - * through NETMAP_POOLS_INFO_GET. Used by hypervisors supporting - * ptnetmap. - */ -struct netmap_pools_info { - uint64_t memsize; /* same as nmr->nr_memsize */ - uint32_t memid; /* same as nmr->nr_arg2 */ - uint32_t if_pool_offset; - uint32_t if_pool_objtotal; - uint32_t if_pool_objsize; - uint32_t ring_pool_offset; - uint32_t ring_pool_objtotal; - uint32_t ring_pool_objsize; - uint32_t buf_pool_offset; - uint32_t buf_pool_objtotal; - uint32_t buf_pool_objsize; -}; - -/* * Pass a pointer to a userspace buffer to be passed to kernelspace for write - * or read. Used by NETMAP_PT_HOST_CREATE and NETMAP_POOLS_INFO_GET. + * or read. Used by NETMAP_PT_HOST_CREATE. + * XXX deprecated */ static inline void nmreq_pointer_put(struct nmreq *nmr, void *userptr) { uintptr_t *pp = (uintptr_t *)&nmr->nr_arg1; *pp = (uintptr_t)userptr; } static inline void * nmreq_pointer_get(const struct nmreq *nmr) { - const uintptr_t * pp = (const uintptr_t *)&nmr->nr_arg1; + const uintptr_t *pp = (const uintptr_t *)&nmr->nr_arg1; return (void *)*pp; } /* ptnetmap features */ #define PTNETMAP_F_VNET_HDR 1 /* I/O registers for the ptnet device. */ #define PTNET_IO_PTFEAT 0 #define PTNET_IO_PTCTL 4 #define PTNET_IO_MAC_LO 8 #define PTNET_IO_MAC_HI 12 #define PTNET_IO_CSBBAH 16 /* deprecated */ #define PTNET_IO_CSBBAL 20 /* deprecated */ #define PTNET_IO_NIFP_OFS 24 #define PTNET_IO_NUM_TX_RINGS 28 #define PTNET_IO_NUM_RX_RINGS 32 #define PTNET_IO_NUM_TX_SLOTS 36 #define PTNET_IO_NUM_RX_SLOTS 40 #define PTNET_IO_VNET_HDR_LEN 44 #define PTNET_IO_HOSTMEMID 48 #define PTNET_IO_CSB_GH_BAH 52 #define PTNET_IO_CSB_GH_BAL 56 #define PTNET_IO_CSB_HG_BAH 60 #define PTNET_IO_CSB_HG_BAL 64 #define PTNET_IO_END 68 #define PTNET_IO_KICK_BASE 128 #define PTNET_IO_MASK 0xff /* ptnetmap control commands (values for PTCTL register) */ #define PTNETMAP_PTCTL_CREATE 1 #define PTNETMAP_PTCTL_DELETE 2 /* ptnetmap synchronization variables shared between guest and host */ struct ptnet_csb_gh { uint32_t head; /* GW+ HR+ the head of the guest netmap_ring */ uint32_t cur; /* GW+ HR+ the cur of the guest netmap_ring */ uint32_t guest_need_kick; /* GW+ HR+ host-->guest notification enable */ uint32_t sync_flags; /* GW+ HR+ the flags of the guest [tx|rx]sync() */ char pad[48]; /* pad to a 64 bytes cacheline */ }; struct ptnet_csb_hg { uint32_t hwcur; /* GR+ HW+ the hwcur of the host netmap_kring */ uint32_t hwtail; /* GR+ HW+ the hwtail of the host netmap_kring */ uint32_t host_need_kick; /* GR+ HW+ guest-->host notification enable */ char pad[4+48]; }; #ifdef WITH_PTNETMAP_GUEST /* ptnetmap_memdev routines used to talk with ptnetmap_memdev device driver */ struct ptnetmap_memdev; int nm_os_pt_memdev_iomap(struct ptnetmap_memdev *, vm_paddr_t *, void **, uint64_t *); void nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *); uint32_t nm_os_pt_memdev_ioread(struct ptnetmap_memdev *, unsigned int); /* Guest driver: Write kring pointers (cur, head) to the CSB. * This routine is coupled with ptnetmap_host_read_kring_csb(). 
*/ static inline void ptnetmap_guest_write_kring_csb(struct ptnet_csb_gh *ptr, uint32_t cur, uint32_t head) { /* * We need to write cur and head to the CSB but we cannot do it atomically. * There is no way we can prevent the host from reading the updated value * of one of the two and the old value of the other. However, if we make * sure that the host never reads a value of head more recent than the * value of cur we are safe. We can allow the host to read a value of cur * more recent than the value of head, since in the netmap ring cur can be * ahead of head and cur cannot wrap around head because it must be behind * tail. Inverting the order of writes below could instead result into the * host to think head went ahead of cur, which would cause the sync * prologue to fail. * * The following memory barrier scheme is used to make this happen: * * Guest Host * * STORE(cur) LOAD(head) * mb() <-----------> mb() * STORE(head) LOAD(cur) */ ptr->cur = cur; mb(); ptr->head = head; } /* Guest driver: Read kring pointers (hwcur, hwtail) from the CSB. * This routine is coupled with ptnetmap_host_write_kring_csb(). */ static inline void ptnetmap_guest_read_kring_csb(struct ptnet_csb_hg *pthg, struct netmap_kring *kring) { /* * We place a memory barrier to make sure that the update of hwtail never * overtakes the update of hwcur. * (see explanation in ptnetmap_host_write_kring_csb). */ kring->nr_hwtail = pthg->hwtail; mb(); kring->nr_hwcur = pthg->hwcur; } #endif /* WITH_PTNETMAP_GUEST */ #ifdef WITH_PTNETMAP_HOST /* * ptnetmap kernel thread routines * */ /* Functions to read and write CSB fields in the host */ #if defined (linux) #define CSB_READ(csb, field, r) (get_user(r, &csb->field)) #define CSB_WRITE(csb, field, v) (put_user(v, &csb->field)) #else /* ! linux */ #define CSB_READ(csb, field, r) (r = fuword32(&csb->field)) #define CSB_WRITE(csb, field, v) (suword32(&csb->field, v)) #endif /* ! linux */ /* Host netmap: Write kring pointers (hwcur, hwtail) to the CSB. * This routine is coupled with ptnetmap_guest_read_kring_csb(). */ static inline void ptnetmap_host_write_kring_csb(struct ptnet_csb_hg __user *ptr, uint32_t hwcur, uint32_t hwtail) { /* * The same scheme used in ptnetmap_guest_write_kring_csb() applies here. * We allow the guest to read a value of hwcur more recent than the value * of hwtail, since this would anyway result in a consistent view of the * ring state (and hwcur can never wraparound hwtail, since hwcur must be * behind head). * * The following memory barrier scheme is used to make this happen: * * Guest Host * * STORE(hwcur) LOAD(hwtail) * mb() <-------------> mb() * STORE(hwtail) LOAD(hwcur) */ CSB_WRITE(ptr, hwcur, hwcur); mb(); CSB_WRITE(ptr, hwtail, hwtail); } /* Host netmap: Read kring pointers (head, cur, sync_flags) from the CSB. * This routine is coupled with ptnetmap_guest_write_kring_csb(). */ static inline void ptnetmap_host_read_kring_csb(struct ptnet_csb_gh __user *ptr, struct netmap_ring *shadow_ring, uint32_t num_slots) { /* * We place a memory barrier to make sure that the update of head never * overtakes the update of cur. * (see explanation in ptnetmap_guest_write_kring_csb). 
*/ CSB_READ(ptr, head, shadow_ring->head); mb(); CSB_READ(ptr, cur, shadow_ring->cur); CSB_READ(ptr, sync_flags, shadow_ring->flags); } #endif /* WITH_PTNETMAP_HOST */ #endif /* NETMAP_VIRT_H */ Index: head/sys/sys/param.h =================================================================== --- head/sys/sys/param.h (revision 332422) +++ head/sys/sys/param.h (revision 332423) @@ -1,365 +1,365 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)param.h 8.3 (Berkeley) 4/4/95 * $FreeBSD$ */ #ifndef _SYS_PARAM_H_ #define _SYS_PARAM_H_ #include #define BSD 199506 /* System version (year & month). */ #define BSD4_3 1 #define BSD4_4 1 /* * __FreeBSD_version numbers are documented in the Porter's Handbook. * If you bump the version for any reason, you should update the documentation * there. * Currently this lives here in the doc/ repository: * * head/en_US.ISO8859-1/books/porters-handbook/versions/chapter.xml * * scheme is: Rxx * 'R' is in the range 0 to 4 if this is a release branch or * X.0-CURRENT before releng/X.0 is created, otherwise 'R' is * in the range 5 to 9. */ #undef __FreeBSD_version -#define __FreeBSD_version 1200061 /* Master, propagated to newvers */ +#define __FreeBSD_version 1200062 /* Master, propagated to newvers */ /* * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD, * which by definition is always true on FreeBSD. This macro is also defined * on other systems that use the kernel of FreeBSD, such as GNU/kFreeBSD. * * It is tempting to use this macro in userland code when we want to enable * kernel-specific routines, and in fact it's fine to do this in code that * is part of FreeBSD itself. 
 * However, be aware that as the presence of this macro is still not
 * widespread (e.g. older FreeBSD versions, 3rd party compilers, etc), it
 * is STRONGLY DISCOURAGED to check for this macro in external applications
 * without also checking for __FreeBSD__ as an alternative.
 */
#undef __FreeBSD_kernel__
#define __FreeBSD_kernel__

#if defined(_KERNEL) || defined(IN_RTLD)
#define	P_OSREL_SIGWAIT			700000
#define	P_OSREL_SIGSEGV			700004
#define	P_OSREL_MAP_ANON		800104
#define	P_OSREL_MAP_FSTRICT		1100036
#define	P_OSREL_SHUTDOWN_ENOTCONN	1100077
#define	P_OSREL_MAP_GUARD		1200035
#define	P_OSREL_WRFSBASE		1200041
#define	P_OSREL_CK_CYLGRP		1200046
#define	P_OSREL_VMTOTAL64		1200054

#define	P_OSREL_MAJOR(x)		((x) / 100000)
#endif

#ifndef LOCORE
#include
#endif

/*
 * Machine-independent constants (some used in following include files).
 * Redefined constants are from POSIX 1003.1 limits file.
 *
 * MAXCOMLEN should be >= sizeof(ac_comm) (see )
 */
#include
#define	MAXCOMLEN	19		/* max command name remembered */
#define	MAXINTERP	PATH_MAX	/* max interpreter file name length */
#define	MAXLOGNAME	33		/* max login name length (incl. NUL) */
#define	MAXUPRC		CHILD_MAX	/* max simultaneous processes */
#define	NCARGS		ARG_MAX		/* max bytes for an exec function */
#define	NGROUPS		(NGROUPS_MAX+1)	/* max number groups */
#define	NOFILE		OPEN_MAX	/* max open files per process */
#define	NOGROUP		65535		/* marker for empty group set member */
#define	MAXHOSTNAMELEN	256		/* max hostname size */
#define	SPECNAMELEN	63		/* max length of devicename */

/* More types and definitions used throughout the kernel. */
#ifdef _KERNEL
#include
#include
#ifndef LOCORE
#include
#include
#endif

#ifndef FALSE
#define	FALSE	0
#endif
#ifndef TRUE
#define	TRUE	1
#endif
#endif

#ifndef _KERNEL
/* Signals. */
#include
#endif

/* Machine type dependent parameters. */
#include
#ifndef _KERNEL
#include
#endif

#ifndef DEV_BSHIFT
#define	DEV_BSHIFT	9		/* log2(DEV_BSIZE) */
#endif
#define	DEV_BSIZE	(1<>PAGE_SHIFT)
#endif

/*
 * btodb() is messy and perhaps slow because `bytes' may be an off_t.  We
 * want to shift an unsigned type to avoid sign extension and we don't
 * want to widen `bytes' unnecessarily.  Assume that the result fits in
 * a daddr_t.
 */
#ifndef btodb
#define	btodb(bytes)			/* calculates (bytes / DEV_BSIZE) */ \
	(sizeof (bytes) > sizeof(long) \
	 ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \
	 : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT))
#endif

#ifndef dbtob
#define	dbtob(db)			/* calculates (db * DEV_BSIZE) */ \
	((off_t)(db) << DEV_BSHIFT)
#endif

#define	PRIMASK	0x0ff
#define	PCATCH	0x100		/* OR'd with pri for tsleep to check signals */
#define	PDROP	0x200	/* OR'd with pri to stop re-entry of interlock mutex */

#define	NZERO	0		/* default "nice" */

#define	NBBY	8		/* number of bits in a byte */
#define	NBPW	sizeof(int)	/* number of bytes per word (integer) */

#define	CMASK	022		/* default file mask: S_IWGRP|S_IWOTH */

#define	NODEV	(dev_t)(-1)	/* non-existent device */

/*
 * File system parameters and macros.
 *
 * MAXBSIZE -	Filesystems are made out of blocks of at most MAXBSIZE bytes
 *		per block.  MAXBSIZE may be made larger without affecting
 *		any existing filesystems as long as it does not exceed MAXPHYS,
 *		and may be made smaller at the risk of not being able to use
 *		filesystems which require a block size exceeding MAXBSIZE.
 *
 * MAXBCACHEBUF - Maximum size of a buffer in the buffer cache.  This must
 *		be >= MAXBSIZE and can be set differently for different
 *		architectures by defining it in .
 *		Making this larger allows NFS to do larger reads/writes.
 *
 * BKVASIZE -	Nominal buffer space per buffer, in bytes.  BKVASIZE is the
 *		minimum KVM memory reservation the kernel is willing to make.
 *		Filesystems can of course request smaller chunks.  Actual
 *		backing memory uses a chunk size of a page (PAGE_SIZE).
 *		The default value here can be overridden on a per-architecture
 *		basis by defining it in .
 *
 *		If you make BKVASIZE too small you risk seriously fragmenting
 *		the buffer KVM map, which may slow things down a bit.  If you
 *		make it too big the kernel will not be able to optimally use
 *		the KVM memory reserved for the buffer cache and will wind
 *		up with too few buffers.
 *
 *		The default is 16384, roughly 2x the block size used by a
 *		normal UFS filesystem.
 */
#define	MAXBSIZE	65536		/* must be power of 2 */
#ifndef	MAXBCACHEBUF
#define	MAXBCACHEBUF	MAXBSIZE	/* must be a power of 2 >= MAXBSIZE */
#endif
#ifndef	BKVASIZE
#define	BKVASIZE	16384		/* must be power of 2 */
#endif
#define	BKVAMASK	(BKVASIZE-1)

/*
 * MAXPATHLEN defines the longest permissible path length after expanding
 * symbolic links.  It is used to allocate a temporary buffer from the buffer
 * pool in which to do the name expansion, hence should be a power of two,
 * and must be less than or equal to MAXBSIZE.  MAXSYMLINKS defines the
 * maximum number of symbolic links that may be expanded in a path name.
 * It should be set high enough to allow all legitimate uses, but halt
 * infinite loops reasonably quickly.
 */
#define	MAXPATHLEN	PATH_MAX
#define	MAXSYMLINKS	32

/* Bit map related macros. */
#define	setbit(a,i)	(((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY))
#define	clrbit(a,i)	(((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY)))
#define	isset(a,i)							\
	(((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY)))
#define	isclr(a,i)							\
	((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0)

/* Macros for counting and rounding. */
#ifndef howmany
#define	howmany(x, y)	(((x)+((y)-1))/(y))
#endif
#define	nitems(x)	(sizeof((x)) / sizeof((x)[0]))
#define	rounddown(x, y)	(((x)/(y))*(y))
#define	rounddown2(x, y) ((x)&(~((y)-1)))	   /* if y is power of two */
#define	roundup(x, y)	((((x)+((y)-1))/(y))*(y))  /* to any y */
#define	roundup2(x, y)	(((x)+((y)-1))&(~((y)-1))) /* if y is power of two */
#define	powerof2(x)	((((x)-1)&(x))==0)

/* Macros for min/max. */
#define	MIN(a,b)	(((a)<(b))?(a):(b))
#define	MAX(a,b)	(((a)>(b))?(a):(b))

#ifdef _KERNEL
/*
 * Basic byte order function prototypes for non-inline functions.
 */
#ifndef LOCORE
#ifndef _BYTEORDER_PROTOTYPED
#define	_BYTEORDER_PROTOTYPED
__BEGIN_DECLS
__uint32_t	htonl(__uint32_t);
__uint16_t	htons(__uint16_t);
__uint32_t	ntohl(__uint32_t);
__uint16_t	ntohs(__uint16_t);
__END_DECLS
#endif
#endif

#ifndef	_BYTEORDER_FUNC_DEFINED
#define	_BYTEORDER_FUNC_DEFINED
#define	htonl(x)	__htonl(x)
#define	htons(x)	__htons(x)
#define	ntohl(x)	__ntohl(x)
#define	ntohs(x)	__ntohs(x)
#endif /* !_BYTEORDER_FUNC_DEFINED */
#endif /* _KERNEL */

/*
 * Scale factor for scaled integers used to count %cpu time and load avgs.
 *
 * The number of CPU `tick's that map to a unique `%age' can be expressed
 * by the formula (1 / (2 ^ (FSHIFT - 11))).  The maximum load average that
 * can be calculated (assuming 32 bits) can be closely approximated using
 * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15).
 *
 * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age',
 * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024.
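 *
 * For example, with the default FSHIFT of 11 the approximation above gives
 * a maximum load average of about 2 ^ (2 * (16 - 11)) = 2 ^ 10 = 1024.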
 */
#define	FSHIFT	11		/* bits to right of fixed binary point */
#define	FSCALE	(1<> (PAGE_SHIFT - DEV_BSHIFT))
#define	ctodb(db)			/* calculates pages to devblks */ \
	((db) << (PAGE_SHIFT - DEV_BSHIFT))

/*
 * Old spelling of __containerof().
 */
#define	member2struct(s, m, x)						\
	((struct s *)(void *)((char *)(x) - offsetof(struct s, m)))

/*
 * Access a variable length array that has been declared as a fixed
 * length array.
 */
#define	__PAST_END(array, offset) (((__typeof__(*(array)) *)(array))[offset])

#endif /* _SYS_PARAM_H_ */
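For reference, a minimal userland sketch (not part of the change above, values
are illustrative only) showing how the counting and rounding macros defined in
this header behave: howmany() divides rounding up, roundup2()/rounddown2()
assume a power-of-two alignment, and nitems() counts array elements.

/* Hypothetical standalone example; compile with a normal FreeBSD userland. */
#include <sys/param.h>
#include <assert.h>

int
main(void)
{
	int pages[4];

	assert(howmany(10, 4) == 3);	/* 10 objects, 4 per unit -> 3 units */
	assert(roundup2(10, 8) == 16);	/* next multiple of 8 (power of two) */
	assert(rounddown2(10, 8) == 8);	/* previous multiple of 8 */
	assert(powerof2(64) && !powerof2(48));
	assert(nitems(pages) == 4);	/* element count of a fixed array */
	return (0);
}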