Index: head/share/man/man9/Makefile
===================================================================
--- head/share/man/man9/Makefile	(revision 360135)
+++ head/share/man/man9/Makefile	(revision 360136)
@@ -1,2352 +1,2353 @@
# $FreeBSD$
.include <src.opts.mk>
MAN=	accept_filter.9 \
	accf_data.9 \
	accf_dns.9 \
	accf_http.9 \
	acl.9 \
	alq.9 \
	altq.9 \
	atomic.9 \
	bhnd.9 \
	bhnd_erom.9 \
	bios.9 \
	bitset.9 \
	boot.9 \
	bpf.9 \
	buf.9 \
	buf_ring.9 \
	BUF_ISLOCKED.9 \
	BUF_LOCK.9 \
	BUF_LOCKFREE.9 \
	BUF_LOCKINIT.9 \
	BUF_RECURSED.9 \
	BUF_TIMELOCK.9 \
	BUF_UNLOCK.9 \
	bus_activate_resource.9 \
	BUS_ADD_CHILD.9 \
	bus_adjust_resource.9 \
	bus_alloc_resource.9 \
	BUS_BIND_INTR.9 \
	bus_child_present.9 \
	BUS_CHILD_DELETED.9 \
	BUS_CHILD_DETACHED.9 \
	BUS_CONFIG_INTR.9 \
	bus_delayed_attach_children.9 \
	BUS_DESCRIBE_INTR.9 \
	bus_dma.9 \
	bus_generic_attach.9 \
	bus_generic_detach.9 \
	bus_generic_new_pass.9 \
	bus_generic_print_child.9 \
	bus_generic_read_ivar.9 \
	bus_generic_shutdown.9 \
	BUS_GET_CPUS.9 \
	bus_get_resource.9 \
	bus_map_resource.9 \
	BUS_NEW_PASS.9 \
	BUS_PRINT_CHILD.9 \
	BUS_READ_IVAR.9 \
	BUS_RESCAN.9 \
	bus_release_resource.9 \
	bus_set_pass.9 \
	bus_set_resource.9 \
	BUS_SETUP_INTR.9 \
	bus_space.9 \
	byteorder.9 \
	callout.9 \
	casuword.9 \
	cd.9 \
	cnv.9 \
	condvar.9 \
	config_intrhook.9 \
	contigmalloc.9 \
	copy.9 \
	counter.9 \
	cpuset.9 \
	cr_cansee.9 \
	critical_enter.9 \
	cr_seeothergids.9 \
	cr_seeotheruids.9 \
	crypto.9 \
	crypto_asym.9 \
	crypto_driver.9 \
	crypto_request.9 \
	crypto_session.9 \
	CTASSERT.9 \
	DB_COMMAND.9 \
	DECLARE_GEOM_CLASS.9 \
	DECLARE_MODULE.9 \
	DEFINE_IFUNC.9 \
	DELAY.9 \
	devclass.9 \
	devclass_find.9 \
	devclass_get_device.9 \
	devclass_get_devices.9 \
	devclass_get_drivers.9 \
	devclass_get_maxunit.9 \
	devclass_get_name.9 \
	devclass_get_softc.9 \
	dev_clone.9 \
	devfs_set_cdevpriv.9 \
	device.9 \
	device_add_child.9 \
	DEVICE_ATTACH.9 \
	device_delete_child.9 \
	device_delete_children.9 \
	DEVICE_DETACH.9 \
	device_enable.9 \
	device_find_child.9 \
	device_get_children.9 \
	device_get_devclass.9 \
	device_get_driver.9 \
	device_get_ivars.9 \
	device_get_name.9 \
	device_get_parent.9 \
	device_get_softc.9 \
	device_get_state.9 \
	device_get_sysctl.9 \
	device_get_unit.9 \
	DEVICE_IDENTIFY.9 \
	device_printf.9 \
	DEVICE_PROBE.9 \
	device_probe_and_attach.9 \
	device_quiet.9 \
	device_set_desc.9 \
	device_set_driver.9 \
	device_set_flags.9 \
	DEVICE_SHUTDOWN.9 \
	DEV_MODULE.9 \
	dev_refthread.9 \
	devstat.9 \
	devtoname.9 \
	disk.9 \
	dnv.9 \
	domain.9 \
	domainset.9 \
	dpcpu.9 \
	drbr.9 \
	driver.9 \
	DRIVER_MODULE.9 \
	efirt.9 \
	epoch.9 \
	EVENTHANDLER.9 \
	eventtimers.9 \
	extattr.9 \
	fail.9 \
	fdt_pinctrl.9 \
	fetch.9 \
	firmware.9 \
	fpu_kern.9 \
	g_access.9 \
	g_attach.9 \
	g_bio.9 \
	g_consumer.9 \
	g_data.9 \
	get_cyclecount.9 \
	getenv.9 \
	getnewvnode.9 \
	g_event.9 \
	g_geom.9 \
	g_provider.9 \
	g_provider_by_name.9 \
	groupmember.9 \
	g_wither_geom.9 \
	hash.9 \
	hashinit.9 \
	hexdump.9 \
	hhook.9 \
	ieee80211.9 \
	ieee80211_amrr.9 \
	ieee80211_beacon.9 \
	ieee80211_bmiss.9 \
	ieee80211_crypto.9 \
	ieee80211_ddb.9 \
	ieee80211_input.9 \
	ieee80211_node.9 \
	ieee80211_output.9 \
	ieee80211_proto.9 \
	ieee80211_radiotap.9 \
	ieee80211_regdomain.9 \
	ieee80211_scan.9 \
	ieee80211_vap.9 \
	iflib.9 \
	iflibdd.9 \
	iflibdi.9 \
	iflibtxrx.9 \
	ifnet.9 \
	inittodr.9 \
	insmntque.9 \
	intro.9 \
	ithread.9 \
	KASSERT.9 \
	kern_testfrwk.9 \
	kernacc.9 \
	kernel_mount.9 \
	khelp.9 \
	kobj.9 \
	kproc.9 \
	kqueue.9 \
	kthread.9 \
	ktr.9 \
	lock.9 \
	locking.9 \
	LOCK_PROFILING.9 \
	mac.9 \
	make_dev.9 \
	malloc.9 \
	mbchain.9 \
	mbuf.9 \
	mbuf_tags.9 \
	MD5.9 \
	mdchain.9 \
	memcchr.9 \
	memguard.9 \
	microseq.9 \
	microtime.9 \
microuptime.9 \ mi_switch.9 \ mod_cc.9 \ module.9 \ MODULE_DEPEND.9 \ MODULE_PNP_INFO.9 \ MODULE_VERSION.9 \ mtx_pool.9 \ mutex.9 \ namei.9 \ netisr.9 \ nv.9 \ OF_child.9 \ OF_device_from_xref.9 \ OF_finddevice.9 \ OF_getprop.9 \ OF_node_from_xref.9 \ OF_package_to_path.9 \ ofw_bus_is_compatible.9 \ ofw_bus_status_okay.9 \ osd.9 \ owll.9 \ own.9 \ panic.9 \ PCBGROUP.9 \ p_candebug.9 \ p_cansee.9 \ pci.9 \ PCI_IOV_ADD_VF.9 \ PCI_IOV_INIT.9 \ pci_iov_schema.9 \ PCI_IOV_UNINIT.9 \ pfil.9 \ pfind.9 \ pget.9 \ pgfind.9 \ PHOLD.9 \ physio.9 \ pmap.9 \ pmap_activate.9 \ pmap_clear_modify.9 \ pmap_copy.9 \ pmap_enter.9 \ pmap_extract.9 \ pmap_growkernel.9 \ pmap_init.9 \ pmap_is_modified.9 \ pmap_is_prefaultable.9 \ pmap_map.9 \ pmap_mincore.9 \ pmap_object_init_pt.9 \ pmap_page_exists_quick.9 \ pmap_page_init.9 \ pmap_pinit.9 \ pmap_protect.9 \ pmap_qenter.9 \ pmap_quick_enter_page.9 \ pmap_release.9 \ pmap_remove.9 \ pmap_resident_count.9 \ pmap_unwire.9 \ pmap_zero_page.9 \ printf.9 \ prison_check.9 \ priv.9 \ proc_rwmem.9 \ pseudofs.9 \ psignal.9 \ pwmbus.9 \ random.9 \ random_harvest.9 \ ratecheck.9 \ redzone.9 \ refcount.9 \ resettodr.9 \ resource_int_value.9 \ rijndael.9 \ rman.9 \ rmlock.9 \ rtalloc.9 \ rtentry.9 \ runqueue.9 \ rwlock.9 \ sbuf.9 \ scheduler.9 \ SDT.9 \ securelevel_gt.9 \ selrecord.9 \ sema.9 \ seqc.9 \ sf_buf.9 \ sglist.9 \ shm_map.9 \ signal.9 \ sleep.9 \ sleepqueue.9 \ socket.9 \ stack.9 \ store.9 \ style.9 \ style.lua.9 \ ${_superio.9} \ swi.9 \ sx.9 \ syscall_helper_register.9 \ SYSCALL_MODULE.9 \ sysctl.9 \ sysctl_add_oid.9 \ sysctl_ctx_init.9 \ SYSINIT.9 \ taskqueue.9 \ tcp_functions.9 \ thread_exit.9 \ time.9 \ tvtohz.9 \ ucred.9 \ uidinfo.9 \ uio.9 \ unr.9 \ vaccess.9 \ vaccess_acl_nfs4.9 \ vaccess_acl_posix1e.9 \ vcount.9 \ vflush.9 \ VFS.9 \ vfs_busy.9 \ VFS_CHECKEXP.9 \ vfsconf.9 \ VFS_FHTOVP.9 \ vfs_getnewfsid.9 \ vfs_getopt.9 \ vfs_getvfs.9 \ VFS_MOUNT.9 \ vfs_mountedfrom.9 \ VFS_QUOTACTL.9 \ VFS_ROOT.9 \ vfs_rootmountalloc.9 \ VFS_SET.9 \ VFS_STATFS.9 \ vfs_suser.9 \ VFS_SYNC.9 \ vfs_timestamp.9 \ vfs_unbusy.9 \ VFS_UNMOUNT.9 \ vfs_unmountall.9 \ VFS_VGET.9 \ vget.9 \ vgone.9 \ vhold.9 \ vinvalbuf.9 \ vm_fault_prefault.9 \ vm_map.9 \ vm_map_check_protection.9 \ vm_map_create.9 \ vm_map_delete.9 \ vm_map_entry_resize_free.9 \ vm_map_find.9 \ vm_map_findspace.9 \ vm_map_inherit.9 \ vm_map_init.9 \ vm_map_insert.9 \ vm_map_lock.9 \ vm_map_lookup.9 \ vm_map_madvise.9 \ vm_map_max.9 \ vm_map_protect.9 \ vm_map_remove.9 \ vm_map_simplify_entry.9 \ vm_map_stack.9 \ vm_map_submap.9 \ vm_map_sync.9 \ vm_map_wire.9 \ vm_page_alloc.9 \ vm_page_bits.9 \ vm_page_busy.9 \ vm_page_deactivate.9 \ vm_page_dontneed.9 \ vm_page_aflag.9 \ vm_page_free.9 \ vm_page_grab.9 \ vm_page_insert.9 \ vm_page_lookup.9 \ vm_page_rename.9 \ vm_page_wire.9 \ vm_set_page_size.9 \ vmem.9 \ vn_fullpath.9 \ vn_isdisk.9 \ vnet.9 \ vnode.9 \ VOP_ACCESS.9 \ VOP_ACLCHECK.9 \ VOP_ADVISE.9 \ VOP_ADVLOCK.9 \ VOP_ALLOCATE.9 \ VOP_ATTRIB.9 \ VOP_BMAP.9 \ VOP_BWRITE.9 \ VOP_COPY_FILE_RANGE.9 \ VOP_CREATE.9 \ VOP_FSYNC.9 \ VOP_GETACL.9 \ VOP_GETEXTATTR.9 \ VOP_GETPAGES.9 \ VOP_INACTIVE.9 \ VOP_IOCTL.9 \ VOP_LINK.9 \ VOP_LISTEXTATTR.9 \ VOP_LOCK.9 \ VOP_LOOKUP.9 \ VOP_OPENCLOSE.9 \ VOP_PATHCONF.9 \ VOP_PRINT.9 \ VOP_RDWR.9 \ VOP_READDIR.9 \ VOP_READLINK.9 \ VOP_REALLOCBLKS.9 \ VOP_REMOVE.9 \ VOP_RENAME.9 \ VOP_REVOKE.9 \ VOP_SETACL.9 \ VOP_SETEXTATTR.9 \ VOP_STRATEGY.9 \ VOP_VPTOCNP.9 \ VOP_VPTOFH.9 \ vref.9 \ vrefcnt.9 \ vrele.9 \ vslock.9 \ watchdog.9 \ zone.9 MLINKS= unr.9 alloc_unr.9 \ unr.9 alloc_unrl.9 
\ unr.9 alloc_unr_specific.9 \ unr.9 clear_unrhdr.9 \ unr.9 delete_unrhdr.9 \ unr.9 free_unr.9 \ unr.9 new_unrhdr.9 MLINKS+=accept_filter.9 accept_filt_add.9 \ accept_filter.9 accept_filt_del.9 \ accept_filter.9 accept_filt_generic_mod_event.9 \ accept_filter.9 accept_filt_get.9 MLINKS+=alq.9 ALQ.9 \ alq.9 alq_close.9 \ alq.9 alq_flush.9 \ alq.9 alq_get.9 \ alq.9 alq_getn.9 \ alq.9 alq_open.9 \ alq.9 alq_open_flags.9 \ alq.9 alq_post.9 \ alq.9 alq_post_flags.9 \ alq.9 alq_write.9 \ alq.9 alq_writen.9 MLINKS+=altq.9 ALTQ.9 MLINKS+=atomic.9 atomic_add.9 \ atomic.9 atomic_clear.9 \ atomic.9 atomic_cmpset.9 \ atomic.9 atomic_fcmpset.9 \ atomic.9 atomic_fetchadd.9 \ atomic.9 atomic_load.9 \ atomic.9 atomic_readandclear.9 \ atomic.9 atomic_set.9 \ atomic.9 atomic_store.9 \ atomic.9 atomic_subtract.9 \ atomic.9 atomic_swap.9 \ atomic.9 atomic_testandclear.9 \ atomic.9 atomic_testandset.9 \ atomic.9 atomic_thread_fence.9 MLINKS+=bhnd.9 BHND_MATCH_BOARD_TYPE.9 \ bhnd.9 BHND_MATCH_BOARD_VENDOR.9 \ bhnd.9 BHND_MATCH_CHIP_ID.9 \ bhnd.9 BHND_MATCH_CHIP_PKG.9 \ bhnd.9 BHND_MATCH_CHIP_REV.9 \ bhnd.9 BHND_MATCH_CORE_ID.9 \ bhnd.9 BHND_MATCH_CORE_VENDOR.9 \ bhnd.9 bhnd_activate_resource.9 \ bhnd.9 bhnd_alloc_pmu.9 \ bhnd.9 bhnd_alloc_resource.9 \ bhnd.9 bhnd_alloc_resource_any.9 \ bhnd.9 bhnd_alloc_resources.9 \ bhnd.9 bhnd_board_matches.9 \ bhnd.9 bhnd_bus_match_child.9 \ bhnd.9 bhnd_bus_read_1.9 \ bhnd.9 bhnd_bus_read_2.9 \ bhnd.9 bhnd_bus_read_4.9 \ bhnd.9 bhnd_bus_read_stream_1.9 \ bhnd.9 bhnd_bus_read_stream_2.9 \ bhnd.9 bhnd_bus_read_stream_4.9 \ bhnd.9 bhnd_bus_write_1.9 \ bhnd.9 bhnd_bus_write_2.9 \ bhnd.9 bhnd_bus_write_4.9 \ bhnd.9 bhnd_bus_write_stream_1.9 \ bhnd.9 bhnd_bus_write_stream_2.9 \ bhnd.9 bhnd_bus_write_stream_4.9 \ bhnd.9 bhnd_chip_matches.9 \ bhnd.9 bhnd_core_class.9 \ bhnd.9 bhnd_core_get_match_desc.9 \ bhnd.9 bhnd_core_matches.9 \ bhnd.9 bhnd_core_name.9 \ bhnd.9 bhnd_cores_equal.9 \ bhnd.9 bhnd_deactivate_resource.9 \ bhnd.9 bhnd_decode_port_rid.9 \ bhnd.9 bhnd_deregister_provider.9 \ bhnd.9 bhnd_device_lookup.9 \ bhnd.9 bhnd_device_matches.9 \ bhnd.9 bhnd_device_quirks.9 \ bhnd.9 bhnd_driver_get_erom_class.9 \ bhnd.9 bhnd_enable_clocks.9 \ bhnd.9 bhnd_find_core_class.9 \ bhnd.9 bhnd_find_core_name.9 \ bhnd.9 bhnd_format_chip_id.9 \ bhnd.9 bhnd_get_attach_type.9 \ bhnd.9 bhnd_get_chipid.9 \ bhnd.9 bhnd_get_class.9 \ bhnd.9 bhnd_get_clock_freq.9 \ bhnd.9 bhnd_get_clock_latency.9 \ bhnd.9 bhnd_get_core_index.9 \ bhnd.9 bhnd_get_core_info.9 \ bhnd.9 bhnd_get_core_unit.9 \ bhnd.9 bhnd_get_device.9 \ bhnd.9 bhnd_get_device_name.9 \ bhnd.9 bhnd_get_dma_translation.9 \ bhnd.9 bhnd_get_hwrev.9 \ bhnd.9 bhnd_get_intr_count.9 \ bhnd.9 bhnd_get_intr_ivec.9 \ bhnd.9 bhnd_get_port_count.9 \ bhnd.9 bhnd_get_port_rid.9 \ bhnd.9 bhnd_get_region_addr.9 \ bhnd.9 bhnd_get_region_count.9 \ bhnd.9 bhnd_get_vendor.9 \ bhnd.9 bhnd_get_vendor_name.9 \ bhnd.9 bhnd_hwrev_matches.9 \ bhnd.9 bhnd_is_hw_suspended.9 \ bhnd.9 bhnd_is_region_valid.9 \ bhnd.9 bhnd_map_intr.9 \ bhnd.9 bhnd_match_core.9 \ bhnd.9 bhnd_nvram_getvar.9 \ bhnd.9 bhnd_nvram_getvar_array.9 \ bhnd.9 bhnd_nvram_getvar_int.9 \ bhnd.9 bhnd_nvram_getvar_int16.9 \ bhnd.9 bhnd_nvram_getvar_int32.9 \ bhnd.9 bhnd_nvram_getvar_int8.9 \ bhnd.9 bhnd_nvram_getvar_str.9 \ bhnd.9 bhnd_nvram_getvar_uint.9 \ bhnd.9 bhnd_nvram_getvar_uint16.9 \ bhnd.9 bhnd_nvram_getvar_uint32.9 \ bhnd.9 bhnd_nvram_getvar_uint8.9 \ bhnd.9 bhnd_nvram_string_array_next.9 \ bhnd.9 bhnd_read_board_info.9 \ bhnd.9 bhnd_read_config.9 \ bhnd.9 bhnd_read_ioctl.9 \ bhnd.9 
bhnd_read_iost.9 \ bhnd.9 bhnd_register_provider.9 \ bhnd.9 bhnd_release_ext_rsrc.9 \ bhnd.9 bhnd_release_pmu.9 \ bhnd.9 bhnd_release_provider.9 \ bhnd.9 bhnd_release_resource.9 \ bhnd.9 bhnd_release_resources.9 \ bhnd.9 bhnd_request_clock.9 \ bhnd.9 bhnd_request_ext_rsrc.9 \ bhnd.9 bhnd_reset_hw.9 \ bhnd.9 bhnd_retain_provider.9 \ bhnd.9 bhnd_set_custom_core_desc.9 \ bhnd.9 bhnd_set_default_core_desc.9 \ bhnd.9 bhnd_suspend_hw.9 \ bhnd.9 bhnd_unmap_intr.9 \ bhnd.9 bhnd_vendor_name.9 \ bhnd.9 bhnd_write_config.9 \ bhnd.9 bhnd_write_ioctl.9 MLINKS+=bhnd_erom.9 bhnd_erom_alloc.9 \ bhnd_erom.9 bhnd_erom_dump.9 \ bhnd_erom.9 bhnd_erom_fini_static.9 \ bhnd_erom.9 bhnd_erom_free.9 \ bhnd_erom.9 bhnd_erom_free_core_table.9 \ bhnd_erom.9 bhnd_erom_get_core_table.9 \ bhnd_erom.9 bhnd_erom_init_static.9 \ bhnd_erom.9 bhnd_erom_io.9 \ bhnd_erom.9 bhnd_erom_io_fini.9 \ bhnd_erom.9 bhnd_erom_io_map.9 \ bhnd_erom.9 bhnd_erom_io_read.9 \ bhnd_erom.9 bhnd_erom_iobus_init.9 \ bhnd_erom.9 bhnd_erom_iores_new.9 \ bhnd_erom.9 bhnd_erom_lookup_core.9 \ bhnd_erom.9 bhnd_erom_lookup_core_addr.9 \ bhnd_erom.9 bhnd_erom_probe.9 \ bhnd_erom.9 bhnd_erom_probe_driver_classes.9 MLINKS+=bitset.9 BITSET_DEFINE.9 \ bitset.9 BITSET_T_INITIALIZER.9 \ bitset.9 BITSET_FSET.9 \ bitset.9 BIT_CLR.9 \ bitset.9 BIT_COPY.9 \ bitset.9 BIT_ISSET.9 \ bitset.9 BIT_SET.9 \ bitset.9 BIT_ZERO.9 \ bitset.9 BIT_FILL.9 \ bitset.9 BIT_SETOF.9 \ bitset.9 BIT_EMPTY.9 \ bitset.9 BIT_ISFULLSET.9 \ bitset.9 BIT_FFS.9 \ bitset.9 BIT_COUNT.9 \ bitset.9 BIT_SUBSET.9 \ bitset.9 BIT_OVERLAP.9 \ bitset.9 BIT_CMP.9 \ bitset.9 BIT_OR.9 \ bitset.9 BIT_AND.9 \ bitset.9 BIT_ANDNOT.9 \ bitset.9 BIT_CLR_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC_ACQ.9 \ bitset.9 BIT_AND_ATOMIC.9 \ bitset.9 BIT_OR_ATOMIC.9 \ bitset.9 BIT_COPY_STORE_REL.9 MLINKS+=bpf.9 bpfattach.9 \ bpf.9 bpfattach2.9 \ bpf.9 bpfdetach.9 \ bpf.9 bpf_filter.9 \ bpf.9 bpf_mtap.9 \ bpf.9 bpf_mtap2.9 \ bpf.9 bpf_tap.9 \ bpf.9 bpf_validate.9 MLINKS+=buf.9 bp.9 MLINKS+=buf_ring.9 buf_ring_alloc.9 \ buf_ring.9 buf_ring_free.9 \ buf_ring.9 buf_ring_enqueue.9 \ buf_ring.9 buf_ring_enqueue_bytes.9 \ buf_ring.9 buf_ring_dequeue_mc.9 \ buf_ring.9 buf_ring_dequeue_sc.9 \ buf_ring.9 buf_ring_count.9 \ buf_ring.9 buf_ring_empty.9 \ buf_ring.9 buf_ring_full.9 \ buf_ring.9 buf_ring_peek.9 MLINKS+=bus_activate_resource.9 bus_deactivate_resource.9 MLINKS+=bus_alloc_resource.9 bus_alloc_resource_any.9 MLINKS+=BUS_BIND_INTR.9 bus_bind_intr.9 MLINKS+=BUS_DESCRIBE_INTR.9 bus_describe_intr.9 MLINKS+=bus_dma.9 busdma.9 \ bus_dma.9 bus_dmamap_create.9 \ bus_dma.9 bus_dmamap_destroy.9 \ bus_dma.9 bus_dmamap_load.9 \ bus_dma.9 bus_dmamap_load_bio.9 \ bus_dma.9 bus_dmamap_load_ccb.9 \ bus_dma.9 bus_dmamap_load_mbuf.9 \ bus_dma.9 bus_dmamap_load_mbuf_sg.9 \ bus_dma.9 bus_dmamap_load_uio.9 \ bus_dma.9 bus_dmamap_sync.9 \ bus_dma.9 bus_dmamap_unload.9 \ bus_dma.9 bus_dmamem_alloc.9 \ bus_dma.9 bus_dmamem_free.9 \ bus_dma.9 bus_dma_tag_create.9 \ bus_dma.9 bus_dma_tag_destroy.9 MLINKS+=bus_generic_read_ivar.9 bus_generic_write_ivar.9 MLINKS+=BUS_GET_CPUS.9 bus_get_cpus.9 MLINKS+=bus_map_resource.9 bus_unmap_resource.9 \ bus_map_resource.9 resource_init_map_request.9 MLINKS+=BUS_READ_IVAR.9 BUS_WRITE_IVAR.9 MLINKS+=BUS_SETUP_INTR.9 bus_setup_intr.9 \ BUS_SETUP_INTR.9 BUS_TEARDOWN_INTR.9 \ BUS_SETUP_INTR.9 bus_teardown_intr.9 MLINKS+=bus_space.9 bus_space_alloc.9 \ bus_space.9 bus_space_barrier.9 \ bus_space.9 bus_space_copy_region_1.9 \ bus_space.9 bus_space_copy_region_2.9 \ bus_space.9 
bus_space_copy_region_4.9 \ bus_space.9 bus_space_copy_region_8.9 \ bus_space.9 bus_space_copy_region_stream_1.9 \ bus_space.9 bus_space_copy_region_stream_2.9 \ bus_space.9 bus_space_copy_region_stream_4.9 \ bus_space.9 bus_space_copy_region_stream_8.9 \ bus_space.9 bus_space_free.9 \ bus_space.9 bus_space_map.9 \ bus_space.9 bus_space_read_1.9 \ bus_space.9 bus_space_read_2.9 \ bus_space.9 bus_space_read_4.9 \ bus_space.9 bus_space_read_8.9 \ bus_space.9 bus_space_read_multi_1.9 \ bus_space.9 bus_space_read_multi_2.9 \ bus_space.9 bus_space_read_multi_4.9 \ bus_space.9 bus_space_read_multi_8.9 \ bus_space.9 bus_space_read_multi_stream_1.9 \ bus_space.9 bus_space_read_multi_stream_2.9 \ bus_space.9 bus_space_read_multi_stream_4.9 \ bus_space.9 bus_space_read_multi_stream_8.9 \ bus_space.9 bus_space_read_region_1.9 \ bus_space.9 bus_space_read_region_2.9 \ bus_space.9 bus_space_read_region_4.9 \ bus_space.9 bus_space_read_region_8.9 \ bus_space.9 bus_space_read_region_stream_1.9 \ bus_space.9 bus_space_read_region_stream_2.9 \ bus_space.9 bus_space_read_region_stream_4.9 \ bus_space.9 bus_space_read_region_stream_8.9 \ bus_space.9 bus_space_read_stream_1.9 \ bus_space.9 bus_space_read_stream_2.9 \ bus_space.9 bus_space_read_stream_4.9 \ bus_space.9 bus_space_read_stream_8.9 \ bus_space.9 bus_space_set_multi_1.9 \ bus_space.9 bus_space_set_multi_2.9 \ bus_space.9 bus_space_set_multi_4.9 \ bus_space.9 bus_space_set_multi_8.9 \ bus_space.9 bus_space_set_multi_stream_1.9 \ bus_space.9 bus_space_set_multi_stream_2.9 \ bus_space.9 bus_space_set_multi_stream_4.9 \ bus_space.9 bus_space_set_multi_stream_8.9 \ bus_space.9 bus_space_set_region_1.9 \ bus_space.9 bus_space_set_region_2.9 \ bus_space.9 bus_space_set_region_4.9 \ bus_space.9 bus_space_set_region_8.9 \ bus_space.9 bus_space_set_region_stream_1.9 \ bus_space.9 bus_space_set_region_stream_2.9 \ bus_space.9 bus_space_set_region_stream_4.9 \ bus_space.9 bus_space_set_region_stream_8.9 \ bus_space.9 bus_space_subregion.9 \ bus_space.9 bus_space_unmap.9 \ bus_space.9 bus_space_write_1.9 \ bus_space.9 bus_space_write_2.9 \ bus_space.9 bus_space_write_4.9 \ bus_space.9 bus_space_write_8.9 \ bus_space.9 bus_space_write_multi_1.9 \ bus_space.9 bus_space_write_multi_2.9 \ bus_space.9 bus_space_write_multi_4.9 \ bus_space.9 bus_space_write_multi_8.9 \ bus_space.9 bus_space_write_multi_stream_1.9 \ bus_space.9 bus_space_write_multi_stream_2.9 \ bus_space.9 bus_space_write_multi_stream_4.9 \ bus_space.9 bus_space_write_multi_stream_8.9 \ bus_space.9 bus_space_write_region_1.9 \ bus_space.9 bus_space_write_region_2.9 \ bus_space.9 bus_space_write_region_4.9 \ bus_space.9 bus_space_write_region_8.9 \ bus_space.9 bus_space_write_region_stream_1.9 \ bus_space.9 bus_space_write_region_stream_2.9 \ bus_space.9 bus_space_write_region_stream_4.9 \ bus_space.9 bus_space_write_region_stream_8.9 \ bus_space.9 bus_space_write_stream_1.9 \ bus_space.9 bus_space_write_stream_2.9 \ bus_space.9 bus_space_write_stream_4.9 \ bus_space.9 bus_space_write_stream_8.9 MLINKS+=byteorder.9 be16dec.9 \ byteorder.9 be16enc.9 \ byteorder.9 be16toh.9 \ byteorder.9 be32dec.9 \ byteorder.9 be32enc.9 \ byteorder.9 be32toh.9 \ byteorder.9 be64dec.9 \ byteorder.9 be64enc.9 \ byteorder.9 be64toh.9 \ byteorder.9 bswap16.9 \ byteorder.9 bswap32.9 \ byteorder.9 bswap64.9 \ byteorder.9 htobe16.9 \ byteorder.9 htobe32.9 \ byteorder.9 htobe64.9 \ byteorder.9 htole16.9 \ byteorder.9 htole32.9 \ byteorder.9 htole64.9 \ byteorder.9 le16dec.9 \ byteorder.9 le16enc.9 \ byteorder.9 le16toh.9 \ 
byteorder.9 le32dec.9 \ byteorder.9 le32enc.9 \ byteorder.9 le32toh.9 \ byteorder.9 le64dec.9 \ byteorder.9 le64enc.9 \ byteorder.9 le64toh.9 MLINKS+=callout.9 callout_active.9 \ callout.9 callout_async_drain.9 \ callout.9 callout_deactivate.9 \ callout.9 callout_drain.9 \ callout.9 callout_init.9 \ callout.9 callout_init_mtx.9 \ callout.9 callout_init_rm.9 \ callout.9 callout_init_rw.9 \ callout.9 callout_pending.9 \ callout.9 callout_reset.9 \ callout.9 callout_reset_curcpu.9 \ callout.9 callout_reset_on.9 \ callout.9 callout_reset_sbt.9 \ callout.9 callout_reset_sbt_curcpu.9 \ callout.9 callout_reset_sbt_on.9 \ callout.9 callout_schedule.9 \ callout.9 callout_schedule_curcpu.9 \ callout.9 callout_schedule_on.9 \ callout.9 callout_schedule_sbt.9 \ callout.9 callout_schedule_sbt_curcpu.9 \ callout.9 callout_schedule_sbt_on.9 \ callout.9 callout_stop.9 \ callout.9 callout_when.9 MLINKS+=cnv.9 cnvlist.9 \ cnv.9 cnvlist_free_binary.9 \ cnv.9 cnvlist_free_bool.9 \ cnv.9 cnvlist_free_bool_array.9 \ cnv.9 cnvlist_free_descriptor.9 \ cnv.9 cnvlist_free_descriptor_array.9 \ cnv.9 cnvlist_free_null.9 \ cnv.9 cnvlist_free_number.9 \ cnv.9 cnvlist_free_number_array.9 \ cnv.9 cnvlist_free_nvlist.9 \ cnv.9 cnvlist_free_nvlist_array.9 \ cnv.9 cnvlist_free_string.9 \ cnv.9 cnvlist_free_string_array.9 \ cnv.9 cnvlist_get_binary.9 \ cnv.9 cnvlist_get_bool.9 \ cnv.9 cnvlist_get_bool_array.9 \ cnv.9 cnvlist_get_descriptor.9 \ cnv.9 cnvlist_get_descriptor_array.9 \ cnv.9 cnvlist_get_number.9 \ cnv.9 cnvlist_get_number_array.9 \ cnv.9 cnvlist_get_nvlist.9 \ cnv.9 cnvlist_get_nvlist_array.9 \ cnv.9 cnvlist_get_string.9 \ cnv.9 cnvlist_get_string_array.9 \ cnv.9 cnvlist_take_binary.9 \ cnv.9 cnvlist_take_bool.9 \ cnv.9 cnvlist_take_bool_array.9 \ cnv.9 cnvlist_take_descriptor.9 \ cnv.9 cnvlist_take_descriptor_array.9 \ cnv.9 cnvlist_take_number.9 \ cnv.9 cnvlist_take_number_array.9 \ cnv.9 cnvlist_take_nvlist.9 \ cnv.9 cnvlist_take_nvlist_array.9 \ cnv.9 cnvlist_take_string.9 \ cnv.9 cnvlist_take_string_array.9 MLINKS+=condvar.9 cv_broadcast.9 \ condvar.9 cv_broadcastpri.9 \ condvar.9 cv_destroy.9 \ condvar.9 cv_init.9 \ condvar.9 cv_signal.9 \ condvar.9 cv_timedwait.9 \ condvar.9 cv_timedwait_sig.9 \ condvar.9 cv_timedwait_sig_sbt.9 \ condvar.9 cv_wait.9 \ condvar.9 cv_wait_sig.9 \ condvar.9 cv_wait_unlock.9 \ condvar.9 cv_wmesg.9 MLINKS+=config_intrhook.9 config_intrhook_disestablish.9 \ config_intrhook.9 config_intrhook_establish.9 \ config_intrhook.9 config_intrhook_oneshot.9 MLINKS+=contigmalloc.9 contigmalloc_domainset.9 \ contigmalloc.9 contigfree.9 MLINKS+=casuword.9 casueword.9 \ casuword.9 casueword32.9 \ casuword.9 casuword32.9 MLINKS+=copy.9 copyin.9 \ copy.9 copyin_nofault.9 \ copy.9 copyinstr.9 \ copy.9 copyout.9 \ copy.9 copyout_nofault.9 \ copy.9 copystr.9 MLINKS+=counter.9 counter_u64_alloc.9 \ counter.9 counter_u64_free.9 \ counter.9 counter_u64_add.9 \ counter.9 counter_enter.9 \ counter.9 counter_exit.9 \ counter.9 counter_u64_add_protected.9 \ counter.9 counter_u64_fetch.9 \ counter.9 counter_u64_zero.9 \ counter.9 SYSCTL_COUNTER_U64.9 \ counter.9 SYSCTL_ADD_COUNTER_U64.9 \ counter.9 SYSCTL_COUNTER_U64_ARRAY.9 \ counter.9 SYSCTL_ADD_COUNTER_U64_ARRAY.9 MLINKS+=cpuset.9 CPUSET_T_INITIALIZER.9 \ cpuset.9 CPUSET_FSET.9 \ cpuset.9 CPU_CLR.9 \ cpuset.9 CPU_COPY.9 \ cpuset.9 CPU_ISSET.9 \ cpuset.9 CPU_SET.9 \ cpuset.9 CPU_ZERO.9 \ cpuset.9 CPU_FILL.9 \ cpuset.9 CPU_SETOF.9 \ cpuset.9 CPU_EMPTY.9 \ cpuset.9 CPU_ISFULLSET.9 \ cpuset.9 CPU_FFS.9 \ cpuset.9 CPU_COUNT.9 \ cpuset.9 CPU_SUBSET.9 \ 
	cpuset.9 CPU_OVERLAP.9 \
	cpuset.9 CPU_CMP.9 \
	cpuset.9 CPU_OR.9 \
	cpuset.9 CPU_AND.9 \
	cpuset.9 CPU_ANDNOT.9 \
	cpuset.9 CPU_CLR_ATOMIC.9 \
	cpuset.9 CPU_SET_ATOMIC.9 \
	cpuset.9 CPU_SET_ATOMIC_ACQ.9 \
	cpuset.9 CPU_AND_ATOMIC.9 \
	cpuset.9 CPU_OR_ATOMIC.9 \
	cpuset.9 CPU_COPY_STORE_REL.9
MLINKS+=critical_enter.9 critical.9 \
	critical_enter.9 critical_exit.9
MLINKS+=crypto_asym.9 crypto_kdispatch.9 \
	crypto_asym.9 crypto_kdone.9 \
	crypto_asym.9 crypto_kregister.9 \
	crypto_asym.9 CRYPTODEV_KPROCESS.9
MLINKS+=crypto_driver.9 crypto_apply.9 \
	crypto_driver.9 crypto_contiguous_segment.9 \
	crypto_driver.9 crypto_copyback.9 \
	crypto_driver.9 crypto_copydata.9 \
	crypto_driver.9 crypto_done.9 \
	crypto_driver.9 crypto_get_driverid.9 \
	crypto_driver.9 crypto_get_driver_session.9 \
+	crypto_driver.9 crypto_read_iv.9 \
	crypto_driver.9 crypto_unblock.9 \
	crypto_driver.9 crypto_unregister_all.9 \
	crypto_driver.9 CRYPTODEV_FREESESSION.9 \
	crypto_driver.9 CRYPTODEV_NEWSESSION.9 \
	crypto_driver.9 CRYPTODEV_PROBESESSION.9 \
	crypto_driver.9 CRYPTODEV_PROCESS.9 \
	crypto_driver.9 hmac_init_ipad.9 \
	crypto_driver.9 hmac_init_opad.9
MLINKS+=crypto_request.9 crypto_dispatch.9 \
	crypto_request.9 crypto_freereq.9 \
	crypto_request.9 crypto_getreq.9
MLINKS+=crypto_session.9 crypto_auth_hash.9 \
	crypto_session.9 crypto_cipher.9 \
	crypto_session.9 crypto_get_params.9 \
	crypto_session.9 crypto_newsession.9 \
	crypto_session.9 crypto_freesession.9
MLINKS+=DB_COMMAND.9 DB_SHOW_ALL_COMMAND.9 \
	DB_COMMAND.9 DB_SHOW_COMMAND.9
MLINKS+=DECLARE_MODULE.9 DECLARE_MODULE_TIED.9
MLINKS+=dev_clone.9 drain_dev_clone_events.9
MLINKS+=dev_refthread.9 devvn_refthread.9 \
	dev_refthread.9 dev_relthread.9
MLINKS+=devfs_set_cdevpriv.9 devfs_clear_cdevpriv.9 \
	devfs_set_cdevpriv.9 devfs_get_cdevpriv.9
MLINKS+=device_add_child.9 device_add_child_ordered.9
MLINKS+=device_enable.9 device_disable.9 \
	device_enable.9 device_is_enabled.9
MLINKS+=device_get_ivars.9 device_set_ivars.9
MLINKS+=device_get_name.9 device_get_nameunit.9
MLINKS+=device_get_state.9 device_busy.9 \
	device_get_state.9 device_is_alive.9 \
	device_get_state.9 device_is_attached.9 \
	device_get_state.9 device_unbusy.9
MLINKS+=device_get_sysctl.9 device_get_sysctl_ctx.9 \
	device_get_sysctl.9 device_get_sysctl_tree.9
MLINKS+=device_quiet.9 device_is_quiet.9 \
	device_quiet.9 device_verbose.9
MLINKS+=device_set_desc.9 device_get_desc.9 \
	device_set_desc.9 device_set_desc_copy.9
MLINKS+=device_set_flags.9 device_get_flags.9
MLINKS+=devstat.9 devicestat.9 \
	devstat.9 devstat_add_entry.9 \
	devstat.9 devstat_end_transaction.9 \
	devstat.9 devstat_remove_entry.9 \
	devstat.9 devstat_start_transaction.9
MLINKS+=disk.9 disk_add_alias.9 \
	disk.9 disk_alloc.9 \
	disk.9 disk_create.9 \
	disk.9 disk_destroy.9 \
	disk.9 disk_gone.9 \
	disk.9 disk_resize.9
MLINKS+=dnv.9 dnvlist.9 \
	dnv.9 dnvlist_get_binary.9 \
	dnv.9 dnvlist_get_bool.9 \
	dnv.9 dnvlist_get_descriptor.9 \
	dnv.9 dnvlist_get_number.9 \
	dnv.9 dnvlist_get_nvlist.9 \
	dnv.9 dnvlist_get_string.9 \
	dnv.9 dnvlist_take_binary.9 \
	dnv.9 dnvlist_take_bool.9 \
	dnv.9 dnvlist_take_descriptor.9 \
	dnv.9 dnvlist_take_number.9 \
	dnv.9 dnvlist_take_nvlist.9 \
	dnv.9 dnvlist_take_string.9
MLINKS+=domain.9 DOMAIN_SET.9 \
	domain.9 domain_add.9 \
	domain.9 pfctlinput.9 \
	domain.9 pfctlinput2.9 \
	domain.9 pffinddomain.9 \
	domain.9 pffindproto.9 \
	domain.9 pffindtype.9
MLINKS+=drbr.9 drbr_free.9 \
	drbr.9 drbr_enqueue.9 \
	drbr.9 drbr_dequeue.9 \
	drbr.9 drbr_dequeue_cond.9 \
	drbr.9 drbr_flush.9 \
	drbr.9 drbr_empty.9 \
	drbr.9 drbr_inuse.9 \
	drbr.9 drbr_stats_update.9
MLINKS+=DRIVER_MODULE.9 DRIVER_MODULE_ORDERED.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE_ORDERED.9 MLINKS+=epoch.9 epoch_context.9 \ epoch.9 epoch_alloc.9 \ epoch.9 epoch_free.9 \ epoch.9 epoch_enter.9 \ epoch.9 epoch_exit.9 \ epoch.9 epoch_wait.9 \ epoch.9 epoch_call.9 \ epoch.9 epoch_drain_callbacks.9 \ epoch.9 in_epoch.9 MLINKS+=EVENTHANDLER.9 EVENTHANDLER_DECLARE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEFINE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEREGISTER.9 \ EVENTHANDLER.9 eventhandler_deregister.9 \ EVENTHANDLER.9 eventhandler_find_list.9 \ EVENTHANDLER.9 EVENTHANDLER_INVOKE.9 \ EVENTHANDLER.9 eventhandler_prune_list.9 \ EVENTHANDLER.9 EVENTHANDLER_REGISTER.9 \ EVENTHANDLER.9 eventhandler_register.9 MLINKS+=eventtimers.9 et_register.9 \ eventtimers.9 et_deregister.9 \ eventtimers.9 et_ban.9 \ eventtimers.9 et_find.9 \ eventtimers.9 et_free.9 \ eventtimers.9 et_init.9 \ eventtimers.9 ET_LOCK.9 \ eventtimers.9 ET_UNLOCK.9 \ eventtimers.9 et_start.9 \ eventtimers.9 et_stop.9 MLINKS+=fail.9 KFAIL_POINT_CODE.9 \ fail.9 KFAIL_POINT_ERROR.9 \ fail.9 KFAIL_POINT_GOTO.9 \ fail.9 KFAIL_POINT_RETURN.9 \ fail.9 KFAIL_POINT_RETURN_VOID.9 MLINKS+=fdt_pinctrl.9 fdt_pinctrl_configure.9 \ fdt_pinctrl.9 fdt_pinctrl_configure_by_name.9 \ fdt_pinctrl.9 fdt_pinctrl_configure_tree.9 \ fdt_pinctrl.9 fdt_pinctrl_register.9 MLINKS+=fetch.9 fubyte.9 \ fetch.9 fuword.9 \ fetch.9 fuword16.9 \ fetch.9 fuword32.9 \ fetch.9 fuword64.9 \ fetch.9 fueword.9 \ fetch.9 fueword32.9 \ fetch.9 fueword64.9 MLINKS+=firmware.9 firmware_get.9 \ firmware.9 firmware_put.9 \ firmware.9 firmware_register.9 \ firmware.9 firmware_unregister.9 MLINKS+=fpu_kern.9 fpu_kern_alloc_ctx.9 \ fpu_kern.9 fpu_kern_free_ctx.9 \ fpu_kern.9 fpu_kern_enter.9 \ fpu_kern.9 fpu_kern_leave.9 \ fpu_kern.9 fpu_kern_thread.9 \ fpu_kern.9 is_fpu_kern_thread.9 MLINKS+=g_attach.9 g_detach.9 MLINKS+=g_bio.9 g_alloc_bio.9 \ g_bio.9 g_clone_bio.9 \ g_bio.9 g_destroy_bio.9 \ g_bio.9 g_duplicate_bio.9 \ g_bio.9 g_format_bio.9 \ g_bio.9 g_new_bio.9 \ g_bio.9 g_print_bio.9 \ g_bio.9 g_reset_bio.9 MLINKS+=g_consumer.9 g_destroy_consumer.9 \ g_consumer.9 g_new_consumer.9 MLINKS+=g_data.9 g_read_data.9 \ g_data.9 g_write_data.9 MLINKS+=getenv.9 freeenv.9 \ getenv.9 getenv_int.9 \ getenv.9 getenv_long.9 \ getenv.9 getenv_string.9 \ getenv.9 getenv_quad.9 \ getenv.9 getenv_uint.9 \ getenv.9 getenv_ulong.9 \ getenv.9 kern_getenv.9 \ getenv.9 kern_setenv.9 \ getenv.9 kern_unsetenv.9 \ getenv.9 setenv.9 \ getenv.9 testenv.9 \ getenv.9 unsetenv.9 MLINKS+=g_event.9 g_cancel_event.9 \ g_event.9 g_post_event.9 \ g_event.9 g_waitfor_event.9 MLINKS+=g_geom.9 g_destroy_geom.9 \ g_geom.9 g_new_geomf.9 MLINKS+=g_provider.9 g_destroy_provider.9 \ g_provider.9 g_error_provider.9 \ g_provider.9 g_new_providerf.9 MLINKS+=hash.9 hash32.9 \ hash.9 hash32_buf.9 \ hash.9 hash32_str.9 \ hash.9 hash32_stre.9 \ hash.9 hash32_strn.9 \ hash.9 hash32_strne.9 \ hash.9 jenkins_hash.9 \ hash.9 jenkins_hash32.9 MLINKS+=hashinit.9 hashdestroy.9 \ hashinit.9 hashinit_flags.9 \ hashinit.9 phashinit.9 MLINKS+=hhook.9 hhook_head_register.9 \ hhook.9 hhook_head_deregister.9 \ hhook.9 hhook_head_deregister_lookup.9 \ hhook.9 hhook_run_hooks.9 \ hhook.9 HHOOKS_RUN_IF.9 \ hhook.9 HHOOKS_RUN_LOOKUP_IF.9 MLINKS+=ieee80211.9 ieee80211_ifattach.9 \ ieee80211.9 ieee80211_ifdetach.9 MLINKS+=ieee80211_amrr.9 ieee80211_amrr_choose.9 \ ieee80211_amrr.9 ieee80211_amrr_cleanup.9 \ ieee80211_amrr.9 ieee80211_amrr_init.9 \ ieee80211_amrr.9 ieee80211_amrr_node_init.9 \ ieee80211_amrr.9 
ieee80211_amrr_setinterval.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_complete.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_update.9 MLINKS+=ieee80211_beacon.9 ieee80211_beacon_alloc.9 \ ieee80211_beacon.9 ieee80211_beacon_notify.9 \ ieee80211_beacon.9 ieee80211_beacon_update.9 MLINKS+=ieee80211_bmiss.9 ieee80211_beacon_miss.9 MLINKS+=ieee80211_crypto.9 ieee80211_crypto_available.9 \ ieee80211_crypto.9 ieee80211_crypto_decap.9 \ ieee80211_crypto.9 ieee80211_crypto_delglobalkeys.9 \ ieee80211_crypto.9 ieee80211_crypto_delkey.9 \ ieee80211_crypto.9 ieee80211_crypto_demic.9 \ ieee80211_crypto.9 ieee80211_crypto_encap.9 \ ieee80211_crypto.9 ieee80211_crypto_enmic.9 \ ieee80211_crypto.9 ieee80211_crypto_newkey.9 \ ieee80211_crypto.9 ieee80211_crypto_register.9 \ ieee80211_crypto.9 ieee80211_crypto_reload_keys.9 \ ieee80211_crypto.9 ieee80211_crypto_setkey.9 \ ieee80211_crypto.9 ieee80211_crypto_unregister.9 \ ieee80211_crypto.9 ieee80211_key_update_begin.9 \ ieee80211_crypto.9 ieee80211_key_update_end.9 \ ieee80211_crypto.9 ieee80211_notify_michael_failure.9 \ ieee80211_crypto.9 ieee80211_notify_replay_failure.9 MLINKS+=ieee80211_input.9 ieee80211_input_all.9 MLINKS+=ieee80211_node.9 ieee80211_dump_node.9 \ ieee80211_node.9 ieee80211_dump_nodes.9 \ ieee80211_node.9 ieee80211_find_rxnode.9 \ ieee80211_node.9 ieee80211_find_rxnode_withkey.9 \ ieee80211_node.9 ieee80211_free_node.9 \ ieee80211_node.9 ieee80211_iterate_nodes.9 \ ieee80211_node.9 ieee80211_ref_node.9 \ ieee80211_node.9 ieee80211_unref_node.9 MLINKS+=ieee80211_output.9 ieee80211_process_callback.9 \ ieee80211_output.9 M_SEQNO_GET.9 \ ieee80211_output.9 M_WME_GETAC.9 MLINKS+=ieee80211_proto.9 ieee80211_new_state.9 \ ieee80211_proto.9 ieee80211_resume_all.9 \ ieee80211_proto.9 ieee80211_start_all.9 \ ieee80211_proto.9 ieee80211_stop_all.9 \ ieee80211_proto.9 ieee80211_suspend_all.9 \ ieee80211_proto.9 ieee80211_waitfor_parent.9 MLINKS+=ieee80211_radiotap.9 ieee80211_radiotap_active.9 \ ieee80211_radiotap.9 ieee80211_radiotap_active_vap.9 \ ieee80211_radiotap.9 ieee80211_radiotap_attach.9 \ ieee80211_radiotap.9 ieee80211_radiotap_tx.9 \ ieee80211_radiotap.9 radiotap.9 MLINKS+=ieee80211_regdomain.9 ieee80211_alloc_countryie.9 \ ieee80211_regdomain.9 ieee80211_init_channels.9 \ ieee80211_regdomain.9 ieee80211_sort_channels.9 MLINKS+=ieee80211_scan.9 ieee80211_add_scan.9 \ ieee80211_scan.9 ieee80211_bg_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan_any.9 \ ieee80211_scan.9 ieee80211_check_scan.9 \ ieee80211_scan.9 ieee80211_check_scan_current.9 \ ieee80211_scan.9 ieee80211_flush.9 \ ieee80211_scan.9 ieee80211_probe_curchan.9 \ ieee80211_scan.9 ieee80211_scan_assoc_fail.9 \ ieee80211_scan.9 ieee80211_scan_done.9 \ ieee80211_scan.9 ieee80211_scan_dump_channels.9 \ ieee80211_scan.9 ieee80211_scan_flush.9 \ ieee80211_scan.9 ieee80211_scan_iterate.9 \ ieee80211_scan.9 ieee80211_scan_next.9 \ ieee80211_scan.9 ieee80211_scan_timeout.9 \ ieee80211_scan.9 ieee80211_scanner_get.9 \ ieee80211_scan.9 ieee80211_scanner_register.9 \ ieee80211_scan.9 ieee80211_scanner_unregister.9 \ ieee80211_scan.9 ieee80211_scanner_unregister_all.9 \ ieee80211_scan.9 ieee80211_start_scan.9 MLINKS+=ieee80211_vap.9 ieee80211_vap_attach.9 \ ieee80211_vap.9 ieee80211_vap_detach.9 \ ieee80211_vap.9 ieee80211_vap_setup.9 MLINKS+=iflibdd.9 ifdi_attach_pre.9 \ iflibdd.9 ifdi_attach_post.9 \ iflibdd.9 ifdi_detach.9 \ iflibdd.9 ifdi_get_counter.9 \ iflibdd.9 ifdi_i2c_req.9 \ iflibdd.9 ifdi_init.9 \ iflibdd.9 ifdi_intr_enable.9 \ iflibdd.9 
ifdi_intr_disable.9 \ iflibdd.9 ifdi_led_func.9 \ iflibdd.9 ifdi_link_intr_enable.9 \ iflibdd.9 ifdi_media_set.9 \ iflibdd.9 ifdi_media_status.9 \ iflibdd.9 ifdi_media_change.9 \ iflibdd.9 ifdi_mtu_set.9 \ iflibdd.9 ifdi_multi_set.9 \ iflibdd.9 ifdi_promisc_set.9 \ iflibdd.9 ifdi_queues_alloc.9 \ iflibdd.9 ifdi_queues_free.9 \ iflibdd.9 ifdi_queue_intr_enable.9 \ iflibdd.9 ifdi_resume.9 \ iflibdd.9 ifdi_rxq_setup.9 \ iflibdd.9 ifdi_stop.9 \ iflibdd.9 ifdi_suspend.9 \ iflibdd.9 ifdi_sysctl_int_delay.9 \ iflibdd.9 ifdi_timer.9 \ iflibdd.9 ifdi_txq_setup.9 \ iflibdd.9 ifdi_update_admin_status.9 \ iflibdd.9 ifdi_vf_add.9 \ iflibdd.9 ifdi_vflr_handle.9 \ iflibdd.9 ifdi_vlan_register.9 \ iflibdd.9 ifdi_vlan_unregister.9 \ iflibdd.9 ifdi_watchdog_reset.9 \ iflibdd.9 iov_init.9 \ iflibdd.9 iov_uinit.9 MLINKS+=iflibdi.9 iflib_add_int_delay_sysctl.9 \ iflibdi.9 iflib_device_attach.9 \ iflibdi.9 iflib_device_deregister.9 \ iflibdi.9 iflib_device_detach.9 \ iflibdi.9 iflib_device_suspend.9 \ iflibdi.9 iflib_device_register.9 \ iflibdi.9 iflib_device_resume.9 \ iflibdi.9 iflib_led_create.9 \ iflibdi.9 iflib_irq_alloc.9 \ iflibdi.9 iflib_irq_alloc_generic.9 \ iflibdi.9 iflib_link_intr_deferred.9 \ iflibdi.9 iflib_link_state_change.9 \ iflibdi.9 iflib_rx_intr_deferred.9 \ iflibdi.9 iflib_tx_intr_deferred.9 MLINKS+=iflibtxrx.9 isc_rxd_available.9 \ iflibtxrx.9 isc_rxd_refill.9 \ iflibtxrx.9 isc_rxd_flush.9 \ iflibtxrx.9 isc_rxd_pkt_get.9 \ iflibtxrx.9 isc_txd_credits_update.9 \ iflibtxrx.9 isc_txd_encap.9 \ iflibtxrx.9 isc_txd_flush.9 MLINKS+=ifnet.9 if_addmulti.9 \ ifnet.9 if_alloc.9 \ ifnet.9 if_alloc_dev.9 \ ifnet.9 if_alloc_domain.9 \ ifnet.9 if_allmulti.9 \ ifnet.9 if_attach.9 \ ifnet.9 if_data.9 \ ifnet.9 IF_DEQUEUE.9 \ ifnet.9 if_delmulti.9 \ ifnet.9 if_detach.9 \ ifnet.9 if_down.9 \ ifnet.9 if_findmulti.9 \ ifnet.9 if_free.9 \ ifnet.9 if_free_type.9 \ ifnet.9 if_up.9 \ ifnet.9 ifa_free.9 \ ifnet.9 ifa_ifwithaddr.9 \ ifnet.9 ifa_ifwithdstaddr.9 \ ifnet.9 ifa_ifwithnet.9 \ ifnet.9 ifa_ref.9 \ ifnet.9 ifaddr.9 \ ifnet.9 ifaddr_byindex.9 \ ifnet.9 ifaof_ifpforaddr.9 \ ifnet.9 ifioctl.9 \ ifnet.9 ifpromisc.9 \ ifnet.9 ifqueue.9 \ ifnet.9 ifunit.9 \ ifnet.9 ifunit_ref.9 MLINKS+=insmntque.9 insmntque1.9 MLINKS+=ithread.9 ithread_add_handler.9 \ ithread.9 ithread_create.9 \ ithread.9 ithread_destroy.9 \ ithread.9 ithread_priority.9 \ ithread.9 ithread_remove_handler.9 \ ithread.9 ithread_schedule.9 MLINKS+=kernacc.9 useracc.9 MLINKS+=kernel_mount.9 free_mntarg.9 \ kernel_mount.9 kernel_vmount.9 \ kernel_mount.9 mount_arg.9 \ kernel_mount.9 mount_argb.9 \ kernel_mount.9 mount_argf.9 \ kernel_mount.9 mount_argsu.9 MLINKS+=khelp.9 khelp_add_hhook.9 \ khelp.9 KHELP_DECLARE_MOD.9 \ khelp.9 KHELP_DECLARE_MOD_UMA.9 \ khelp.9 khelp_destroy_osd.9 \ khelp.9 khelp_get_id.9 \ khelp.9 khelp_get_osd.9 \ khelp.9 khelp_init_osd.9 \ khelp.9 khelp_remove_hhook.9 MLINKS+=kobj.9 DEFINE_CLASS.9 \ kobj.9 kobj_class_compile.9 \ kobj.9 kobj_class_compile_static.9 \ kobj.9 kobj_class_free.9 \ kobj.9 kobj_create.9 \ kobj.9 kobj_delete.9 \ kobj.9 kobj_init.9 \ kobj.9 kobj_init_static.9 MLINKS+=kproc.9 kproc_create.9 \ kproc.9 kproc_exit.9 \ kproc.9 kproc_kthread_add.9 \ kproc.9 kproc_resume.9 \ kproc.9 kproc_shutdown.9 \ kproc.9 kproc_start.9 \ kproc.9 kproc_suspend.9 \ kproc.9 kproc_suspend_check.9 \ kproc.9 kthread_create.9 MLINKS+=kqueue.9 knlist_add.9 \ kqueue.9 knlist_clear.9 \ kqueue.9 knlist_delete.9 \ kqueue.9 knlist_destroy.9 \ kqueue.9 knlist_empty.9 \ kqueue.9 knlist_init.9 \ kqueue.9 knlist_init_mtx.9 \ kqueue.9 
knlist_init_rw_reader.9 \ kqueue.9 knlist_remove.9 \ kqueue.9 knlist_remove_inevent.9 \ kqueue.9 knote_fdclose.9 \ kqueue.9 KNOTE_LOCKED.9 \ kqueue.9 KNOTE_UNLOCKED.9 \ kqueue.9 kqfd_register.9 \ kqueue.9 kqueue_add_filteropts.9 \ kqueue.9 kqueue_del_filteropts.9 MLINKS+=kthread.9 kthread_add.9 \ kthread.9 kthread_exit.9 \ kthread.9 kthread_resume.9 \ kthread.9 kthread_shutdown.9 \ kthread.9 kthread_start.9 \ kthread.9 kthread_suspend.9 \ kthread.9 kthread_suspend_check.9 MLINKS+=ktr.9 CTR0.9 \ ktr.9 CTR1.9 \ ktr.9 CTR2.9 \ ktr.9 CTR3.9 \ ktr.9 CTR4.9 \ ktr.9 CTR5.9 \ ktr.9 CTR6.9 MLINKS+=lock.9 lockdestroy.9 \ lock.9 lockinit.9 \ lock.9 lockmgr.9 \ lock.9 lockmgr_args.9 \ lock.9 lockmgr_args_rw.9 \ lock.9 lockmgr_assert.9 \ lock.9 lockmgr_disown.9 \ lock.9 lockmgr_printinfo.9 \ lock.9 lockmgr_recursed.9 \ lock.9 lockmgr_rw.9 \ lock.9 lockstatus.9 MLINKS+=LOCK_PROFILING.9 MUTEX_PROFILING.9 MLINKS+=make_dev.9 destroy_dev.9 \ make_dev.9 destroy_dev_drain.9 \ make_dev.9 destroy_dev_sched.9 \ make_dev.9 destroy_dev_sched_cb.9 \ make_dev.9 dev_depends.9 \ make_dev.9 make_dev_alias.9 \ make_dev.9 make_dev_alias_p.9 \ make_dev.9 make_dev_cred.9 \ make_dev.9 make_dev_credf.9 \ make_dev.9 make_dev_p.9 \ make_dev.9 make_dev_s.9 MLINKS+=malloc.9 free.9 \ malloc.9 malloc_domainset.9 \ malloc.9 free_domain.9 \ malloc.9 mallocarray.9 \ malloc.9 MALLOC_DECLARE.9 \ malloc.9 MALLOC_DEFINE.9 \ malloc.9 realloc.9 \ malloc.9 reallocf.9 MLINKS+=mbchain.9 mb_detach.9 \ mbchain.9 mb_done.9 \ mbchain.9 mb_fixhdr.9 \ mbchain.9 mb_init.9 \ mbchain.9 mb_initm.9 \ mbchain.9 mb_put_int64be.9 \ mbchain.9 mb_put_int64le.9 \ mbchain.9 mb_put_mbuf.9 \ mbchain.9 mb_put_mem.9 \ mbchain.9 mb_put_uint16be.9 \ mbchain.9 mb_put_uint16le.9 \ mbchain.9 mb_put_uint32be.9 \ mbchain.9 mb_put_uint32le.9 \ mbchain.9 mb_put_uint8.9 \ mbchain.9 mb_put_uio.9 \ mbchain.9 mb_reserve.9 MLINKS+=\ mbuf.9 m_adj.9 \ mbuf.9 m_align.9 \ mbuf.9 M_ALIGN.9 \ mbuf.9 m_append.9 \ mbuf.9 m_apply.9 \ mbuf.9 m_cat.9 \ mbuf.9 m_catpkt.9 \ mbuf.9 MCHTYPE.9 \ mbuf.9 MCLGET.9 \ mbuf.9 m_collapse.9 \ mbuf.9 m_copyback.9 \ mbuf.9 m_copydata.9 \ mbuf.9 m_copym.9 \ mbuf.9 m_copypacket.9 \ mbuf.9 m_copyup.9 \ mbuf.9 m_defrag.9 \ mbuf.9 m_devget.9 \ mbuf.9 m_dup.9 \ mbuf.9 m_dup_pkthdr.9 \ mbuf.9 MEXTADD.9 \ mbuf.9 m_fixhdr.9 \ mbuf.9 m_free.9 \ mbuf.9 m_freem.9 \ mbuf.9 MGET.9 \ mbuf.9 m_get.9 \ mbuf.9 m_get2.9 \ mbuf.9 m_getjcl.9 \ mbuf.9 m_getcl.9 \ mbuf.9 MGETHDR.9 \ mbuf.9 m_gethdr.9 \ mbuf.9 m_getm.9 \ mbuf.9 m_getptr.9 \ mbuf.9 MH_ALIGN.9 \ mbuf.9 M_LEADINGSPACE.9 \ mbuf.9 m_length.9 \ mbuf.9 M_MOVE_PKTHDR.9 \ mbuf.9 m_move_pkthdr.9 \ mbuf.9 M_PREPEND.9 \ mbuf.9 m_prepend.9 \ mbuf.9 m_pulldown.9 \ mbuf.9 m_pullup.9 \ mbuf.9 m_split.9 \ mbuf.9 mtod.9 \ mbuf.9 M_TRAILINGSPACE.9 \ mbuf.9 m_unshare.9 \ mbuf.9 M_WRITABLE.9 MLINKS+=\ mbuf_tags.9 m_tag_alloc.9 \ mbuf_tags.9 m_tag_copy.9 \ mbuf_tags.9 m_tag_copy_chain.9 \ mbuf_tags.9 m_tag_delete.9 \ mbuf_tags.9 m_tag_delete_chain.9 \ mbuf_tags.9 m_tag_delete_nonpersistent.9 \ mbuf_tags.9 m_tag_find.9 \ mbuf_tags.9 m_tag_first.9 \ mbuf_tags.9 m_tag_free.9 \ mbuf_tags.9 m_tag_get.9 \ mbuf_tags.9 m_tag_init.9 \ mbuf_tags.9 m_tag_locate.9 \ mbuf_tags.9 m_tag_next.9 \ mbuf_tags.9 m_tag_prepend.9 \ mbuf_tags.9 m_tag_unlink.9 MLINKS+=MD5.9 MD5Init.9 \ MD5.9 MD5Transform.9 MLINKS+=mdchain.9 md_append_record.9 \ mdchain.9 md_done.9 \ mdchain.9 md_get_int64.9 \ mdchain.9 md_get_int64be.9 \ mdchain.9 md_get_int64le.9 \ mdchain.9 md_get_mbuf.9 \ mdchain.9 md_get_mem.9 \ mdchain.9 md_get_uint16.9 \ mdchain.9 md_get_uint16be.9 
\ mdchain.9 md_get_uint16le.9 \ mdchain.9 md_get_uint32.9 \ mdchain.9 md_get_uint32be.9 \ mdchain.9 md_get_uint32le.9 \ mdchain.9 md_get_uint8.9 \ mdchain.9 md_get_uio.9 \ mdchain.9 md_initm.9 \ mdchain.9 md_next_record.9 MLINKS+=microtime.9 bintime.9 \ microtime.9 getbintime.9 \ microtime.9 getmicrotime.9 \ microtime.9 getnanotime.9 \ microtime.9 nanotime.9 MLINKS+=microuptime.9 binuptime.9 \ microuptime.9 getbinuptime.9 \ microuptime.9 getmicrouptime.9 \ microuptime.9 getnanouptime.9 \ microuptime.9 getsbinuptime.9 \ microuptime.9 nanouptime.9 \ microuptime.9 sbinuptime.9 MLINKS+=mi_switch.9 cpu_switch.9 \ mi_switch.9 cpu_throw.9 MLINKS+=mod_cc.9 CCV.9 \ mod_cc.9 DECLARE_CC_MODULE.9 MLINKS+=mtx_pool.9 mtx_pool_alloc.9 \ mtx_pool.9 mtx_pool_create.9 \ mtx_pool.9 mtx_pool_destroy.9 \ mtx_pool.9 mtx_pool_find.9 \ mtx_pool.9 mtx_pool_lock.9 \ mtx_pool.9 mtx_pool_lock_spin.9 \ mtx_pool.9 mtx_pool_unlock.9 \ mtx_pool.9 mtx_pool_unlock_spin.9 MLINKS+=mutex.9 mtx_assert.9 \ mutex.9 mtx_destroy.9 \ mutex.9 mtx_init.9 \ mutex.9 mtx_initialized.9 \ mutex.9 mtx_lock.9 \ mutex.9 mtx_lock_flags.9 \ mutex.9 mtx_lock_spin.9 \ mutex.9 mtx_lock_spin_flags.9 \ mutex.9 mtx_owned.9 \ mutex.9 mtx_recursed.9 \ mutex.9 mtx_sleep.9 \ mutex.9 MTX_SYSINIT.9 \ mutex.9 mtx_trylock.9 \ mutex.9 mtx_trylock_flags.9 \ mutex.9 mtx_trylock_spin.9 \ mutex.9 mtx_trylock_spin_flags.9 \ mutex.9 mtx_unlock.9 \ mutex.9 mtx_unlock_flags.9 \ mutex.9 mtx_unlock_spin.9 \ mutex.9 mtx_unlock_spin_flags.9 MLINKS+=namei.9 NDFREE.9 \ namei.9 NDINIT.9 MLINKS+=netisr.9 netisr_clearqdrops.9 \ netisr.9 netisr_default_flow2cpu.9 \ netisr.9 netisr_dispatch.9 \ netisr.9 netisr_dispatch_src.9 \ netisr.9 netisr_get_cpucount.9 \ netisr.9 netisr_get_cpuid.9 \ netisr.9 netisr_getqdrops.9 \ netisr.9 netisr_getqlimit.9 \ netisr.9 netisr_queue.9 \ netisr.9 netisr_queue_src.9 \ netisr.9 netisr_register.9 \ netisr.9 netisr_setqlimit.9 \ netisr.9 netisr_unregister.9 MLINKS+=nv.9 libnv.9 \ nv.9 nvlist.9 \ nv.9 nvlist_add_binary.9 \ nv.9 nvlist_add_bool.9 \ nv.9 nvlist_add_bool_array.9 \ nv.9 nvlist_add_descriptor.9 \ nv.9 nvlist_add_descriptor_array.9 \ nv.9 nvlist_add_null.9 \ nv.9 nvlist_add_number.9 \ nv.9 nvlist_add_number_array.9 \ nv.9 nvlist_add_nvlist.9 \ nv.9 nvlist_add_nvlist_array.9 \ nv.9 nvlist_add_string.9 \ nv.9 nvlist_add_stringf.9 \ nv.9 nvlist_add_stringv.9 \ nv.9 nvlist_add_string_array.9 \ nv.9 nvlist_clone.9 \ nv.9 nvlist_create.9 \ nv.9 nvlist_destroy.9 \ nv.9 nvlist_dump.9 \ nv.9 nvlist_empty.9 \ nv.9 nvlist_error.9 \ nv.9 nvlist_exists.9 \ nv.9 nvlist_exists_binary.9 \ nv.9 nvlist_exists_bool.9 \ nv.9 nvlist_exists_bool_array.9 \ nv.9 nvlist_exists_descriptor.9 \ nv.9 nvlist_exists_descriptor_array.9 \ nv.9 nvlist_exists_null.9 \ nv.9 nvlist_exists_number.9 \ nv.9 nvlist_exists_number_array.9 \ nv.9 nvlist_exists_nvlist.9 \ nv.9 nvlist_exists_nvlist_array.9 \ nv.9 nvlist_exists_string.9 \ nv.9 nvlist_exists_type.9 \ nv.9 nvlist_fdump.9 \ nv.9 nvlist_flags.9 \ nv.9 nvlist_free.9 \ nv.9 nvlist_free_binary.9 \ nv.9 nvlist_free_bool.9 \ nv.9 nvlist_free_bool_array.9 \ nv.9 nvlist_free_descriptor.9 \ nv.9 nvlist_free_descriptor_array.9 \ nv.9 nvlist_free_null.9 \ nv.9 nvlist_free_number.9 \ nv.9 nvlist_free_number_array.9 \ nv.9 nvlist_free_nvlist.9 \ nv.9 nvlist_free_nvlist_array.9 \ nv.9 nvlist_free_string.9 \ nv.9 nvlist_free_string_array.9 \ nv.9 nvlist_free_type.9 \ nv.9 nvlist_get_binary.9 \ nv.9 nvlist_get_bool.9 \ nv.9 nvlist_get_bool_array.9 \ nv.9 nvlist_get_descriptor.9 \ nv.9 nvlist_get_descriptor_array.9 \ nv.9 
nvlist_get_number.9 \ nv.9 nvlist_get_number_array.9 \ nv.9 nvlist_get_nvlist.9 \ nv.9 nvlist_get_nvlist_array.9 \ nv.9 nvlist_get_parent.9 \ nv.9 nvlist_get_string.9 \ nv.9 nvlist_get_string_array.9 \ nv.9 nvlist_move_binary.9 \ nv.9 nvlist_move_descriptor.9 \ nv.9 nvlist_move_descriptor_array.9 \ nv.9 nvlist_move_nvlist.9 \ nv.9 nvlist_move_nvlist_array.9 \ nv.9 nvlist_move_string.9 \ nv.9 nvlist_move_string_array.9 \ nv.9 nvlist_next.9 \ nv.9 nvlist_pack.9 \ nv.9 nvlist_recv.9 \ nv.9 nvlist_send.9 \ nv.9 nvlist_set_error.9 \ nv.9 nvlist_size.9 \ nv.9 nvlist_take_binary.9 \ nv.9 nvlist_take_bool.9 \ nv.9 nvlist_take_bool_array.9 \ nv.9 nvlist_take_descriptor.9 \ nv.9 nvlist_take_descriptor_array.9 \ nv.9 nvlist_take_number.9 \ nv.9 nvlist_take_number_array.9 \ nv.9 nvlist_take_nvlist.9 \ nv.9 nvlist_take_nvlist_array.9 \ nv.9 nvlist_take_string.9 \ nv.9 nvlist_take_string_array.9 \ nv.9 nvlist_unpack.9 \ nv.9 nvlist_xfer.9 MLINKS+=OF_child.9 OF_parent.9 \ OF_child.9 OF_peer.9 MLINKS+=OF_device_from_xref.9 OF_device_register_xref.9 \ OF_device_from_xref.9 OF_xref_from_device.9 MLINKS+=OF_getprop.9 OF_getencprop.9 \ OF_getprop.9 OF_getencprop_alloc.9 \ OF_getprop.9 OF_getencprop_alloc_multi.9 \ OF_getprop.9 OF_getprop_alloc.9 \ OF_getprop.9 OF_getprop_alloc_multi.9 \ OF_getprop.9 OF_getproplen.9 \ OF_getprop.9 OF_hasprop.9 \ OF_getprop.9 OF_nextprop.9 \ OF_getprop.9 OF_prop_free.9 \ OF_getprop.9 OF_searchencprop.9 \ OF_getprop.9 OF_searchprop.9 \ OF_getprop.9 OF_setprop.9 MLINKS+=OF_node_from_xref.9 OF_xref_from_node.9 MLINKS+=ofw_bus_is_compatible.9 ofw_bus_is_compatible_strict.9 \ ofw_bus_is_compatible.9 ofw_bus_node_is_compatible.9 \ ofw_bus_is_compatible.9 ofw_bus_search_compatible.9 MLINKS+= ofw_bus_status_okay.9 ofw_bus_get_status.9 \ ofw_bus_status_okay.9 ofw_bus_node_status_okay.9 MLINKS+=osd.9 osd_call.9 \ osd.9 osd_del.9 \ osd.9 osd_deregister.9 \ osd.9 osd_exit.9 \ osd.9 osd_get.9 \ osd.9 osd_register.9 \ osd.9 osd_set.9 MLINKS+=panic.9 vpanic.9 MLINKS+=PCBGROUP.9 in_pcbgroup_byhash.9 \ PCBGROUP.9 in_pcbgroup_byinpcb.9 \ PCBGROUP.9 in_pcbgroup_destroy.9 \ PCBGROUP.9 in_pcbgroup_enabled.9 \ PCBGROUP.9 in_pcbgroup_init.9 \ PCBGROUP.9 in_pcbgroup_remove.9 \ PCBGROUP.9 in_pcbgroup_update.9 \ PCBGROUP.9 in_pcbgroup_update_mbuf.9 \ PCBGROUP.9 in6_pcbgroup_byhash.9 MLINKS+=pci.9 pci_alloc_msi.9 \ pci.9 pci_alloc_msix.9 \ pci.9 pci_disable_busmaster.9 \ pci.9 pci_disable_io.9 \ pci.9 pci_enable_busmaster.9 \ pci.9 pci_enable_io.9 \ pci.9 pci_find_bsf.9 \ pci.9 pci_find_cap.9 \ pci.9 pci_find_dbsf.9 \ pci.9 pci_find_device.9 \ pci.9 pci_find_extcap.9 \ pci.9 pci_find_htcap.9 \ pci.9 pci_find_pcie_root_port.9 \ pci.9 pci_get_id.9 \ pci.9 pci_get_max_read_req.9 \ pci.9 pci_get_powerstate.9 \ pci.9 pci_get_vpd_ident.9 \ pci.9 pci_get_vpd_readonly.9 \ pci.9 pci_iov_attach.9 \ pci.9 pci_iov_attach_name.9 \ pci.9 pci_iov_detach.9 \ pci.9 pci_msi_count.9 \ pci.9 pci_msix_count.9 \ pci.9 pci_msix_pba_bar.9 \ pci.9 pci_msix_table_bar.9 \ pci.9 pci_pending_msix.9 \ pci.9 pci_read_config.9 \ pci.9 pci_release_msi.9 \ pci.9 pci_remap_msix.9 \ pci.9 pci_restore_state.9 \ pci.9 pci_save_state.9 \ pci.9 pci_set_powerstate.9 \ pci.9 pci_set_max_read_req.9 \ pci.9 pci_write_config.9 \ pci.9 pcie_adjust_config.9 \ pci.9 pcie_flr.9 \ pci.9 pcie_max_completion_timeout.9 \ pci.9 pcie_read_config.9 \ pci.9 pcie_wait_for_pending_transactions.9 \ pci.9 pcie_write_config.9 MLINKS+=pci_iov_schema.9 pci_iov_schema_alloc_node.9 \ pci_iov_schema.9 pci_iov_schema_add_bool.9 \ pci_iov_schema.9 
pci_iov_schema_add_string.9 \ pci_iov_schema.9 pci_iov_schema_add_uint8.9 \ pci_iov_schema.9 pci_iov_schema_add_uint16.9 \ pci_iov_schema.9 pci_iov_schema_add_uint32.9 \ pci_iov_schema.9 pci_iov_schema_add_uint64.9 \ pci_iov_schema.9 pci_iov_schema_add_unicast_mac.9 MLINKS+=pfil.9 pfil_add_hook.9 \ pfil.9 pfil_head_register.9 \ pfil.9 pfil_head_unregister.9 \ pfil.9 pfil_remove_hook.9 \ pfil.9 pfil_run_hooks.9 \ pfil.9 pfil_link.9 MLINKS+=pfind.9 zpfind.9 MLINKS+=PHOLD.9 PRELE.9 \ PHOLD.9 _PHOLD.9 \ PHOLD.9 _PRELE.9 \ PHOLD.9 PROC_ASSERT_HELD.9 \ PHOLD.9 PROC_ASSERT_NOT_HELD.9 MLINKS+=pmap_copy.9 pmap_copy_page.9 MLINKS+=pmap_extract.9 pmap_extract_and_hold.9 MLINKS+=pmap_init.9 pmap_init2.9 MLINKS+=pmap_is_modified.9 pmap_ts_referenced.9 MLINKS+=pmap_pinit.9 pmap_pinit0.9 \ pmap_pinit.9 pmap_pinit2.9 MLINKS+=pmap_qenter.9 pmap_qremove.9 MLINKS+=pmap_quick_enter_page.9 pmap_quick_remove_page.9 MLINKS+=pmap_remove.9 pmap_remove_all.9 \ pmap_remove.9 pmap_remove_pages.9 MLINKS+=pmap_resident_count.9 pmap_wired_count.9 MLINKS+=pmap_zero_page.9 pmap_zero_area.9 MLINKS+=printf.9 log.9 \ printf.9 tprintf.9 \ printf.9 uprintf.9 MLINKS+=priv.9 priv_check.9 \ priv.9 priv_check_cred.9 MLINKS+=proc_rwmem.9 proc_readmem.9 \ proc_rwmem.9 proc_writemem.9 MLINKS+=psignal.9 gsignal.9 \ psignal.9 pgsignal.9 \ psignal.9 tdsignal.9 MLINKS+=pwmbus.9 pwm.9 MLINKS+=random.9 arc4rand.9 \ random.9 arc4random.9 \ random.9 is_random_seeded.9 \ random.9 read_random.9 \ random.9 read_random_uio.9 \ random.9 srandom.9 MLINKS+=random_harvest.9 random_harvest_direct.9 \ random_harvest.9 random_harvest_fast.9 \ random_harvest.9 random_harvest_queue.9 MLINKS+=ratecheck.9 ppsratecheck.9 MLINKS+=refcount.9 refcount_acquire.9 \ refcount.9 refcount_init.9 \ refcount.9 refcount_release.9 MLINKS+=resource_int_value.9 resource_long_value.9 \ resource_int_value.9 resource_string_value.9 MLINKS+=rman.9 rman_activate_resource.9 \ rman.9 rman_adjust_resource.9 \ rman.9 rman_deactivate_resource.9 \ rman.9 rman_fini.9 \ rman.9 rman_first_free_region.9 \ rman.9 rman_get_bushandle.9 \ rman.9 rman_get_bustag.9 \ rman.9 rman_get_device.9 \ rman.9 rman_get_end.9 \ rman.9 rman_get_flags.9 \ rman.9 rman_get_mapping.9 \ rman.9 rman_get_rid.9 \ rman.9 rman_get_size.9 \ rman.9 rman_get_start.9 \ rman.9 rman_get_virtual.9 \ rman.9 rman_init.9 \ rman.9 rman_init_from_resource.9 \ rman.9 rman_is_region_manager.9 \ rman.9 rman_last_free_region.9 \ rman.9 rman_make_alignment_flags.9 \ rman.9 rman_manage_region.9 \ rman.9 rman_release_resource.9 \ rman.9 rman_reserve_resource.9 \ rman.9 rman_reserve_resource_bound.9 \ rman.9 rman_set_bushandle.9 \ rman.9 rman_set_bustag.9 \ rman.9 rman_set_mapping.9 \ rman.9 rman_set_rid.9 \ rman.9 rman_set_virtual.9 MLINKS+=rmlock.9 rm_assert.9 \ rmlock.9 rm_destroy.9 \ rmlock.9 rm_init.9 \ rmlock.9 rm_init_flags.9 \ rmlock.9 rm_rlock.9 \ rmlock.9 rm_runlock.9 \ rmlock.9 rm_sleep.9 \ rmlock.9 RM_SYSINIT.9 \ rmlock.9 RM_SYSINIT_FLAGS.9 \ rmlock.9 rm_try_rlock.9 \ rmlock.9 rm_wlock.9 \ rmlock.9 rm_wowned.9 \ rmlock.9 rm_wunlock.9 MLINKS+=rtalloc.9 rtalloc1.9 \ rtalloc.9 rtalloc_ign.9 \ rtalloc.9 RT_ADDREF.9 \ rtalloc.9 RT_LOCK.9 \ rtalloc.9 RT_REMREF.9 \ rtalloc.9 RT_RTFREE.9 \ rtalloc.9 RT_UNLOCK.9 \ rtalloc.9 RTFREE_LOCKED.9 \ rtalloc.9 RTFREE.9 \ rtalloc.9 rtfree.9 \ rtalloc.9 rtalloc1_fib.9 \ rtalloc.9 rtalloc_ign_fib.9 \ rtalloc.9 rtalloc_fib.9 MLINKS+=runqueue.9 choosethread.9 \ runqueue.9 procrunnable.9 \ runqueue.9 remrunqueue.9 \ runqueue.9 setrunqueue.9 MLINKS+=rwlock.9 rw_assert.9 \ rwlock.9 rw_destroy.9 \ 
rwlock.9 rw_downgrade.9 \ rwlock.9 rw_init.9 \ rwlock.9 rw_init_flags.9 \ rwlock.9 rw_initialized.9 \ rwlock.9 rw_rlock.9 \ rwlock.9 rw_runlock.9 \ rwlock.9 rw_unlock.9 \ rwlock.9 rw_sleep.9 \ rwlock.9 RW_SYSINIT.9 \ rwlock.9 RW_SYSINIT_FLAGS.9 \ rwlock.9 rw_try_rlock.9 \ rwlock.9 rw_try_upgrade.9 \ rwlock.9 rw_try_wlock.9 \ rwlock.9 rw_wlock.9 \ rwlock.9 rw_wowned.9 \ rwlock.9 rw_wunlock.9 MLINKS+=sbuf.9 sbuf_bcat.9 \ sbuf.9 sbuf_bcopyin.9 \ sbuf.9 sbuf_bcpy.9 \ sbuf.9 sbuf_cat.9 \ sbuf.9 sbuf_clear.9 \ sbuf.9 sbuf_clear_flags.9 \ sbuf.9 sbuf_copyin.9 \ sbuf.9 sbuf_cpy.9 \ sbuf.9 sbuf_data.9 \ sbuf.9 sbuf_delete.9 \ sbuf.9 sbuf_done.9 \ sbuf.9 sbuf_error.9 \ sbuf.9 sbuf_finish.9 \ sbuf.9 sbuf_get_flags.9 \ sbuf.9 sbuf_hexdump.9 \ sbuf.9 sbuf_len.9 \ sbuf.9 sbuf_new.9 \ sbuf.9 sbuf_new_auto.9 \ sbuf.9 sbuf_new_for_sysctl.9 \ sbuf.9 sbuf_nl_terminate.9 \ sbuf.9 sbuf_printf.9 \ sbuf.9 sbuf_printf_drain.9 \ sbuf.9 sbuf_putbuf.9 \ sbuf.9 sbuf_putc.9 \ sbuf.9 sbuf_set_drain.9 \ sbuf.9 sbuf_set_flags.9 \ sbuf.9 sbuf_setpos.9 \ sbuf.9 sbuf_start_section.9 \ sbuf.9 sbuf_end_section.9 \ sbuf.9 sbuf_trim.9 \ sbuf.9 sbuf_vprintf.9 MLINKS+=scheduler.9 curpriority_cmp.9 \ scheduler.9 maybe_resched.9 \ scheduler.9 propagate_priority.9 \ scheduler.9 resetpriority.9 \ scheduler.9 roundrobin.9 \ scheduler.9 roundrobin_interval.9 \ scheduler.9 schedclock.9 \ scheduler.9 schedcpu.9 \ scheduler.9 sched_setup.9 \ scheduler.9 setrunnable.9 \ scheduler.9 updatepri.9 MLINKS+=SDT.9 SDT_PROVIDER_DECLARE.9 \ SDT.9 SDT_PROVIDER_DEFINE.9 \ SDT.9 SDT_PROBE_DECLARE.9 \ SDT.9 SDT_PROBE_DEFINE.9 \ SDT.9 SDT_PROBE.9 MLINKS+=securelevel_gt.9 securelevel_ge.9 MLINKS+=selrecord.9 seldrain.9 \ selrecord.9 selwakeup.9 MLINKS+=sema.9 sema_destroy.9 \ sema.9 sema_init.9 \ sema.9 sema_post.9 \ sema.9 sema_timedwait.9 \ sema.9 sema_trywait.9 \ sema.9 sema_value.9 \ sema.9 sema_wait.9 MLINKS+=seqc.9 seqc_consistent.9 \ seqc.9 seqc_read.9 \ seqc.9 seqc_write_begin.9 \ seqc.9 seqc_write_end.9 MLINKS+=sf_buf.9 sf_buf_alloc.9 \ sf_buf.9 sf_buf_free.9 \ sf_buf.9 sf_buf_kva.9 \ sf_buf.9 sf_buf_page.9 MLINKS+=sglist.9 sglist_alloc.9 \ sglist.9 sglist_append.9 \ sglist.9 sglist_append_bio.9 \ sglist.9 sglist_append_ext_pgs.9 \ sglist.9 sglist_append_mb_ext_pgs.9 \ sglist.9 sglist_append_mbuf.9 \ sglist.9 sglist_append_phys.9 \ sglist.9 sglist_append_sglist.9 \ sglist.9 sglist_append_uio.9 \ sglist.9 sglist_append_user.9 \ sglist.9 sglist_append_vmpages.9 \ sglist.9 sglist_build.9 \ sglist.9 sglist_clone.9 \ sglist.9 sglist_consume_uio.9 \ sglist.9 sglist_count.9 \ sglist.9 sglist_count_ext_pgs.9 \ sglist.9 sglist_count_mb_ext_pgs.9 \ sglist.9 sglist_count_vmpages.9 \ sglist.9 sglist_free.9 \ sglist.9 sglist_hold.9 \ sglist.9 sglist_init.9 \ sglist.9 sglist_join.9 \ sglist.9 sglist_length.9 \ sglist.9 sglist_reset.9 \ sglist.9 sglist_slice.9 \ sglist.9 sglist_split.9 MLINKS+=shm_map.9 shm_unmap.9 MLINKS+=signal.9 cursig.9 \ signal.9 execsigs.9 \ signal.9 issignal.9 \ signal.9 killproc.9 \ signal.9 pgsigio.9 \ signal.9 postsig.9 \ signal.9 SETSETNEQ.9 \ signal.9 SETSETOR.9 \ signal.9 SIGADDSET.9 \ signal.9 SIG_CONTSIGMASK.9 \ signal.9 SIGDELSET.9 \ signal.9 SIGEMPTYSET.9 \ signal.9 sigexit.9 \ signal.9 SIGFILLSET.9 \ signal.9 siginit.9 \ signal.9 SIGISEMPTY.9 \ signal.9 SIGISMEMBER.9 \ signal.9 SIGNOTEMPTY.9 \ signal.9 signotify.9 \ signal.9 SIGPENDING.9 \ signal.9 SIGSETAND.9 \ signal.9 SIGSETCANTMASK.9 \ signal.9 SIGSETEQ.9 \ signal.9 SIGSETNAND.9 \ signal.9 SIG_STOPSIGMASK.9 \ signal.9 trapsignal.9 MLINKS+=sleep.9 msleep.9 \ sleep.9 
msleep_sbt.9 \ sleep.9 msleep_spin.9 \ sleep.9 msleep_spin_sbt.9 \ sleep.9 pause.9 \ sleep.9 pause_sig.9 \ sleep.9 pause_sbt.9 \ sleep.9 tsleep.9 \ sleep.9 tsleep_sbt.9 \ sleep.9 wakeup.9 \ sleep.9 wakeup_one.9 \ sleep.9 wakeup_any.9 MLINKS+=sleepqueue.9 init_sleepqueues.9 \ sleepqueue.9 sleepq_abort.9 \ sleepqueue.9 sleepq_add.9 \ sleepqueue.9 sleepq_alloc.9 \ sleepqueue.9 sleepq_broadcast.9 \ sleepqueue.9 sleepq_free.9 \ sleepqueue.9 sleepq_lookup.9 \ sleepqueue.9 sleepq_lock.9 \ sleepqueue.9 sleepq_release.9 \ sleepqueue.9 sleepq_remove.9 \ sleepqueue.9 sleepq_set_timeout.9 \ sleepqueue.9 sleepq_set_timeout_sbt.9 \ sleepqueue.9 sleepq_signal.9 \ sleepqueue.9 sleepq_sleepcnt.9 \ sleepqueue.9 sleepq_timedwait.9 \ sleepqueue.9 sleepq_timedwait_sig.9 \ sleepqueue.9 sleepq_type.9 \ sleepqueue.9 sleepq_wait.9 \ sleepqueue.9 sleepq_wait_sig.9 MLINKS+=socket.9 soabort.9 \ socket.9 soaccept.9 \ socket.9 sobind.9 \ socket.9 socheckuid.9 \ socket.9 soclose.9 \ socket.9 soconnect.9 \ socket.9 socreate.9 \ socket.9 sodisconnect.9 \ socket.9 sodtor_set.9 \ socket.9 sodupsockaddr.9 \ socket.9 sofree.9 \ socket.9 sogetopt.9 \ socket.9 sohasoutofband.9 \ socket.9 solisten.9 \ socket.9 solisten_proto.9 \ socket.9 solisten_proto_check.9 \ socket.9 sonewconn.9 \ socket.9 sooptcopyin.9 \ socket.9 sooptcopyout.9 \ socket.9 sopoll.9 \ socket.9 sopoll_generic.9 \ socket.9 soreceive.9 \ socket.9 soreceive_dgram.9 \ socket.9 soreceive_generic.9 \ socket.9 soreceive_stream.9 \ socket.9 soreserve.9 \ socket.9 sorflush.9 \ socket.9 sosend.9 \ socket.9 sosend_dgram.9 \ socket.9 sosend_generic.9 \ socket.9 sosetopt.9 \ socket.9 soshutdown.9 \ socket.9 sotoxsocket.9 \ socket.9 soupcall_clear.9 \ socket.9 soupcall_set.9 \ socket.9 sowakeup.9 MLINKS+=stack.9 stack_copy.9 \ stack.9 stack_create.9 \ stack.9 stack_destroy.9 \ stack.9 stack_print.9 \ stack.9 stack_print_ddb.9 \ stack.9 stack_print_short.9 \ stack.9 stack_print_short_ddb.9 \ stack.9 stack_put.9 \ stack.9 stack_save.9 \ stack.9 stack_sbuf_print.9 \ stack.9 stack_sbuf_print_ddb.9 \ stack.9 stack_zero.9 MLINKS+=store.9 subyte.9 \ store.9 suword.9 \ store.9 suword16.9 \ store.9 suword32.9 \ store.9 suword64.9 MLINKS+=swi.9 swi_add.9 \ swi.9 swi_remove.9 \ swi.9 swi_sched.9 MLINKS+=sx.9 sx_assert.9 \ sx.9 sx_destroy.9 \ sx.9 sx_downgrade.9 \ sx.9 sx_init.9 \ sx.9 sx_init_flags.9 \ sx.9 sx_sleep.9 \ sx.9 sx_slock.9 \ sx.9 sx_slock_sig.9 \ sx.9 sx_sunlock.9 \ sx.9 SX_SYSINIT.9 \ sx.9 SX_SYSINIT_FLAGS.9 \ sx.9 sx_try_slock.9 \ sx.9 sx_try_upgrade.9 \ sx.9 sx_try_xlock.9 \ sx.9 sx_unlock.9 \ sx.9 sx_xholder.9 \ sx.9 sx_xlock.9 \ sx.9 sx_xlock_sig.9 \ sx.9 sx_xlocked.9 \ sx.9 sx_xunlock.9 MLINKS+=syscall_helper_register.9 syscall_helper_unregister.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_COMPAT.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_COMPAT_F.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_F.9 MLINKS+=sysctl.9 SYSCTL_DECL.9 \ sysctl.9 SYSCTL_ADD_INT.9 \ sysctl.9 SYSCTL_ADD_LONG.9 \ sysctl.9 SYSCTL_ADD_NODE.9 \ sysctl.9 SYSCTL_ADD_NODE_WITH_LABEL.9 \ sysctl.9 SYSCTL_ADD_OPAQUE.9 \ sysctl.9 SYSCTL_ADD_PROC.9 \ sysctl.9 SYSCTL_ADD_QUAD.9 \ sysctl.9 SYSCTL_ADD_ROOT_NODE.9 \ sysctl.9 SYSCTL_ADD_S8.9 \ sysctl.9 SYSCTL_ADD_S16.9 \ sysctl.9 SYSCTL_ADD_S32.9 \ sysctl.9 SYSCTL_ADD_S64.9 \ sysctl.9 SYSCTL_ADD_STRING.9 \ sysctl.9 SYSCTL_ADD_STRUCT.9 \ sysctl.9 SYSCTL_ADD_TIMEVAL_SEC.9 \ sysctl.9 SYSCTL_ADD_U8.9 \ sysctl.9 SYSCTL_ADD_U16.9 \ sysctl.9 SYSCTL_ADD_U32.9 \ sysctl.9 SYSCTL_ADD_U64.9 \ sysctl.9 
SYSCTL_ADD_UAUTO.9 \ sysctl.9 SYSCTL_ADD_UINT.9 \ sysctl.9 SYSCTL_ADD_ULONG.9 \ sysctl.9 SYSCTL_ADD_UMA_CUR.9 \ sysctl.9 SYSCTL_ADD_UMA_MAX.9 \ sysctl.9 SYSCTL_ADD_UQUAD.9 \ sysctl.9 SYSCTL_CHILDREN.9 \ sysctl.9 SYSCTL_STATIC_CHILDREN.9 \ sysctl.9 SYSCTL_NODE_CHILDREN.9 \ sysctl.9 SYSCTL_PARENT.9 \ sysctl.9 SYSCTL_INT.9 \ sysctl.9 SYSCTL_INT_WITH_LABEL.9 \ sysctl.9 SYSCTL_LONG.9 \ sysctl.9 sysctl_msec_to_ticks.9 \ sysctl.9 SYSCTL_NODE.9 \ sysctl.9 SYSCTL_NODE_WITH_LABEL.9 \ sysctl.9 SYSCTL_OPAQUE.9 \ sysctl.9 SYSCTL_PROC.9 \ sysctl.9 SYSCTL_QUAD.9 \ sysctl.9 SYSCTL_ROOT_NODE.9 \ sysctl.9 SYSCTL_S8.9 \ sysctl.9 SYSCTL_S16.9 \ sysctl.9 SYSCTL_S32.9 \ sysctl.9 SYSCTL_S64.9 \ sysctl.9 SYSCTL_STRING.9 \ sysctl.9 SYSCTL_STRUCT.9 \ sysctl.9 SYSCTL_TIMEVAL_SEC.9 \ sysctl.9 SYSCTL_U8.9 \ sysctl.9 SYSCTL_U16.9 \ sysctl.9 SYSCTL_U32.9 \ sysctl.9 SYSCTL_U64.9 \ sysctl.9 SYSCTL_UINT.9 \ sysctl.9 SYSCTL_ULONG.9 \ sysctl.9 SYSCTL_UMA_CUR.9 \ sysctl.9 SYSCTL_UMA_MAX.9 \ sysctl.9 SYSCTL_UQUAD.9 MLINKS+=sysctl_add_oid.9 sysctl_move_oid.9 \ sysctl_add_oid.9 sysctl_remove_oid.9 \ sysctl_add_oid.9 sysctl_remove_name.9 MLINKS+=sysctl_ctx_init.9 sysctl_ctx_entry_add.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_del.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_find.9 \ sysctl_ctx_init.9 sysctl_ctx_free.9 MLINKS+=SYSINIT.9 SYSUNINIT.9 MLINKS+=taskqueue.9 TASK_INIT.9 \ taskqueue.9 TASK_INITIALIZER.9 \ taskqueue.9 taskqueue_block.9 \ taskqueue.9 taskqueue_cancel.9 \ taskqueue.9 taskqueue_cancel_timeout.9 \ taskqueue.9 taskqueue_create.9 \ taskqueue.9 taskqueue_create_fast.9 \ taskqueue.9 TASKQUEUE_DECLARE.9 \ taskqueue.9 TASKQUEUE_DEFINE.9 \ taskqueue.9 TASKQUEUE_DEFINE_THREAD.9 \ taskqueue.9 taskqueue_drain.9 \ taskqueue.9 taskqueue_drain_all.9 \ taskqueue.9 taskqueue_drain_timeout.9 \ taskqueue.9 taskqueue_enqueue.9 \ taskqueue.9 taskqueue_enqueue_timeout.9 \ taskqueue.9 TASKQUEUE_FAST_DEFINE.9 \ taskqueue.9 TASKQUEUE_FAST_DEFINE_THREAD.9 \ taskqueue.9 taskqueue_free.9 \ taskqueue.9 taskqueue_member.9 \ taskqueue.9 taskqueue_quiesce.9 \ taskqueue.9 taskqueue_run.9 \ taskqueue.9 taskqueue_set_callback.9 \ taskqueue.9 taskqueue_start_threads.9 \ taskqueue.9 taskqueue_start_threads_cpuset.9 \ taskqueue.9 taskqueue_start_threads_in_proc.9 \ taskqueue.9 taskqueue_unblock.9 \ taskqueue.9 TIMEOUT_TASK_INIT.9 MLINKS+=tcp_functions.9 register_tcp_functions.9 \ tcp_functions.9 register_tcp_functions_as_name.9 \ tcp_functions.9 register_tcp_functions_as_names.9 \ tcp_functions.9 deregister_tcp_functions.9 MLINKS+=time.9 boottime.9 \ time.9 time_second.9 \ time.9 time_uptime.9 MLINKS+=ucred.9 crcopy.9 \ ucred.9 crcopysafe.9 \ ucred.9 crdup.9 \ ucred.9 crfree.9 \ ucred.9 crget.9 \ ucred.9 crhold.9 \ ucred.9 crsetgroups.9 \ ucred.9 cru2x.9 MLINKS+=uidinfo.9 uifind.9 \ uidinfo.9 uifree.9 \ uidinfo.9 uihashinit.9 \ uidinfo.9 uihold.9 MLINKS+=uio.9 uiomove.9 \ uio.9 uiomove_frombuf.9 \ uio.9 uiomove_nofault.9 .if ${MK_USB} != "no" MAN+= usbdi.9 MLINKS+=usbdi.9 usbd_do_request.9 \ usbdi.9 usbd_do_request_flags.9 \ usbdi.9 usbd_errstr.9 \ usbdi.9 usbd_lookup_id_by_info.9 \ usbdi.9 usbd_lookup_id_by_uaa.9 \ usbdi.9 usbd_transfer_clear_stall.9 \ usbdi.9 usbd_transfer_drain.9 \ usbdi.9 usbd_transfer_pending.9 \ usbdi.9 usbd_transfer_poll.9 \ usbdi.9 usbd_transfer_setup.9 \ usbdi.9 usbd_transfer_start.9 \ usbdi.9 usbd_transfer_stop.9 \ usbdi.9 usbd_transfer_submit.9 \ usbdi.9 usbd_transfer_unsetup.9 \ usbdi.9 usbd_xfer_clr_flag.9 \ usbdi.9 usbd_xfer_frame_data.9 \ usbdi.9 usbd_xfer_frame_len.9 \ usbdi.9 usbd_xfer_get_frame.9 \ usbdi.9 
usbd_xfer_get_priv.9 \ usbdi.9 usbd_xfer_is_stalled.9 \ usbdi.9 usbd_xfer_max_framelen.9 \ usbdi.9 usbd_xfer_max_frames.9 \ usbdi.9 usbd_xfer_max_len.9 \ usbdi.9 usbd_xfer_set_flag.9 \ usbdi.9 usbd_xfer_set_frame_data.9 \ usbdi.9 usbd_xfer_set_frame_len.9 \ usbdi.9 usbd_xfer_set_frame_offset.9 \ usbdi.9 usbd_xfer_set_frames.9 \ usbdi.9 usbd_xfer_set_interval.9 \ usbdi.9 usbd_xfer_set_priv.9 \ usbdi.9 usbd_xfer_set_stall.9 \ usbdi.9 usbd_xfer_set_timeout.9 \ usbdi.9 usbd_xfer_softc.9 \ usbdi.9 usbd_xfer_state.9 \ usbdi.9 usbd_xfer_status.9 \ usbdi.9 usb_fifo_alloc_buffer.9 \ usbdi.9 usb_fifo_attach.9 \ usbdi.9 usb_fifo_detach.9 \ usbdi.9 usb_fifo_free_buffer.9 \ usbdi.9 usb_fifo_get_data.9 \ usbdi.9 usb_fifo_get_data_buffer.9 \ usbdi.9 usb_fifo_get_data_error.9 \ usbdi.9 usb_fifo_get_data_linear.9 \ usbdi.9 usb_fifo_put_bytes_max.9 \ usbdi.9 usb_fifo_put_data.9 \ usbdi.9 usb_fifo_put_data_buffer.9 \ usbdi.9 usb_fifo_put_data_error.9 \ usbdi.9 usb_fifo_put_data_linear.9 \ usbdi.9 usb_fifo_reset.9 \ usbdi.9 usb_fifo_softc.9 \ usbdi.9 usb_fifo_wakeup.9 .endif MLINKS+=vcount.9 count_dev.9 MLINKS+=vfsconf.9 vfs_modevent.9 \ vfsconf.9 vfs_register.9 \ vfsconf.9 vfs_unregister.9 MLINKS+=vfs_getopt.9 vfs_copyopt.9 \ vfs_getopt.9 vfs_filteropt.9 \ vfs_getopt.9 vfs_flagopt.9 \ vfs_getopt.9 vfs_getopts.9 \ vfs_getopt.9 vfs_scanopt.9 \ vfs_getopt.9 vfs_setopt.9 \ vfs_getopt.9 vfs_setopt_part.9 \ vfs_getopt.9 vfs_setopts.9 MLINKS+=vhold.9 vdrop.9 \ vhold.9 vdropl.9 \ vhold.9 vholdl.9 MLINKS+=vmem.9 vmem_add.9 \ vmem.9 vmem_alloc.9 \ vmem.9 vmem_create.9 \ vmem.9 vmem_destroy.9 \ vmem.9 vmem_free.9 \ vmem.9 vmem_xalloc.9 \ vmem.9 vmem_xfree.9 MLINKS+=vm_map_lock.9 vm_map_lock_downgrade.9 \ vm_map_lock.9 vm_map_lock_read.9 \ vm_map_lock.9 vm_map_lock_upgrade.9 \ vm_map_lock.9 vm_map_trylock.9 \ vm_map_lock.9 vm_map_trylock_read.9 \ vm_map_lock.9 vm_map_unlock.9 \ vm_map_lock.9 vm_map_unlock_read.9 MLINKS+=vm_map_lookup.9 vm_map_lookup_done.9 MLINKS+=vm_map_max.9 vm_map_min.9 \ vm_map_max.9 vm_map_pmap.9 MLINKS+=vm_map_stack.9 vm_map_growstack.9 MLINKS+=vm_map_wire.9 vm_map_wire_mapped.9 \ vm_page_wire.9 vm_page_unwire.9 \ vm_page_wire.9 vm_page_unwire_noq.9 MLINKS+=vm_page_bits.9 vm_page_clear_dirty.9 \ vm_page_bits.9 vm_page_dirty.9 \ vm_page_bits.9 vm_page_is_valid.9 \ vm_page_bits.9 vm_page_set_invalid.9 \ vm_page_bits.9 vm_page_set_validclean.9 \ vm_page_bits.9 vm_page_test_dirty.9 \ vm_page_bits.9 vm_page_undirty.9 \ vm_page_bits.9 vm_page_zero_invalid.9 MLINKS+=vm_page_busy.9 vm_page_busied.9 \ vm_page_busy.9 vm_page_busy_downgrade.9 \ vm_page_busy.9 vm_page_busy_sleep.9 \ vm_page_busy.9 vm_page_sbusied.9 \ vm_page_busy.9 vm_page_sbusy.9 \ vm_page_busy.9 vm_page_sleep_if_busy.9 \ vm_page_busy.9 vm_page_sunbusy.9 \ vm_page_busy.9 vm_page_trysbusy.9 \ vm_page_busy.9 vm_page_tryxbusy.9 \ vm_page_busy.9 vm_page_xbusied.9 \ vm_page_busy.9 vm_page_xbusy.9 \ vm_page_busy.9 vm_page_xunbusy.9 \ vm_page_busy.9 vm_page_assert_sbusied.9 \ vm_page_busy.9 vm_page_assert_unbusied.9 \ vm_page_busy.9 vm_page_assert_xbusied.9 MLINKS+=vm_page_aflag.9 vm_page_aflag_clear.9 \ vm_page_aflag.9 vm_page_aflag_set.9 \ vm_page_aflag.9 vm_page_reference.9 MLINKS+=vm_page_free.9 vm_page_free_toq.9 \ vm_page_free.9 vm_page_free_zero.9 \ vm_page_free.9 vm_page_try_to_free.9 MLINKS+=vm_page_insert.9 vm_page_remove.9 MLINKS+=vm_page_wire.9 vm_page_unwire.9 MLINKS+=VOP_ACCESS.9 VOP_ACCESSX.9 MLINKS+=VOP_ATTRIB.9 VOP_GETATTR.9 \ VOP_ATTRIB.9 VOP_SETATTR.9 MLINKS+=VOP_CREATE.9 VOP_MKDIR.9 \ VOP_CREATE.9 VOP_MKNOD.9 \ VOP_CREATE.9 
VOP_SYMLINK.9 MLINKS+=VOP_FSYNC.9 VOP_FDATASYNC.9 MLINKS+=VOP_GETPAGES.9 VOP_PUTPAGES.9 MLINKS+=VOP_INACTIVE.9 VOP_RECLAIM.9 MLINKS+=VOP_LOCK.9 vn_lock.9 \ VOP_LOCK.9 VOP_ISLOCKED.9 \ VOP_LOCK.9 VOP_UNLOCK.9 MLINKS+=VOP_OPENCLOSE.9 VOP_CLOSE.9 \ VOP_OPENCLOSE.9 VOP_OPEN.9 MLINKS+=VOP_RDWR.9 VOP_READ.9 \ VOP_RDWR.9 VOP_WRITE.9 MLINKS+=VOP_REMOVE.9 VOP_RMDIR.9 MLINKS+=vnet.9 vimage.9 MLINKS+=vref.9 VREF.9 \ vref.9 vrefl.9 MLINKS+=vrele.9 vput.9 \ vrele.9 vunref.9 MLINKS+=vslock.9 vsunlock.9 MLINKS+=zone.9 uma.9 \ zone.9 uma_prealloc.9 \ zone.9 uma_reclaim.9 \ zone.9 uma_zalloc.9 \ zone.9 uma_zalloc_arg.9 \ zone.9 uma_zalloc_domain.9 \ zone.9 uma_zalloc_pcpu.9 \ zone.9 uma_zalloc_pcpu_arg.9 \ zone.9 uma_zcache_create.9 \ zone.9 uma_zcreate.9 \ zone.9 uma_zdestroy.9 \ zone.9 uma_zfree.9 \ zone.9 uma_zfree_arg.9 \ zone.9 uma_zfree_domain.9 \ zone.9 uma_zfree_pcpu.9 \ zone.9 uma_zfree_pcpu_arg.9 \ zone.9 uma_zone_get_cur.9 \ zone.9 uma_zone_get_max.9 \ zone.9 uma_zone_reclaim.9 \ zone.9 uma_zone_reserve.9 \ zone.9 uma_zone_reserve_kva.9 \ zone.9 uma_zone_set_allocf.9 \ zone.9 uma_zone_set_freef.9 \ zone.9 uma_zone_set_max.9 \ zone.9 uma_zone_set_maxaction.9 \ zone.9 uma_zone_set_maxcache.9 \ zone.9 uma_zone_set_warning.9 \ zone.9 uma_zsecond_create.9 .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" _superio.9= superio.9 MLINKS+=superio.9 superio_devid.9 \ superio.9 superio_dev_disable.9 \ superio.9 superio_dev_enable.9 \ superio.9 superio_dev_enabled.9 \ superio.9 superio_find_dev.9 \ superio.9 superio_find_dev.9 \ superio.9 superio_get_dma.9 \ superio.9 superio_get_iobase.9 \ superio.9 superio_get_irq.9 \ superio.9 superio_get_ldn.9 \ superio.9 superio_get_type.9 \ superio.9 superio_read.9 \ superio.9 superio_revid.9 \ superio.9 superio_vendor.9 \ superio.9 superio_write.9 .endif .include Index: head/share/man/man9/crypto_driver.9 =================================================================== --- head/share/man/man9/crypto_driver.9 (revision 360135) +++ head/share/man/man9/crypto_driver.9 (revision 360136) @@ -1,392 +1,400 @@ .\" Copyright (c) 2020, Chelsio Inc .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions are met: .\" .\" 1. Redistributions of source code must retain the above copyright notice, .\" this list of conditions and the following disclaimer. .\" .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" 3. Neither the name of the Chelsio Inc nor the names of its .\" contributors may be used to endorse or promote products derived from .\" this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" .\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" * Other names and brands may be claimed as the property of others. .\" .\" $FreeBSD$ .\" -.Dd March 27, 2020 +.Dd April 20, 2020 .Dt CRYPTO_DRIVER 9 .Os .Sh NAME .Nm crypto_driver .Nd interface for symmetric cryptographic drivers .Sh SYNOPSIS .In opencrypto/cryptodev.h .Ft int .Fo crypto_apply .Fa "struct cryptop *crp" .Fa "int off" .Fa "int len" .Fa "int (*f)(void *, void *, u_int)" .Fa "void *arg" .Fc .Ft void * .Fo crypto_contiguous_subsegment .Fa "struct cryptop *crp" .Fa "size_t skip" .Fa "size_t len" .Fc .Ft void .Fn crypto_copyback "struct cryptop *crp" "int off" "int size" "const void *src" .Ft void .Fn crypto_copydata "struct cryptop *crp" "int off" "int size" "void *dst" .Ft void .Fn crypto_done "struct cryptop *crp" .Ft int32_t .Fn crypto_get_driverid "device_t dev" "size_t session_size" "int flags" .Ft void * .Fn crypto_get_driver_session "crypto_session_t crypto_session" +.Ft void +.Fn crypto_read_iv "struct cryptop *crp" "void *iv" .Ft int .Fn crypto_unblock "uint32_t driverid" "int what" .Ft int .Fn crypto_unregister_all "uint32_t driverid" .Ft int .Fn CRYPTODEV_FREESESSION "device_t dev" "crypto_session_t crypto_session" .Ft int .Fo CRYPTODEV_NEWSESSION .Fa "device_t dev" .Fa "crypto_session_t crypto_session" .Fa "const struct crypto_session_params *csp" .Fc .Ft int .Fo CRYPTODEV_PROBESESSION .Fa "device_t dev" .Fa "const struct crypto_session_params *csp" .Fc .Ft int .Fn CRYPTODEV_PROCESS "device_t dev" "struct cryptop *crp" "int flags" .Ft void .Fo hmac_init_ipad .Fa "struct auth_hash *axf" .Fa "const char *key" .Fa "int klen" .Fa "void *auth_ctx" .Fc .Ft void .Fo hmac_init_opad .Fa "struct auth_hash *axf" .Fa "const char *key" .Fa "int klen" .Fa "void *auth_ctx" .Fc .Sh DESCRIPTION Symmetric cryptographic drivers process cryptographic requests submitted to sessions associated with the driver. .Pp Cryptographic drivers call .Fn crypto_get_driverid to register with the cryptographic framework. .Fa dev is the device used to service requests. The .Fn CRYPTODEV methods are defined in the method table for the device driver attached to .Fa dev . .Fa session_size specifies the size of a driver-specific per-session structure allocated by the cryptographic framework. .Fa flags is a bitmask of properties about the driver. Exactly one of .Dv CRYPTOCAP_F_SOFTWARE or .Dv CRYPTOCAP_F_HARDWARE must be specified. .Dv CRYPTOCAP_F_SOFTWARE should be used for drivers which process requests using host CPUs. .Dv CRYPTOCAP_F_HARDWARE should be used for drivers which process requests on separate co-processors. .Dv CRYPTOCAP_F_SYNC should be set for drivers which process requests synchronously in .Fn CRYPTODEV_PROCESS . .Fn crypto_get_driverid returns an opaque driver id. .Pp .Fn crypto_unregister_all unregisters a driver from the cryptographic framework. If there are any pending operations or open sessions, this function will sleep. .Fa driverid is the value returned by an earlier call to .Fn crypto_get_driverid . 
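As a rough illustration of the registration and unregistration calls described above, the following minimal sketch uses hypothetical mydrv names; the softc layout, flags, and error handling loosely follow the aesni(4) attach path further down in this revision, and it is an outline only, not a complete driver.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <opencrypto/cryptodev.h>

struct mydrv_session {
	/* Driver-private per-session state would go here. */
	int placeholder;
};

struct mydrv_softc {
	int32_t cid;		/* driver id from crypto_get_driverid() */
};

static int
mydrv_attach(device_t dev)
{
	struct mydrv_softc *sc;

	sc = device_get_softc(dev);
	/*
	 * Register as a synchronous software driver; the framework
	 * allocates a zeroed struct mydrv_session for each new session.
	 */
	sc->cid = crypto_get_driverid(dev, sizeof(struct mydrv_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (sc->cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		return (ENOMEM);
	}
	return (0);
}

static int
mydrv_detach(device_t dev)
{
	struct mydrv_softc *sc;

	sc = device_get_softc(dev);
	/* May sleep until pending requests and open sessions drain. */
	crypto_unregister_all(sc->cid);
	return (0);
}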
.Pp When a new session is created by .Fn crypto_newsession , .Fn CRYPTODEV_PROBESESSION is invoked by the cryptographic framework on each active driver to determine the best driver to use for the session. This method should inspect the session parameters in .Fa csp . If a driver does not support requests described by .Fa csp , this method should return an error value. If the driver does support requests described by .Fa csp , it should return a negative value. The framework prefers drivers with the largest negative value, similar to .Xr DEVICE_PROBE 9 . The following values are defined for non-error return values from this method: .Bl -tag -width "CRYPTODEV_PROBE_ACCEL_SOFTWARE" .It Dv CRYPTODEV_PROBE_HARDWARE The driver processes requests via a co-processor. .It Dv CRYPTODEV_PROBE_ACCEL_SOFTWARE The driver processes requests on the host CPU using optimized instructions such as AES-NI. .It Dv CRYPTODEV_PROBE_SOFTWARE The driver processes requests on the host CPU. .El .Pp This method should not sleep. .Pp Once the framework has chosen a driver for a session, the framework invokes the .Fn CRYPTODEV_NEWSESSION method to initialize driver-specific session state. Prior to calling this method, the framework allocates a per-session driver-specific data structure. This structure is initialized with zeroes, and its size is set by the .Fa session_size passed to .Fn crypto_get_driverid . This method can retrieve a pointer to this data structure by passing .Fa crypto_session to .Fn crypto_get_driver_session . Session parameters are described in .Fa csp . .Pp This method should not sleep. .Pp .Fn CRYPTODEV_FREESESSION is invoked to release any driver-specific state when a session is destroyed. The per-session driver-specific data structure is explicitly zeroed and freed by the framework after this method returns. If a driver requires no additional tear-down steps, it can leave this method undefined. .Pp This method should not sleep. .Pp .Fn CRYPTODEV_PROCESS is invoked for each request submitted to an active session. This method can either complete a request synchronously or schedule it to be completed asynchronously, but it must not sleep. .Pp If this method is not able to complete a request due to insufficient resources such as a full command queue, it can defer the request by returning .Dv ERESTART . The request will be queued by the framework and retried once the driver releases pending requests via .Fn crypto_unblock . Any requests submitted to sessions belonging to the driver will also be queued until .Fn crypto_unblock is called. .Pp If a driver encounters errors while processing a request, it should report them via the .Fa crp_etype field of .Fa crp rather than returning an error directly. .Pp .Fa flags may be set to .Dv CRYPTO_HINT_MORE if there are additional requests queued for this driver. The driver can use this as a hint to batch completion interrupts. Note that these additional requests may be from different sessions. .Pp .Fn crypto_get_driver_session returns a pointer to the driver-specific per-session data structure for the session .Fa crypto_session . This function can be used in the .Fn CRYPTODEV_NEWSESSION , .Fn CRYPTODEV_PROCESS , and .Fn CRYPTODEV_FREESESSION callbacks. .Pp .Fn crypto_copydata copies .Fa size bytes out of the data buffer for .Fa crp into a local buffer pointed to by .Fa dst . The bytes are read starting at an offset of .Fa off bytes in the request's data buffer. 
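Continuing the hypothetical mydrv sketch above, a driver that only claims plain AES-CBC cipher sessions might implement its probe and newsession methods roughly as follows; mydrv_expand_key() is a made-up helper standing in for real key scheduling and is assumed to be defined elsewhere in the sketch.

/* Hypothetical key-scheduling helper. */
static void	mydrv_expand_key(struct mydrv_session *ses, const void *key,
    int klen);

static int
mydrv_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if (csp->csp_flags != 0)
		return (EINVAL);
	/* This sketch only claims plain AES-CBC cipher sessions. */
	if (csp->csp_mode != CSP_MODE_CIPHER ||
	    csp->csp_cipher_alg != CRYPTO_AES_CBC ||
	    csp->csp_ivlen != AES_BLOCK_LEN)
		return (EINVAL);
	return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
}

static int
mydrv_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct mydrv_session *ses;

	/* Zeroed, session_size-byte area allocated by the framework. */
	ses = crypto_get_driver_session(cses);
	if (csp->csp_cipher_key != NULL)
		mydrv_expand_key(ses, csp->csp_cipher_key,
		    csp->csp_cipher_klen);
	return (0);
}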
.Pp .Fn crypto_copyback copies .Fa size bytes from the local buffer pointed to by .Fa src into the data buffer for .Fa crp . The bytes are written starting at an offset of .Fa off bytes in the request's data buffer. +.Pp +.Fn crypto_read_iv +copies the IV or nonce for +.Fa crp +into the local buffer pointed to by +.Fa iv . .Pp A driver calls .Fn crypto_done to mark the request .Fa crp as completed. Any errors should be set in .Fa crp_etype prior to calling this function. .Pp If a driver defers a request by returning .Dv ERESTART from .Fn CRYPTODEV_PROCESS , the framework will queue all requests for the driver until the driver calls .Fn crypto_unblock to indicate that the temporary resource shortage has been relieved. For example, if a driver returns .Dv ERESTART due to a full command ring, it would invoke .Fn crypto_unblock from a command completion interrupt that makes a command ring entry available. .Fa driverid is the value returned by .Fn crypto_get_driverid . .Fa what indicates which types of requests the driver is able to handle again: .Bl -tag -width "CRYPTO_ASYMQ" .It Dv CRYPTO_SYMQ indicates that the driver is able to handle symmetric requests passed to .Fn CRYPTODEV_PROCESS . .It Dv CRYPTO_ASYMQ indicates that the driver is able to handle asymmetric requests passed to .Fn CRYPTODEV_KPROCESS . .El .Pp .Fn crypto_apply is a helper routine that can be used to apply a caller-supplied function to a region of the data buffer for .Fa crp . The function .Fa f is called one or more times. For each invocation, the first argument to .Fa f is the value of .Fa arg passed to .Fn crypto_apply . The second and third arguments to .Fa f are a pointer to, and the length of, a segment of the buffer mapped into the kernel. The function is called enough times to cover the .Fa len bytes of the data buffer which start at an offset .Fa off . If any invocation of .Fa f returns a non-zero value, .Fn crypto_apply immediately returns that value without invoking .Fa f on any remaining segments of the region, otherwise .Fn crypto_apply returns the value from the final call to .Fa f . .Pp .Fn crypto_contiguous_subsegment attempts to locate a single, virtually-contiguous segment of the data buffer for .Fa crp . The segment must be .Fa len bytes long and start at an offset of .Fa skip bytes. If a segment is found, a pointer to the start of the segment is returned. Otherwise, .Dv NULL is returned. .Pp .Fn hmac_init_ipad prepares an authentication context to generate the inner hash of an HMAC. .Fa axf is a software implementation of an authentication algorithm such as the value returned by .Fn crypto_auth_hash . .Fa key is a pointer to a HMAC key of .Fa klen bytes. .Fa auth_ctx points to a valid authentication context for the desired algorithm. The function initializes the context with the supplied key. .Pp .Fn hmac_init_opad is similar to .Fn hmac_init_ipad except that it prepares an authentication context to generate the outer hash of an HMAC. .Sh RETURN VALUES .Fn crypto_apply returns the return value from the caller-supplied callback function. .Pp .Fn crypto_contiguous_subsegment returns a pointer to a contiguous segment or .Dv NULL . .Pp .Fn crypto_get_driverid returns a driver identifier on success or -1 on error. .Pp .Fn crypto_unblock , .Fn crypto_unregister_all , .Fn CRYPTODEV_FREESESSION , .Fn CRYPTODEV_NEWSESSION , and .Fn CRYPTODEV_PROCESS return zero on success or an error on failure. .Pp .Fn CRYPTODEV_PROBESESSION returns a negative value on success or an error on failure.
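The pieces above can be combined into a synchronous process method. The sketch below continues the hypothetical mydrv driver, with mydrv_cbc() standing in for the actual cipher code; it reads the IV with the new crypto_read_iv() helper, operates on a virtually contiguous payload, and completes the request via crp_etype and crypto_done(), much like the aesni(4) and armv8crypto(4) hunks later in this revision.

/* Hypothetical AES-CBC core, defined elsewhere in the sketch driver. */
static int	mydrv_cbc(struct mydrv_session *ses, uint8_t *buf, size_t len,
    const uint8_t *iv, bool encrypt);

static int
mydrv_process(device_t dev, struct cryptop *crp, int hint)
{
	struct mydrv_session *ses;
	uint8_t iv[AES_BLOCK_LEN];
	uint8_t *buf;
	int error;

	ses = crypto_get_driver_session(crp->crp_session);

	/* Works for both separate and embedded IVs. */
	crypto_read_iv(crp, iv);

	/* This sketch only handles a virtually contiguous payload. */
	buf = crypto_contiguous_subsegment(crp, crp->crp_payload_start,
	    crp->crp_payload_length);
	if (buf == NULL)
		error = EINVAL;
	else
		error = mydrv_cbc(ses, buf, crp->crp_payload_length, iv,
		    CRYPTO_OP_IS_ENCRYPT(crp->crp_op));

	/* Per-request errors go in crp_etype, not the return value. */
	crp->crp_etype = error;
	crypto_done(crp);
	return (0);
}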
.Sh SEE ALSO .Xr crypto 7 , .Xr crypto 9 , .Xr crypto_request 9 , .Xr crypto_session 9 Index: head/share/man/man9/crypto_request.9 =================================================================== --- head/share/man/man9/crypto_request.9 (revision 360135) +++ head/share/man/man9/crypto_request.9 (revision 360136) @@ -1,419 +1,409 @@ .\" Copyright (c) 2020, Chelsio Inc .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions are met: .\" .\" 1. Redistributions of source code must retain the above copyright notice, .\" this list of conditions and the following disclaimer. .\" .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" 3. Neither the name of the Chelsio Inc nor the names of its .\" contributors may be used to endorse or promote products derived from .\" this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" .\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGE. .\" .\" * Other names and brands may be claimed as the property of others. .\" .\" $FreeBSD$ .\" -.Dd March 27, 2020 +.Dd April 20, 2020 .Dt CRYPTO_REQUEST 9 .Os .Sh NAME .Nm crypto_request .Nd symmetric cryptographic operations .Sh SYNOPSIS .In opencrypto/cryptodev.h .Ft int .Fn crypto_dispatch "struct cryptop *crp" .Ft void .Fn crypto_freereq "struct cryptop *crp" .Ft "struct cryptop *" .Fn crypto_getreq "crypto_session_t cses" "int how" .Sh DESCRIPTION Each symmetric cryptographic operation in the kernel is described by an instance of .Vt struct cryptop and is associated with an active session. .Pp New requests are allocated by .Fn crypto_getreq . .Fa cses is a reference to an active session. .Fa how is passed to .Xr malloc 9 and should be set to either .Dv M_NOWAIT or .Dv M_WAITOK . The caller should then set fields in the returned structure to describe request-specific parameters. Unused fields should be left as-is. .Pp .Fn crypto_dispatch passes a crypto request to the driver attached to the request's session. If there are errors in the request's fields, this function may return an error to the caller. If errors are encountered while servicing the request, they will instead be reported to the request's callback function .Pq Fa crp_callback via .Fa crp_etype . .Pp Note that a request's callback function may be invoked before .Fn crypto_dispatch returns. .Pp Once a request has signaled completion by invoking its callback function, it should be freed via .Fn crypto_freereq . .Pp Cryptographic operations include several fields to describe the request.
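A rough consumer-side sketch of this life cycle follows, assuming an established cipher session cses and a contiguous kernel buffer; submit_encrypt() and mydone() are made-up names, and IV placement is deliberately left out here because it is covered in the Request IV and/or Nonce section below.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <opencrypto/cryptodev.h>

static void
mydone(struct cryptop *crp)
{

	/* Check the completion status, then release the request. */
	if (crp->crp_etype != 0)
		printf("crypto request failed: %d\n", crp->crp_etype);
	crypto_freereq(crp);
}

static int
submit_encrypt(crypto_session_t cses, void *buf, int len)
{
	struct cryptop *crp;

	crp = crypto_getreq(cses, M_WAITOK);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_buf_type = CRYPTO_BUF_CONTIG;
	crp->crp_buf = buf;
	crp->crp_ilen = len;
	crp->crp_payload_start = 0;
	crp->crp_payload_length = len;
	crp->crp_callback = mydone;

	/* mydone() may run before crypto_dispatch() returns. */
	return (crypto_dispatch(crp));
}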
.Ss Buffer Types Requests are associated with a single data buffer that is modified in place. The type of the data buffer and the buffer itself are described by the following fields: .Bl -tag -width crp_buf_type .It Fa crp_buf_type The type of the data buffer. The following types are supported: .Bl -tag -width CRYPTO_BUF_CONTIG .It Dv CRYPTO_BUF_CONTIG An array of bytes mapped into the kernel's address space. .It Dv CRYPTO_BUF_UIO A scatter/gather list of kernel buffers as described in .Xr uio 9 . .It Dv CRYPTO_BUF_MBUF A network memory buffer as described in .Xr mbuf 9 . .El .It Fa crp_buf A pointer to the start of a .Dv CRYPTO_BUF_CONTIG data buffer. .It Fa crp_ilen The length of a .Dv CRYPTO_BUF_CONTIG data buffer. .It Fa crp_mbuf A pointer to a .Vt struct mbuf for .Dv CRYPTO_BUF_MBUF . .It Fa crp_uio A pointer to a .Vt struct uio for .Dv CRYPTO_BUF_UIO . .It Fa crp_olen Used with compression and decompression requests to describe the updated length of the payload region in the data buffer. .Pp If a compression request increases the size of the payload, then the data buffer is unmodified, the request completes successfully, and .Fa crp_olen is set to the size the compressed data would have used. Callers can compare this to the payload region length to determine if the compressed data was discarded. .El .Ss Request Regions Each request describes one or more regions in the data buffer. Each region is described by an offset relative to the start of the data buffer and a length. The length of some regions is the same for all requests belonging to a session. Those lengths are set in the session parameters of the associated session. All requests must define a payload region. Other regions are only required for specific session modes. The following regions are defined: .Bl -column "Payload" "crp_payload_start" "crp_payload_length" .It Sy Region Ta Sy Start Ta Sy Length Ta Sy Description .It AAD Ta Fa crp_aad_start Ta Fa crp_aad_length Ta Additional Authenticated Data .It IV Ta Fa crp_iv_start Ta Fa csp_ivlen Ta Embedded IV or nonce .It Payload Ta Fa crp_payload_start Ta Fa crp_payload_length Ta Data to encrypt, decrypt, compress, or decompress .It Digest Ta Fa crp_digest_start Ta Fa csp_auth_mlen Ta Authentication digest, hash, or tag .El .Pp Requests are permitted to operate on only a subset of the data buffer. For example, requests from IPsec operate on network packets that include headers not used as either additional authentication data (AAD) or payload data. .Ss Request Operations All requests must specify the type of operation to perform in .Fa crp_op . Available operations depend on the session's mode. .Pp Compression requests support the following operations: .Bl -tag -width CRYPTO_OP_DECOMPRESS .It Dv CRYPTO_OP_COMPRESS Compress the data in the payload region of the data buffer. .It Dv CRYPTO_OP_DECOMPRESS Decompress the data in the payload region of the data buffer. .El .Pp Cipher requests support the following operations: .Bl -tag -width CRYPTO_OP_DECRYPT .It Dv CRYPTO_OP_ENCRYPT Encrypt the data in the payload region of the data buffer. .It Dv CRYPTO_OP_DECRYPT Decrypt the data in the payload region of the data buffer. .El .Pp Digest requests support the following operations: .Bl -tag -width CRYPTO_OP_COMPUTE_DIGEST .It Dv CRYPTO_OP_COMPUTE_DIGEST Calculate a digest over the payload region of the data buffer and store the result in the digest region. .It Dv CRYPTO_OP_VERIFY_DIGEST Calculate a digest over the payload region of the data buffer.
Compare the calculated digest to the existing digest from the digest region. If the digests match, complete the request successfully. If the digests do not match, fail the request with .Er EBADMSG . .El .Pp AEAD and Encrypt-then-Authenticate requests support the following operations: .Bl -tag -width CRYPTO_OP .It Dv CRYPTO_OP_ENCRYPT | Dv CRYPTO_OP_COMPUTE_DIGEST Encrypt the data in the payload region of the data buffer. Calculate a digest over the AAD and payload regions and store the result in the data buffer. .It Dv CRYPTO_OP_DECRYPT | Dv CRYPTO_OP_VERIFY_DIGEST Calculate a digest over the AAD and payload regions of the data buffer. Compare the calculated digest to the existing digest from the digest region. If the digests match, decrypt the payload region. If the digests do not match, fail the request with .Er EBADMSG . .El .Ss Request IV and/or Nonce Some cryptographic operations require an IV or nonce as an input. An IV may be stored either in the IV region of the data buffer or in .Fa crp_iv . By default, the IV is assumed to be stored in the IV region. If the IV is stored in .Fa crp_iv , .Dv CRYPTO_F_IV_SEPARATE should be set in .Fa crp_flags and .Fa crp_iv_start should be left as zero. -.Pp -An encryption request using an IV stored in the IV region may set -.Dv CRYPTO_F_IV_GENERATE -in -.Fa crp_flags -to request that the driver generate a random IV. -Note that -.Dv CRYPTO_F_IV_GENERATE -cannot be used with decryption operations or in combination with -.Dv CRYPTO_F_IV_SEPARATE . .Pp Requests that store part, but not all, of the IV in the data buffer should store the partial IV in the data buffer and pass the full IV separately in .Fa crp_iv . .Ss Request and Callback Scheduling The crypto framework provides multiple methods of scheduling the dispatch of requests to drivers along with the processing of driver callbacks. Requests use flags in .Fa crp_flags to select the desired scheduling methods. .Pp .Fn crypto_dispatch can pass the request to the session's driver via three different methods: .Bl -enum .It The request is queued to a taskqueue backed by a pool of worker threads. By default the pool is sized to provide one thread for each CPU. Worker threads dequeue requests and pass them to the driver asynchronously. .It The request is passed to the driver synchronously in the context of the thread invoking .Fn crypto_dispatch . .It The request is queued to a queue of pending requests. A single worker thread dequeues requests and passes them to the driver asynchronously. .El .Pp To select the first method (taskqueue backed by multiple threads), requests should set .Dv CRYPTO_F_ASYNC . To always use the third method (queue to single worker thread), requests should set .Dv CRYPTO_F_BATCH . If both flags are set, .Dv CRYPTO_F_ASYNC takes precedence. If neither flag is set, .Fn crypto_dispatch will first attempt the second method (invoke driver synchronously). If the driver is blocked, the request will be queued using the third method. One caveat is that the first method is only used for requests serviced by software drivers, which process requests on host CPUs. Requests whose session is associated with a hardware driver will ignore .Dv CRYPTO_F_ASYNC and only use .Dv CRYPTO_F_BATCH to determine how requests should be scheduled. .Pp In addition to bypassing synchronous dispatch in .Fn crypto_dispatch , .Dv CRYPTO_F_BATCH requests additional changes aimed at optimizing batches of requests to the same driver.
When the worker thread processes a request with .Dv CRYPTO_F_BATCH , it will search the pending request queue for any other requests for the same driver, including requests from different sessions. If any other requests are present, .Dv CRYPTO_HINT_MORE is passed to the driver's process method. Drivers may use this to batch completion interrupts. .Pp Callback function scheduling is simpler than request scheduling. Callbacks can either be invoked synchronously from .Fn crypto_done , or they can be queued to a pool of worker threads. This pool of worker threads is also sized to provide one worker thread for each CPU by default. Note that a callback function invoked synchronously from .Fn crypto_done must follow the same restrictions placed on threaded interrupt handlers. .Pp By default, callbacks are invoked asynchronously by a worker thread. If .Dv CRYPTO_F_CBIMM is set, the callback is always invoked synchronously from .Fn crypto_done . If .Dv CRYPTO_F_CBIFSYNC is set, the callback is invoked synchronously if the request was processed by a software driver or asynchronously if the request was processed by a hardware driver. .Pp If a request was scheduled to the taskqueue via .Dv CRYPTO_F_ASYNC , callbacks are always invoked asynchronously ignoring .Dv CRYPTO_F_CBIMM and .Dv CRYPTO_F_CBIFSYNC . In this case, .Dv CRYPTO_F_ASYNC_KEEPORDER may be set to ensure that callbacks for requests on a given session are invoked in the same order that requests were queued to the session via .Fn crypto_dispatch . This flag is used by IPsec to ensure that decrypted network packets are passed up the network stack in roughly the same order they were received. .Pp .Ss Other Request Fields In addition to the fields and flags enumerated above, .Vt struct cryptop includes the following: .Bl -tag -width crp_payload_length .It Fa crp_session A reference to the active session. This is set when the request is created by .Fn crypto_getreq and should not be modified. Drivers can use this to fetch driver-specific session state or session parameters. .It Fa crp_etype Error status. Either zero on success, or an error if a request fails. Set by drivers prior to completing a request via .Fn crypto_done . .It Fa crp_flags A bitmask of flags. The following flags are available in addition to flags discussed previously: .Bl -tag -width CRYPTO_F_DONE .It Dv CRYPTO_F_DONE Set by .Fa crypto_done before calling .Fa crp_callback . This flag is not very useful and will likely be removed in the future. It can only be safely checked from the callback routine at which point it is always set. .El .It Fa crp_cipher_key Pointer to a request-specific encryption key. If this value is not set, the request uses the session encryption key. .It Fa crp_auth_key Pointer to a request-specific authentication key. If this value is not set, the request uses the session authentication key. .It Fa crp_opaque An opaque pointer. This pointer permits users of the cryptographic framework to store information about a request to be used in the callback. .It Fa crp_callback Callback function. This must point to a callback function of type .Vt void (*)(struct cryptop *) . The callback function should inspect .Fa crp_etype to determine the status of the completed operation. It should also arrange for the request to be freed via .Fn crypto_freereq . .El .Sh RETURN VALUES .Fn crypto_dispatch returns an error if the request contained invalid fields, or zero if the request was valid. 
.Fn crypto_getreq returns a pointer to a new request structure on success, or .Dv NULL on failure. .Dv NULL can only be returned if .Dv M_NOWAIT was passed in .Fa how . .Sh SEE ALSO .Xr ipsec 4 , .Xr crypto 7 , .Xr crypto 9 , .Xr crypto_session 9 , .Xr mbuf 9 .Xr uio 9 .Sh BUGS Not all drivers properly handle mixing session and per-request keys within a single session. Consumers should either use a single key for a session specified in the session parameters or always use per-request keys. Index: head/sys/crypto/aesni/aesni.c =================================================================== --- head/sys/crypto/aesni/aesni.c (revision 360135) +++ head/sys/crypto/aesni/aesni.c (revision 360136) @@ -1,859 +1,852 @@ /*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek * Copyright (c) 2010 Konstantin Belousov * Copyright (c) 2014 The FreeBSD Foundation * Copyright (c) 2017 Conrad Meyer * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) #include #elif defined(__amd64__) #include #endif static struct mtx_padalign *ctx_mtx; static struct fpu_kern_ctx **ctx_fpu; struct aesni_softc { int32_t cid; bool has_aes; bool has_sha; }; #define ACQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_fpu[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) static int aesni_cipher_setup(struct aesni_session *ses, const struct crypto_session_params *csp); static int aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp); static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp, const struct crypto_session_params *csp); static int aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp, const struct crypto_session_params *csp); MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data"); static void aesni_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "aesni", -1) == NULL && BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0) panic("aesni: could not attach"); } static void detect_cpu_features(bool *has_aes, bool *has_sha) { *has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 && (cpu_feature2 & CPUID2_SSE41) != 0); *has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 && (cpu_feature2 & CPUID2_SSSE3) != 0); } static int aesni_probe(device_t dev) { bool has_aes, has_sha; detect_cpu_features(&has_aes, &has_sha); if (!has_aes && !has_sha) { device_printf(dev, "No AES or SHA support.\n"); return (EINVAL); } else if (has_aes && has_sha) device_set_desc(dev, "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS,SHA1,SHA256"); else if (has_aes) device_set_desc(dev, "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS"); else device_set_desc(dev, "SHA1,SHA256"); return (0); } static void aesni_cleanctx(void) { int i; /* XXX - no way to return driverid */ CPU_FOREACH(i) { if (ctx_fpu[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_fpu[i]); } ctx_fpu[i] = NULL; } free(ctx_mtx, M_AESNI); ctx_mtx = NULL; free(ctx_fpu, M_AESNI); ctx_fpu = NULL; } static int aesni_attach(device_t dev) { struct aesni_softc *sc; int i; sc = device_get_softc(dev); sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI, M_WAITOK|M_ZERO); ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI, M_WAITOK|M_ZERO); CPU_FOREACH(i) { ctx_fpu[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW); } detect_cpu_features(&sc->has_aes, &sc->has_sha); return (0); } static int aesni_detach(device_t dev) { struct aesni_softc *sc; sc = device_get_softc(dev); crypto_unregister_all(sc->cid); aesni_cleanctx(); return (0); } static bool aesni_auth_supported(struct aesni_softc *sc, const struct crypto_session_params *csp) { if (!sc->has_sha) return (false); switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: break; default: return (false); } return (true); } static bool 
aesni_cipher_supported(struct aesni_softc *sc, const struct crypto_session_params *csp) { if (!sc->has_aes) return (false); switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); return (sc->has_aes); case CRYPTO_AES_XTS: if (csp->csp_ivlen != AES_XTS_IV_LEN) return (false); return (sc->has_aes); default: return (false); } } static int aesni_probesession(device_t dev, const struct crypto_session_params *csp) { struct aesni_softc *sc; sc = device_get_softc(dev); if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!aesni_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!aesni_cipher_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: if (csp->csp_auth_mlen != 0 && csp->csp_auth_mlen != GMAC_DIGEST_LEN) return (EINVAL); if (csp->csp_ivlen != AES_GCM_IV_LEN || !sc->has_aes) return (EINVAL); break; case CRYPTO_AES_CCM_16: if (csp->csp_auth_mlen != 0 && csp->csp_auth_mlen != AES_CBC_MAC_HASH_LEN) return (EINVAL); if (csp->csp_ivlen != AES_CCM_IV_LEN || !sc->has_aes) return (EINVAL); break; default: return (EINVAL); } break; case CSP_MODE_ETA: if (!aesni_auth_supported(sc, csp) || !aesni_cipher_supported(sc, csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static int aesni_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct aesni_softc *sc; struct aesni_session *ses; int error; sc = device_get_softc(dev); ses = crypto_get_driver_session(cses); switch (csp->csp_mode) { case CSP_MODE_DIGEST: case CSP_MODE_CIPHER: case CSP_MODE_AEAD: case CSP_MODE_ETA: break; default: return (EINVAL); } error = aesni_cipher_setup(ses, csp); if (error != 0) { CRYPTDEB("setup failed"); return (error); } return (0); } static int aesni_process(device_t dev, struct cryptop *crp, int hint __unused) { struct aesni_session *ses; int error; ses = crypto_get_driver_session(crp->crp_session); error = aesni_cipher_process(ses, crp); crp->crp_etype = error; crypto_done(crp); return (0); } static uint8_t * aesni_cipher_alloc(struct cryptop *crp, int start, int length, bool *allocated) { uint8_t *addr; addr = crypto_contiguous_subsegment(crp, start, length); if (addr != NULL) { *allocated = false; return (addr); } addr = malloc(length, M_AESNI, M_NOWAIT); if (addr != NULL) { *allocated = true; crypto_copydata(crp, start, length, addr); } else *allocated = false; return (addr); } static device_method_t aesni_methods[] = { DEVMETHOD(device_identify, aesni_identify), DEVMETHOD(device_probe, aesni_probe), DEVMETHOD(device_attach, aesni_attach), DEVMETHOD(device_detach, aesni_detach), DEVMETHOD(cryptodev_probesession, aesni_probesession), DEVMETHOD(cryptodev_newsession, aesni_newsession), DEVMETHOD(cryptodev_process, aesni_process), DEVMETHOD_END }; static driver_t aesni_driver = { "aesni", aesni_methods, sizeof(struct aesni_softc), }; static devclass_t aesni_devclass; DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0); MODULE_VERSION(aesni, 1); MODULE_DEPEND(aesni, crypto, 1, 1, 1); static void intel_sha1_update(void *vctx, const void *vdata, u_int datalen) { struct sha1_ctxt *ctx = vctx; const char *data = vdata; size_t gaplen; size_t gapstart; size_t off; size_t copysiz; u_int blocks; off = 0; /* Do any aligned blocks without redundant copying. 
*/ if (datalen >= 64 && ctx->count % 64 == 0) { blocks = datalen / 64; ctx->c.b64[0] += blocks * 64 * 8; intel_sha1_step(ctx->h.b32, data + off, blocks); off += blocks * 64; } while (off < datalen) { gapstart = ctx->count % 64; gaplen = 64 - gapstart; copysiz = (gaplen < datalen - off) ? gaplen : datalen - off; bcopy(&data[off], &ctx->m.b8[gapstart], copysiz); ctx->count += copysiz; ctx->count %= 64; ctx->c.b64[0] += copysiz * 8; if (ctx->count % 64 == 0) intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1); off += copysiz; } } static void SHA1_Init_fn(void *ctx) { sha1_init(ctx); } static void SHA1_Finalize_fn(void *digest, void *ctx) { sha1_result(ctx, digest); } static void intel_sha256_update(void *vctx, const void *vdata, u_int len) { SHA256_CTX *ctx = vctx; uint64_t bitlen; uint32_t r; u_int blocks; const unsigned char *src = vdata; /* Number of bytes left in the buffer from previous updates */ r = (ctx->count >> 3) & 0x3f; /* Convert the length into a number of bits */ bitlen = len << 3; /* Update number of bits */ ctx->count += bitlen; /* Handle the case where we don't need to perform any transforms */ if (len < 64 - r) { memcpy(&ctx->buf[r], src, len); return; } /* Finish the current block */ memcpy(&ctx->buf[r], src, 64 - r); intel_sha256_step(ctx->state, ctx->buf, 1); src += 64 - r; len -= 64 - r; /* Perform complete blocks */ if (len >= 64) { blocks = len / 64; intel_sha256_step(ctx->state, src, blocks); src += blocks * 64; len -= blocks * 64; } /* Copy left over data into buffer */ memcpy(ctx->buf, src, len); } static void SHA224_Init_fn(void *ctx) { SHA224_Init(ctx); } static void SHA224_Finalize_fn(void *digest, void *ctx) { SHA224_Final(digest, ctx); } static void SHA256_Init_fn(void *ctx) { SHA256_Init(ctx); } static void SHA256_Finalize_fn(void *digest, void *ctx) { SHA256_Final(digest, ctx); } static int aesni_authprepare(struct aesni_session *ses, int klen) { if (klen > SHA1_BLOCK_LEN) return (EINVAL); if ((ses->hmac && klen == 0) || (!ses->hmac && klen != 0)) return (EINVAL); return (0); } static int aesni_cipherprepare(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_ICM: case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: case CRYPTO_AES_CBC: switch (csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: CRYPTDEB("invalid CBC/ICM/GCM key length"); return (EINVAL); } break; case CRYPTO_AES_XTS: switch (csp->csp_cipher_klen * 8) { case 256: case 512: break; default: CRYPTDEB("invalid XTS key length"); return (EINVAL); } break; default: return (EINVAL); } return (0); } static int aesni_cipher_setup(struct aesni_session *ses, const struct crypto_session_params *csp) { struct fpu_kern_ctx *ctx; int kt, ctxidx, error; switch (csp->csp_auth_alg) { case CRYPTO_SHA1_HMAC: ses->hmac = true; /* FALLTHROUGH */ case CRYPTO_SHA1: ses->hash_len = SHA1_HASH_LEN; ses->hash_init = SHA1_Init_fn; ses->hash_update = intel_sha1_update; ses->hash_finalize = SHA1_Finalize_fn; break; case CRYPTO_SHA2_224_HMAC: ses->hmac = true; /* FALLTHROUGH */ case CRYPTO_SHA2_224: ses->hash_len = SHA2_224_HASH_LEN; ses->hash_init = SHA224_Init_fn; ses->hash_update = intel_sha256_update; ses->hash_finalize = SHA224_Finalize_fn; break; case CRYPTO_SHA2_256_HMAC: ses->hmac = true; /* FALLTHROUGH */ case CRYPTO_SHA2_256: ses->hash_len = SHA2_256_HASH_LEN; ses->hash_init = SHA256_Init_fn; ses->hash_update = intel_sha256_update; ses->hash_finalize = SHA256_Finalize_fn; break; } if (ses->hash_len != 0) { if (csp->csp_auth_mlen == 0) ses->mlen = 
ses->hash_len; else ses->mlen = csp->csp_auth_mlen; error = aesni_authprepare(ses, csp->csp_auth_klen); if (error != 0) return (error); } error = aesni_cipherprepare(csp); if (error != 0) return (error); kt = is_fpu_kern_thread(0) || (csp->csp_cipher_alg == 0); if (!kt) { ACQUIRE_CTX(ctxidx, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } error = 0; if (csp->csp_cipher_key != NULL) aesni_cipher_setup_common(ses, csp, csp->csp_cipher_key, csp->csp_cipher_klen); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(ctxidx, ctx); } return (error); } static int aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp) { const struct crypto_session_params *csp; struct fpu_kern_ctx *ctx; int error, ctxidx; bool kt; csp = crypto_get_params(crp->crp_session); switch (csp->csp_cipher_alg) { case CRYPTO_AES_ICM: case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); break; case CRYPTO_AES_CBC: case CRYPTO_AES_XTS: /* CBC & XTS can only handle full blocks for now */ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) return (EINVAL); break; } ctx = NULL; ctxidx = 0; error = 0; kt = is_fpu_kern_thread(0); if (!kt) { ACQUIRE_CTX(ctxidx, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } /* Do work */ if (csp->csp_mode == CSP_MODE_ETA) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = aesni_cipher_crypt(ses, crp, csp); if (error == 0) error = aesni_cipher_mac(ses, crp, csp); } else { error = aesni_cipher_mac(ses, crp, csp); if (error == 0) error = aesni_cipher_crypt(ses, crp, csp); } } else if (csp->csp_mode == CSP_MODE_DIGEST) error = aesni_cipher_mac(ses, crp, csp); else error = aesni_cipher_crypt(ses, crp, csp); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(ctxidx, ctx); } return (error); } static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp, const struct crypto_session_params *csp) { uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN], *buf, *authbuf; int error; bool encflag, allocated, authallocated; buf = aesni_cipher_alloc(crp, crp->crp_payload_start, crp->crp_payload_length, &allocated); if (buf == NULL) return (ENOMEM); authallocated = false; authbuf = NULL; if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 || csp->csp_cipher_alg == CRYPTO_AES_CCM_16) { authbuf = aesni_cipher_alloc(crp, crp->crp_aad_start, crp->crp_aad_length, &authallocated); if (authbuf == NULL) { error = ENOMEM; goto out; } } error = 0; encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); if (crp->crp_cipher_key != NULL) aesni_cipher_setup_common(ses, csp, crp->crp_cipher_key, csp->csp_cipher_klen); - /* Setup iv */ - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(iv, csp->csp_ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(iv, crp->crp_iv, csp->csp_ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv); + crypto_read_iv(crp, iv); switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (encflag) aesni_encrypt_cbc(ses->rounds, ses->enc_schedule, crp->crp_payload_length, buf, buf, iv); else aesni_decrypt_cbc(ses->rounds, ses->dec_schedule, crp->crp_payload_length, buf, iv); break; case CRYPTO_AES_ICM: /* encryption & decryption are the same */ aesni_encrypt_icm(ses->rounds, ses->enc_schedule, crp->crp_payload_length, buf, buf, iv); break; case CRYPTO_AES_XTS: if (encflag) aesni_encrypt_xts(ses->rounds, ses->enc_schedule, ses->xts_schedule, crp->crp_payload_length, 
buf, buf, iv); else aesni_decrypt_xts(ses->rounds, ses->dec_schedule, ses->xts_schedule, crp->crp_payload_length, buf, buf, iv); break; case CRYPTO_AES_NIST_GCM_16: if (encflag) { memset(tag, 0, sizeof(tag)); AES_GCM_encrypt(buf, buf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds); crypto_copyback(crp, crp->crp_digest_start, sizeof(tag), tag); } else { crypto_copydata(crp, crp->crp_digest_start, sizeof(tag), tag); if (!AES_GCM_decrypt(buf, buf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds)) error = EBADMSG; } break; case CRYPTO_AES_CCM_16: if (encflag) { memset(tag, 0, sizeof(tag)); AES_CCM_encrypt(buf, buf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds); crypto_copyback(crp, crp->crp_digest_start, sizeof(tag), tag); } else { crypto_copydata(crp, crp->crp_digest_start, sizeof(tag), tag); if (!AES_CCM_decrypt(buf, buf, authbuf, iv, tag, crp->crp_payload_length, crp->crp_aad_length, csp->csp_ivlen, ses->enc_schedule, ses->rounds)) error = EBADMSG; } break; } if (allocated && error == 0) crypto_copyback(crp, crp->crp_payload_start, crp->crp_payload_length, buf); out: if (allocated) { explicit_bzero(buf, crp->crp_payload_length); free(buf, M_AESNI); } if (authallocated) { explicit_bzero(authbuf, crp->crp_aad_length); free(authbuf, M_AESNI); } return (error); } static int aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp, const struct crypto_session_params *csp) { union { struct SHA256Context sha2 __aligned(16); struct sha1_ctxt sha1 __aligned(16); } sctx; uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16); uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)]; uint32_t res2[SHA2_256_HASH_LEN / sizeof(uint32_t)]; const uint8_t *key; int i, keylen; if (crp->crp_auth_key != NULL) key = crp->crp_auth_key; else key = csp->csp_auth_key; keylen = csp->csp_auth_klen; if (ses->hmac) { /* Inner hash: (K ^ IPAD) || data */ ses->hash_init(&sctx); for (i = 0; i < keylen; i++) hmac_key[i] = key[i] ^ HMAC_IPAD_VAL; for (i = keylen; i < sizeof(hmac_key); i++) hmac_key[i] = 0 ^ HMAC_IPAD_VAL; ses->hash_update(&sctx, hmac_key, sizeof(hmac_key)); crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, __DECONST(int (*)(void *, void *, u_int), ses->hash_update), &sctx); crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, __DECONST(int (*)(void *, void *, u_int), ses->hash_update), &sctx); ses->hash_finalize(res, &sctx); /* Outer hash: (K ^ OPAD) || inner hash */ ses->hash_init(&sctx); for (i = 0; i < keylen; i++) hmac_key[i] = key[i] ^ HMAC_OPAD_VAL; for (i = keylen; i < sizeof(hmac_key); i++) hmac_key[i] = 0 ^ HMAC_OPAD_VAL; ses->hash_update(&sctx, hmac_key, sizeof(hmac_key)); ses->hash_update(&sctx, res, ses->hash_len); ses->hash_finalize(res, &sctx); } else { ses->hash_init(&sctx); crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, __DECONST(int (*)(void *, void *, u_int), ses->hash_update), &sctx); crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, __DECONST(int (*)(void *, void *, u_int), ses->hash_update), &sctx); ses->hash_finalize(res, &sctx); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, ses->mlen, res2); if (timingsafe_bcmp(res, res2, ses->mlen) != 0) return (EBADMSG); } else crypto_copyback(crp, crp->crp_digest_start, ses->mlen, res); return (0); } Index: head/sys/crypto/armv8/armv8_crypto.c 
=================================================================== --- head/sys/crypto/armv8/armv8_crypto.c (revision 360135) +++ head/sys/crypto/armv8/armv8_crypto.c (revision 360136) @@ -1,392 +1,385 @@ /*- * Copyright (c) 2005-2008 Pawel Jakub Dawidek * Copyright (c) 2010 Konstantin Belousov * Copyright (c) 2014,2016 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This is based on the aesni code. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct armv8_crypto_softc { int dieing; int32_t cid; struct rwlock lock; }; static struct mtx *ctx_mtx; static struct fpu_kern_ctx **ctx_vfp; #define AQUIRE_CTX(i, ctx) \ do { \ (i) = PCPU_GET(cpuid); \ mtx_lock(&ctx_mtx[(i)]); \ (ctx) = ctx_vfp[(i)]; \ } while (0) #define RELEASE_CTX(i, ctx) \ do { \ mtx_unlock(&ctx_mtx[(i)]); \ (i) = -1; \ (ctx) = NULL; \ } while (0) static int armv8_crypto_cipher_process(struct armv8_crypto_session *, struct cryptop *); MALLOC_DEFINE(M_ARMV8_CRYPTO, "armv8_crypto", "ARMv8 Crypto Data"); static void armv8_crypto_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "armv8crypto", -1) == NULL && BUS_ADD_CHILD(parent, 10, "armv8crypto", -1) == 0) panic("ARMv8 crypto: could not attach"); } static int armv8_crypto_probe(device_t dev) { uint64_t reg; int ret = ENXIO; reg = READ_SPECIALREG(id_aa64isar0_el1); switch (ID_AA64ISAR0_AES_VAL(reg)) { case ID_AA64ISAR0_AES_BASE: case ID_AA64ISAR0_AES_PMULL: ret = 0; break; } device_set_desc_copy(dev, "AES-CBC"); /* TODO: Check more fields as we support more features */ return (ret); } static int armv8_crypto_attach(device_t dev) { struct armv8_crypto_softc *sc; int i; sc = device_get_softc(dev); sc->dieing = 0; sc->cid = crypto_get_driverid(dev, sizeof(struct armv8_crypto_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); if (sc->cid < 0) { device_printf(dev, "Could not get crypto driver id.\n"); return (ENOMEM); } rw_init(&sc->lock, "armv8crypto"); ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); ctx_vfp = malloc(sizeof(*ctx_vfp) * (mp_maxid + 1), M_ARMV8_CRYPTO, M_WAITOK|M_ZERO); CPU_FOREACH(i) { ctx_vfp[i] = fpu_kern_alloc_ctx(0); mtx_init(&ctx_mtx[i], "armv8cryptoctx", NULL, MTX_DEF|MTX_NEW); } return (0); } static int armv8_crypto_detach(device_t dev) { struct armv8_crypto_softc *sc; int i; sc = device_get_softc(dev); rw_wlock(&sc->lock); sc->dieing = 1; rw_wunlock(&sc->lock); crypto_unregister_all(sc->cid); rw_destroy(&sc->lock); CPU_FOREACH(i) { if (ctx_vfp[i] != NULL) { mtx_destroy(&ctx_mtx[i]); fpu_kern_free_ctx(ctx_vfp[i]); } ctx_vfp[i] = NULL; } free(ctx_mtx, M_ARMV8_CRYPTO); ctx_mtx = NULL; free(ctx_vfp, M_ARMV8_CRYPTO); ctx_vfp = NULL; return (0); } static int armv8_crypto_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != AES_BLOCK_LEN) return (EINVAL); switch (csp->csp_cipher_klen * 8) { case 128: case 192: case 256: break; default: return (EINVAL); } break; default: return (EINVAL); } default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static void armv8_crypto_cipher_setup(struct armv8_crypto_session *ses, const struct crypto_session_params *csp) { int i; switch (csp->csp_cipher_klen * 8) { case 128: ses->rounds = AES128_ROUNDS; break; case 192: ses->rounds = AES192_ROUNDS; break; case 256: ses->rounds = AES256_ROUNDS; break; default: panic("invalid CBC key length"); } rijndaelKeySetupEnc(ses->enc_schedule, csp->csp_cipher_key, csp->csp_cipher_klen * 8); rijndaelKeySetupDec(ses->dec_schedule, csp->csp_cipher_key, csp->csp_cipher_klen * 8); for (i = 0; i < nitems(ses->enc_schedule); i++) { 
ses->enc_schedule[i] = bswap32(ses->enc_schedule[i]); ses->dec_schedule[i] = bswap32(ses->dec_schedule[i]); } } static int armv8_crypto_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct armv8_crypto_softc *sc; struct armv8_crypto_session *ses; sc = device_get_softc(dev); rw_wlock(&sc->lock); if (sc->dieing) { rw_wunlock(&sc->lock); return (EINVAL); } ses = crypto_get_driver_session(cses); armv8_crypto_cipher_setup(ses, csp); rw_wunlock(&sc->lock); return (0); } static int armv8_crypto_process(device_t dev, struct cryptop *crp, int hint __unused) { struct armv8_crypto_session *ses; int error; /* We can only handle full blocks for now */ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) { error = EINVAL; goto out; } ses = crypto_get_driver_session(crp->crp_session); error = armv8_crypto_cipher_process(ses, crp); out: crp->crp_etype = error; crypto_done(crp); return (error); } static uint8_t * armv8_crypto_cipher_alloc(struct cryptop *crp, int *allocated) { uint8_t *addr; addr = crypto_contiguous_subsegment(crp, crp->crp_payload_start, crp->crp_payload_length); if (addr != NULL) { *allocated = 0; return (addr); } addr = malloc(crp->crp_payload_length, M_ARMV8_CRYPTO, M_NOWAIT); if (addr != NULL) { *allocated = 1; crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, addr); } else *allocated = 0; return (addr); } static int armv8_crypto_cipher_process(struct armv8_crypto_session *ses, struct cryptop *crp) { const struct crypto_session_params *csp; struct fpu_kern_ctx *ctx; uint8_t *buf; uint8_t iv[AES_BLOCK_LEN]; int allocated, i; int encflag; int kt; csp = crypto_get_params(crp->crp_session); encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); buf = armv8_crypto_cipher_alloc(crp, &allocated); if (buf == NULL) return (ENOMEM); kt = is_fpu_kern_thread(0); if (!kt) { AQUIRE_CTX(i, ctx); fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); } if (crp->crp_cipher_key != NULL) { panic("armv8: new cipher key"); } - /* Setup iv */ - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(iv, csp->csp_ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(iv, crp->crp_iv, csp->csp_ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv); + crypto_read_iv(crp, iv); /* Do work */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (encflag) armv8_aes_encrypt_cbc(ses->rounds, ses->enc_schedule, crp->crp_payload_length, buf, buf, iv); else armv8_aes_decrypt_cbc(ses->rounds, ses->dec_schedule, crp->crp_payload_length, buf, iv); break; } if (allocated) crypto_copyback(crp, crp->crp_payload_start, crp->crp_payload_length, buf); if (!kt) { fpu_kern_leave(curthread, ctx); RELEASE_CTX(i, ctx); } if (allocated) { bzero(buf, crp->crp_payload_length); free(buf, M_ARMV8_CRYPTO); } return (0); } static device_method_t armv8_crypto_methods[] = { DEVMETHOD(device_identify, armv8_crypto_identify), DEVMETHOD(device_probe, armv8_crypto_probe), DEVMETHOD(device_attach, armv8_crypto_attach), DEVMETHOD(device_detach, armv8_crypto_detach), DEVMETHOD(cryptodev_probesession, armv8_crypto_probesession), DEVMETHOD(cryptodev_newsession, armv8_crypto_newsession), DEVMETHOD(cryptodev_process, armv8_crypto_process), DEVMETHOD_END, }; static DEFINE_CLASS_0(armv8crypto, armv8_crypto_driver, armv8_crypto_methods, sizeof(struct armv8_crypto_softc)); static devclass_t armv8_crypto_devclass; DRIVER_MODULE(armv8crypto, nexus, armv8_crypto_driver, armv8_crypto_devclass, 
0, 0); Index: head/sys/crypto/ccp/ccp_hardware.c =================================================================== --- head/sys/crypto/ccp/ccp_hardware.c (revision 360135) +++ head/sys/crypto/ccp/ccp_hardware.c (revision 360136) @@ -1,2113 +1,2107 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017 Chelsio Communications, Inc. * Copyright (c) 2017 Conrad Meyer * All rights reserved. * Largely borrowed from ccr(4), Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include "ccp.h" #include "ccp_hardware.h" #include "ccp_lsb.h" CTASSERT(sizeof(struct ccp_desc) == 32); static struct ccp_xts_unitsize_map_entry { enum ccp_xts_unitsize cxu_id; unsigned cxu_size; } ccp_xts_unitsize_map[] = { { CCP_XTS_AES_UNIT_SIZE_16, 16 }, { CCP_XTS_AES_UNIT_SIZE_512, 512 }, { CCP_XTS_AES_UNIT_SIZE_1024, 1024 }, { CCP_XTS_AES_UNIT_SIZE_2048, 2048 }, { CCP_XTS_AES_UNIT_SIZE_4096, 4096 }, }; SYSCTL_NODE(_hw, OID_AUTO, ccp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "ccp node"); unsigned g_ccp_ring_order = 11; SYSCTL_UINT(_hw_ccp, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ccp_ring_order, 0, "Set CCP ring order. (1 << this) == ring size. Min: 6, Max: 16"); /* * Zero buffer, sufficient for padding LSB entries, that does not span a page * boundary */ static const char g_zeroes[32] __aligned(32); static inline uint32_t ccp_read_4(struct ccp_softc *sc, uint32_t offset) { return (bus_space_read_4(sc->pci_bus_tag, sc->pci_bus_handle, offset)); } static inline void ccp_write_4(struct ccp_softc *sc, uint32_t offset, uint32_t value) { bus_space_write_4(sc->pci_bus_tag, sc->pci_bus_handle, offset, value); } static inline uint32_t ccp_read_queue_4(struct ccp_softc *sc, unsigned queue, uint32_t offset) { /* * Each queue gets its own 4kB register space. Queue 0 is at 0x1000. 
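 *
 * A worked example, assuming CMD_Q_STATUS_INCR is the 4kB (0x1000) stride
 * implied above: reading CMD_Q_HEAD_LO_BASE on queue 2 decodes to
 * 0x1000 * (1 + 2) + CMD_Q_HEAD_LO_BASE, i.e. an offset inside the queue's
 * 0x3000 window of the BAR mapped in ccp_map_pci_bar().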
*/ return (ccp_read_4(sc, (CMD_Q_STATUS_INCR * (1 + queue)) + offset)); } static inline void ccp_write_queue_4(struct ccp_softc *sc, unsigned queue, uint32_t offset, uint32_t value) { ccp_write_4(sc, (CMD_Q_STATUS_INCR * (1 + queue)) + offset, value); } void ccp_queue_write_tail(struct ccp_queue *qp) { ccp_write_queue_4(qp->cq_softc, qp->cq_qindex, CMD_Q_TAIL_LO_BASE, ((uint32_t)qp->desc_ring_bus_addr) + (Q_DESC_SIZE * qp->cq_tail)); } /* * Given a queue and a reserved LSB entry index, compute the LSB *entry id* of * that entry for the queue's private LSB region. */ static inline uint8_t ccp_queue_lsb_entry(struct ccp_queue *qp, unsigned lsb_entry) { return ((qp->private_lsb * LSB_REGION_LENGTH + lsb_entry)); } /* * Given a queue and a reserved LSB entry index, compute the LSB *address* of * that entry for the queue's private LSB region. */ static inline uint32_t ccp_queue_lsb_address(struct ccp_queue *qp, unsigned lsb_entry) { return (ccp_queue_lsb_entry(qp, lsb_entry) * LSB_ENTRY_SIZE); } /* * Some terminology: * * LSB - Local Storage Block * ========================= * * 8 segments/regions, each containing 16 entries. * * Each entry contains 256 bits (32 bytes). * * Segments are virtually addressed in commands, but accesses cannot cross * segment boundaries. Virtual map uses an identity mapping by default * (virtual segment N corresponds to physical segment N). * * Access to a physical region can be restricted to any subset of all five * queues. * * "Pass-through" mode * =================== * * Pass-through is a generic DMA engine, much like ioat(4). Some nice * features: * * - Supports byte-swapping for endian conversion (32- or 256-bit words) * - AND, OR, XOR with fixed 256-bit mask * - CRC32 of data (may be used in tandem with bswap, but not bit operations) * - Read/write of LSB * - Memset * * If bit manipulation mode is enabled, input must be a multiple of 256 bits * (32 bytes). * * If byte-swapping is enabled, input must be a multiple of the word size. * * Zlib mode -- only usable from one queue at a time, single job at a time. * ======================================================================== * * Only usable from private host, aka PSP? Not host processor? * * RNG. * ==== * * Raw bits are conditioned with AES and fed through CTR_DRBG. Output goes in * a ring buffer readable by software. * * NIST SP 800-90B Repetition Count and Adaptive Proportion health checks are * implemented on the raw input stream and may be enabled to verify min-entropy * of 0.5 bits per bit. */ static void ccp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; KASSERT(error == 0, ("%s: error:%d", __func__, error)); baddr = arg; *baddr = segs->ds_addr; } static int ccp_hw_attach_queue(device_t dev, uint64_t lsbmask, unsigned queue) { struct ccp_softc *sc; struct ccp_queue *qp; void *desc; size_t ringsz, num_descriptors; int error; desc = NULL; sc = device_get_softc(dev); qp = &sc->queues[queue]; /* * Don't bother allocating a ring for queues the host isn't allowed to * drive. */ if ((sc->valid_queues & (1 << queue)) == 0) return (0); ccp_queue_decode_lsb_regions(sc, lsbmask, queue); /* Ignore queues that do not have any LSB access. */ if (qp->lsb_mask == 0) { device_printf(dev, "Ignoring queue %u with no LSB access\n", queue); sc->valid_queues &= ~(1 << queue); return (0); } num_descriptors = 1 << sc->ring_size_order; ringsz = sizeof(struct ccp_desc) * num_descriptors; /* * "Queue_Size" is order - 1. * * Queue must be aligned to 5+Queue_Size+1 == 5 + order bits. 
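 *
 * A worked example with the default g_ccp_ring_order of 11: the ring holds
 * 1 << 11 = 2048 descriptors of 32 bytes each (64 KiB total), and the DMA
 * tag below therefore demands 1 << (5 + 11) = 64 KiB alignment, so the ring
 * never straddles its own alignment boundary.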
*/ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1 << (5 + sc->ring_size_order), #if defined(__i386__) && !defined(PAE) 0, BUS_SPACE_MAXADDR, #else (bus_addr_t)1 << 32, BUS_SPACE_MAXADDR_48BIT, #endif BUS_SPACE_MAXADDR, NULL, NULL, ringsz, 1, ringsz, 0, NULL, NULL, &qp->ring_desc_tag); if (error != 0) goto out; error = bus_dmamem_alloc(qp->ring_desc_tag, &desc, BUS_DMA_ZERO | BUS_DMA_WAITOK, &qp->ring_desc_map); if (error != 0) goto out; error = bus_dmamap_load(qp->ring_desc_tag, qp->ring_desc_map, desc, ringsz, ccp_dmamap_cb, &qp->desc_ring_bus_addr, BUS_DMA_WAITOK); if (error != 0) goto out; qp->desc_ring = desc; qp->completions_ring = malloc(num_descriptors * sizeof(*qp->completions_ring), M_CCP, M_ZERO | M_WAITOK); /* Zero control register; among other things, clears the RUN flag. */ qp->qcontrol = 0; ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol); ccp_write_queue_4(sc, queue, CMD_Q_INT_ENABLE_BASE, 0); /* Clear any leftover interrupt status flags */ ccp_write_queue_4(sc, queue, CMD_Q_INTERRUPT_STATUS_BASE, ALL_INTERRUPTS); qp->qcontrol |= (sc->ring_size_order - 1) << CMD_Q_SIZE_SHIFT; ccp_write_queue_4(sc, queue, CMD_Q_TAIL_LO_BASE, (uint32_t)qp->desc_ring_bus_addr); ccp_write_queue_4(sc, queue, CMD_Q_HEAD_LO_BASE, (uint32_t)qp->desc_ring_bus_addr); /* * Enable completion interrupts, as well as error or administrative * halt interrupts. We don't use administrative halts, but they * shouldn't trip unless we do, so it ought to be harmless. */ ccp_write_queue_4(sc, queue, CMD_Q_INT_ENABLE_BASE, INT_COMPLETION | INT_ERROR | INT_QUEUE_STOPPED); qp->qcontrol |= (qp->desc_ring_bus_addr >> 32) << CMD_Q_PTR_HI_SHIFT; qp->qcontrol |= CMD_Q_RUN; ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol); out: if (error != 0) { if (qp->desc_ring != NULL) bus_dmamap_unload(qp->ring_desc_tag, qp->ring_desc_map); if (desc != NULL) bus_dmamem_free(qp->ring_desc_tag, desc, qp->ring_desc_map); if (qp->ring_desc_tag != NULL) bus_dma_tag_destroy(qp->ring_desc_tag); } return (error); } static void ccp_hw_detach_queue(device_t dev, unsigned queue) { struct ccp_softc *sc; struct ccp_queue *qp; sc = device_get_softc(dev); qp = &sc->queues[queue]; /* * Don't bother allocating a ring for queues the host isn't allowed to * drive. 
*/ if ((sc->valid_queues & (1 << queue)) == 0) return; free(qp->completions_ring, M_CCP); bus_dmamap_unload(qp->ring_desc_tag, qp->ring_desc_map); bus_dmamem_free(qp->ring_desc_tag, qp->desc_ring, qp->ring_desc_map); bus_dma_tag_destroy(qp->ring_desc_tag); } static int ccp_map_pci_bar(device_t dev) { struct ccp_softc *sc; sc = device_get_softc(dev); sc->pci_resource_id = PCIR_BAR(2); sc->pci_resource = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->pci_resource_id, RF_ACTIVE); if (sc->pci_resource == NULL) { device_printf(dev, "unable to allocate pci resource\n"); return (ENODEV); } sc->pci_resource_id_msix = PCIR_BAR(5); sc->pci_resource_msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->pci_resource_id_msix, RF_ACTIVE); if (sc->pci_resource_msix == NULL) { device_printf(dev, "unable to allocate pci resource msix\n"); bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id, sc->pci_resource); return (ENODEV); } sc->pci_bus_tag = rman_get_bustag(sc->pci_resource); sc->pci_bus_handle = rman_get_bushandle(sc->pci_resource); return (0); } static void ccp_unmap_pci_bar(device_t dev) { struct ccp_softc *sc; sc = device_get_softc(dev); bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id_msix, sc->pci_resource_msix); bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id, sc->pci_resource); } const static struct ccp_error_code { uint8_t ce_code; const char *ce_name; int ce_errno; const char *ce_desc; } ccp_error_codes[] = { { 0x01, "ILLEGAL_ENGINE", EIO, "Requested engine was invalid" }, { 0x03, "ILLEGAL_FUNCTION_TYPE", EIO, "A non-supported function type was specified" }, { 0x04, "ILLEGAL_FUNCTION_MODE", EIO, "A non-supported function mode was specified" }, { 0x05, "ILLEGAL_FUNCTION_ENCRYPT", EIO, "A CMAC type was specified when ENCRYPT was not specified" }, { 0x06, "ILLEGAL_FUNCTION_SIZE", EIO, "A non-supported function size was specified.\n" "AES-CFB: Size was not 127 or 7;\n" "3DES-CFB: Size was not 7;\n" "RSA: See supported size table (7.4.2);\n" "ECC: Size was greater than 576 bits." }, { 0x07, "Zlib_MISSING_INIT_EOM", EIO, "Zlib command does not have INIT and EOM set" }, { 0x08, "ILLEGAL_FUNCTION_RSVD", EIO, "Reserved bits in a function specification were not 0" }, { 0x09, "ILLEGAL_BUFFER_LENGTH", EIO, "The buffer length specified was not correct for the selected engine" }, { 0x0A, "VLSB_FAULT", EIO, "Illegal VLSB segment mapping:\n" "Undefined VLSB segment mapping or\n" "mapping to unsupported LSB segment id" }, { 0x0B, "ILLEGAL_MEM_ADDR", EFAULT, "The specified source/destination buffer access was illegal:\n" "Data buffer located in a LSB location disallowed by the LSB protection masks; or\n" "Data buffer not completely contained within a single segment; or\n" "Pointer with Fixed=1 is not 32-bit aligned; or\n" "Pointer with Fixed=1 attempted to reference non-AXI1 (local) memory." }, { 0x0C, "ILLEGAL_MEM_SEL", EIO, "A src_mem, dst_mem, or key_mem field was illegal:\n" "A field was set to a reserved value; or\n" "A public command attempted to reference AXI1 (local) or GART memory; or\n" "A Zlib command attmpted to use the LSB." }, { 0x0D, "ILLEGAL_CONTEXT_ADDR", EIO, "The specified context location was illegal:\n" "Context located in a LSB location disallowed by the LSB protection masks; or\n" "Context not completely contained within a single segment." 
}, { 0x0E, "ILLEGAL_KEY_ADDR", EIO, "The specified key location was illegal:\n" "Key located in a LSB location disallowed by the LSB protection masks; or\n" "Key not completely contained within a single segment." }, { 0x12, "CMD_TIMEOUT", EIO, "A command timeout violation occurred" }, /* XXX Could fill out these descriptions too */ { 0x13, "IDMA0_AXI_SLVERR", EIO, "" }, { 0x14, "IDMA0_AXI_DECERR", EIO, "" }, { 0x16, "IDMA1_AXI_SLVERR", EIO, "" }, { 0x17, "IDMA1_AXI_DECERR", EIO, "" }, { 0x19, "ZLIBVHB_AXI_SLVERR", EIO, "" }, { 0x1A, "ZLIBVHB_AXI_DECERR", EIO, "" }, { 0x1C, "ZLIB_UNEXPECTED_EOM", EIO, "" }, { 0x1D, "ZLIB_EXTRA_DATA", EIO, "" }, { 0x1E, "ZLIB_BTYPE", EIO, "" }, { 0x20, "ZLIB_UNDEFINED_DISTANCE_SYMBOL", EIO, "" }, { 0x21, "ZLIB_CODE_LENGTH_SYMBOL", EIO, "" }, { 0x22, "ZLIB_VHB_ILLEGAL_FETCH", EIO, "" }, { 0x23, "ZLIB_UNCOMPRESSED_LEN", EIO, "" }, { 0x24, "ZLIB_LIMIT_REACHED", EIO, "" }, { 0x25, "ZLIB_CHECKSUM_MISMATCH", EIO, "" }, { 0x26, "ODMA0_AXI_SLVERR", EIO, "" }, { 0x27, "ODMA0_AXI_DECERR", EIO, "" }, { 0x29, "ODMA1_AXI_SLVERR", EIO, "" }, { 0x2A, "ODMA1_AXI_DECERR", EIO, "" }, { 0x2B, "LSB_PARITY_ERR", EIO, "A read from the LSB encountered a parity error" }, }; static void ccp_intr_handle_error(struct ccp_queue *qp, const struct ccp_desc *desc) { struct ccp_completion_ctx *cctx; const struct ccp_error_code *ec; struct ccp_softc *sc; uint32_t status, error, esource, faultblock; unsigned q, idx; int errno; sc = qp->cq_softc; q = qp->cq_qindex; status = ccp_read_queue_4(sc, q, CMD_Q_STATUS_BASE); error = status & STATUS_ERROR_MASK; /* Decode error status */ ec = NULL; for (idx = 0; idx < nitems(ccp_error_codes); idx++) if (ccp_error_codes[idx].ce_code == error) { ec = &ccp_error_codes[idx]; break; } esource = (status >> STATUS_ERRORSOURCE_SHIFT) & STATUS_ERRORSOURCE_MASK; faultblock = (status >> STATUS_VLSB_FAULTBLOCK_SHIFT) & STATUS_VLSB_FAULTBLOCK_MASK; device_printf(sc->dev, "Error: %s (%u) Source: %u Faulting LSB block: %u\n", (ec != NULL) ? ec->ce_name : "(reserved)", error, esource, faultblock); if (ec != NULL) device_printf(sc->dev, "Error description: %s\n", ec->ce_desc); /* TODO Could format the desc nicely here */ idx = desc - qp->desc_ring; DPRINTF(sc->dev, "Bad descriptor index: %u contents: %32D\n", idx, (const void *)desc, " "); /* * TODO Per § 14.4 "Error Handling," DMA_Status, DMA_Read/Write_Status, * Zlib Decompress status may be interesting. */ while (true) { /* Keep unused descriptors zero for next use. */ memset(&qp->desc_ring[idx], 0, sizeof(qp->desc_ring[idx])); cctx = &qp->completions_ring[idx]; /* * Restart procedure described in § 14.2.5. Could be used by HoC if we * used that. * * Advance HEAD_LO past bad descriptor + any remaining in * transaction manually, then restart queue. 
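 *
 * Concretely, assuming a three-descriptor transaction whose middle
 * descriptor faults: the loop below zeroes each descriptor from the
 * faulting index up to and including the one carrying the completion
 * callback, reports the error through that callback, then rewrites
 * HEAD_LO to the following descriptor and re-writes qcontrol (whose RUN
 * bit was set at attach time) to resume the queue.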
*/ idx = (idx + 1) % (1 << sc->ring_size_order); /* Callback function signals end of transaction */ if (cctx->callback_fn != NULL) { if (ec == NULL) errno = EIO; else errno = ec->ce_errno; /* TODO More specific error code */ cctx->callback_fn(qp, cctx->session, cctx->callback_arg, errno); cctx->callback_fn = NULL; break; } } qp->cq_head = idx; qp->cq_waiting = false; wakeup(&qp->cq_tail); DPRINTF(sc->dev, "%s: wrote sw head:%u\n", __func__, qp->cq_head); ccp_write_queue_4(sc, q, CMD_Q_HEAD_LO_BASE, (uint32_t)qp->desc_ring_bus_addr + (idx * Q_DESC_SIZE)); ccp_write_queue_4(sc, q, CMD_Q_CONTROL_BASE, qp->qcontrol); DPRINTF(sc->dev, "%s: Restarted queue\n", __func__); } static void ccp_intr_run_completions(struct ccp_queue *qp, uint32_t ints) { struct ccp_completion_ctx *cctx; struct ccp_softc *sc; const struct ccp_desc *desc; uint32_t headlo, idx; unsigned q, completed; sc = qp->cq_softc; q = qp->cq_qindex; mtx_lock(&qp->cq_lock); /* * Hardware HEAD_LO points to the first incomplete descriptor. Process * any submitted and completed descriptors, up to but not including * HEAD_LO. */ headlo = ccp_read_queue_4(sc, q, CMD_Q_HEAD_LO_BASE); idx = (headlo - (uint32_t)qp->desc_ring_bus_addr) / Q_DESC_SIZE; DPRINTF(sc->dev, "%s: hw head:%u sw head:%u\n", __func__, idx, qp->cq_head); completed = 0; while (qp->cq_head != idx) { DPRINTF(sc->dev, "%s: completing:%u\n", __func__, qp->cq_head); cctx = &qp->completions_ring[qp->cq_head]; if (cctx->callback_fn != NULL) { cctx->callback_fn(qp, cctx->session, cctx->callback_arg, 0); cctx->callback_fn = NULL; } /* Keep unused descriptors zero for next use. */ memset(&qp->desc_ring[qp->cq_head], 0, sizeof(qp->desc_ring[qp->cq_head])); qp->cq_head = (qp->cq_head + 1) % (1 << sc->ring_size_order); completed++; } if (completed > 0) { qp->cq_waiting = false; wakeup(&qp->cq_tail); } DPRINTF(sc->dev, "%s: wrote sw head:%u\n", __func__, qp->cq_head); /* * Desc points to the first incomplete descriptor, at the time we read * HEAD_LO. If there was an error flagged in interrupt status, the HW * will not proceed past the erroneous descriptor by itself. */ desc = &qp->desc_ring[idx]; if ((ints & INT_ERROR) != 0) ccp_intr_handle_error(qp, desc); mtx_unlock(&qp->cq_lock); } static void ccp_intr_handler(void *arg) { struct ccp_softc *sc = arg; size_t i; uint32_t ints; DPRINTF(sc->dev, "%s: interrupt\n", __func__); /* * We get one global interrupt per PCI device, shared over all of * its queues. Scan each valid queue on interrupt for flags indicating * activity. */ for (i = 0; i < nitems(sc->queues); i++) { if ((sc->valid_queues & (1 << i)) == 0) continue; ints = ccp_read_queue_4(sc, i, CMD_Q_INTERRUPT_STATUS_BASE); if (ints == 0) continue; #if 0 DPRINTF(sc->dev, "%s: %x interrupts on queue %zu\n", __func__, (unsigned)ints, i); #endif /* Write back 1s to clear interrupt status bits. */ ccp_write_queue_4(sc, i, CMD_Q_INTERRUPT_STATUS_BASE, ints); /* * If there was an error, we still need to run completions on * any descriptors prior to the error. The completions handler * invoked below will also handle the error descriptor. 
*/ if ((ints & (INT_COMPLETION | INT_ERROR)) != 0) ccp_intr_run_completions(&sc->queues[i], ints); if ((ints & INT_QUEUE_STOPPED) != 0) device_printf(sc->dev, "%s: queue %zu stopped\n", __func__, i); } /* Re-enable interrupts after processing */ for (i = 0; i < nitems(sc->queues); i++) { if ((sc->valid_queues & (1 << i)) == 0) continue; ccp_write_queue_4(sc, i, CMD_Q_INT_ENABLE_BASE, INT_COMPLETION | INT_ERROR | INT_QUEUE_STOPPED); } } static int ccp_intr_filter(void *arg) { struct ccp_softc *sc = arg; size_t i; /* TODO: Split individual queues into separate taskqueues? */ for (i = 0; i < nitems(sc->queues); i++) { if ((sc->valid_queues & (1 << i)) == 0) continue; /* Mask interrupt until task completes */ ccp_write_queue_4(sc, i, CMD_Q_INT_ENABLE_BASE, 0); } return (FILTER_SCHEDULE_THREAD); } static int ccp_setup_interrupts(struct ccp_softc *sc) { uint32_t nvec; int rid, error, n, ridcopy; n = pci_msix_count(sc->dev); if (n < 1) { device_printf(sc->dev, "%s: msix_count: %d\n", __func__, n); return (ENXIO); } nvec = n; error = pci_alloc_msix(sc->dev, &nvec); if (error != 0) { device_printf(sc->dev, "%s: alloc_msix error: %d\n", __func__, error); return (error); } if (nvec < 1) { device_printf(sc->dev, "%s: alloc_msix: 0 vectors\n", __func__); return (ENXIO); } if (nvec > nitems(sc->intr_res)) { device_printf(sc->dev, "%s: too many vectors: %u\n", __func__, nvec); nvec = nitems(sc->intr_res); } for (rid = 1; rid < 1 + nvec; rid++) { ridcopy = rid; sc->intr_res[rid - 1] = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &ridcopy, RF_ACTIVE); if (sc->intr_res[rid - 1] == NULL) { device_printf(sc->dev, "%s: Failed to alloc IRQ resource\n", __func__); return (ENXIO); } sc->intr_tag[rid - 1] = NULL; error = bus_setup_intr(sc->dev, sc->intr_res[rid - 1], INTR_MPSAFE | INTR_TYPE_MISC, ccp_intr_filter, ccp_intr_handler, sc, &sc->intr_tag[rid - 1]); if (error != 0) device_printf(sc->dev, "%s: setup_intr: %d\n", __func__, error); } sc->intr_count = nvec; return (error); } static void ccp_release_interrupts(struct ccp_softc *sc) { unsigned i; for (i = 0; i < sc->intr_count; i++) { if (sc->intr_tag[i] != NULL) bus_teardown_intr(sc->dev, sc->intr_res[i], sc->intr_tag[i]); if (sc->intr_res[i] != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, rman_get_rid(sc->intr_res[i]), sc->intr_res[i]); } pci_release_msi(sc->dev); } int ccp_hw_attach(device_t dev) { struct ccp_softc *sc; uint64_t lsbmask; uint32_t version, lsbmasklo, lsbmaskhi; unsigned queue_idx, j; int error; bool bars_mapped, interrupts_setup; queue_idx = 0; bars_mapped = interrupts_setup = false; sc = device_get_softc(dev); error = ccp_map_pci_bar(dev); if (error != 0) { device_printf(dev, "%s: couldn't map BAR(s)\n", __func__); goto out; } bars_mapped = true; error = pci_enable_busmaster(dev); if (error != 0) { device_printf(dev, "%s: couldn't enable busmaster\n", __func__); goto out; } sc->ring_size_order = g_ccp_ring_order; if (sc->ring_size_order < 6 || sc->ring_size_order > 16) { device_printf(dev, "bogus hw.ccp.ring_order\n"); error = EINVAL; goto out; } sc->valid_queues = ccp_read_4(sc, CMD_QUEUE_MASK_OFFSET); version = ccp_read_4(sc, VERSION_REG); if ((version & VERSION_NUM_MASK) < 5) { device_printf(dev, "driver supports version 5 and later hardware\n"); error = ENXIO; goto out; } error = ccp_setup_interrupts(sc); if (error != 0) goto out; interrupts_setup = true; sc->hw_version = version & VERSION_NUM_MASK; sc->num_queues = (version >> VERSION_NUMVQM_SHIFT) & VERSION_NUMVQM_MASK; sc->num_lsb_entries = (version >> VERSION_LSBSIZE_SHIFT) & 
VERSION_LSBSIZE_MASK; sc->hw_features = version & VERSION_CAP_MASK; /* * Copy private LSB mask to public registers to enable access to LSB * from all queues allowed by BIOS. */ lsbmasklo = ccp_read_4(sc, LSB_PRIVATE_MASK_LO_OFFSET); lsbmaskhi = ccp_read_4(sc, LSB_PRIVATE_MASK_HI_OFFSET); ccp_write_4(sc, LSB_PUBLIC_MASK_LO_OFFSET, lsbmasklo); ccp_write_4(sc, LSB_PUBLIC_MASK_HI_OFFSET, lsbmaskhi); lsbmask = ((uint64_t)lsbmaskhi << 30) | lsbmasklo; for (; queue_idx < nitems(sc->queues); queue_idx++) { error = ccp_hw_attach_queue(dev, lsbmask, queue_idx); if (error != 0) { device_printf(dev, "%s: couldn't attach queue %u\n", __func__, queue_idx); goto out; } } ccp_assign_lsb_regions(sc, lsbmask); out: if (error != 0) { if (interrupts_setup) ccp_release_interrupts(sc); for (j = 0; j < queue_idx; j++) ccp_hw_detach_queue(dev, j); if (sc->ring_size_order != 0) pci_disable_busmaster(dev); if (bars_mapped) ccp_unmap_pci_bar(dev); } return (error); } void ccp_hw_detach(device_t dev) { struct ccp_softc *sc; unsigned i; sc = device_get_softc(dev); for (i = 0; i < nitems(sc->queues); i++) ccp_hw_detach_queue(dev, i); ccp_release_interrupts(sc); pci_disable_busmaster(dev); ccp_unmap_pci_bar(dev); } static int __must_check ccp_passthrough(struct ccp_queue *qp, bus_addr_t dst, enum ccp_memtype dst_type, bus_addr_t src, enum ccp_memtype src_type, bus_size_t len, enum ccp_passthru_byteswap swapmode, enum ccp_passthru_bitwise bitmode, bool interrupt, const struct ccp_completion_ctx *cctx) { struct ccp_desc *desc; if (ccp_queue_get_ring_space(qp) == 0) return (EAGAIN); desc = &qp->desc_ring[qp->cq_tail]; memset(desc, 0, sizeof(*desc)); desc->engine = CCP_ENGINE_PASSTHRU; desc->pt.ioc = interrupt; desc->pt.byteswap = swapmode; desc->pt.bitwise = bitmode; desc->length = len; desc->src_lo = (uint32_t)src; desc->src_hi = src >> 32; desc->src_mem = src_type; desc->dst_lo = (uint32_t)dst; desc->dst_hi = dst >> 32; desc->dst_mem = dst_type; if (bitmode != CCP_PASSTHRU_BITWISE_NOOP) desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_KEY); if (cctx != NULL) memcpy(&qp->completions_ring[qp->cq_tail], cctx, sizeof(*cctx)); qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order); return (0); } static int __must_check ccp_passthrough_sgl(struct ccp_queue *qp, bus_addr_t lsb_addr, bool tolsb, struct sglist *sgl, bus_size_t len, bool interrupt, const struct ccp_completion_ctx *cctx) { struct sglist_seg *seg; size_t i, remain, nb; int error; remain = len; for (i = 0; i < sgl->sg_nseg && remain != 0; i++) { seg = &sgl->sg_segs[i]; /* crp lengths are int, so 32-bit min() is ok. */ nb = min(remain, seg->ss_len); if (tolsb) error = ccp_passthrough(qp, lsb_addr, CCP_MEMTYPE_SB, seg->ss_paddr, CCP_MEMTYPE_SYSTEM, nb, CCP_PASSTHRU_BYTESWAP_NOOP, CCP_PASSTHRU_BITWISE_NOOP, (nb == remain) && interrupt, cctx); else error = ccp_passthrough(qp, seg->ss_paddr, CCP_MEMTYPE_SYSTEM, lsb_addr, CCP_MEMTYPE_SB, nb, CCP_PASSTHRU_BYTESWAP_NOOP, CCP_PASSTHRU_BITWISE_NOOP, (nb == remain) && interrupt, cctx); if (error != 0) return (error); remain -= nb; } return (0); } /* * Note that these vectors are in reverse of the usual order. 
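 *
 * For example, FIPS 180-4 lists SHA-256's initial hash words H0..H7 as
 * 6a09e667, bb67ae85, 3c6ef372, a54ff53a, 510e527f, 9b05688c, 1f83d9ab,
 * 5be0cd19; the .SHA256 initializer below stores the same words as
 * H7..H0, evidently the layout the engine's LSB context expects
 * (ccp_sha_copy_result() undoes the reversal on the way out).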
*/ const struct SHA_vectors { uint32_t SHA1[8]; uint32_t SHA224[8]; uint32_t SHA256[8]; uint64_t SHA384[8]; uint64_t SHA512[8]; } SHA_H __aligned(PAGE_SIZE) = { .SHA1 = { 0xc3d2e1f0ul, 0x10325476ul, 0x98badcfeul, 0xefcdab89ul, 0x67452301ul, 0, 0, 0, }, .SHA224 = { 0xbefa4fa4ul, 0x64f98fa7ul, 0x68581511ul, 0xffc00b31ul, 0xf70e5939ul, 0x3070dd17ul, 0x367cd507ul, 0xc1059ed8ul, }, .SHA256 = { 0x5be0cd19ul, 0x1f83d9abul, 0x9b05688cul, 0x510e527ful, 0xa54ff53aul, 0x3c6ef372ul, 0xbb67ae85ul, 0x6a09e667ul, }, .SHA384 = { 0x47b5481dbefa4fa4ull, 0xdb0c2e0d64f98fa7ull, 0x8eb44a8768581511ull, 0x67332667ffc00b31ull, 0x152fecd8f70e5939ull, 0x9159015a3070dd17ull, 0x629a292a367cd507ull, 0xcbbb9d5dc1059ed8ull, }, .SHA512 = { 0x5be0cd19137e2179ull, 0x1f83d9abfb41bd6bull, 0x9b05688c2b3e6c1full, 0x510e527fade682d1ull, 0xa54ff53a5f1d36f1ull, 0x3c6ef372fe94f82bull, 0xbb67ae8584caa73bull, 0x6a09e667f3bcc908ull, }, }; /* * Ensure vectors do not cross a page boundary. * * Disabled due to a new Clang error: "expression is not an integral constant * expression." GCC (cross toolchain) seems to handle this assertion with * _Static_assert just fine. */ #if 0 CTASSERT(PAGE_SIZE - ((uintptr_t)&SHA_H % PAGE_SIZE) >= sizeof(SHA_H)); #endif const struct SHA_Defn { enum sha_version version; const void *H_vectors; size_t H_size; struct auth_hash *axf; enum ccp_sha_type engine_type; } SHA_definitions[] = { { .version = SHA1, .H_vectors = SHA_H.SHA1, .H_size = sizeof(SHA_H.SHA1), .axf = &auth_hash_hmac_sha1, .engine_type = CCP_SHA_TYPE_1, }, #if 0 { .version = SHA2_224, .H_vectors = SHA_H.SHA224, .H_size = sizeof(SHA_H.SHA224), .axf = &auth_hash_hmac_sha2_224, .engine_type = CCP_SHA_TYPE_224, }, #endif { .version = SHA2_256, .H_vectors = SHA_H.SHA256, .H_size = sizeof(SHA_H.SHA256), .axf = &auth_hash_hmac_sha2_256, .engine_type = CCP_SHA_TYPE_256, }, { .version = SHA2_384, .H_vectors = SHA_H.SHA384, .H_size = sizeof(SHA_H.SHA384), .axf = &auth_hash_hmac_sha2_384, .engine_type = CCP_SHA_TYPE_384, }, { .version = SHA2_512, .H_vectors = SHA_H.SHA512, .H_size = sizeof(SHA_H.SHA512), .axf = &auth_hash_hmac_sha2_512, .engine_type = CCP_SHA_TYPE_512, }, }; static int __must_check ccp_sha_single_desc(struct ccp_queue *qp, const struct SHA_Defn *defn, vm_paddr_t addr, size_t len, bool start, bool end, uint64_t msgbits) { struct ccp_desc *desc; if (ccp_queue_get_ring_space(qp) == 0) return (EAGAIN); desc = &qp->desc_ring[qp->cq_tail]; memset(desc, 0, sizeof(*desc)); desc->engine = CCP_ENGINE_SHA; desc->som = start; desc->eom = end; desc->sha.type = defn->engine_type; desc->length = len; if (end) { desc->sha_len_lo = (uint32_t)msgbits; desc->sha_len_hi = msgbits >> 32; } desc->src_lo = (uint32_t)addr; desc->src_hi = addr >> 32; desc->src_mem = CCP_MEMTYPE_SYSTEM; desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_SHA); qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order); return (0); } static int __must_check ccp_sha(struct ccp_queue *qp, enum sha_version version, struct sglist *sgl_src, struct sglist *sgl_dst, const struct ccp_completion_ctx *cctx) { const struct SHA_Defn *defn; struct sglist_seg *seg; size_t i, msgsize, remaining, nb; uint32_t lsbaddr; int error; for (i = 0; i < nitems(SHA_definitions); i++) if (SHA_definitions[i].version == version) break; if (i == nitems(SHA_definitions)) return (EINVAL); defn = &SHA_definitions[i]; /* XXX validate input ??? 
*/ /* Load initial SHA state into LSB */ /* XXX ensure H_vectors don't span page boundaries */ error = ccp_passthrough(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_SHA), CCP_MEMTYPE_SB, pmap_kextract((vm_offset_t)defn->H_vectors), CCP_MEMTYPE_SYSTEM, roundup2(defn->H_size, LSB_ENTRY_SIZE), CCP_PASSTHRU_BYTESWAP_NOOP, CCP_PASSTHRU_BITWISE_NOOP, false, NULL); if (error != 0) return (error); /* Execute series of SHA updates on correctly sized buffers */ msgsize = 0; for (i = 0; i < sgl_src->sg_nseg; i++) { seg = &sgl_src->sg_segs[i]; msgsize += seg->ss_len; error = ccp_sha_single_desc(qp, defn, seg->ss_paddr, seg->ss_len, i == 0, i == sgl_src->sg_nseg - 1, msgsize << 3); if (error != 0) return (error); } /* Copy result out to sgl_dst */ remaining = roundup2(defn->H_size, LSB_ENTRY_SIZE); lsbaddr = ccp_queue_lsb_address(qp, LSB_ENTRY_SHA); for (i = 0; i < sgl_dst->sg_nseg; i++) { seg = &sgl_dst->sg_segs[i]; /* crp lengths are int, so 32-bit min() is ok. */ nb = min(remaining, seg->ss_len); error = ccp_passthrough(qp, seg->ss_paddr, CCP_MEMTYPE_SYSTEM, lsbaddr, CCP_MEMTYPE_SB, nb, CCP_PASSTHRU_BYTESWAP_NOOP, CCP_PASSTHRU_BITWISE_NOOP, (cctx != NULL) ? (nb == remaining) : false, (nb == remaining) ? cctx : NULL); if (error != 0) return (error); remaining -= nb; lsbaddr += nb; if (remaining == 0) break; } return (0); } static void byteswap256(uint64_t *buffer) { uint64_t t; t = bswap64(buffer[3]); buffer[3] = bswap64(buffer[0]); buffer[0] = t; t = bswap64(buffer[2]); buffer[2] = bswap64(buffer[1]); buffer[1] = t; } /* * Translate CCP internal LSB hash format into a standard hash ouput. * * Manipulates input buffer with byteswap256 operation. */ static void ccp_sha_copy_result(char *output, char *buffer, enum sha_version version) { const struct SHA_Defn *defn; size_t i; for (i = 0; i < nitems(SHA_definitions); i++) if (SHA_definitions[i].version == version) break; if (i == nitems(SHA_definitions)) panic("bogus sha version auth_mode %u\n", (unsigned)version); defn = &SHA_definitions[i]; /* Swap 256bit manually -- DMA engine can, but with limitations */ byteswap256((void *)buffer); if (defn->axf->hashsize > LSB_ENTRY_SIZE) byteswap256((void *)(buffer + LSB_ENTRY_SIZE)); switch (defn->version) { case SHA1: memcpy(output, buffer + 12, defn->axf->hashsize); break; #if 0 case SHA2_224: memcpy(output, buffer + XXX, defn->axf->hashsize); break; #endif case SHA2_256: memcpy(output, buffer, defn->axf->hashsize); break; case SHA2_384: memcpy(output, buffer + LSB_ENTRY_SIZE * 3 - defn->axf->hashsize, defn->axf->hashsize - LSB_ENTRY_SIZE); memcpy(output + defn->axf->hashsize - LSB_ENTRY_SIZE, buffer, LSB_ENTRY_SIZE); break; case SHA2_512: memcpy(output, buffer + LSB_ENTRY_SIZE, LSB_ENTRY_SIZE); memcpy(output + LSB_ENTRY_SIZE, buffer, LSB_ENTRY_SIZE); break; } } static void ccp_do_hmac_done(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp, int error) { char ihash[SHA2_512_HASH_LEN /* max hash len */]; union authctx auth_ctx; struct auth_hash *axf; axf = s->hmac.auth_hash; s->pending--; if (error != 0) { crp->crp_etype = error; goto out; } /* Do remaining outer hash over small inner hash in software */ axf->Init(&auth_ctx); axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize); ccp_sha_copy_result(ihash, s->hmac.res, s->hmac.auth_mode); #if 0 INSECURE_DEBUG(dev, "%s sha intermediate=%64D\n", __func__, (u_char *)ihash, " "); #endif axf->Update(&auth_ctx, ihash, axf->hashsize); axf->Final(s->hmac.res, &auth_ctx); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, 
s->hmac.hash_len, ihash); if (timingsafe_bcmp(s->hmac.res, ihash, s->hmac.hash_len) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len, s->hmac.res); /* Avoid leaking key material */ explicit_bzero(&auth_ctx, sizeof(auth_ctx)); explicit_bzero(s->hmac.res, sizeof(s->hmac.res)); out: crypto_done(crp); } static void ccp_hmac_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp, int error) { struct cryptop *crp; crp = vcrp; ccp_do_hmac_done(qp, s, crp, error); } static int __must_check ccp_do_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp, const struct ccp_completion_ctx *cctx) { device_t dev; struct auth_hash *axf; int error; dev = qp->cq_softc->dev; axf = s->hmac.auth_hash; /* * Populate the SGL describing inside hash contents. We want to hash * the ipad (key XOR fixed bit pattern) concatenated with the user * data. */ sglist_reset(qp->cq_sg_ulptx); error = sglist_append(qp->cq_sg_ulptx, s->hmac.ipad, axf->blocksize); if (error != 0) return (error); if (crp->crp_aad_length != 0) { error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, crp->crp_aad_start, crp->crp_aad_length); if (error != 0) return (error); } error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error != 0) { DPRINTF(dev, "%s: sglist too short\n", __func__); return (error); } /* Populate SGL for output -- use hmac.res buffer. */ sglist_reset(qp->cq_sg_dst); error = sglist_append(qp->cq_sg_dst, s->hmac.res, roundup2(axf->hashsize, LSB_ENTRY_SIZE)); if (error != 0) return (error); error = ccp_sha(qp, s->hmac.auth_mode, qp->cq_sg_ulptx, qp->cq_sg_dst, cctx); if (error != 0) { DPRINTF(dev, "%s: ccp_sha error\n", __func__); return (error); } return (0); } int __must_check ccp_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp) { struct ccp_completion_ctx ctx; ctx.callback_fn = ccp_hmac_done; ctx.callback_arg = crp; ctx.session = s; return (ccp_do_hmac(qp, s, crp, &ctx)); } static void ccp_byteswap(char *data, size_t len) { size_t i; char t; len--; for (i = 0; i < len; i++, len--) { t = data[i]; data[i] = data[len]; data[len] = t; } } static void ccp_blkcipher_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp, int error) { struct cryptop *crp; explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv)); crp = vcrp; s->pending--; if (error != 0) crp->crp_etype = error; DPRINTF(qp->cq_softc->dev, "%s: qp=%p crp=%p\n", __func__, qp, crp); crypto_done(crp); } static void ccp_collect_iv(struct cryptop *crp, const struct crypto_session_params *csp, char *iv) { - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(iv, csp->csp_ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(iv, crp->crp_iv, csp->csp_ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv); + crypto_read_iv(crp, iv); /* * If the input IV is 12 bytes, append an explicit counter of 1. */ if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 && csp->csp_ivlen == 12) *(uint32_t *)&iv[12] = htobe32(1); if (csp->csp_cipher_alg == CRYPTO_AES_XTS && csp->csp_ivlen < AES_BLOCK_LEN) memset(&iv[csp->csp_ivlen], 0, AES_BLOCK_LEN - csp->csp_ivlen); /* Reverse order of IV material for HW */ INSECURE_DEBUG(NULL, "%s: IV: %16D len: %u\n", __func__, iv, " ", csp->csp_ivlen); /* * For unknown reasons, XTS mode expects the IV in the reverse byte * order to every other AES mode. 
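 *
 * ccp_byteswap() simply reverses the buffer in place, so in the non-XTS
 * case the 16 IV bytes b0..b15 gathered by crypto_read_iv() above reach
 * the LSB as b15..b0.  (crypto_read_iv() is assumed here to copy
 * csp_ivlen bytes from crp_iv when CRYPTO_F_IV_SEPARATE is set and from
 * the request buffer at crp_iv_start otherwise; that is the per-driver
 * logic this revision removes.)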
*/ if (csp->csp_cipher_alg != CRYPTO_AES_XTS) ccp_byteswap(iv, AES_BLOCK_LEN); } static int __must_check ccp_do_pst_to_lsb(struct ccp_queue *qp, uint32_t lsbaddr, const void *src, size_t len) { int error; sglist_reset(qp->cq_sg_ulptx); error = sglist_append(qp->cq_sg_ulptx, __DECONST(void *, src), len); if (error != 0) return (error); error = ccp_passthrough_sgl(qp, lsbaddr, true, qp->cq_sg_ulptx, len, false, NULL); return (error); } static int __must_check ccp_do_xts(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp, enum ccp_cipher_dir dir, const struct ccp_completion_ctx *cctx) { struct ccp_desc *desc; device_t dev; unsigned i; enum ccp_xts_unitsize usize; /* IV and Key data are already loaded */ dev = qp->cq_softc->dev; for (i = 0; i < nitems(ccp_xts_unitsize_map); i++) if (ccp_xts_unitsize_map[i].cxu_size == crp->crp_payload_length) { usize = ccp_xts_unitsize_map[i].cxu_id; break; } if (i >= nitems(ccp_xts_unitsize_map)) return (EINVAL); for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) { struct sglist_seg *seg; seg = &qp->cq_sg_ulptx->sg_segs[i]; desc = &qp->desc_ring[qp->cq_tail]; desc->engine = CCP_ENGINE_XTS_AES; desc->som = (i == 0); desc->eom = (i == qp->cq_sg_ulptx->sg_nseg - 1); desc->ioc = (desc->eom && cctx != NULL); DPRINTF(dev, "%s: XTS %u: som:%d eom:%d ioc:%d dir:%d\n", __func__, qp->cq_tail, (int)desc->som, (int)desc->eom, (int)desc->ioc, (int)dir); if (desc->ioc) memcpy(&qp->completions_ring[qp->cq_tail], cctx, sizeof(*cctx)); desc->aes_xts.encrypt = dir; desc->aes_xts.type = s->blkcipher.cipher_type; desc->aes_xts.size = usize; DPRINTF(dev, "XXX %s: XTS %u: type:%u size:%u\n", __func__, qp->cq_tail, (unsigned)desc->aes_xts.type, (unsigned)desc->aes_xts.size); desc->length = seg->ss_len; desc->src_lo = (uint32_t)seg->ss_paddr; desc->src_hi = (seg->ss_paddr >> 32); desc->src_mem = CCP_MEMTYPE_SYSTEM; /* Crypt in-place */ desc->dst_lo = desc->src_lo; desc->dst_hi = desc->src_hi; desc->dst_mem = desc->src_mem; desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); desc->key_hi = 0; desc->key_mem = CCP_MEMTYPE_SB; desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order); } return (0); } static int __must_check ccp_do_blkcipher(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp, const struct ccp_completion_ctx *cctx) { const struct crypto_session_params *csp; struct ccp_desc *desc; char *keydata; device_t dev; enum ccp_cipher_dir dir; int error, iv_len; size_t keydata_len; unsigned i, j; dev = qp->cq_softc->dev; if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) { DPRINTF(dev, "%s: empty\n", __func__); return (EINVAL); } if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) { DPRINTF(dev, "%s: len modulo: %d\n", __func__, crp->crp_payload_length); return (EINVAL); } /* * Individual segments must be multiples of AES block size for the HW * to process it. Non-compliant inputs aren't bogus, just not doable * on this hardware. 
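 *
 * For example, a 48-byte scatter/gather segment passes this check, while
 * a 20-byte segment makes the loop below reject the request with EINVAL
 * rather than feed a partial AES block to the engine.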
*/ for (i = 0; i < qp->cq_sg_crp->sg_nseg; i++) if ((qp->cq_sg_crp->sg_segs[i].ss_len % AES_BLOCK_LEN) != 0) { DPRINTF(dev, "%s: seg modulo: %zu\n", __func__, qp->cq_sg_crp->sg_segs[i].ss_len); return (EINVAL); } /* Gather IV/nonce data */ csp = crypto_get_params(crp->crp_session); ccp_collect_iv(crp, csp, s->blkcipher.iv); iv_len = csp->csp_ivlen; if (csp->csp_cipher_alg == CRYPTO_AES_XTS) iv_len = AES_BLOCK_LEN; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) dir = CCP_CIPHER_DIR_ENCRYPT; else dir = CCP_CIPHER_DIR_DECRYPT; /* Set up passthrough op(s) to copy IV into LSB */ error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV), s->blkcipher.iv, iv_len); if (error != 0) return (error); /* * Initialize keydata and keydata_len for GCC. The default case of the * following switch is impossible to reach, but GCC doesn't know that. */ keydata_len = 0; keydata = NULL; switch (csp->csp_cipher_alg) { case CRYPTO_AES_XTS: for (j = 0; j < nitems(ccp_xts_unitsize_map); j++) if (ccp_xts_unitsize_map[j].cxu_size == crp->crp_payload_length) break; /* Input buffer must be a supported UnitSize */ if (j >= nitems(ccp_xts_unitsize_map)) { device_printf(dev, "%s: rejected block size: %u\n", __func__, crp->crp_payload_length); return (EOPNOTSUPP); } /* FALLTHROUGH */ case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: keydata = s->blkcipher.enckey; keydata_len = s->blkcipher.key_len; break; } INSECURE_DEBUG(dev, "%s: KEY(%zu): %16D\n", __func__, keydata_len, keydata, " "); if (csp->csp_cipher_alg == CRYPTO_AES_XTS) INSECURE_DEBUG(dev, "%s: KEY(XTS): %64D\n", __func__, keydata, " "); /* Reverse order of key material for HW */ ccp_byteswap(keydata, keydata_len); /* Store key material into LSB to avoid page boundaries */ if (csp->csp_cipher_alg == CRYPTO_AES_XTS) { /* * XTS mode uses 2 256-bit vectors for the primary key and the * tweak key. For 128-bit keys, the vectors are zero-padded. * * After byteswapping the combined OCF-provided K1:K2 vector * above, we need to reverse the order again so the hardware * gets the swapped keys in the order K1':K2'. */ error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_KEY + 1), keydata, keydata_len / 2); if (error != 0) return (error); error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_KEY), keydata + (keydata_len / 2), keydata_len / 2); /* Zero-pad 128 bit keys */ if (keydata_len == 32) { if (error != 0) return (error); error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_KEY) + keydata_len / 2, g_zeroes, keydata_len / 2); if (error != 0) return (error); error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_KEY + 1) + keydata_len / 2, g_zeroes, keydata_len / 2); } } else error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_KEY), keydata, keydata_len); if (error != 0) return (error); /* * Point SGLs at the subset of cryptop buffer contents representing the * data. 
*/ sglist_reset(qp->cq_sg_ulptx); error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error != 0) return (error); INSECURE_DEBUG(dev, "%s: Contents: %16D\n", __func__, (void *)PHYS_TO_DMAP(qp->cq_sg_ulptx->sg_segs[0].ss_paddr), " "); DPRINTF(dev, "%s: starting AES ops @ %u\n", __func__, qp->cq_tail); if (ccp_queue_get_ring_space(qp) < qp->cq_sg_ulptx->sg_nseg) return (EAGAIN); if (csp->csp_cipher_alg == CRYPTO_AES_XTS) return (ccp_do_xts(qp, s, crp, dir, cctx)); for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) { struct sglist_seg *seg; seg = &qp->cq_sg_ulptx->sg_segs[i]; desc = &qp->desc_ring[qp->cq_tail]; desc->engine = CCP_ENGINE_AES; desc->som = (i == 0); desc->eom = (i == qp->cq_sg_ulptx->sg_nseg - 1); desc->ioc = (desc->eom && cctx != NULL); DPRINTF(dev, "%s: AES %u: som:%d eom:%d ioc:%d dir:%d\n", __func__, qp->cq_tail, (int)desc->som, (int)desc->eom, (int)desc->ioc, (int)dir); if (desc->ioc) memcpy(&qp->completions_ring[qp->cq_tail], cctx, sizeof(*cctx)); desc->aes.encrypt = dir; desc->aes.mode = s->blkcipher.cipher_mode; desc->aes.type = s->blkcipher.cipher_type; if (csp->csp_cipher_alg == CRYPTO_AES_ICM) /* * Size of CTR value in bits, - 1. ICM mode uses all * 128 bits as counter. */ desc->aes.size = 127; DPRINTF(dev, "%s: AES %u: mode:%u type:%u size:%u\n", __func__, qp->cq_tail, (unsigned)desc->aes.mode, (unsigned)desc->aes.type, (unsigned)desc->aes.size); desc->length = seg->ss_len; desc->src_lo = (uint32_t)seg->ss_paddr; desc->src_hi = (seg->ss_paddr >> 32); desc->src_mem = CCP_MEMTYPE_SYSTEM; /* Crypt in-place */ desc->dst_lo = desc->src_lo; desc->dst_hi = desc->src_hi; desc->dst_mem = desc->src_mem; desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); desc->key_hi = 0; desc->key_mem = CCP_MEMTYPE_SB; desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order); } return (0); } int __must_check ccp_blkcipher(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp) { struct ccp_completion_ctx ctx; ctx.callback_fn = ccp_blkcipher_done; ctx.session = s; ctx.callback_arg = crp; return (ccp_do_blkcipher(qp, s, crp, &ctx)); } static void ccp_authenc_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp, int error) { struct cryptop *crp; explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv)); crp = vcrp; ccp_do_hmac_done(qp, s, crp, error); } int __must_check ccp_authenc(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp) { struct ccp_completion_ctx ctx; int error; ctx.callback_fn = ccp_authenc_done; ctx.session = s; ctx.callback_arg = crp; /* Perform first operation */ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) error = ccp_do_blkcipher(qp, s, crp, NULL); else error = ccp_do_hmac(qp, s, crp, NULL); if (error != 0) return (error); /* Perform second operation */ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) error = ccp_do_hmac(qp, s, crp, &ctx); else error = ccp_do_blkcipher(qp, s, crp, &ctx); return (error); } static int __must_check ccp_do_ghash_aad(struct ccp_queue *qp, struct ccp_session *s) { struct ccp_desc *desc; struct sglist_seg *seg; unsigned i; if (ccp_queue_get_ring_space(qp) < qp->cq_sg_ulptx->sg_nseg) return (EAGAIN); for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) { seg = &qp->cq_sg_ulptx->sg_segs[i]; desc = &qp->desc_ring[qp->cq_tail]; desc->engine = CCP_ENGINE_AES; desc->aes.mode = CCP_AES_MODE_GHASH; desc->aes.type = s->blkcipher.cipher_type; desc->aes.encrypt = CCP_AES_MODE_GHASH_AAD; desc->som = (i == 0); 
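/*
 * eom is left clear on these AAD descriptors; the GHASH state accumulated
 * in the LSB context entry is only finalized later, when
 * ccp_do_ghash_final() issues the CCP_AES_MODE_GHASH_FINAL operation.
 */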
desc->length = seg->ss_len; desc->src_lo = (uint32_t)seg->ss_paddr; desc->src_hi = (seg->ss_paddr >> 32); desc->src_mem = CCP_MEMTYPE_SYSTEM; desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); desc->key_mem = CCP_MEMTYPE_SB; qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order); } return (0); } static int __must_check ccp_do_gctr(struct ccp_queue *qp, struct ccp_session *s, enum ccp_cipher_dir dir, struct sglist_seg *seg, bool som, bool eom) { struct ccp_desc *desc; if (ccp_queue_get_ring_space(qp) == 0) return (EAGAIN); desc = &qp->desc_ring[qp->cq_tail]; desc->engine = CCP_ENGINE_AES; desc->aes.mode = CCP_AES_MODE_GCTR; desc->aes.type = s->blkcipher.cipher_type; desc->aes.encrypt = dir; desc->aes.size = 8 * (seg->ss_len % GMAC_BLOCK_LEN) - 1; desc->som = som; desc->eom = eom; /* Trailing bytes will be masked off by aes.size above. */ desc->length = roundup2(seg->ss_len, GMAC_BLOCK_LEN); desc->dst_lo = desc->src_lo = (uint32_t)seg->ss_paddr; desc->dst_hi = desc->src_hi = seg->ss_paddr >> 32; desc->dst_mem = desc->src_mem = CCP_MEMTYPE_SYSTEM; desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); desc->key_mem = CCP_MEMTYPE_SB; qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order); return (0); } static int __must_check ccp_do_ghash_final(struct ccp_queue *qp, struct ccp_session *s) { struct ccp_desc *desc; if (ccp_queue_get_ring_space(qp) == 0) return (EAGAIN); desc = &qp->desc_ring[qp->cq_tail]; desc->engine = CCP_ENGINE_AES; desc->aes.mode = CCP_AES_MODE_GHASH; desc->aes.type = s->blkcipher.cipher_type; desc->aes.encrypt = CCP_AES_MODE_GHASH_FINAL; desc->length = GMAC_BLOCK_LEN; desc->src_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH_IN); desc->src_mem = CCP_MEMTYPE_SB; desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); desc->key_mem = CCP_MEMTYPE_SB; desc->dst_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH); desc->dst_mem = CCP_MEMTYPE_SB; qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order); return (0); } static void ccp_gcm_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp, int error) { char tag[GMAC_DIGEST_LEN]; struct cryptop *crp; crp = vcrp; s->pending--; if (error != 0) { crp->crp_etype = error; goto out; } /* Encrypt is done. Decrypt needs to verify tag. */ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) goto out; /* Copy in message tag. 
*/ crypto_copydata(crp, crp->crp_digest_start, s->gmac.hash_len, tag); /* Verify tag against computed GMAC */ if (timingsafe_bcmp(tag, s->gmac.final_block, s->gmac.hash_len) != 0) crp->crp_etype = EBADMSG; out: explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv)); explicit_bzero(&s->gmac.final_block, sizeof(s->gmac.final_block)); crypto_done(crp); } int __must_check ccp_gcm(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp) { const struct crypto_session_params *csp; struct ccp_completion_ctx ctx; enum ccp_cipher_dir dir; device_t dev; unsigned i; int error; if (s->blkcipher.key_len == 0) return (EINVAL); dev = qp->cq_softc->dev; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) dir = CCP_CIPHER_DIR_ENCRYPT; else dir = CCP_CIPHER_DIR_DECRYPT; /* Zero initial GHASH portion of context */ memset(s->blkcipher.iv, 0, sizeof(s->blkcipher.iv)); /* Gather IV data */ csp = crypto_get_params(crp->crp_session); ccp_collect_iv(crp, csp, s->blkcipher.iv); /* Reverse order of key material for HW */ ccp_byteswap(s->blkcipher.enckey, s->blkcipher.key_len); /* Prepare input buffer of concatenated lengths for final GHASH */ be64enc(s->gmac.final_block, (uint64_t)crp->crp_aad_length * 8); be64enc(&s->gmac.final_block[8], (uint64_t)crp->crp_payload_length * 8); /* Send IV + initial zero GHASH, key data, and lengths buffer to LSB */ error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV), s->blkcipher.iv, 32); if (error != 0) return (error); error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_KEY), s->blkcipher.enckey, s->blkcipher.key_len); if (error != 0) return (error); error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH_IN), s->gmac.final_block, GMAC_BLOCK_LEN); if (error != 0) return (error); /* First step - compute GHASH over AAD */ if (crp->crp_aad_length != 0) { sglist_reset(qp->cq_sg_ulptx); error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, crp->crp_aad_start, crp->crp_aad_length); if (error != 0) return (error); /* This engine cannot process non-block multiple AAD data. */ for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) if ((qp->cq_sg_ulptx->sg_segs[i].ss_len % GMAC_BLOCK_LEN) != 0) { DPRINTF(dev, "%s: AD seg modulo: %zu\n", __func__, qp->cq_sg_ulptx->sg_segs[i].ss_len); return (EINVAL); } error = ccp_do_ghash_aad(qp, s); if (error != 0) return (error); } /* Feed data piece by piece into GCTR */ sglist_reset(qp->cq_sg_ulptx); error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error != 0) return (error); /* * All segments except the last must be even multiples of AES block * size for the HW to process it. Non-compliant inputs aren't bogus, * just not doable on this hardware. * * XXX: Well, the hardware will produce a valid tag for shorter final * segment inputs, but it will still write out a block-sized plaintext * or ciphertext chunk. For a typical CRP this tramples trailing data, * including the provided message tag. So, reject such inputs for now. */ for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) if ((qp->cq_sg_ulptx->sg_segs[i].ss_len % AES_BLOCK_LEN) != 0) { DPRINTF(dev, "%s: seg modulo: %zu\n", __func__, qp->cq_sg_ulptx->sg_segs[i].ss_len); return (EINVAL); } for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) { struct sglist_seg *seg; seg = &qp->cq_sg_ulptx->sg_segs[i]; error = ccp_do_gctr(qp, s, dir, seg, (i == 0 && crp->crp_aad_length == 0), i == (qp->cq_sg_ulptx->sg_nseg - 1)); if (error != 0) return (error); } /* Send just initial IV (not GHASH!) 
to LSB again */ error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV), s->blkcipher.iv, AES_BLOCK_LEN); if (error != 0) return (error); ctx.callback_fn = ccp_gcm_done; ctx.session = s; ctx.callback_arg = crp; /* Compute final hash and copy result back */ error = ccp_do_ghash_final(qp, s); if (error != 0) return (error); /* When encrypting, copy computed tag out to caller buffer. */ sglist_reset(qp->cq_sg_ulptx); if (dir == CCP_CIPHER_DIR_ENCRYPT) error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, crp->crp_digest_start, s->gmac.hash_len); else /* * For decrypting, copy the computed tag out to our session * buffer to verify in our callback. */ error = sglist_append(qp->cq_sg_ulptx, s->gmac.final_block, s->gmac.hash_len); if (error != 0) return (error); error = ccp_passthrough_sgl(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH), false, qp->cq_sg_ulptx, s->gmac.hash_len, true, &ctx); return (error); } #define MAX_TRNG_RETRIES 10 u_int random_ccp_read(void *v, u_int c) { uint32_t *buf; u_int i, j; KASSERT(c % sizeof(*buf) == 0, ("%u not multiple of u_long", c)); buf = v; for (i = c; i > 0; i -= sizeof(*buf)) { for (j = 0; j < MAX_TRNG_RETRIES; j++) { *buf = ccp_read_4(g_ccp_softc, TRNG_OUT_OFFSET); if (*buf != 0) break; } if (j == MAX_TRNG_RETRIES) return (0); buf++; } return (c); } #ifdef DDB void db_ccp_show_hw(struct ccp_softc *sc) { db_printf(" queue mask: 0x%x\n", ccp_read_4(sc, CMD_QUEUE_MASK_OFFSET)); db_printf(" queue prio: 0x%x\n", ccp_read_4(sc, CMD_QUEUE_PRIO_OFFSET)); db_printf(" reqid: 0x%x\n", ccp_read_4(sc, CMD_REQID_CONFIG_OFFSET)); db_printf(" trng output: 0x%x\n", ccp_read_4(sc, TRNG_OUT_OFFSET)); db_printf(" cmd timeout: 0x%x\n", ccp_read_4(sc, CMD_CMD_TIMEOUT_OFFSET)); db_printf(" lsb public mask lo: 0x%x\n", ccp_read_4(sc, LSB_PUBLIC_MASK_LO_OFFSET)); db_printf(" lsb public mask hi: 0x%x\n", ccp_read_4(sc, LSB_PUBLIC_MASK_HI_OFFSET)); db_printf(" lsb private mask lo: 0x%x\n", ccp_read_4(sc, LSB_PRIVATE_MASK_LO_OFFSET)); db_printf(" lsb private mask hi: 0x%x\n", ccp_read_4(sc, LSB_PRIVATE_MASK_HI_OFFSET)); db_printf(" version: 0x%x\n", ccp_read_4(sc, VERSION_REG)); } void db_ccp_show_queue_hw(struct ccp_queue *qp) { const struct ccp_error_code *ec; struct ccp_softc *sc; uint32_t status, error, esource, faultblock, headlo, qcontrol; unsigned q, i; sc = qp->cq_softc; q = qp->cq_qindex; qcontrol = ccp_read_queue_4(sc, q, CMD_Q_CONTROL_BASE); db_printf(" qcontrol: 0x%x%s%s\n", qcontrol, (qcontrol & CMD_Q_RUN) ? " RUN" : "", (qcontrol & CMD_Q_HALTED) ? " HALTED" : ""); db_printf(" tail_lo: 0x%x\n", ccp_read_queue_4(sc, q, CMD_Q_TAIL_LO_BASE)); headlo = ccp_read_queue_4(sc, q, CMD_Q_HEAD_LO_BASE); db_printf(" head_lo: 0x%x\n", headlo); db_printf(" int enable: 0x%x\n", ccp_read_queue_4(sc, q, CMD_Q_INT_ENABLE_BASE)); db_printf(" interrupt status: 0x%x\n", ccp_read_queue_4(sc, q, CMD_Q_INTERRUPT_STATUS_BASE)); status = ccp_read_queue_4(sc, q, CMD_Q_STATUS_BASE); db_printf(" status: 0x%x\n", status); db_printf(" int stats: 0x%x\n", ccp_read_queue_4(sc, q, CMD_Q_INT_STATUS_BASE)); error = status & STATUS_ERROR_MASK; if (error == 0) return; esource = (status >> STATUS_ERRORSOURCE_SHIFT) & STATUS_ERRORSOURCE_MASK; faultblock = (status >> STATUS_VLSB_FAULTBLOCK_SHIFT) & STATUS_VLSB_FAULTBLOCK_MASK; ec = NULL; for (i = 0; i < nitems(ccp_error_codes); i++) if (ccp_error_codes[i].ce_code == error) break; if (i < nitems(ccp_error_codes)) ec = &ccp_error_codes[i]; db_printf(" Error: %s (%u) Source: %u Faulting LSB block: %u\n", (ec != NULL) ? 
ec->ce_name : "(reserved)", error, esource, faultblock); if (ec != NULL) db_printf(" Error description: %s\n", ec->ce_desc); i = (headlo - (uint32_t)qp->desc_ring_bus_addr) / Q_DESC_SIZE; db_printf(" Bad descriptor idx: %u contents:\n %32D\n", i, (void *)&qp->desc_ring[i], " "); } #endif Index: head/sys/crypto/via/padlock_cipher.c =================================================================== --- head/sys/crypto/via/padlock_cipher.c (revision 360135) +++ head/sys/crypto/via/padlock_cipher.c (revision 360136) @@ -1,247 +1,241 @@ /*- * Copyright (c) 2005-2006 Pawel Jakub Dawidek * Copyright (c) 2004 Mark R V Murray * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $OpenBSD: via.c,v 1.3 2004/06/15 23:36:55 deraadt Exp $ */ /*- * Copyright (c) 2003 Jason Wright * Copyright (c) 2003, 2004 Theo de Raadt * All rights reserved. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #define PADLOCK_ROUND_COUNT_AES128 10 #define PADLOCK_ROUND_COUNT_AES192 12 #define PADLOCK_ROUND_COUNT_AES256 14 #define PADLOCK_ALGORITHM_TYPE_AES 0 #define PADLOCK_KEY_GENERATION_HW 0 #define PADLOCK_KEY_GENERATION_SW 1 #define PADLOCK_DIRECTION_ENCRYPT 0 #define PADLOCK_DIRECTION_DECRYPT 1 #define PADLOCK_KEY_SIZE_128 0 #define PADLOCK_KEY_SIZE_192 1 #define PADLOCK_KEY_SIZE_256 2 MALLOC_DECLARE(M_PADLOCK); static __inline void padlock_cbc(void *in, void *out, size_t count, void *key, union padlock_cw *cw, void *iv) { #ifdef __GNUCLIKE_ASM /* The .byte line is really VIA C3 "xcrypt-cbc" instruction */ __asm __volatile( "pushf \n\t" "popf \n\t" "rep \n\t" ".byte 0x0f, 0xa7, 0xd0" : "+a" (iv), "+c" (count), "+D" (out), "+S" (in) : "b" (key), "d" (cw) : "cc", "memory" ); #endif } static void padlock_cipher_key_setup(struct padlock_session *ses, const void *key, int klen) { union padlock_cw *cw; int i; cw = &ses->ses_cw; if (cw->cw_key_generation == PADLOCK_KEY_GENERATION_SW) { /* Build expanded keys for both directions */ rijndaelKeySetupEnc(ses->ses_ekey, key, klen * 8); rijndaelKeySetupDec(ses->ses_dkey, key, klen * 8); for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) { ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]); ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]); } } else { bcopy(key, ses->ses_ekey, klen); bcopy(key, ses->ses_dkey, klen); } } int padlock_cipher_setup(struct padlock_session *ses, const struct crypto_session_params *csp) { union padlock_cw *cw; if (csp->csp_cipher_klen != 16 && csp->csp_cipher_klen != 24 && csp->csp_cipher_klen != 32) { return (EINVAL); } cw = &ses->ses_cw; bzero(cw, sizeof(*cw)); cw->cw_algorithm_type = PADLOCK_ALGORITHM_TYPE_AES; cw->cw_key_generation = PADLOCK_KEY_GENERATION_SW; cw->cw_intermediate = 0; switch (csp->csp_cipher_klen * 8) { case 128: cw->cw_round_count = PADLOCK_ROUND_COUNT_AES128; cw->cw_key_size = PADLOCK_KEY_SIZE_128; #ifdef HW_KEY_GENERATION /* This doesn't buy us much, that's why it is commented out. */ cw->cw_key_generation = PADLOCK_KEY_GENERATION_HW; #endif break; case 192: cw->cw_round_count = PADLOCK_ROUND_COUNT_AES192; cw->cw_key_size = PADLOCK_KEY_SIZE_192; break; case 256: cw->cw_round_count = PADLOCK_ROUND_COUNT_AES256; cw->cw_key_size = PADLOCK_KEY_SIZE_256; break; } if (csp->csp_cipher_key != NULL) { padlock_cipher_key_setup(ses, csp->csp_cipher_key, csp->csp_cipher_klen); } return (0); } /* * Function checks if the given buffer is already 16 bytes aligned. * If it is there is no need to allocate new buffer. * If it isn't, new buffer is allocated. */ static u_char * padlock_cipher_alloc(struct cryptop *crp, int *allocated) { u_char *addr; addr = crypto_contiguous_subsegment(crp, crp->crp_payload_start, crp->crp_payload_length); if (((uintptr_t)addr & 0xf) == 0) { /* 16 bytes aligned? */ *allocated = 0; return (addr); } *allocated = 1; addr = malloc(crp->crp_payload_length + 16, M_PADLOCK, M_NOWAIT); return (addr); } int padlock_cipher_process(struct padlock_session *ses, struct cryptop *crp, const struct crypto_session_params *csp) { union padlock_cw *cw; struct thread *td; u_char *buf, *abuf; uint32_t *key; uint8_t iv[AES_BLOCK_LEN] __aligned(16); int allocated; buf = padlock_cipher_alloc(crp, &allocated); if (buf == NULL) return (ENOMEM); /* Buffer has to be 16 bytes aligned.
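* PADLOCK_ALIGN() rounds the (possibly malloc'ed) pointer up to the next
* 16-byte boundary, which is why padlock_cipher_alloc() over-allocates by
* 16 bytes whenever the payload is not already aligned.
*
* The hunk just below replaces the driver's three-way IV handling with a
* single call to the crypto_read_iv() helper. A minimal sketch of that
* helper, assuming the canonical definition in sys/opencrypto (the exact
* body may differ):
*
*	void
*	crypto_read_iv(struct cryptop *crp, void *iv)
*	{
*		const struct crypto_session_params *csp;
*
*		csp = crypto_get_params(crp->crp_session);
*		if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
*			memcpy(iv, crp->crp_iv, csp->csp_ivlen);
*		else
*			crypto_copydata(crp, crp->crp_iv_start,
*			    csp->csp_ivlen, iv);
*	}
*
* How generated IVs are handled is not shown here; the removal of the
* CRYPTO_F_IV_GENERATE branches suggests that case is covered elsewhere
* in the framework rather than in each driver.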
*/ abuf = PADLOCK_ALIGN(buf); if (crp->crp_cipher_key != NULL) { padlock_cipher_key_setup(ses, crp->crp_cipher_key, csp->csp_cipher_klen); } cw = &ses->ses_cw; cw->cw_filler0 = 0; cw->cw_filler1 = 0; cw->cw_filler2 = 0; cw->cw_filler3 = 0; - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(iv, AES_BLOCK_LEN, 0); - crypto_copyback(crp, crp->crp_iv_start, AES_BLOCK_LEN, iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(iv, crp->crp_iv, AES_BLOCK_LEN); - else - crypto_copydata(crp, crp->crp_iv_start, AES_BLOCK_LEN, iv); + crypto_read_iv(crp, iv); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { cw->cw_direction = PADLOCK_DIRECTION_ENCRYPT; key = ses->ses_ekey; } else { cw->cw_direction = PADLOCK_DIRECTION_DECRYPT; key = ses->ses_dkey; } if (allocated) { crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, abuf); } td = curthread; fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR); padlock_cbc(abuf, abuf, crp->crp_payload_length / AES_BLOCK_LEN, key, cw, iv); fpu_kern_leave(td, ses->ses_fpu_ctx); if (allocated) { crypto_copyback(crp, crp->crp_payload_start, crp->crp_payload_length, abuf); explicit_bzero(buf, crp->crp_payload_length + 16); free(buf, M_PADLOCK); } return (0); } Index: head/sys/dev/cesa/cesa.c =================================================================== --- head/sys/dev/cesa/cesa.c (revision 360135) +++ head/sys/dev/cesa/cesa.c (revision 360136) @@ -1,1842 +1,1833 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2009-2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * CESA SRAM Memory Map: * * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE * | | * | DATA | * | | * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0) * | struct cesa_sa_data | * +------------------------+ * | struct cesa_sa_hdesc | * +------------------------+ <= sc->sc_sram_base_va */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #include "cesa.h" static int cesa_probe(device_t); static int cesa_attach(device_t); static int cesa_attach_late(device_t); static int cesa_detach(device_t); static void cesa_intr(void *); static int cesa_probesession(device_t, const struct crypto_session_params *); static int cesa_newsession(device_t, crypto_session_t, const struct crypto_session_params *); static int cesa_process(device_t, struct cryptop *, int); static struct resource_spec cesa_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static device_method_t cesa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cesa_probe), DEVMETHOD(device_attach, cesa_attach), DEVMETHOD(device_detach, cesa_detach), /* Crypto device methods */ DEVMETHOD(cryptodev_probesession, cesa_probesession), DEVMETHOD(cryptodev_newsession, cesa_newsession), DEVMETHOD(cryptodev_process, cesa_process), DEVMETHOD_END }; static driver_t cesa_driver = { "cesa", cesa_methods, sizeof (struct cesa_softc) }; static devclass_t cesa_devclass; DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0); MODULE_DEPEND(cesa, crypto, 1, 1, 1); static void cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd) { #ifdef DEBUG device_t dev; dev = sc->sc_dev; device_printf(dev, "CESA SA Hardware Descriptor:\n"); device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config); device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src); device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst); device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen); device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key); device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv); device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf); device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src); device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst); device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen); device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen); device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in); device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out); #endif } static void cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct cesa_dma_mem *cdm; if (error) return; KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1.")); cdm = arg; cdm->cdm_paddr = segs->ds_addr; } static int cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm, bus_size_t size) { int error; KASSERT(cdm->cdm_vaddr == NULL, ("%s(): DMA memory descriptor in use.", __func__)); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, 1, /* maxsize, nsegments */ size, 0, /* 
maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &cdm->cdm_tag); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); goto err1; } error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map); if (error) { device_printf(sc->sc_dev, "failed to allocate DMA safe" " memory, error %i!\n", error); goto err2; } error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr, size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i\n", error); goto err3; } return (0); err3: bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); err2: bus_dma_tag_destroy(cdm->cdm_tag); err1: cdm->cdm_vaddr = NULL; return (error); } static void cesa_free_dma_mem(struct cesa_dma_mem *cdm) { bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map); bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); bus_dma_tag_destroy(cdm->cdm_tag); cdm->cdm_vaddr = NULL; } static void cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op) { /* Sync only if dma memory is valid */ if (cdm->cdm_vaddr != NULL) bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op); } static void cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op) { cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op); cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op); cesa_sync_dma_mem(&sc->sc_requests_cdm, op); } static struct cesa_request * cesa_alloc_request(struct cesa_softc *sc) { struct cesa_request *cr; CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests); if (!cr) return (NULL); STAILQ_INIT(&cr->cr_tdesc); STAILQ_INIT(&cr->cr_sdesc); return (cr); } static void cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr) { /* Free TDMA descriptors assigned to this request */ CESA_LOCK(sc, tdesc); STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc); CESA_UNLOCK(sc, tdesc); /* Free SA descriptors assigned to this request */ CESA_LOCK(sc, sdesc); STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc); CESA_UNLOCK(sc, sdesc); /* Unload DMA memory associated with request */ if (cr->cr_dmap_loaded) { bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap); cr->cr_dmap_loaded = 0; } CESA_GENERIC_FREE_LOCKED(sc, cr, requests); } static void cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr) { CESA_LOCK(sc, requests); STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq); CESA_UNLOCK(sc, requests); } static struct cesa_tdma_desc * cesa_alloc_tdesc(struct cesa_softc *sc) { struct cesa_tdma_desc *ctd; CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc); if (!ctd) device_printf(sc->sc_dev, "TDMA descriptors pool exhaused. " "Consider increasing CESA_TDMA_DESCRIPTORS.\n"); return (ctd); } static struct cesa_sa_desc * cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr) { struct cesa_sa_desc *csd; CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc); if (!csd) { device_printf(sc->sc_dev, "SA descriptors pool exhaused. 
" "Consider increasing CESA_SA_DESCRIPTORS.\n"); return (NULL); } STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq); /* Fill-in SA descriptor with default values */ csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key); csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv); csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv); csd->csd_cshd->cshd_enc_src = 0; csd->csd_cshd->cshd_enc_dst = 0; csd->csd_cshd->cshd_enc_dlen = 0; csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash); csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in); csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out); csd->csd_cshd->cshd_mac_src = 0; csd->csd_cshd->cshd_mac_dlen = 0; return (csd); } static struct cesa_tdma_desc * cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src, bus_size_t size) { struct cesa_tdma_desc *ctd; ctd = cesa_alloc_tdesc(sc); if (!ctd) return (NULL); ctd->ctd_cthd->cthd_dst = dst; ctd->ctd_cthd->cthd_src = src; ctd->ctd_cthd->cthd_byte_count = size; /* Handle special control packet */ if (size != 0) ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED; else ctd->ctd_cthd->cthd_flags = 0; return (ctd); } static struct cesa_tdma_desc * cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr) { return (cesa_tdma_copy(sc, sc->sc_sram_base_pa + sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr, sizeof(struct cesa_sa_data))); } static struct cesa_tdma_desc * cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr) { return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa + sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data))); } static struct cesa_tdma_desc * cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd) { return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr, sizeof(struct cesa_sa_hdesc))); } static void cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd) { struct cesa_tdma_desc *ctd_prev; if (!STAILQ_EMPTY(&cr->cr_tdesc)) { ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq); ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; } ctd->ctd_cthd->cthd_next = 0; STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq); } static int cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr, struct cesa_packet *cp, struct cesa_sa_desc *csd) { struct cesa_tdma_desc *ctd, *tmp; /* Copy SA descriptor for this packet */ ctd = cesa_tdma_copy_sdesc(sc, csd); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); /* Copy data to be processed */ STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp) cesa_append_tdesc(cr, ctd); STAILQ_INIT(&cp->cp_copyin); /* Insert control descriptor */ ctd = cesa_tdma_copy(sc, 0, 0, 0); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); /* Copy back results */ STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp) cesa_append_tdesc(cr, ctd); STAILQ_INIT(&cp->cp_copyout); return (0); } static void cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen) { union authctx auth_ctx; uint32_t *hout; uint32_t *hin; int i; hin = (uint32_t *)cs->cs_hiv_in; hout = (uint32_t *)cs->cs_hiv_out; switch (alg) { case CRYPTO_MD5_HMAC: hmac_init_ipad(&auth_hash_hmac_md5, mkey, mklen, &auth_ctx); memcpy(hin, auth_ctx.md5ctx.state, sizeof(auth_ctx.md5ctx.state)); hmac_init_opad(&auth_hash_hmac_md5, mkey, mklen, &auth_ctx); memcpy(hout, auth_ctx.md5ctx.state, sizeof(auth_ctx.md5ctx.state)); break; case CRYPTO_SHA1_HMAC: hmac_init_ipad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx); memcpy(hin, auth_ctx.sha1ctx.h.b32, sizeof(auth_ctx.sha1ctx.h.b32)); 
hmac_init_opad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx); memcpy(hout, auth_ctx.sha1ctx.h.b32, sizeof(auth_ctx.sha1ctx.h.b32)); break; case CRYPTO_SHA2_256_HMAC: hmac_init_ipad(&auth_hash_hmac_sha2_256, mkey, mklen, &auth_ctx); memcpy(hin, auth_ctx.sha256ctx.state, sizeof(auth_ctx.sha256ctx.state)); hmac_init_opad(&auth_hash_hmac_sha2_256, mkey, mklen, &auth_ctx); memcpy(hout, auth_ctx.sha256ctx.state, sizeof(auth_ctx.sha256ctx.state)); break; default: panic("shouldn't get here"); } for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) { hin[i] = htobe32(hin[i]); hout[i] = htobe32(hout[i]); } } static int cesa_prep_aes_key(struct cesa_session *cs, const struct crypto_session_params *csp) { uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; uint32_t *dkey; int i; rijndaelKeySetupEnc(ek, cs->cs_key, csp->csp_cipher_klen * 8); cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK; dkey = (uint32_t *)cs->cs_aes_dkey; switch (csp->csp_cipher_klen) { case 16: cs->cs_config |= CESA_CSH_AES_KLEN_128; for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 10 + i]); break; case 24: cs->cs_config |= CESA_CSH_AES_KLEN_192; for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 12 + i]); for (i = 0; i < 2; i++) *dkey++ = htobe32(ek[4 * 11 + 2 + i]); break; case 32: cs->cs_config |= CESA_CSH_AES_KLEN_256; for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 14 + i]); for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 13 + i]); break; default: return (EINVAL); } return (0); } static void cesa_start_packet(struct cesa_packet *cp, unsigned int size) { cp->cp_size = size; cp->cp_offset = 0; STAILQ_INIT(&cp->cp_copyin); STAILQ_INIT(&cp->cp_copyout); } static int cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp, bus_dma_segment_t *seg) { struct cesa_tdma_desc *ctd; unsigned int bsize; /* Calculate size of block copy */ bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset); if (bsize > 0) { ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa + CESA_DATA(cp->cp_offset), seg->ds_addr, bsize); if (!ctd) return (-ENOMEM); STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq); ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa + CESA_DATA(cp->cp_offset), bsize); if (!ctd) return (-ENOMEM); STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq); seg->ds_len -= bsize; seg->ds_addr += bsize; cp->cp_offset += bsize; } return (bsize); } static void cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned int mpsize, fragmented; unsigned int mlen, mskip, tmlen; struct cesa_chain_info *cci; unsigned int elen, eskip; unsigned int skip, len; struct cesa_sa_desc *csd; struct cesa_request *cr; struct cryptop *crp; struct cesa_softc *sc; struct cesa_packet cp; bus_dma_segment_t seg; uint32_t config; int size; cci = arg; sc = cci->cci_sc; cr = cci->cci_cr; crp = cr->cr_crp; if (error) { cci->cci_error = error; return; } /* * Only do a combined op if the AAD is adjacent to the payload * and the AAD length is a multiple of the IV length. The * checks against 'config' are to avoid recursing when the * logic below invokes separate operations. */ config = cci->cci_config; if (((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC || (config & CESA_CSHD_OP_MASK) == CESA_CSHD_ENC_AND_MAC) && crp->crp_aad_length != 0 && (crp->crp_aad_length & (cr->cr_cs->cs_ivlen - 1)) != 0) { /* * Data alignment in the request does not meet CESA requiremnts * for combined encryption/decryption and hashing. We have to * split the request to separate operations and process them * one by one. 
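* For example, with AES-CBC (16-byte IV) and a 13-byte AAD the
* "multiple of the IV length" test above fails, so instead of a single
* combined MAC_AND_ENC/ENC_AND_MAC descriptor the callback re-enters
* itself twice, once with a MAC-only configuration and once with an
* ENC-only one, in the order required by the original operation.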
*/ if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) { config &= ~CESA_CSHD_OP_MASK; cci->cci_config = config | CESA_CSHD_MAC; cesa_create_chain_cb(cci, segs, nseg, 0); cci->cci_config = config | CESA_CSHD_ENC; cesa_create_chain_cb(cci, segs, nseg, 0); } else { config &= ~CESA_CSHD_OP_MASK; cci->cci_config = config | CESA_CSHD_ENC; cesa_create_chain_cb(cci, segs, nseg, 0); cci->cci_config = config | CESA_CSHD_MAC; cesa_create_chain_cb(cci, segs, nseg, 0); } return; } mskip = mlen = eskip = elen = 0; if (crp->crp_aad_length == 0) { skip = crp->crp_payload_start; len = crp->crp_payload_length; switch (config & CESA_CSHD_OP_MASK) { case CESA_CSHD_ENC: eskip = skip; elen = len; break; case CESA_CSHD_MAC: mskip = skip; mlen = len; break; default: eskip = skip; elen = len; mskip = skip; mlen = len; break; } } else { /* * For an encryption-only separate request, only * process the payload. For combined requests and * hash-only requests, process the entire region. */ switch (config & CESA_CSHD_OP_MASK) { case CESA_CSHD_ENC: skip = crp->crp_payload_start; len = crp->crp_payload_length; eskip = skip; elen = len; break; case CESA_CSHD_MAC: skip = crp->crp_aad_start; len = crp->crp_aad_length + crp->crp_payload_length; mskip = skip; mlen = len; break; default: skip = crp->crp_aad_start; len = crp->crp_aad_length + crp->crp_payload_length; mskip = skip; mlen = len; eskip = crp->crp_payload_start; elen = crp->crp_payload_length; break; } } tmlen = mlen; fragmented = 0; mpsize = CESA_MAX_PACKET_SIZE; mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1)); /* Start first packet in chain */ cesa_start_packet(&cp, MIN(mpsize, len)); while (nseg-- && len > 0) { seg = *(segs++); /* * Skip data in buffer on which neither ENC nor MAC operation * is requested. */ if (skip > 0) { size = MIN(skip, seg.ds_len); skip -= size; seg.ds_addr += size; seg.ds_len -= size; if (eskip > 0) eskip -= size; if (mskip > 0) mskip -= size; if (seg.ds_len == 0) continue; } while (1) { /* * Fill in current packet with data. Break if there is * no more data in current DMA segment or an error * occurred. */ size = cesa_fill_packet(sc, &cp, &seg); if (size <= 0) { error = -size; break; } len -= size; /* If packet is full, append it to the chain */ if (cp.cp_size == cp.cp_offset) { csd = cesa_alloc_sdesc(sc, cr); if (!csd) { error = ENOMEM; break; } /* Create SA descriptor for this packet */ csd->csd_cshd->cshd_config = cci->cci_config; csd->csd_cshd->cshd_mac_total_dlen = tmlen; /* * Enable fragmentation if request will not fit * into one packet. 
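* Each SRAM-sized packet of a fragmented request is tagged below with
* CESA_CSHD_FRAG_FIRST, CESA_CSHD_FRAG_MIDDLE or CESA_CSHD_FRAG_LAST.
* Because mpsize was clamped above to a multiple of both the IV length
* and the MAC block length, fragment boundaries never split a cipher
* block or a hash block.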
*/ if (len > 0) { if (!fragmented) { fragmented = 1; csd->csd_cshd->cshd_config |= CESA_CSHD_FRAG_FIRST; } else csd->csd_cshd->cshd_config |= CESA_CSHD_FRAG_MIDDLE; } else if (fragmented) csd->csd_cshd->cshd_config |= CESA_CSHD_FRAG_LAST; if (eskip < cp.cp_size && elen > 0) { csd->csd_cshd->cshd_enc_src = CESA_DATA(eskip); csd->csd_cshd->cshd_enc_dst = CESA_DATA(eskip); csd->csd_cshd->cshd_enc_dlen = MIN(elen, cp.cp_size - eskip); } if (mskip < cp.cp_size && mlen > 0) { csd->csd_cshd->cshd_mac_src = CESA_DATA(mskip); csd->csd_cshd->cshd_mac_dlen = MIN(mlen, cp.cp_size - mskip); } elen -= csd->csd_cshd->cshd_enc_dlen; eskip -= MIN(eskip, cp.cp_size); mlen -= csd->csd_cshd->cshd_mac_dlen; mskip -= MIN(mskip, cp.cp_size); cesa_dump_cshd(sc, csd->csd_cshd); /* Append packet to the request */ error = cesa_append_packet(sc, cr, &cp, csd); if (error) break; /* Start a new packet, as current is full */ cesa_start_packet(&cp, MIN(mpsize, len)); } } if (error) break; } if (error) { /* * Move all allocated resources to the request. They will be * freed later. */ STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin); STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout); cci->cci_error = error; } } static int cesa_create_chain(struct cesa_softc *sc, const struct crypto_session_params *csp, struct cesa_request *cr) { struct cesa_chain_info cci; struct cesa_tdma_desc *ctd; uint32_t config; int error; error = 0; CESA_LOCK_ASSERT(sc, sessions); /* Create request metadata */ if (csp->csp_cipher_klen != 0) { if (csp->csp_cipher_alg == CRYPTO_AES_CBC && !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op)) memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey, csp->csp_cipher_klen); else memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key, csp->csp_cipher_klen); } if (csp->csp_auth_klen != 0) { memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in, CESA_MAX_HASH_LEN); memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out, CESA_MAX_HASH_LEN); } ctd = cesa_tdma_copyin_sa_data(sc, cr); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); /* Prepare SA configuration */ config = cr->cr_cs->cs_config; if (csp->csp_cipher_alg != 0 && !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op)) config |= CESA_CSHD_DECRYPT; switch (csp->csp_mode) { case CSP_MODE_CIPHER: config |= CESA_CSHD_ENC; break; case CSP_MODE_DIGEST: config |= CESA_CSHD_MAC; break; case CSP_MODE_ETA: config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC : CESA_CSHD_ENC_AND_MAC; break; } /* Create data packets */ cci.cci_sc = sc; cci.cci_cr = cr; cci.cci_config = config; cci.cci_error = 0; error = bus_dmamap_load_crp(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp, cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT); if (!error) cr->cr_dmap_loaded = 1; if (cci.cci_error) error = cci.cci_error; if (error) return (error); /* Read back request metadata */ ctd = cesa_tdma_copyout_sa_data(sc, cr); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); return (0); } static void cesa_execute(struct cesa_softc *sc) { struct cesa_tdma_desc *prev_ctd, *ctd; struct cesa_request *prev_cr, *cr; CESA_LOCK(sc, requests); /* * If ready list is empty, there is nothing to execute. If queued list * is not empty, the hardware is busy and we cannot start another * execution. 
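* Requests moved to the queued list are collected again in cesa_intr()
* once the engine signals completion, and cesa_intr() then calls
* cesa_execute() again so that the next batch of ready requests is
* started.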
*/ if (STAILQ_EMPTY(&sc->sc_ready_requests) || !STAILQ_EMPTY(&sc->sc_queued_requests)) { CESA_UNLOCK(sc, requests); return; } /* Move all ready requests to queued list */ STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests); STAILQ_INIT(&sc->sc_ready_requests); /* Create one execution chain from all requests on the list */ if (STAILQ_FIRST(&sc->sc_queued_requests) != STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) { prev_cr = NULL; cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) { if (prev_cr) { ctd = STAILQ_FIRST(&cr->cr_tdesc); prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc, cesa_tdma_desc, ctd_stq); prev_ctd->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; } prev_cr = cr; } cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* Start chain execution in hardware */ cr = STAILQ_FIRST(&sc->sc_queued_requests); ctd = STAILQ_FIRST(&cr->cr_tdesc); CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr); if (sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810) CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2); else CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE); CESA_UNLOCK(sc, requests); } static int cesa_setup_sram(struct cesa_softc *sc) { phandle_t sram_node; ihandle_t sram_ihandle; pcell_t sram_handle, sram_reg[2]; void *sram_va; int rv; rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle", (void *)&sram_handle, sizeof(sram_handle)); if (rv <= 0) return (rv); sram_ihandle = (ihandle_t)sram_handle; sram_node = OF_instance_to_package(sram_ihandle); rv = OF_getencprop(sram_node, "reg", (void *)sram_reg, sizeof(sram_reg)); if (rv <= 0) return (rv); sc->sc_sram_base_pa = sram_reg[0]; /* Store SRAM size to be able to unmap in detach() */ sc->sc_sram_size = sram_reg[1]; if (sc->sc_soc_id != MV_DEV_88F6828 && sc->sc_soc_id != MV_DEV_88F6820 && sc->sc_soc_id != MV_DEV_88F6810) return (0); /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); if (sram_va == NULL) return (ENOMEM); sc->sc_sram_base_va = (vm_offset_t)sram_va; return (0); } /* * Function: device_from_node * This function returns appropriate device_t to phandle_t * Parameters: * root - device where you want to start search * if you provide NULL here, function will take * "root0" device as root. * node - we are checking every device_t to be * appropriate with this. 
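* The search is depth-first: every child of "root" is compared against
* "node" and, if it does not match, its own children are searched
* recursively. NULL is returned when no device in the tree owns the
* node.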
*/ static device_t device_from_node(device_t root, phandle_t node) { device_t *children, retval; int nkid, i; /* Nothing matches no node */ if (node == -1) return (NULL); if (root == NULL) /* Get root of device tree */ if ((root = device_lookup_by_name("root0")) == NULL) return (NULL); if (device_get_children(root, &children, &nkid) != 0) return (NULL); retval = NULL; for (i = 0; i < nkid; i++) { /* Check if device and node matches */ if (OFW_BUS_GET_NODE(root, children[i]) == node) { retval = children[i]; break; } /* or go deeper */ if ((retval = device_from_node(children[i], node)) != NULL) break; } free(children, M_TEMP); return (retval); } static int cesa_setup_sram_armada(struct cesa_softc *sc) { phandle_t sram_node; ihandle_t sram_ihandle; pcell_t sram_handle[2]; void *sram_va; int rv, j; struct resource_list rl; struct resource_list_entry *rle; struct simplebus_softc *ssc; device_t sdev; /* Get refs to SRAMS from CESA node */ rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "marvell,crypto-srams", (void *)sram_handle, sizeof(sram_handle)); if (rv <= 0) return (rv); if (sc->sc_cesa_engine_id >= 2) return (ENXIO); /* Get SRAM node on the basis of sc_cesa_engine_id */ sram_ihandle = (ihandle_t)sram_handle[sc->sc_cesa_engine_id]; sram_node = OF_instance_to_package(sram_ihandle); /* Get device_t of simplebus (sram_node parent) */ sdev = device_from_node(NULL, OF_parent(sram_node)); if (!sdev) return (ENXIO); ssc = device_get_softc(sdev); resource_list_init(&rl); /* Parse reg property to resource list */ ofw_bus_reg_to_rl(sdev, sram_node, ssc->acells, ssc->scells, &rl); /* We expect only one resource */ rle = resource_list_find(&rl, SYS_RES_MEMORY, 0); if (rle == NULL) return (ENXIO); /* Remap through ranges property */ for (j = 0; j < ssc->nranges; j++) { if (rle->start >= ssc->ranges[j].bus && rle->end < ssc->ranges[j].bus + ssc->ranges[j].size) { rle->start -= ssc->ranges[j].bus; rle->start += ssc->ranges[j].host; rle->end -= ssc->ranges[j].bus; rle->end += ssc->ranges[j].host; } } sc->sc_sram_base_pa = rle->start; sc->sc_sram_size = rle->count; /* SRAM memory was not mapped in platform_sram_devmap(), map it now */ sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size); if (sram_va == NULL) return (ENOMEM); sc->sc_sram_base_va = (vm_offset_t)sram_va; return (0); } struct ofw_compat_data cesa_devices[] = { { "mrvl,cesa", (uintptr_t)true }, { "marvell,armada-38x-crypto", (uintptr_t)true }, { NULL, 0 } }; static int cesa_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, cesa_devices)->ocd_data) return (ENXIO); device_set_desc(dev, "Marvell Cryptographic Engine and Security " "Accelerator"); return (BUS_PROBE_DEFAULT); } static int cesa_attach(device_t dev) { static int engine_idx = 0; struct simplebus_devinfo *ndi; struct resource_list *rl; struct cesa_softc *sc; if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) return (cesa_attach_late(dev)); /* * Get simplebus_devinfo which contains * resource list filled with adresses and * interrupts read form FDT. * Let's correct it by splitting resources * for each engine. 
*/ if ((ndi = device_get_ivars(dev)) == NULL) return (ENXIO); rl = &ndi->rl; switch (engine_idx) { case 0: /* Update regs values */ resource_list_add(rl, SYS_RES_MEMORY, 0, CESA0_TDMA_ADDR, CESA0_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); resource_list_add(rl, SYS_RES_MEMORY, 1, CESA0_CESA_ADDR, CESA0_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); /* Remove unused interrupt */ resource_list_delete(rl, SYS_RES_IRQ, 1); break; case 1: /* Update regs values */ resource_list_add(rl, SYS_RES_MEMORY, 0, CESA1_TDMA_ADDR, CESA1_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE); resource_list_add(rl, SYS_RES_MEMORY, 1, CESA1_CESA_ADDR, CESA1_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE); /* Remove unused interrupt */ resource_list_delete(rl, SYS_RES_IRQ, 0); resource_list_find(rl, SYS_RES_IRQ, 1)->rid = 0; break; default: device_printf(dev, "Bad cesa engine_idx\n"); return (ENXIO); } sc = device_get_softc(dev); sc->sc_cesa_engine_id = engine_idx; /* * Call simplebus_add_device only once. * It will create second cesa driver instance * with the same FDT node as first instance. * When second driver reach this function, * it will be configured to use second cesa engine */ if (engine_idx == 0) simplebus_add_device(device_get_parent(dev), ofw_bus_get_node(dev), 0, "cesa", 1, NULL); engine_idx++; return (cesa_attach_late(dev)); } static int cesa_attach_late(device_t dev) { struct cesa_softc *sc; uint32_t d, r, val; int error; int i; sc = device_get_softc(dev); sc->sc_blocked = 0; sc->sc_error = 0; sc->sc_dev = dev; soc_id(&d, &r); switch (d) { case MV_DEV_88F6281: case MV_DEV_88F6282: /* Check if CESA peripheral device has power turned on */ if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) == CPU_PM_CTRL_CRYPTO) { device_printf(dev, "not powered on\n"); return (ENXIO); } sc->sc_tperr = 0; break; case MV_DEV_88F6828: case MV_DEV_88F6820: case MV_DEV_88F6810: sc->sc_tperr = 0; break; case MV_DEV_MV78100: case MV_DEV_MV78100_Z0: /* Check if CESA peripheral device has power turned on */ if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) != CPU_PM_CTRL_CRYPTO) { device_printf(dev, "not powered on\n"); return (ENXIO); } sc->sc_tperr = CESA_ICR_TPERR; break; default: return (ENXIO); } sc->sc_soc_id = d; /* Initialize mutexes */ mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev), "CESA Shared Data", MTX_DEF); mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev), "CESA TDMA Descriptors Pool", MTX_DEF); mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev), "CESA SA Descriptors Pool", MTX_DEF); mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev), "CESA Requests Pool", MTX_DEF); mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), "CESA Sessions Pool", MTX_DEF); /* Allocate I/O and IRQ resources */ error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res); if (error) { device_printf(dev, "could not allocate resources\n"); goto err0; } /* Acquire SRAM base address */ if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto")) error = cesa_setup_sram(sc); else error = cesa_setup_sram_armada(sc); if (error) { device_printf(dev, "could not setup SRAM\n"); goto err1; } /* Setup interrupt handler */ error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET | INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie)); if (error) { device_printf(dev, "could not setup engine completion irq\n"); goto err2; } /* Create DMA tag for processed data */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ 
NULL, NULL, /* filtfunc, filtfuncarg */ CESA_MAX_REQUEST_SIZE, /* maxsize */ CESA_MAX_FRAGMENTS, /* nsegments */ CESA_MAX_REQUEST_SIZE, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->sc_data_dtag); /* dmat */ if (error) goto err3; /* Initialize data structures: TDMA Descriptors Pool */ error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm, CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc)); if (error) goto err4; STAILQ_INIT(&sc->sc_free_tdesc); for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) { sc->sc_tdesc[i].ctd_cthd = (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i; sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr + (i * sizeof(struct cesa_tdma_hdesc)); STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i], ctd_stq); } /* Initialize data structures: SA Descriptors Pool */ error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm, CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc)); if (error) goto err5; STAILQ_INIT(&sc->sc_free_sdesc); for (i = 0; i < CESA_SA_DESCRIPTORS; i++) { sc->sc_sdesc[i].csd_cshd = (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i; sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr + (i * sizeof(struct cesa_sa_hdesc)); STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i], csd_stq); } /* Initialize data structures: Requests Pool */ error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm, CESA_REQUESTS * sizeof(struct cesa_sa_data)); if (error) goto err6; STAILQ_INIT(&sc->sc_free_requests); STAILQ_INIT(&sc->sc_ready_requests); STAILQ_INIT(&sc->sc_queued_requests); for (i = 0; i < CESA_REQUESTS; i++) { sc->sc_requests[i].cr_csd = (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i; sc->sc_requests[i].cr_csd_paddr = sc->sc_requests_cdm.cdm_paddr + (i * sizeof(struct cesa_sa_data)); /* Preallocate DMA maps */ error = bus_dmamap_create(sc->sc_data_dtag, 0, &sc->sc_requests[i].cr_dmap); if (error && i > 0) { i--; do { bus_dmamap_destroy(sc->sc_data_dtag, sc->sc_requests[i].cr_dmap); } while (i--); goto err7; } STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i], cr_stq); } /* * Initialize TDMA: * - Burst limit: 128 bytes, * - Outstanding reads enabled, * - No byte-swap. */ val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 | CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE; if (sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810) val |= CESA_TDMA_NUM_OUTSTAND; CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val); /* * Initialize SA: * - SA descriptor is present at beginning of CESA SRAM, * - Multi-packet chain mode, * - Cooperation with TDMA enabled. 
*/ CESA_REG_WRITE(sc, CESA_SA_DPR, 0); CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA | CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE); /* Unmask interrupts */ CESA_REG_WRITE(sc, CESA_ICR, 0); CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr); CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS | CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT | CESA_TDMA_EMR_DATA_ERROR); /* Register in OCF */ sc->sc_cid = crypto_get_driverid(dev, sizeof(struct cesa_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto err8; } return (0); err8: for (i = 0; i < CESA_REQUESTS; i++) bus_dmamap_destroy(sc->sc_data_dtag, sc->sc_requests[i].cr_dmap); err7: cesa_free_dma_mem(&sc->sc_requests_cdm); err6: cesa_free_dma_mem(&sc->sc_sdesc_cdm); err5: cesa_free_dma_mem(&sc->sc_tdesc_cdm); err4: bus_dma_tag_destroy(sc->sc_data_dtag); err3: bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); err2: if (sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810) pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); err1: bus_release_resources(dev, cesa_res_spec, sc->sc_res); err0: mtx_destroy(&sc->sc_sessions_lock); mtx_destroy(&sc->sc_requests_lock); mtx_destroy(&sc->sc_sdesc_lock); mtx_destroy(&sc->sc_tdesc_lock); mtx_destroy(&sc->sc_sc_lock); return (ENXIO); } static int cesa_detach(device_t dev) { struct cesa_softc *sc; int i; sc = device_get_softc(dev); /* TODO: Wait for queued requests completion before shutdown. */ /* Mask interrupts */ CESA_REG_WRITE(sc, CESA_ICM, 0); CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0); /* Unregister from OCF */ crypto_unregister_all(sc->sc_cid); /* Free DMA Maps */ for (i = 0; i < CESA_REQUESTS; i++) bus_dmamap_destroy(sc->sc_data_dtag, sc->sc_requests[i].cr_dmap); /* Free DMA Memory */ cesa_free_dma_mem(&sc->sc_requests_cdm); cesa_free_dma_mem(&sc->sc_sdesc_cdm); cesa_free_dma_mem(&sc->sc_tdesc_cdm); /* Free DMA Tag */ bus_dma_tag_destroy(sc->sc_data_dtag); /* Stop interrupt */ bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie); /* Relase I/O and IRQ resources */ bus_release_resources(dev, cesa_res_spec, sc->sc_res); /* Unmap SRAM memory */ if (sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810) pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size); /* Destroy mutexes */ mtx_destroy(&sc->sc_sessions_lock); mtx_destroy(&sc->sc_requests_lock); mtx_destroy(&sc->sc_sdesc_lock); mtx_destroy(&sc->sc_tdesc_lock); mtx_destroy(&sc->sc_sc_lock); return (0); } static void cesa_intr(void *arg) { STAILQ_HEAD(, cesa_request) requests; struct cesa_request *cr, *tmp; struct cesa_softc *sc; uint32_t ecr, icr; uint8_t hash[HASH_MAX_LEN]; int blocked; sc = arg; /* Ack interrupt */ ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR); CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0); icr = CESA_REG_READ(sc, CESA_ICR); CESA_REG_WRITE(sc, CESA_ICR, 0); /* Check for TDMA errors */ if (ecr & CESA_TDMA_ECR_MISS) { device_printf(sc->sc_dev, "TDMA Miss error detected!\n"); sc->sc_error = EIO; } if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) { device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n"); sc->sc_error = EIO; } if (ecr & CESA_TDMA_ECR_BOTH_HIT) { device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n"); sc->sc_error = EIO; } if (ecr & CESA_TDMA_ECR_DATA_ERROR) { device_printf(sc->sc_dev, "TDMA Data error detected!\n"); sc->sc_error = EIO; } /* Check for CESA errors */ if (icr & 
sc->sc_tperr) { device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n"); sc->sc_error = EIO; } /* If there is nothing more to do, return */ if ((icr & CESA_ICR_ACCTDMA) == 0) return; /* Get all finished requests */ CESA_LOCK(sc, requests); STAILQ_INIT(&requests); STAILQ_CONCAT(&requests, &sc->sc_queued_requests); STAILQ_INIT(&sc->sc_queued_requests); CESA_UNLOCK(sc, requests); /* Execute all ready requests */ cesa_execute(sc); /* Process completed requests */ cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) { bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cr->cr_crp->crp_etype = sc->sc_error; if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) { if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(cr->cr_crp, cr->cr_crp->crp_digest_start, cr->cr_cs->cs_hlen, hash); if (timingsafe_bcmp(hash, cr->cr_csd->csd_hash, cr->cr_cs->cs_hlen) != 0) cr->cr_crp->crp_etype = EBADMSG; } else crypto_copyback(cr->cr_crp, cr->cr_crp->crp_digest_start, cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash); } crypto_done(cr->cr_crp); cesa_free_request(sc, cr); } cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->sc_error = 0; /* Unblock driver if it ran out of resources */ CESA_LOCK(sc, sc); blocked = sc->sc_blocked; sc->sc_blocked = 0; CESA_UNLOCK(sc, sc); if (blocked) crypto_unblock(sc->sc_cid, blocked); } static bool cesa_cipher_supported(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); break; case CRYPTO_DES_CBC: if (csp->csp_ivlen != DES_BLOCK_LEN) return (false); break; case CRYPTO_3DES_CBC: if (csp->csp_ivlen != DES3_BLOCK_LEN) return (false); break; default: return (false); } if (csp->csp_cipher_klen > CESA_MAX_KEY_LEN) return (false); return (true); } static bool cesa_auth_supported(struct cesa_softc *sc, const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA2_256_HMAC: if (!(sc->sc_soc_id == MV_DEV_88F6828 || sc->sc_soc_id == MV_DEV_88F6820 || sc->sc_soc_id == MV_DEV_88F6810)) return (false); /* FALLTHROUGH */ case CRYPTO_MD5: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: break; default: return (false); } if (csp->csp_auth_klen > CESA_MAX_MKEY_LEN) return (false); return (true); } static int cesa_probesession(device_t dev, const struct crypto_session_params *csp) { struct cesa_softc *sc; sc = device_get_softc(dev); if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!cesa_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!cesa_cipher_supported(csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!cesa_auth_supported(sc, csp) || !cesa_cipher_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } static int cesa_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct cesa_session *cs; struct cesa_softc *sc; int error; sc = device_get_softc(dev); error = 0; /* Allocate session */ cs = crypto_get_driver_session(cses); /* Prepare CESA configuration */ cs->cs_config = 0; cs->cs_ivlen = 1; cs->cs_mblen = 1; switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC; cs->cs_ivlen = AES_BLOCK_LEN; break; case CRYPTO_DES_CBC: cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC; 
cs->cs_ivlen = DES_BLOCK_LEN; break; case CRYPTO_3DES_CBC: cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE | CESA_CSHD_CBC; cs->cs_ivlen = DES3_BLOCK_LEN; break; } switch (csp->csp_auth_alg) { case CRYPTO_MD5: cs->cs_mblen = 1; cs->cs_hlen = (csp->csp_auth_mlen == 0) ? MD5_HASH_LEN : csp->csp_auth_mlen; cs->cs_config |= CESA_CSHD_MD5; break; case CRYPTO_MD5_HMAC: cs->cs_mblen = MD5_BLOCK_LEN; cs->cs_hlen = (csp->csp_auth_mlen == 0) ? MD5_HASH_LEN : csp->csp_auth_mlen; cs->cs_config |= CESA_CSHD_MD5_HMAC; if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN) cs->cs_config |= CESA_CSHD_96_BIT_HMAC; break; case CRYPTO_SHA1: cs->cs_mblen = 1; cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN : csp->csp_auth_mlen; cs->cs_config |= CESA_CSHD_SHA1; break; case CRYPTO_SHA1_HMAC: cs->cs_mblen = SHA1_BLOCK_LEN; cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN : csp->csp_auth_mlen; cs->cs_config |= CESA_CSHD_SHA1_HMAC; if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN) cs->cs_config |= CESA_CSHD_96_BIT_HMAC; break; case CRYPTO_SHA2_256_HMAC: cs->cs_mblen = SHA2_256_BLOCK_LEN; cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA2_256_HASH_LEN : csp->csp_auth_mlen; cs->cs_config |= CESA_CSHD_SHA2_256_HMAC; break; } /* Save cipher key */ if (csp->csp_cipher_key != NULL) { memcpy(cs->cs_key, csp->csp_cipher_key, csp->csp_cipher_klen); if (csp->csp_cipher_alg == CRYPTO_AES_CBC) error = cesa_prep_aes_key(cs, csp); } /* Save digest key */ if (csp->csp_auth_key != NULL) cesa_set_mkey(cs, csp->csp_auth_alg, csp->csp_auth_key, csp->csp_auth_klen); return (error); } static int cesa_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct cesa_request *cr; struct cesa_session *cs; struct cesa_softc *sc; int error; sc = device_get_softc(dev); error = 0; cs = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); /* Check and parse input */ if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) { crp->crp_etype = E2BIG; crypto_done(crp); return (0); } /* * For requests with AAD, only requests where the AAD is * immediately adjacent to the payload are supported. */ if (crp->crp_aad_length != 0 && (crp->crp_aad_start + crp->crp_aad_length) != crp->crp_payload_start) { crp->crp_etype = EINVAL; crypto_done(crp); return (0); } /* * Get request descriptor. Block driver if there is no free * descriptors in pool. 
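* When the pool is empty the request is not dropped: the driver returns
* ERESTART after marking sc_blocked with CRYPTO_SYMQ, and cesa_intr()
* later calls crypto_unblock() once descriptors have been freed so that
* the framework re-submits the blocked queue.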
*/ cr = cesa_alloc_request(sc); if (!cr) { CESA_LOCK(sc, sc); sc->sc_blocked = CRYPTO_SYMQ; CESA_UNLOCK(sc, sc); return (ERESTART); } /* Prepare request */ cr->cr_crp = crp; cr->cr_cs = cs; CESA_LOCK(sc, sessions); cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - if (csp->csp_cipher_alg != 0) { - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(cr->cr_csd->csd_iv, csp->csp_ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, - cr->cr_csd->csd_iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(cr->cr_csd->csd_iv, crp->crp_iv, csp->csp_ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, - cr->cr_csd->csd_iv); - } + if (csp->csp_cipher_alg != 0) + crypto_read_iv(crp, cr->cr_csd->csd_iv); if (crp->crp_cipher_key != NULL) { memcpy(cs->cs_key, crp->crp_cipher_key, csp->csp_cipher_klen); if (csp->csp_cipher_alg == CRYPTO_AES_CBC) error = cesa_prep_aes_key(cs, csp); } if (!error && crp->crp_auth_key != NULL) cesa_set_mkey(cs, csp->csp_auth_alg, crp->crp_auth_key, csp->csp_auth_klen); /* Convert request to chain of TDMA and SA descriptors */ if (!error) error = cesa_create_chain(sc, csp, cr); cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CESA_UNLOCK(sc, sessions); if (error) { cesa_free_request(sc, cr); crp->crp_etype = error; crypto_done(crp); return (0); } bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Enqueue request to execution */ cesa_enqueue_request(sc, cr); /* Start execution, if we have no more requests in queue */ if ((hint & CRYPTO_HINT_MORE) == 0) cesa_execute(sc); return (0); } Index: head/sys/dev/cxgbe/crypto/t4_crypto.c =================================================================== --- head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 360135) +++ head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 360136) @@ -1,2814 +1,2790 @@ /*- * Copyright (c) 2017 Chelsio Communications, Inc. * All rights reserved. * Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include "common/common.h" #include "crypto/t4_crypto.h" /* * Requests consist of: * * +-------------------------------+ * | struct fw_crypto_lookaside_wr | * +-------------------------------+ * | struct ulp_txpkt | * +-------------------------------+ * | struct ulptx_idata | * +-------------------------------+ * | struct cpl_tx_sec_pdu | * +-------------------------------+ * | struct cpl_tls_tx_scmd_fmt | * +-------------------------------+ * | key context header | * +-------------------------------+ * | AES key | ----- For requests with AES * +-------------------------------+ * | Hash state | ----- For hash-only requests * +-------------------------------+ - * | IPAD (16-byte aligned) | \ * +-------------------------------+ +---- For requests with HMAC * | OPAD (16-byte aligned) | / * +-------------------------------+ - * | GMAC H | ----- For AES-GCM * +-------------------------------+ - * | struct cpl_rx_phys_dsgl | \ * +-------------------------------+ +---- Destination buffer for * | PHYS_DSGL entries | / non-hash-only requests * +-------------------------------+ - * | 16 dummy bytes | ----- Only for HMAC/hash-only requests * +-------------------------------+ * | IV | ----- If immediate IV * +-------------------------------+ * | Payload | ----- If immediate Payload * +-------------------------------+ - * | struct ulptx_sgl | \ * +-------------------------------+ +---- If payload via SGL * | SGL entries | / * +-------------------------------+ - * * Note that the key context must be padded to ensure 16-byte alignment. * For HMAC requests, the key consists of the partial hash of the IPAD * followed by the partial hash of the OPAD. * * Replies consist of: * * +-------------------------------+ * | struct cpl_fw6_pld | * +-------------------------------+ * | hash digest | ----- For HMAC request with * +-------------------------------+ 'hash_size' set in work request * * A 32-bit big-endian error status word is supplied in the last 4 * bytes of data[0] in the CPL_FW6_PLD message. bit 0 indicates a * "MAC" error and bit 1 indicates a "PAD" error. * * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message * in the request is returned in data[1] of the CPL_FW6_PLD message. * * For block cipher replies, the updated IV is supplied in data[2] and * data[3] of the CPL_FW6_PLD message. * * For hash replies where the work request set 'hash_size' to request * a copy of the hash in the reply, the hash digest is supplied * immediately following the CPL_FW6_PLD message. */ /* * The crypto engine supports a maximum AAD size of 511 bytes. */ #define MAX_AAD_LEN 511 /* * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG * entries. While the CPL includes a 16-bit length field, the T6 can * sometimes hang if an error occurs while processing a request with a * single DSGL entry larger than 2k. */ #define MAX_RX_PHYS_DSGL_SGE 32 #define DSGL_SGE_MAXLEN 2048 /* * The adapter only supports requests with a total input or output * length of 64k-1 or smaller. Longer requests either result in hung * requests or incorrect results. 
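* The handlers below therefore reject any request whose total length
* (payload plus AAD and IV where applicable) exceeds MAX_REQUEST_SIZE
* with EFBIG before a work request is built.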
*/ #define MAX_REQUEST_SIZE 65535 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto"); struct ccr_session_hmac { struct auth_hash *auth_hash; int hash_len; unsigned int partial_digest_len; unsigned int auth_mode; unsigned int mk_size; char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2]; }; struct ccr_session_gmac { int hash_len; char ghash_h[GMAC_BLOCK_LEN]; }; struct ccr_session_ccm_mac { int hash_len; }; struct ccr_session_blkcipher { unsigned int cipher_mode; unsigned int key_len; unsigned int iv_len; __be32 key_ctx_hdr; char enckey[CHCR_AES_MAX_KEY_LEN]; char deckey[CHCR_AES_MAX_KEY_LEN]; }; struct ccr_port { struct sge_wrq *txq; struct sge_rxq *rxq; int tx_channel_id; u_int active_sessions; }; struct ccr_session { bool active; int pending; enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode; struct ccr_port *port; union { struct ccr_session_hmac hmac; struct ccr_session_gmac gmac; struct ccr_session_ccm_mac ccm_mac; }; struct ccr_session_blkcipher blkcipher; }; struct ccr_softc { struct adapter *adapter; device_t dev; uint32_t cid; struct mtx lock; bool detaching; struct ccr_port ports[MAX_NPORTS]; u_int port_mask; /* * Pre-allocate S/G lists used when preparing a work request. * 'sg_crp' contains an sglist describing the entire buffer * for a 'struct cryptop'. 'sg_ulptx' is used to describe * the data the engine should DMA as input via ULPTX_SGL. * 'sg_dsgl' is used to describe the destination that cipher * text and a tag should be written to. */ struct sglist *sg_crp; struct sglist *sg_ulptx; struct sglist *sg_dsgl; /* * Pre-allocate a dummy output buffer for the IV and AAD for * AEAD requests. */ char *iv_aad_buf; struct sglist *sg_iv_aad; /* Statistics. */ uint64_t stats_blkcipher_encrypt; uint64_t stats_blkcipher_decrypt; uint64_t stats_hash; uint64_t stats_hmac; uint64_t stats_eta_encrypt; uint64_t stats_eta_decrypt; uint64_t stats_gcm_encrypt; uint64_t stats_gcm_decrypt; uint64_t stats_ccm_encrypt; uint64_t stats_ccm_decrypt; uint64_t stats_wr_nomem; uint64_t stats_inflight; uint64_t stats_mac_error; uint64_t stats_pad_error; uint64_t stats_bad_session; uint64_t stats_sglist_error; uint64_t stats_process_error; uint64_t stats_sw_fallback; }; /* * Crypto requests involve two kind of scatter/gather lists. * * Non-hash-only requests require a PHYS_DSGL that describes the * location to store the results of the encryption or decryption * operation. This SGL uses a different format (PHYS_DSGL) and should * exclude the skip bytes at the start of the data as well as any AAD * or IV. For authenticated encryption requests it should include the * destination of the hash or tag. * * The input payload may either be supplied inline as immediate data, * or via a standard ULP_TX SGL. This SGL should include AAD, * ciphertext, and the hash or tag for authenticated decryption * requests. * * These scatter/gather lists can describe different subsets of the * buffer described by the crypto operation. ccr_populate_sglist() * generates a scatter/gather list that covers the entire crypto * operation buffer that is then used to construct the other * scatter/gather lists. 
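* A typical handler therefore builds its lists in two steps, roughly:
*
*	ccr_populate_sglist(sc->sg_crp, crp);
*	sglist_reset(sc->sg_ulptx);
*	sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
*	    crp->crp_payload_start, crp->crp_payload_length);
*
* with sc->sg_dsgl filled the same way for the regions the engine must
* write back.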
*/ static int ccr_populate_sglist(struct sglist *sg, struct cryptop *crp) { int error; sglist_reset(sg); switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: error = sglist_append_mbuf(sg, crp->crp_mbuf); break; case CRYPTO_BUF_UIO: error = sglist_append_uio(sg, crp->crp_uio); break; case CRYPTO_BUF_CONTIG: error = sglist_append(sg, crp->crp_buf, crp->crp_ilen); break; default: error = EINVAL; } return (error); } /* * Segments in 'sg' larger than 'maxsegsize' are counted as multiple * segments. */ static int ccr_count_sgl(struct sglist *sg, int maxsegsize) { int i, nsegs; nsegs = 0; for (i = 0; i < sg->sg_nseg; i++) nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize); return (nsegs); } /* These functions deal with PHYS_DSGL for the reply buffer. */ static inline int ccr_phys_dsgl_len(int nsegs) { int len; len = (nsegs / 8) * sizeof(struct phys_sge_pairs); if ((nsegs % 8) != 0) { len += sizeof(uint16_t) * 8; len += roundup2(nsegs % 8, 2) * sizeof(uint64_t); } return (len); } static void ccr_write_phys_dsgl(struct ccr_softc *sc, struct ccr_session *s, void *dst, int nsegs) { struct sglist *sg; struct cpl_rx_phys_dsgl *cpl; struct phys_sge_pairs *sgl; vm_paddr_t paddr; size_t seglen; u_int i, j; sg = sc->sg_dsgl; cpl = dst; cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) | V_CPL_RX_PHYS_DSGL_ISRDMA(0)); cpl->pcirlxorder_to_noofsgentr = htobe32( V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) | V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) | V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) | V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs)); cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id); cpl->rss_hdr_int.hash_val = 0; sgl = (struct phys_sge_pairs *)(cpl + 1); j = 0; for (i = 0; i < sg->sg_nseg; i++) { seglen = sg->sg_segs[i].ss_len; paddr = sg->sg_segs[i].ss_paddr; do { sgl->addr[j] = htobe64(paddr); if (seglen > DSGL_SGE_MAXLEN) { sgl->len[j] = htobe16(DSGL_SGE_MAXLEN); paddr += DSGL_SGE_MAXLEN; seglen -= DSGL_SGE_MAXLEN; } else { sgl->len[j] = htobe16(seglen); seglen = 0; } j++; if (j == 8) { sgl++; j = 0; } } while (seglen != 0); } MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs); } /* These functions deal with the ULPTX_SGL for input payload. 
*/ static inline int ccr_ulptx_sgl_len(int nsegs) { u_int n; nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); return (roundup2(n, 16)); } static void ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs) { struct ulptx_sgl *usgl; struct sglist *sg; struct sglist_seg *ss; int i; sg = sc->sg_ulptx; MPASS(nsegs == sg->sg_nseg); ss = &sg->sg_segs[0]; usgl = dst; usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(nsegs)); usgl->len0 = htobe32(ss->ss_len); usgl->addr0 = htobe64(ss->ss_paddr); ss++; for (i = 0; i < sg->sg_nseg - 1; i++) { usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len); usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr); ss++; } } static bool ccr_use_imm_data(u_int transhdr_len, u_int input_len) { if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN) return (false); if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) > SGE_MAX_WR_LEN) return (false); return (true); } static void ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s, struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len, u_int sgl_len, u_int hash_size, struct cryptop *crp) { u_int cctx_size, idata_len; cctx_size = sizeof(struct _key_ctx) + kctx_len; crwr->wreq.op_to_cctx_size = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) | V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) | V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) | V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) | V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4)); crwr->wreq.len16_pkd = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16)); crwr->wreq.session_id = 0; crwr->wreq.rx_chid_to_rx_q_id = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->tx_channel_id) | V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) | V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) | V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) | V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) | V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id)); crwr->wreq.key_addr = 0; crwr->wreq.pld_size_hash_size = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) | V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size)); crwr->wreq.cookie = htobe64((uintptr_t)crp); crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DATAMODIFY(0) | V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) | V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(s->port->rxq->iq.abs_id) | V_ULP_TXPKT_RO(1)); crwr->ulptx.len = htobe32( ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16)); crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0)); idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len; if (imm_len % 16 != 0) idata_len -= 16 - imm_len % 16; crwr->sc_imm.len = htobe32(idata_len); } static int ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { struct chcr_wr *crwr; struct wrqe *wr; struct auth_hash *axf; char *dst; u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len; u_int hmac_ctrl, imm_len, iopad_size; int error, sgl_nsegs, sgl_len, use_opad; /* Reject requests with too large of an input buffer. */ if (crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); axf = s->hmac.auth_hash; if (s->mode == HMAC) { use_opad = 1; hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC; } else { use_opad = 0; hmac_ctrl = SCMD_HMAC_CTRL_NOP; } /* PADs must be 128-bit aligned. */ iopad_size = roundup2(s->hmac.partial_digest_len, 16); /* * The 'key' part of the context includes the aligned IPAD and * OPAD. 
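 * Plain hash requests carry only the IPAD-sized partial digest; HMAC
 * requests append the OPAD as well, doubling kctx_len.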
*/ kctx_len = iopad_size; if (use_opad) kctx_len += iopad_size; hash_size_in_response = axf->hashsize; transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); if (crp->crp_payload_length == 0) { imm_len = axf->blocksize; sgl_nsegs = 0; sgl_len = 0; } else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) { imm_len = crp->crp_payload_length; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, hash_size_in_response, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(0)); crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ? axf->blocksize : crp->crp_payload_length); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) | V_SCMD_AUTH_MODE(s->hmac.auth_mode) | V_SCMD_HMAC_CTRL(hmac_ctrl)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_LAST_FRAG(0) | V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1)); memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len); /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. 
*/ kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | V_KEY_CONTEXT_OPAD_PRESENT(use_opad) | V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) | V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1)); dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES; if (crp->crp_payload_length == 0) { dst[0] = 0x80; if (s->mode == HMAC) *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) = htobe64(axf->blocksize << 3); } else if (imm_len != 0) crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { uint8_t hash[HASH_MAX_LEN]; if (error) return (error); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len, hash); if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0) return (EBADMSG); } else crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len, (cpl + 1)); return (0); } static int ccr_blkcipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; char *dst; u_int kctx_len, key_half, op_type, transhdr_len, wr_len; u_int imm_len, iv_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) return (EINVAL); if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && (crp->crp_payload_length % AES_BLOCK_LEN) != 0) return (EINVAL); /* Reject requests with too large of an input buffer. */ if (crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* The 'key' must be 128-bit aligned. */ kctx_len = roundup2(s->blkcipher.key_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* For AES-XTS we send a 16-byte IV in the work request. */ if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) iv_len = AES_BLOCK_LEN; else iv_len = s->blkcipher.iv_len; if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) { imm_len = crp->crp_payload_length; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); - /* - * Read the existing IV from the request or generate a random - * one if none is provided. 
- */ - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(iv, s->blkcipher.iv_len, 0); - crypto_copyback(crp, crp->crp_iv_start, s->blkcipher.iv_len, - iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(iv, crp->crp_iv, s->blkcipher.iv_len); - else - crypto_copydata(crp, crp->crp_iv_start, s->blkcipher.iv_len, - iv); + crypto_read_iv(crp, iv); /* Zero the remainder of the IV for AES-XTS. */ memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length); crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) | V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; switch (s->blkcipher.cipher_mode) { case SCMD_CIPH_MODE_AES_CBC: if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); else memcpy(crwr->key_ctx.key, s->blkcipher.deckey, s->blkcipher.key_len); break; case SCMD_CIPH_MODE_AES_CTR: memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); break; case SCMD_CIPH_MODE_AES_XTS: key_half = s->blkcipher.key_len / 2; memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, key_half); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) memcpy(crwr->key_ctx.key + key_half, s->blkcipher.enckey, key_half); else memcpy(crwr->key_ctx.key + key_half, s->blkcipher.deckey, key_half); break; } dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, iv_len); dst += iv_len; if (imm_len != 0) crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. */ return (error); } /* * 'hashsize' is the length of a full digest. 'authsize' is the * requested digest length for this operation which may be less * than 'hashsize'. 
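 *
 * For example, a SHA-256 HMAC truncated to 16 bytes selects
 * SCMD_HMAC_CTRL_DIV2, a 12-byte digest selects
 * SCMD_HMAC_CTRL_IPSEC_96BIT, and the full 32-byte digest selects
 * SCMD_HMAC_CTRL_NO_TRUNC.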
*/ static int ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize) { if (authsize == 10) return (SCMD_HMAC_CTRL_TRUNC_RFC4366); if (authsize == 12) return (SCMD_HMAC_CTRL_IPSEC_96BIT); if (authsize == hashsize / 2) return (SCMD_HMAC_CTRL_DIV2); return (SCMD_HMAC_CTRL_NO_TRUNC); } static int ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; struct auth_hash *axf; char *dst; u_int kctx_len, key_half, op_type, transhdr_len, wr_len; u_int hash_size_in_response, imm_len, iopad_size, iv_len; u_int aad_start, aad_stop; u_int auth_insert; u_int cipher_start, cipher_stop; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; /* * If there is a need in the future, requests with an empty * payload could be supported as HMAC-only requests. */ if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) return (EINVAL); if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC && (crp->crp_payload_length % AES_BLOCK_LEN) != 0) return (EINVAL); /* For AES-XTS we send a 16-byte IV in the work request. */ if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) iv_len = AES_BLOCK_LEN; else iv_len = s->blkcipher.iv_len; if (crp->crp_aad_length + iv_len > MAX_AAD_LEN) return (EINVAL); axf = s->hmac.auth_hash; hash_size_in_response = s->hmac.hash_len; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; /* * The output buffer consists of the cipher text followed by * the hash when encrypting. For decryption it only contains * the plain text. * * Due to a firmware bug, the output buffer must include a * dummy output buffer for the IV and AAD prior to the real * output buffer. */ if (op_type == CHCR_ENCRYPT_OP) { if (iv_len + crp->crp_aad_length + crp->crp_payload_length + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (iv_len + crp->crp_aad_length + crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len + crp->crp_aad_length); if (error) return (error); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* PADs must be 128-bit aligned. */ iopad_size = roundup2(s->hmac.partial_digest_len, 16); /* * The 'key' part of the key context consists of the key followed * by the IPAD and OPAD. */ kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, any AAD, and then the * cipher/plain text. For decryption requests the hash is * appended after the cipher text. * * The IV is always stored at the start of the input buffer * even though it may be duplicated in the payload. The * crypto engine doesn't work properly if the IV offset points * inside of the AAD region, so a second copy is always * required. */ input_len = crp->crp_aad_length + crp->crp_payload_length; /* * The firmware hangs if sent a request which is a * bit smaller than MAX_REQUEST_SIZE. 
In particular, the * firmware appears to require 512 - 16 bytes of spare room * along with the size of the hash even if the hash isn't * included in the input buffer. */ if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) > MAX_REQUEST_SIZE) return (EFBIG); if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); if (crp->crp_aad_length != 0) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); } error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } /* * Any auth-only data before the cipher region is marked as AAD. * Auth-data that overlaps with the cipher region is placed in * the auth section. */ if (crp->crp_aad_length != 0) { aad_start = iv_len + 1; aad_stop = aad_start + crp->crp_aad_length - 1; } else { aad_start = 0; aad_stop = 0; } cipher_start = iv_len + crp->crp_aad_length + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); - /* - * Read the existing IV from the request or generate a random - * one if none is provided. - */ - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(iv, s->blkcipher.iv_len, 0); - crypto_copyback(crp, crp->crp_iv_start, s->blkcipher.iv_len, - iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(iv, crp->crp_iv, s->blkcipher.iv_len); - else - crypto_copydata(crp, crp->crp_iv_start, s->blkcipher.iv_len, - iv); + crypto_read_iv(crp, iv); /* Zero the remainder of the IV for AES-XTS. */ memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) | V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. 
*/ hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) | V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | V_SCMD_AUTH_MODE(s->hmac.auth_mode) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; switch (s->blkcipher.cipher_mode) { case SCMD_CIPH_MODE_AES_CBC: if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); else memcpy(crwr->key_ctx.key, s->blkcipher.deckey, s->blkcipher.key_len); break; case SCMD_CIPH_MODE_AES_CTR: memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); break; case SCMD_CIPH_MODE_AES_XTS: key_half = s->blkcipher.key_len / 2; memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, key_half); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) memcpy(crwr->key_ctx.key + key_half, s->blkcipher.enckey, key_half); else memcpy(crwr->key_ctx.key + key_half, s->blkcipher.deckey, key_half); break; } dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); memcpy(dst, s->hmac.pads, iopad_size * 2); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, iv_len); dst += iv_len; if (imm_len != 0) { if (crp->crp_aad_length != 0) { crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length, dst); dst += crp->crp_aad_length; } crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); dst += crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp, crp->crp_digest_start, hash_size_in_response, dst); } else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. */ return (error); } static int ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; char *dst; u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; u_int hash_size_in_response, imm_len; u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; if (s->blkcipher.key_len == 0) return (EINVAL); /* * The crypto engine doesn't handle GCM requests with an empty * payload, so handle those in software instead. */ if (crp->crp_payload_length == 0) return (EMSGSIZE); if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN) return (EMSGSIZE); hash_size_in_response = s->gmac.hash_len; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; /* * The IV handling for GCM in OCF is a bit more complicated in * that IPSec provides a full 16-byte IV (including the * counter), whereas the /dev/crypto interface sometimes * provides a full 16-byte IV (if no IV is provided in the * ioctl) and sometimes a 12-byte IV (if the IV was explicit). 
* * When provided a 12-byte IV, assume the IV is really 16 bytes * with a counter in the last 4 bytes initialized to 1. * * While iv_len is checked below, the value is currently * always set to 12 when creating a GCM session in this driver * due to limitations in OCF (there is no way to know what the * IV length of a given request will be). This means that the * driver always assumes as 12-byte IV for now. */ if (s->blkcipher.iv_len == 12) iv_len = AES_BLOCK_LEN; else iv_len = s->blkcipher.iv_len; /* * GCM requests should always provide an explicit IV. */ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* * The output buffer consists of the cipher text followed by * the tag when encrypting. For decryption it only contains * the plain text. * * Due to a firmware bug, the output buffer must include a * dummy output buffer for the IV and AAD prior to the real * output buffer. */ if (op_type == CHCR_ENCRYPT_OP) { if (iv_len + crp->crp_aad_length + crp->crp_payload_length + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (iv_len + crp->crp_aad_length + crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len + crp->crp_aad_length); if (error) return (error); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* * The 'key' part of the key context consists of the key followed * by the Galois hash key. */ kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, any AAD, and then the * cipher/plain text. For decryption requests the hash is * appended after the cipher text. * * The IV is always stored at the start of the input buffer * even though it may be duplicated in the payload. The * crypto engine doesn't work properly if the IV offset points * inside of the AAD region, so a second copy is always * required. 
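 *
 * The offsets programmed below are 1-based within this buffer: the AAD
 * (if any) starts at iv_len + 1 and the cipher text starts at
 * iv_len + crp->crp_aad_length + 1.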
*/ input_len = crp->crp_aad_length + crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (input_len > MAX_REQUEST_SIZE) return (EFBIG); if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); if (crp->crp_aad_length != 0) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); } error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } if (crp->crp_aad_length != 0) { aad_start = iv_len + 1; aad_stop = aad_start + crp->crp_aad_length - 1; } else { aad_start = 0; aad_stop = 0; } cipher_start = iv_len + crp->crp_aad_length + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); memcpy(iv, crp->crp_iv, s->blkcipher.iv_len); if (s->blkcipher.iv_len == 12) *(uint32_t *)&iv[12] = htobe32(1); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); /* * NB: cipherstop is explicitly set to 0. On encrypt it * should normally be set to 0 anyway. However, for decrypt * the cipher ends before the tag in the ETA case (and * authstop is set to stop before the tag), but for GCM the * cipher still runs to the end of the buffer. Not sure if * this is intentional or a firmware quirk, but it is required * for working tag validation with GCM decryption. */ crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
1 : 0) | V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) | V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, iv_len); dst += iv_len; if (imm_len != 0) { if (crp->crp_aad_length != 0) { crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length, dst); dst += crp->crp_aad_length; } crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); dst += crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp, crp->crp_digest_start, hash_size_in_response, dst); } else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. * * Note that the hardware should always verify the GMAC hash. */ return (error); } /* * Handle a GCM request that is not supported by the crypto engine by * performing the operation in software. Derived from swcr_authenc(). */ static void ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp) { struct auth_hash *axf; struct enc_xform *exf; void *auth_ctx; uint8_t *kschedule; char block[GMAC_BLOCK_LEN]; char digest[GMAC_DIGEST_LEN]; char iv[AES_BLOCK_LEN]; int error, i, len; auth_ctx = NULL; kschedule = NULL; /* Initialize the MAC. */ switch (s->blkcipher.key_len) { case 16: axf = &auth_hash_nist_gmac_aes_128; break; case 24: axf = &auth_hash_nist_gmac_aes_192; break; case 32: axf = &auth_hash_nist_gmac_aes_256; break; default: error = EINVAL; goto out; } auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); if (auth_ctx == NULL) { error = ENOMEM; goto out; } axf->Init(auth_ctx); axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); /* Initialize the cipher. */ exf = &enc_xform_aes_nist_gcm; error = exf->setkey(&kschedule, s->blkcipher.enckey, s->blkcipher.key_len); if (error) goto out; /* * This assumes a 12-byte IV from the crp. See longer comment * above in ccr_gcm() for more details. */ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { error = EINVAL; goto out; } memcpy(iv, crp->crp_iv, 12); *(uint32_t *)&iv[12] = htobe32(1); axf->Reinit(auth_ctx, iv, sizeof(iv)); /* MAC the AAD. 
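 * The AAD is hashed in full GMAC_BLOCK_LEN blocks; the final partial
 * block is zero-padded before being passed to the Update callback.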
*/ for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) { len = imin(crp->crp_aad_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_aad_start + i, len, block); bzero(block + len, sizeof(block) - len); axf->Update(auth_ctx, block, sizeof(block)); } exf->reinit(kschedule, iv); /* Do encryption with MAC */ for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { len = imin(crp->crp_payload_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_payload_start + i, len, block); bzero(block + len, sizeof(block) - len); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt(kschedule, block); axf->Update(auth_ctx, block, len); crypto_copyback(crp, crp->crp_payload_start + i, len, block); } else { axf->Update(auth_ctx, block, len); } } /* Length block. */ bzero(block, sizeof(block)); ((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8); ((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8); axf->Update(auth_ctx, block, sizeof(block)); /* Finalize MAC. */ axf->Final(digest, auth_ctx); /* Inject or validate tag. */ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), digest); error = 0; } else { char digest2[GMAC_DIGEST_LEN]; crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), digest2); if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { error = 0; /* Tag matches, decrypt data. */ for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { len = imin(crp->crp_payload_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_payload_start + i, len, block); bzero(block + len, sizeof(block) - len); exf->decrypt(kschedule, block); crypto_copyback(crp, crp->crp_payload_start + i, len, block); } } else error = EBADMSG; } exf->zerokey(&kschedule); out: if (auth_ctx != NULL) { memset(auth_ctx, 0, axf->ctxsize); free(auth_ctx, M_CCR); } crp->crp_etype = error; crypto_done(crp); } static void generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response, const char *iv, char *b0) { u_int i, payload_len; /* NB: L is already set in the first byte of the IV. */ memcpy(b0, iv, CCM_B0_SIZE); /* Set length of hash in bits 3 - 5. */ b0[0] |= (((hash_size_in_response - 2) / 2) << 3); /* Store the payload length as a big-endian value. */ payload_len = crp->crp_payload_length; for (i = 0; i < iv[0]; i++) { b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len; payload_len >>= 8; } /* * If there is AAD in the request, set bit 6 in the flags * field and store the AAD length as a big-endian value at the * start of block 1. This only assumes a 16-bit AAD length * since T6 doesn't support large AAD sizes. */ if (crp->crp_aad_length != 0) { b0[0] |= (1 << 6); *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length); } } static int ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct ulptx_idata *idata; struct chcr_wr *crwr; struct wrqe *wr; char *dst; u_int iv_len, kctx_len, op_type, transhdr_len, wr_len; u_int aad_len, b0_len, hash_size_in_response, imm_len; u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; if (s->blkcipher.key_len == 0) return (EINVAL); /* * The crypto engine doesn't handle CCM requests with an empty * payload, so handle those in software instead. */ if (crp->crp_payload_length == 0) return (EMSGSIZE); /* * CCM always includes block 0 in the AAD before AAD from the * request. 
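 * Block 0 carries the CCM flags byte, the nonce, and the big-endian
 * encoded payload length; when the request has AAD, a 16-bit AAD
 * length field follows it (see generate_ccm_b0() above).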
*/ b0_len = CCM_B0_SIZE; if (crp->crp_aad_length != 0) b0_len += CCM_AAD_FIELD_SIZE; aad_len = b0_len + crp->crp_aad_length; /* * CCM requests should always provide an explicit IV (really * the nonce). */ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* * Always assume a 12 byte input nonce for now since that is * what OCF always generates. The full IV in the work request * is 16 bytes. */ iv_len = AES_BLOCK_LEN; if (iv_len + aad_len > MAX_AAD_LEN) return (EMSGSIZE); hash_size_in_response = s->ccm_mac.hash_len; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) op_type = CHCR_ENCRYPT_OP; else op_type = CHCR_DECRYPT_OP; /* * The output buffer consists of the cipher text followed by * the tag when encrypting. For decryption it only contains * the plain text. * * Due to a firmware bug, the output buffer must include a * dummy output buffer for the IV and AAD prior to the real * output buffer. */ if (op_type == CHCR_ENCRYPT_OP) { if (iv_len + aad_len + crp->crp_payload_length + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (iv_len + aad_len + crp->crp_payload_length > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len + aad_len); if (error) return (error); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* * The 'key' part of the key context consists of two copies of * the AES key. */ kctx_len = roundup2(s->blkcipher.key_len, 16) * 2; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, AAD (including block * 0), and then the cipher/plain text. For decryption * requests the hash is appended after the cipher text. * * The IV is always stored at the start of the input buffer * even though it may be duplicated in the payload. The * crypto engine doesn't work properly if the IV offset points * inside of the AAD region, so a second copy is always * required. */ input_len = aad_len + crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (input_len > MAX_REQUEST_SIZE) return (EFBIG); if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { /* Block 0 is passed as immediate data. 
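 * The request AAD, payload, and (for decryption) the tag are instead
 * described by the ULPTX_SGL built below.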
*/ imm_len = b0_len; sglist_reset(sc->sg_ulptx); if (crp->crp_aad_length != 0) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); } error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crp->crp_digest_start, hash_size_in_response); if (error) return (error); } sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } aad_start = iv_len + 1; aad_stop = aad_start + aad_len - 1; cipher_start = aad_stop + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) + sgl_len; if (wr_len > SGE_MAX_WR_LEN) return (EFBIG); wr = alloc_wrqe(wr_len, s->port->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); /* * Read the nonce from the request. Use the nonce to generate * the full IV with the counter set to 0. */ memset(iv, 0, iv_len); iv[0] = (15 - AES_CCM_IV_LEN) - 1; memcpy(iv + 1, crp->crp_iv, AES_CCM_IV_LEN); ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0, crp); crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(s->port->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); /* * NB: cipherstop is explicitly set to 0. See comments above * in ccr_gcm(). */ crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ hmac_ctrl = ccr_hmac_ctrl(AES_CBC_MAC_HASH_LEN, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
0 : 1) | V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) | V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); memcpy(crwr->key_ctx.key + roundup(s->blkcipher.key_len, 16), s->blkcipher.enckey, s->blkcipher.key_len); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, s, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; memcpy(dst, iv, iv_len); dst += iv_len; generate_ccm_b0(crp, hash_size_in_response, iv, dst); if (sgl_nsegs == 0) { dst += b0_len; if (crp->crp_aad_length != 0) { crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length, dst); dst += crp->crp_aad_length; } crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, dst); dst += crp->crp_payload_length; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp, crp->crp_digest_start, hash_size_in_response, dst); } else { dst += CCM_B0_SIZE; if (b0_len > CCM_B0_SIZE) { /* * If there is AAD, insert padding including a * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL * is 16-byte aligned. */ KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE, ("b0_len mismatch")); memset(dst + CCM_AAD_FIELD_SIZE, 0, 8 - CCM_AAD_FIELD_SIZE); idata = (void *)(dst + 8); idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); idata->len = htobe32(0); dst = (void *)(idata + 1); } ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); } /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. * * Note that the hardware should always verify the CBC MAC * hash. */ return (error); } /* * Handle a CCM request that is not supported by the crypto engine by * performing the operation in software. Derived from swcr_authenc(). */ static void ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp) { struct auth_hash *axf; struct enc_xform *exf; union authctx *auth_ctx; uint8_t *kschedule; char block[CCM_CBC_BLOCK_LEN]; char digest[AES_CBC_MAC_HASH_LEN]; char iv[AES_CCM_IV_LEN]; int error, i, len; auth_ctx = NULL; kschedule = NULL; /* Initialize the MAC. */ switch (s->blkcipher.key_len) { case 16: axf = &auth_hash_ccm_cbc_mac_128; break; case 24: axf = &auth_hash_ccm_cbc_mac_192; break; case 32: axf = &auth_hash_ccm_cbc_mac_256; break; default: error = EINVAL; goto out; } auth_ctx = malloc(axf->ctxsize, M_CCR, M_NOWAIT); if (auth_ctx == NULL) { error = ENOMEM; goto out; } axf->Init(auth_ctx); axf->Setkey(auth_ctx, s->blkcipher.enckey, s->blkcipher.key_len); /* Initialize the cipher. */ exf = &enc_xform_ccm; error = exf->setkey(&kschedule, s->blkcipher.enckey, s->blkcipher.key_len); if (error) goto out; if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) { error = EINVAL; goto out; } memcpy(iv, crp->crp_iv, AES_CCM_IV_LEN); auth_ctx->aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; axf->Reinit(auth_ctx, iv, sizeof(iv)); /* MAC the AAD. 
*/ for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) { len = imin(crp->crp_aad_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_aad_start + i, len, block); bzero(block + len, sizeof(block) - len); axf->Update(auth_ctx, block, sizeof(block)); } exf->reinit(kschedule, iv); /* Do encryption/decryption with MAC */ for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { len = imin(crp->crp_payload_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_payload_start + i, len, block); bzero(block + len, sizeof(block) - len); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { axf->Update(auth_ctx, block, len); exf->encrypt(kschedule, block); crypto_copyback(crp, crp->crp_payload_start + i, len, block); } else { exf->decrypt(kschedule, block); axf->Update(auth_ctx, block, len); } } /* Finalize MAC. */ axf->Final(digest, auth_ctx); /* Inject or validate tag. */ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { crypto_copyback(crp, crp->crp_digest_start, sizeof(digest), digest); error = 0; } else { char digest2[AES_CBC_MAC_HASH_LEN]; crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2), digest2); if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) { error = 0; /* Tag matches, decrypt data. */ exf->reinit(kschedule, iv); for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) { len = imin(crp->crp_payload_length - i, sizeof(block)); crypto_copydata(crp, crp->crp_payload_start + i, len, block); bzero(block + len, sizeof(block) - len); exf->decrypt(kschedule, block); crypto_copyback(crp, crp->crp_payload_start + i, len, block); } } else error = EBADMSG; } exf->zerokey(&kschedule); out: if (auth_ctx != NULL) { memset(auth_ctx, 0, axf->ctxsize); free(auth_ctx, M_CCR); } crp->crp_etype = error; crypto_done(crp); } static void ccr_identify(driver_t *driver, device_t parent) { struct adapter *sc; sc = device_get_softc(parent); if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && device_find_child(parent, "ccr", -1) == NULL) device_add_child(parent, "ccr", -1); } static int ccr_probe(device_t dev) { device_set_desc(dev, "Chelsio Crypto Accelerator"); return (BUS_PROBE_DEFAULT); } static void ccr_sysctls(struct ccr_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid, *port_oid; struct sysctl_oid_list *children; char buf[16]; int i; ctx = device_get_sysctl_ctx(sc->dev); /* * dev.ccr.X. */ oid = device_get_sysctl_tree(sc->dev); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW, &sc->port_mask, 0, "Mask of enabled ports"); /* * dev.ccr.X.stats. 
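 * (e.g., "sysctl dev.ccr.0.stats" prints these counters at runtime)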
*/ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD, &sc->stats_hash, 0, "Hash requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD, &sc->stats_hmac, 0, "HMAC requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD, &sc->stats_blkcipher_encrypt, 0, "Cipher encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD, &sc->stats_blkcipher_decrypt, 0, "Cipher decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_encrypt", CTLFLAG_RD, &sc->stats_eta_encrypt, 0, "Combined AES+HMAC encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_decrypt", CTLFLAG_RD, &sc->stats_eta_decrypt, 0, "Combined AES+HMAC decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD, &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD, &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_encrypt", CTLFLAG_RD, &sc->stats_ccm_encrypt, 0, "AES-CCM encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ccm_decrypt", CTLFLAG_RD, &sc->stats_ccm_decrypt, 0, "AES-CCM decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD, &sc->stats_wr_nomem, 0, "Work request memory allocation failures"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD, &sc->stats_inflight, 0, "Requests currently pending"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD, &sc->stats_mac_error, 0, "MAC errors"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD, &sc->stats_pad_error, 0, "Padding errors"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD, &sc->stats_bad_session, 0, "Requests with invalid session ID"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD, &sc->stats_sglist_error, 0, "Requests for which DMA mapping failed"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD, &sc->stats_process_error, 0, "Requests failed during queueing"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sw_fallback", CTLFLAG_RD, &sc->stats_sw_fallback, 0, "Requests processed by falling back to software"); /* * dev.ccr.X.stats.port */ port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics"); for (i = 0; i < nitems(sc->ports); i++) { if (sc->ports[i].rxq == NULL) continue; /* * dev.ccr.X.stats.port.Y */ snprintf(buf, sizeof(buf), "%d", i); oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO, buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions", CTLFLAG_RD, &sc->ports[i].active_sessions, 0, "Count of active sessions"); } } static void ccr_init_port(struct ccr_softc *sc, int port) { sc->ports[port].txq = &sc->adapter->sge.ctrlq[port]; sc->ports[port].rxq = &sc->adapter->sge.rxq[sc->adapter->port[port]->vi->first_rxq]; sc->ports[port].tx_channel_id = port; _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS - 1, "Too many ports to fit in port_mask"); sc->port_mask |= 1u << port; } static int ccr_attach(device_t dev) { struct ccr_softc *sc; int32_t cid; int i; sc = device_get_softc(dev); 
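/*
 * Hook up the parent t6nex adapter, initialize per-port queue state,
 * and register this instance with the crypto framework.
 */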
sc->dev = dev; sc->adapter = device_get_softc(device_get_parent(dev)); for_each_port(sc->adapter, i) { ccr_init_port(sc, i); } cid = crypto_get_driverid(dev, sizeof(struct ccr_session), CRYPTOCAP_F_HARDWARE); if (cid < 0) { device_printf(dev, "could not get crypto driver id\n"); return (ENXIO); } sc->cid = cid; sc->adapter->ccr_softc = sc; mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK); sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK); sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK); sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK); sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK); ccr_sysctls(sc); return (0); } static int ccr_detach(device_t dev) { struct ccr_softc *sc; sc = device_get_softc(dev); mtx_lock(&sc->lock); sc->detaching = true; mtx_unlock(&sc->lock); crypto_unregister_all(sc->cid); mtx_destroy(&sc->lock); sglist_free(sc->sg_iv_aad); free(sc->iv_aad_buf, M_CCR); sglist_free(sc->sg_dsgl); sglist_free(sc->sg_ulptx); sglist_free(sc->sg_crp); sc->adapter->ccr_softc = NULL; return (0); } static void ccr_init_hash_digest(struct ccr_session *s) { union authctx auth_ctx; struct auth_hash *axf; axf = s->hmac.auth_hash; axf->Init(&auth_ctx); t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads); } static bool ccr_aes_check_keylen(int alg, int klen) { switch (klen * 8) { case 128: case 192: if (alg == CRYPTO_AES_XTS) return (false); break; case 256: break; case 512: if (alg != CRYPTO_AES_XTS) return (false); break; default: return (false); } return (true); } static void ccr_aes_setkey(struct ccr_session *s, const void *key, int klen) { unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size; unsigned int opad_present; if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) kbits = (klen / 2) * 8; else kbits = klen * 8; switch (kbits) { case 128: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; break; case 192: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; break; case 256: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; break; default: panic("should not get here"); } s->blkcipher.key_len = klen; memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); switch (s->blkcipher.cipher_mode) { case SCMD_CIPH_MODE_AES_CBC: case SCMD_CIPH_MODE_AES_XTS: t4_aes_getdeckey(s->blkcipher.deckey, key, kbits); break; } kctx_len = roundup2(s->blkcipher.key_len, 16); switch (s->mode) { case ETA: mk_size = s->hmac.mk_size; opad_present = 1; iopad_size = roundup2(s->hmac.partial_digest_len, 16); kctx_len += iopad_size * 2; break; case GCM: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; opad_present = 0; kctx_len += GMAC_BLOCK_LEN; break; case CCM: switch (kbits) { case 128: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; break; case 192: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; break; case 256: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; break; default: panic("should not get here"); } opad_present = 0; kctx_len *= 2; break; default: mk_size = CHCR_KEYCTX_NO_KEY; opad_present = 0; break; } kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS) | V_KEY_CONTEXT_OPAD_PRESENT(opad_present) | V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) | V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1)); } static bool ccr_auth_supported(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case 
CRYPTO_SHA2_512: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: break; default: return (false); } return (true); } static bool ccr_cipher_supported(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); break; case CRYPTO_AES_ICM: if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); break; case CRYPTO_AES_XTS: if (csp->csp_ivlen != AES_XTS_IV_LEN) return (false); break; default: return (false); } return (ccr_aes_check_keylen(csp->csp_cipher_alg, csp->csp_cipher_klen)); } static int ccr_cipher_mode(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: return (SCMD_CIPH_MODE_AES_CBC); case CRYPTO_AES_ICM: return (SCMD_CIPH_MODE_AES_CTR); case CRYPTO_AES_NIST_GCM_16: return (SCMD_CIPH_MODE_AES_GCM); case CRYPTO_AES_XTS: return (SCMD_CIPH_MODE_AES_XTS); case CRYPTO_AES_CCM_16: return (SCMD_CIPH_MODE_AES_CCM); default: return (SCMD_CIPH_MODE_NOP); } } static int ccr_probesession(device_t dev, const struct crypto_session_params *csp) { unsigned int cipher_mode; if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!ccr_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!ccr_cipher_supported(csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: if (csp->csp_ivlen != AES_GCM_IV_LEN) return (EINVAL); if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > AES_GMAC_HASH_LEN) return (EINVAL); break; case CRYPTO_AES_CCM_16: if (csp->csp_ivlen != AES_CCM_IV_LEN) return (EINVAL); if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > AES_CBC_MAC_HASH_LEN) return (EINVAL); break; default: return (EINVAL); } break; case CSP_MODE_ETA: if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp)) return (EINVAL); break; default: return (EINVAL); } if (csp->csp_cipher_klen != 0) { cipher_mode = ccr_cipher_mode(csp); if (cipher_mode == SCMD_CIPH_MODE_NOP) return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } /* * Select an available port with the lowest number of active sessions. */ static struct ccr_port * ccr_choose_port(struct ccr_softc *sc) { struct ccr_port *best, *p; int i; mtx_assert(&sc->lock, MA_OWNED); best = NULL; for (i = 0; i < nitems(sc->ports); i++) { p = &sc->ports[i]; /* Ignore non-existent ports. */ if (p->rxq == NULL) continue; /* * XXX: Ignore ports whose queues aren't initialized. * This is racy as the rxq can be destroyed by the * associated VI detaching. Eventually ccr should use * dedicated queues. 
*/ if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL) continue; if ((sc->port_mask & (1u << i)) == 0) continue; if (best == NULL || p->active_sessions < best->active_sessions) best = p; } return (best); } static int ccr_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct ccr_softc *sc; struct ccr_session *s; struct auth_hash *auth_hash; unsigned int auth_mode, cipher_mode, mk_size; unsigned int partial_digest_len; switch (csp->csp_auth_alg) { case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: auth_hash = &auth_hash_hmac_sha1; auth_mode = SCMD_AUTH_MODE_SHA1; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; partial_digest_len = SHA1_HASH_LEN; break; case CRYPTO_SHA2_224: case CRYPTO_SHA2_224_HMAC: auth_hash = &auth_hash_hmac_sha2_224; auth_mode = SCMD_AUTH_MODE_SHA224; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; partial_digest_len = SHA2_256_HASH_LEN; break; case CRYPTO_SHA2_256: case CRYPTO_SHA2_256_HMAC: auth_hash = &auth_hash_hmac_sha2_256; auth_mode = SCMD_AUTH_MODE_SHA256; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; partial_digest_len = SHA2_256_HASH_LEN; break; case CRYPTO_SHA2_384: case CRYPTO_SHA2_384_HMAC: auth_hash = &auth_hash_hmac_sha2_384; auth_mode = SCMD_AUTH_MODE_SHA512_384; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; partial_digest_len = SHA2_512_HASH_LEN; break; case CRYPTO_SHA2_512: case CRYPTO_SHA2_512_HMAC: auth_hash = &auth_hash_hmac_sha2_512; auth_mode = SCMD_AUTH_MODE_SHA512_512; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; partial_digest_len = SHA2_512_HASH_LEN; break; default: auth_hash = NULL; auth_mode = SCMD_AUTH_MODE_NOP; mk_size = 0; partial_digest_len = 0; break; } cipher_mode = ccr_cipher_mode(csp); #ifdef INVARIANTS switch (csp->csp_mode) { case CSP_MODE_CIPHER: if (cipher_mode == SCMD_CIPH_MODE_NOP || cipher_mode == SCMD_CIPH_MODE_AES_GCM || cipher_mode == SCMD_CIPH_MODE_AES_CCM) panic("invalid cipher algo"); break; case CSP_MODE_DIGEST: if (auth_mode == SCMD_AUTH_MODE_NOP) panic("invalid auth algo"); break; case CSP_MODE_AEAD: if (cipher_mode != SCMD_CIPH_MODE_AES_GCM && cipher_mode != SCMD_CIPH_MODE_AES_CCM) panic("invalid aead cipher algo"); if (auth_mode != SCMD_AUTH_MODE_NOP) panic("invalid aead auth aglo"); break; case CSP_MODE_ETA: if (cipher_mode == SCMD_CIPH_MODE_NOP || cipher_mode == SCMD_CIPH_MODE_AES_GCM || cipher_mode == SCMD_CIPH_MODE_AES_CCM) panic("invalid cipher algo"); if (auth_mode == SCMD_AUTH_MODE_NOP) panic("invalid auth algo"); break; default: panic("invalid csp mode"); } #endif sc = device_get_softc(dev); mtx_lock(&sc->lock); if (sc->detaching) { mtx_unlock(&sc->lock); return (ENXIO); } s = crypto_get_driver_session(cses); s->port = ccr_choose_port(sc); if (s->port == NULL) { mtx_unlock(&sc->lock); return (ENXIO); } switch (csp->csp_mode) { case CSP_MODE_AEAD: if (cipher_mode == SCMD_CIPH_MODE_AES_CCM) s->mode = CCM; else s->mode = GCM; break; case CSP_MODE_ETA: s->mode = ETA; break; case CSP_MODE_DIGEST: if (csp->csp_auth_klen != 0) s->mode = HMAC; else s->mode = HASH; break; case CSP_MODE_CIPHER: s->mode = BLKCIPHER; break; } if (s->mode == GCM) { if (csp->csp_auth_mlen == 0) s->gmac.hash_len = AES_GMAC_HASH_LEN; else s->gmac.hash_len = csp->csp_auth_mlen; t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen, s->gmac.ghash_h); } else if (s->mode == CCM) { if (csp->csp_auth_mlen == 0) s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN; else s->ccm_mac.hash_len = csp->csp_auth_mlen; } else if (auth_mode != SCMD_AUTH_MODE_NOP) { s->hmac.auth_hash = auth_hash; s->hmac.auth_mode = auth_mode; s->hmac.mk_size = mk_size; 
s->hmac.partial_digest_len = partial_digest_len; if (csp->csp_auth_mlen == 0) s->hmac.hash_len = auth_hash->hashsize; else s->hmac.hash_len = csp->csp_auth_mlen; if (csp->csp_auth_key != NULL) t4_init_hmac_digest(auth_hash, partial_digest_len, csp->csp_auth_key, csp->csp_auth_klen, s->hmac.pads); else ccr_init_hash_digest(s); } if (cipher_mode != SCMD_CIPH_MODE_NOP) { s->blkcipher.cipher_mode = cipher_mode; s->blkcipher.iv_len = csp->csp_ivlen; if (csp->csp_cipher_key != NULL) ccr_aes_setkey(s, csp->csp_cipher_key, csp->csp_cipher_klen); } s->active = true; s->port->active_sessions++; mtx_unlock(&sc->lock); return (0); } static void ccr_freesession(device_t dev, crypto_session_t cses) { struct ccr_softc *sc; struct ccr_session *s; sc = device_get_softc(dev); s = crypto_get_driver_session(cses); mtx_lock(&sc->lock); if (s->pending != 0) device_printf(dev, "session %p freed with %d pending requests\n", s, s->pending); s->active = false; s->port->active_sessions--; mtx_unlock(&sc->lock); } static int ccr_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct ccr_softc *sc; struct ccr_session *s; int error; csp = crypto_get_params(crp->crp_session); s = crypto_get_driver_session(crp->crp_session); sc = device_get_softc(dev); mtx_lock(&sc->lock); error = ccr_populate_sglist(sc->sg_crp, crp); if (error) { sc->stats_sglist_error++; goto out; } switch (s->mode) { case HASH: error = ccr_hash(sc, s, crp); if (error == 0) sc->stats_hash++; break; case HMAC: if (crp->crp_auth_key != NULL) t4_init_hmac_digest(s->hmac.auth_hash, s->hmac.partial_digest_len, crp->crp_auth_key, csp->csp_auth_klen, s->hmac.pads); error = ccr_hash(sc, s, crp); if (error == 0) sc->stats_hmac++; break; case BLKCIPHER: if (crp->crp_cipher_key != NULL) ccr_aes_setkey(s, crp->crp_cipher_key, csp->csp_cipher_klen); error = ccr_blkcipher(sc, s, crp); if (error == 0) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) sc->stats_blkcipher_encrypt++; else sc->stats_blkcipher_decrypt++; } break; case ETA: if (crp->crp_auth_key != NULL) t4_init_hmac_digest(s->hmac.auth_hash, s->hmac.partial_digest_len, crp->crp_auth_key, csp->csp_auth_klen, s->hmac.pads); if (crp->crp_cipher_key != NULL) ccr_aes_setkey(s, crp->crp_cipher_key, csp->csp_cipher_klen); error = ccr_eta(sc, s, crp); if (error == 0) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) sc->stats_eta_encrypt++; else sc->stats_eta_decrypt++; } break; case GCM: if (crp->crp_cipher_key != NULL) { t4_init_gmac_hash(crp->crp_cipher_key, csp->csp_cipher_klen, s->gmac.ghash_h); ccr_aes_setkey(s, crp->crp_cipher_key, csp->csp_cipher_klen); } if (crp->crp_payload_length == 0) { mtx_unlock(&sc->lock); ccr_gcm_soft(s, crp); return (0); } error = ccr_gcm(sc, s, crp); if (error == EMSGSIZE) { sc->stats_sw_fallback++; mtx_unlock(&sc->lock); ccr_gcm_soft(s, crp); return (0); } if (error == 0) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) sc->stats_gcm_encrypt++; else sc->stats_gcm_decrypt++; } break; case CCM: if (crp->crp_cipher_key != NULL) { ccr_aes_setkey(s, crp->crp_cipher_key, csp->csp_cipher_klen); } error = ccr_ccm(sc, s, crp); if (error == EMSGSIZE) { sc->stats_sw_fallback++; mtx_unlock(&sc->lock); ccr_ccm_soft(s, crp); return (0); } if (error == 0) { if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) sc->stats_ccm_encrypt++; else sc->stats_ccm_decrypt++; } break; } if (error == 0) { s->pending++; sc->stats_inflight++; } else sc->stats_process_error++; out: mtx_unlock(&sc->lock); if (error) { crp->crp_etype = error; crypto_done(crp); } return (0); } static int 
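/*
 * Completion path: ccr_process() queues work to the adapter and returns
 * immediately; the firmware's CPL_FW6_PLD reply is delivered to the
 * handler below (registered from ccr_modevent() at MOD_LOAD).  It
 * recovers the cryptop pointer from cpl->data[1], maps the MAC/pad
 * error bits of cpl->data[0] to EBADMSG, lets the per-mode *_done()
 * helper finish the transform, updates the counters under sc->lock,
 * and completes the request with crypto_done().  Requests the engine
 * could not take (EMSGSIZE from ccr_gcm()/ccr_ccm()) were already
 * diverted to the software fallbacks in ccr_process().
 */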
do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct ccr_softc *sc = iq->adapter->ccr_softc; struct ccr_session *s; const struct cpl_fw6_pld *cpl; struct cryptop *crp; uint32_t status; int error; if (m != NULL) cpl = mtod(m, const void *); else cpl = (const void *)(rss + 1); crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]); s = crypto_get_driver_session(crp->crp_session); status = be64toh(cpl->data[0]); if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status)) error = EBADMSG; else error = 0; mtx_lock(&sc->lock); s->pending--; sc->stats_inflight--; switch (s->mode) { case HASH: case HMAC: error = ccr_hash_done(sc, s, crp, cpl, error); break; case BLKCIPHER: error = ccr_blkcipher_done(sc, s, crp, cpl, error); break; case ETA: error = ccr_eta_done(sc, s, crp, cpl, error); break; case GCM: error = ccr_gcm_done(sc, s, crp, cpl, error); break; case CCM: error = ccr_ccm_done(sc, s, crp, cpl, error); break; } if (error == EBADMSG) { if (CHK_MAC_ERR_BIT(status)) sc->stats_mac_error++; if (CHK_PAD_ERR_BIT(status)) sc->stats_pad_error++; } mtx_unlock(&sc->lock); crp->crp_etype = error; crypto_done(crp); m_freem(m); return (0); } static int ccr_modevent(module_t mod, int cmd, void *arg) { switch (cmd) { case MOD_LOAD: t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld); return (0); case MOD_UNLOAD: t4_register_cpl_handler(CPL_FW6_PLD, NULL); return (0); default: return (EOPNOTSUPP); } } static device_method_t ccr_methods[] = { DEVMETHOD(device_identify, ccr_identify), DEVMETHOD(device_probe, ccr_probe), DEVMETHOD(device_attach, ccr_attach), DEVMETHOD(device_detach, ccr_detach), DEVMETHOD(cryptodev_probesession, ccr_probesession), DEVMETHOD(cryptodev_newsession, ccr_newsession), DEVMETHOD(cryptodev_freesession, ccr_freesession), DEVMETHOD(cryptodev_process, ccr_process), DEVMETHOD_END }; static driver_t ccr_driver = { "ccr", ccr_methods, sizeof(struct ccr_softc) }; static devclass_t ccr_devclass; DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_devclass, ccr_modevent, NULL); MODULE_VERSION(ccr, 1); MODULE_DEPEND(ccr, crypto, 1, 1, 1); MODULE_DEPEND(ccr, t6nex, 1, 1, 1); Index: head/sys/dev/glxsb/glxsb.c =================================================================== --- head/sys/dev/glxsb/glxsb.c (revision 360135) +++ head/sys/dev/glxsb/glxsb.c (revision 360136) @@ -1,779 +1,773 @@ /* $OpenBSD: glxsb.c,v 1.7 2007/02/12 14:31:45 tom Exp $ */ /* * Copyright (c) 2006 Tom Cosgrove * Copyright (c) 2003, 2004 Theo de Raadt * Copyright (c) 2003 Jason Wright * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ /* * Driver for the security block on the AMD Geode LX processors * http://www.amd.com/files/connectivitysolutions/geode/geode_lx/33234d_lx_ds.pdf */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include "glxsb.h" #define PCI_VENDOR_AMD 0x1022 /* AMD */ #define PCI_PRODUCT_AMD_GEODE_LX_CRYPTO 0x2082 /* Geode LX Crypto */ #define SB_GLD_MSR_CAP 0x58002000 /* RO - Capabilities */ #define SB_GLD_MSR_CONFIG 0x58002001 /* RW - Master Config */ #define SB_GLD_MSR_SMI 0x58002002 /* RW - SMI */ #define SB_GLD_MSR_ERROR 0x58002003 /* RW - Error */ #define SB_GLD_MSR_PM 0x58002004 /* RW - Power Mgmt */ #define SB_GLD_MSR_DIAG 0x58002005 /* RW - Diagnostic */ #define SB_GLD_MSR_CTRL 0x58002006 /* RW - Security Block Cntrl */ /* For GLD_MSR_CTRL: */ #define SB_GMC_DIV0 0x0000 /* AES update divisor values */ #define SB_GMC_DIV1 0x0001 #define SB_GMC_DIV2 0x0002 #define SB_GMC_DIV3 0x0003 #define SB_GMC_DIV_MASK 0x0003 #define SB_GMC_SBI 0x0004 /* AES swap bits */ #define SB_GMC_SBY 0x0008 /* AES swap bytes */ #define SB_GMC_TW 0x0010 /* Time write (EEPROM) */ #define SB_GMC_T_SEL0 0x0000 /* RNG post-proc: none */ #define SB_GMC_T_SEL1 0x0100 /* RNG post-proc: LFSR */ #define SB_GMC_T_SEL2 0x0200 /* RNG post-proc: whitener */ #define SB_GMC_T_SEL3 0x0300 /* RNG LFSR+whitener */ #define SB_GMC_T_SEL_MASK 0x0300 #define SB_GMC_T_NE 0x0400 /* Noise (generator) Enable */ #define SB_GMC_T_TM 0x0800 /* RNG test mode */ /* (deterministic) */ /* Security Block configuration/control registers (offsets from base) */ #define SB_CTL_A 0x0000 /* RW - SB Control A */ #define SB_CTL_B 0x0004 /* RW - SB Control B */ #define SB_AES_INT 0x0008 /* RW - SB AES Interrupt */ #define SB_SOURCE_A 0x0010 /* RW - Source A */ #define SB_DEST_A 0x0014 /* RW - Destination A */ #define SB_LENGTH_A 0x0018 /* RW - Length A */ #define SB_SOURCE_B 0x0020 /* RW - Source B */ #define SB_DEST_B 0x0024 /* RW - Destination B */ #define SB_LENGTH_B 0x0028 /* RW - Length B */ #define SB_WKEY 0x0030 /* WO - Writable Key 0-3 */ #define SB_WKEY_0 0x0030 /* WO - Writable Key 0 */ #define SB_WKEY_1 0x0034 /* WO - Writable Key 1 */ #define SB_WKEY_2 0x0038 /* WO - Writable Key 2 */ #define SB_WKEY_3 0x003C /* WO - Writable Key 3 */ #define SB_CBC_IV 0x0040 /* RW - CBC IV 0-3 */ #define SB_CBC_IV_0 0x0040 /* RW - CBC IV 0 */ #define SB_CBC_IV_1 0x0044 /* RW - CBC IV 1 */ #define SB_CBC_IV_2 0x0048 /* RW - CBC IV 2 */ #define SB_CBC_IV_3 0x004C /* RW - CBC IV 3 */ #define SB_RANDOM_NUM 0x0050 /* RW - Random Number */ #define SB_RANDOM_NUM_STATUS 0x0054 /* RW - Random Number Status */ #define SB_EEPROM_COMM 0x0800 /* RW - EEPROM Command */ #define SB_EEPROM_ADDR 0x0804 /* RW - EEPROM Address */ #define SB_EEPROM_DATA 0x0808 /* RW - EEPROM Data */ #define SB_EEPROM_SEC_STATE 0x080C /* RW - EEPROM Security State */ /* For SB_CTL_A and _B */ #define SB_CTL_ST 0x0001 /* Start operation (enc/dec) */ #define SB_CTL_ENC 0x0002 /* Encrypt (0 is decrypt) */ #define SB_CTL_DEC 0x0000 /* Decrypt */ #define SB_CTL_WK 0x0004 /* Use writable key (we set) */ #define SB_CTL_DC 0x0008 /* Destination coherent */ #define SB_CTL_SC 0x0010 /* Source coherent */ #define SB_CTL_CBC 0x0020 /* CBC (0 is ECB) */ /* For SB_AES_INT */ #define SB_AI_DISABLE_AES_A 0x0001 /* Disable AES A compl int */ #define SB_AI_ENABLE_AES_A 0x0000 /* Enable AES A compl int */ 
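/*
 * SB_AES_INT mixes interrupt-enable bits (low word, defined here) with
 * per-unit completion status bits (SB_AI_*_COMPLETE below).  The driver
 * appears to run the AES unit polled: glxsb_attach() writes
 * SB_AI_CLEAR_INTR to mask and acknowledge everything, and glxsb_aes()
 * busy-waits on SB_CTL_ST rather than taking an interrupt.
 */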
#define SB_AI_DISABLE_AES_B 0x0002 /* Disable AES B compl int */ #define SB_AI_ENABLE_AES_B 0x0000 /* Enable AES B compl int */ #define SB_AI_DISABLE_EEPROM 0x0004 /* Disable EEPROM op comp int */ #define SB_AI_ENABLE_EEPROM 0x0000 /* Enable EEPROM op compl int */ #define SB_AI_AES_A_COMPLETE 0x10000 /* AES A operation complete */ #define SB_AI_AES_B_COMPLETE 0x20000 /* AES B operation complete */ #define SB_AI_EEPROM_COMPLETE 0x40000 /* EEPROM operation complete */ #define SB_AI_CLEAR_INTR \ (SB_AI_DISABLE_AES_A | SB_AI_DISABLE_AES_B |\ SB_AI_DISABLE_EEPROM | SB_AI_AES_A_COMPLETE |\ SB_AI_AES_B_COMPLETE | SB_AI_EEPROM_COMPLETE) #define SB_RNS_TRNG_VALID 0x0001 /* in SB_RANDOM_NUM_STATUS */ #define SB_MEM_SIZE 0x0810 /* Size of memory block */ #define SB_AES_ALIGN 0x0010 /* Source and dest buffers */ /* must be 16-byte aligned */ #define SB_AES_BLOCK_SIZE 0x0010 /* * The Geode LX security block AES acceleration doesn't perform scatter- * gather: it just takes source and destination addresses. Therefore the * plain- and ciphertexts need to be contiguous. To this end, we allocate * a buffer for both, and accept the overhead of copying in and out. If * the number of bytes in one operation is bigger than allowed for by the * buffer (buffer is twice the size of the max length, as it has both input * and output) then we have to perform multiple encryptions/decryptions. */ #define GLXSB_MAX_AES_LEN 16384 MALLOC_DEFINE(M_GLXSB, "glxsb_data", "Glxsb Data"); struct glxsb_dma_map { bus_dmamap_t dma_map; /* DMA map */ bus_dma_segment_t dma_seg; /* segments */ int dma_nsegs; /* #segments */ int dma_size; /* size */ caddr_t dma_vaddr; /* virtual address */ bus_addr_t dma_paddr; /* physical address */ }; struct glxsb_taskop { struct glxsb_session *to_ses; /* crypto session */ struct cryptop *to_crp; /* cryptop to perfom */ }; struct glxsb_softc { device_t sc_dev; /* device backpointer */ struct resource *sc_sr; /* resource */ int sc_rid; /* resource rid */ struct callout sc_rngco; /* RNG callout */ int sc_rnghz; /* RNG callout ticks */ bus_dma_tag_t sc_dmat; /* DMA tag */ struct glxsb_dma_map sc_dma; /* DMA map */ int32_t sc_cid; /* crypto tag */ struct mtx sc_task_mtx; /* task mutex */ struct taskqueue *sc_tq; /* task queue */ struct task sc_cryptotask; /* task */ struct glxsb_taskop sc_to; /* task's crypto operation */ int sc_task_count; /* tasks count */ }; static int glxsb_probe(device_t); static int glxsb_attach(device_t); static int glxsb_detach(device_t); static void glxsb_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int glxsb_dma_alloc(struct glxsb_softc *); static void glxsb_dma_pre_op(struct glxsb_softc *, struct glxsb_dma_map *); static void glxsb_dma_post_op(struct glxsb_softc *, struct glxsb_dma_map *); static void glxsb_dma_free(struct glxsb_softc *, struct glxsb_dma_map *); static void glxsb_rnd(void *); static int glxsb_crypto_setup(struct glxsb_softc *); static int glxsb_crypto_probesession(device_t, const struct crypto_session_params *); static int glxsb_crypto_newsession(device_t, crypto_session_t, const struct crypto_session_params *); static void glxsb_crypto_freesession(device_t, crypto_session_t); static int glxsb_aes(struct glxsb_softc *, uint32_t, uint32_t, uint32_t, const void *, int, const void *); static int glxsb_crypto_encdec(struct cryptop *, struct glxsb_session *, struct glxsb_softc *); static void glxsb_crypto_task(void *, int); static int glxsb_crypto_process(device_t, struct cryptop *, int); static device_method_t glxsb_methods[] = { /* device interface */ 
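/*
 * Worked example of the contiguous-buffer limit described above: a
 * 40000-byte CBC payload is split by glxsb_crypto_encdec() into chunks
 * of 16384, 16384 and 7232 bytes (GLXSB_MAX_AES_LEN is 16384 bytes),
 * each copied into the DMA buffer, run through the engine, and copied
 * back out; the final 16-byte block of each chunk's ciphertext (when
 * encrypting) or plaintext (when decrypting) becomes the IV for the
 * next chunk.
 */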
DEVMETHOD(device_probe, glxsb_probe), DEVMETHOD(device_attach, glxsb_attach), DEVMETHOD(device_detach, glxsb_detach), /* crypto device methods */ DEVMETHOD(cryptodev_probesession, glxsb_crypto_probesession), DEVMETHOD(cryptodev_newsession, glxsb_crypto_newsession), DEVMETHOD(cryptodev_freesession, glxsb_crypto_freesession), DEVMETHOD(cryptodev_process, glxsb_crypto_process), {0,0} }; static driver_t glxsb_driver = { "glxsb", glxsb_methods, sizeof(struct glxsb_softc) }; static devclass_t glxsb_devclass; DRIVER_MODULE(glxsb, pci, glxsb_driver, glxsb_devclass, 0, 0); MODULE_VERSION(glxsb, 1); MODULE_DEPEND(glxsb, crypto, 1, 1, 1); static int glxsb_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_AMD && pci_get_device(dev) == PCI_PRODUCT_AMD_GEODE_LX_CRYPTO) { device_set_desc(dev, "AMD Geode LX Security Block (AES-128-CBC, RNG)"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int glxsb_attach(device_t dev) { struct glxsb_softc *sc = device_get_softc(dev); uint64_t msr; sc->sc_dev = dev; msr = rdmsr(SB_GLD_MSR_CAP); if ((msr & 0xFFFF00) != 0x130400) { device_printf(dev, "unknown ID 0x%x\n", (int)((msr & 0xFFFF00) >> 16)); return (ENXIO); } pci_enable_busmaster(dev); /* Map in the security block configuration/control registers */ sc->sc_rid = PCIR_BAR(0); sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid, RF_ACTIVE); if (sc->sc_sr == NULL) { device_printf(dev, "cannot map register space\n"); return (ENXIO); } /* * Configure the Security Block. * * We want to enable the noise generator (T_NE), and enable the * linear feedback shift register and whitener post-processing * (T_SEL = 3). Also ensure that test mode (deterministic values) * is disabled. */ msr = rdmsr(SB_GLD_MSR_CTRL); msr &= ~(SB_GMC_T_TM | SB_GMC_T_SEL_MASK); msr |= SB_GMC_T_NE | SB_GMC_T_SEL3; #if 0 msr |= SB_GMC_SBI | SB_GMC_SBY; /* for AES, if necessary */ #endif wrmsr(SB_GLD_MSR_CTRL, msr); /* Disable interrupts */ bus_write_4(sc->sc_sr, SB_AES_INT, SB_AI_CLEAR_INTR); /* Allocate a contiguous DMA-able buffer to work in */ if (glxsb_dma_alloc(sc) != 0) goto fail0; /* Initialize our task queue */ sc->sc_tq = taskqueue_create("glxsb_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sc->sc_tq); if (sc->sc_tq == NULL) { device_printf(dev, "cannot create task queue\n"); goto fail0; } if (taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev)) != 0) { device_printf(dev, "cannot start task queue\n"); goto fail1; } TASK_INIT(&sc->sc_cryptotask, 0, glxsb_crypto_task, sc); /* Initialize crypto */ if (glxsb_crypto_setup(sc) != 0) goto fail1; /* Install a periodic collector for the "true" (AMD's word) RNG */ if (hz > 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; callout_init(&sc->sc_rngco, 1); glxsb_rnd(sc); return (0); fail1: taskqueue_free(sc->sc_tq); fail0: bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_sr); return (ENXIO); } static int glxsb_detach(device_t dev) { struct glxsb_softc *sc = device_get_softc(dev); crypto_unregister_all(sc->sc_cid); callout_drain(&sc->sc_rngco); taskqueue_drain(sc->sc_tq, &sc->sc_cryptotask); bus_generic_detach(dev); glxsb_dma_free(sc, &sc->sc_dma); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_sr); taskqueue_free(sc->sc_tq); mtx_destroy(&sc->sc_task_mtx); return (0); } /* * callback for bus_dmamap_load() */ static void glxsb_dmamap_cb(void *arg, bus_dma_segment_t *seg, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = seg[0].ds_addr; } static int glxsb_dma_alloc(struct glxsb_softc 
*sc) { struct glxsb_dma_map *dma = &sc->sc_dma; int rc; dma->dma_nsegs = 1; dma->dma_size = GLXSB_MAX_AES_LEN * 2; /* Setup DMA descriptor area */ rc = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ SB_AES_ALIGN, 0, /* alignments, bounds */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma->dma_size, /* maxsize */ dma->dma_nsegs, /* nsegments */ dma->dma_size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_dmat); if (rc != 0) { device_printf(sc->sc_dev, "cannot allocate DMA tag (%d)\n", rc); return (rc); } rc = bus_dmamem_alloc(sc->sc_dmat, (void **)&dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (rc != 0) { device_printf(sc->sc_dev, "cannot allocate DMA memory of %d bytes (%d)\n", dma->dma_size, rc); goto fail0; } rc = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr, dma->dma_size, glxsb_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT); if (rc != 0) { device_printf(sc->sc_dev, "cannot load DMA memory for %d bytes (%d)\n", dma->dma_size, rc); goto fail1; } return (0); fail1: bus_dmamem_free(sc->sc_dmat, dma->dma_vaddr, dma->dma_map); fail0: bus_dma_tag_destroy(sc->sc_dmat); return (rc); } static void glxsb_dma_pre_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma) { bus_dmamap_sync(sc->sc_dmat, dma->dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void glxsb_dma_post_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma) { bus_dmamap_sync(sc->sc_dmat, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); } static void glxsb_dma_free(struct glxsb_softc *sc, struct glxsb_dma_map *dma) { bus_dmamap_unload(sc->sc_dmat, dma->dma_map); bus_dmamem_free(sc->sc_dmat, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(sc->sc_dmat); } static void glxsb_rnd(void *v) { struct glxsb_softc *sc = v; uint32_t status, value; status = bus_read_4(sc->sc_sr, SB_RANDOM_NUM_STATUS); if (status & SB_RNS_TRNG_VALID) { value = bus_read_4(sc->sc_sr, SB_RANDOM_NUM); /* feed with one uint32 */ /* MarkM: FIX!! Check that this does not swamp the harvester! */ random_harvest_queue(&value, sizeof(value), RANDOM_PURE_GLXSB); } callout_reset(&sc->sc_rngco, sc->sc_rnghz, glxsb_rnd, sc); } static int glxsb_crypto_setup(struct glxsb_softc *sc) { sc->sc_cid = crypto_get_driverid(sc->sc_dev, sizeof(struct glxsb_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(sc->sc_dev, "cannot get crypto driver id\n"); return (ENOMEM); } mtx_init(&sc->sc_task_mtx, "glxsb_crypto_mtx", NULL, MTX_DEF); return (0); } static int glxsb_crypto_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); /* * We only support HMAC algorithms to be able to work with * ipsec(4), so if we are asked only for authentication without * encryption, don't pretend we can accelerate it. 
*/ switch (csp->csp_mode) { case CSP_MODE_ETA: switch (csp->csp_auth_alg) { case CRYPTO_NULL_HMAC: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_RIPEMD160_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: break; default: return (EINVAL); } /* FALLTHROUGH */ case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: if (csp->csp_cipher_klen * 8 != 128) return (EINVAL); break; default: return (EINVAL); } default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } static int glxsb_crypto_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct glxsb_softc *sc = device_get_softc(dev); struct glxsb_session *ses; int error; ses = crypto_get_driver_session(cses); /* Copy the key (Geode LX wants the primary key only) */ if (csp->csp_cipher_key != NULL) bcopy(csp->csp_cipher_key, ses->ses_key, sizeof(ses->ses_key)); if (csp->csp_auth_alg != 0) { error = glxsb_hash_setup(ses, csp); if (error != 0) { glxsb_crypto_freesession(sc->sc_dev, cses); return (error); } } return (0); } static void glxsb_crypto_freesession(device_t dev, crypto_session_t cses) { struct glxsb_session *ses; ses = crypto_get_driver_session(cses); glxsb_hash_free(ses); } static int glxsb_aes(struct glxsb_softc *sc, uint32_t control, uint32_t psrc, uint32_t pdst, const void *key, int len, const void *iv) { uint32_t status; int i; if (len & 0xF) { device_printf(sc->sc_dev, "len must be a multiple of 16 (not %d)\n", len); return (EINVAL); } /* Set the source */ bus_write_4(sc->sc_sr, SB_SOURCE_A, psrc); /* Set the destination address */ bus_write_4(sc->sc_sr, SB_DEST_A, pdst); /* Set the data length */ bus_write_4(sc->sc_sr, SB_LENGTH_A, len); /* Set the IV */ if (iv != NULL) { bus_write_region_4(sc->sc_sr, SB_CBC_IV, iv, 4); control |= SB_CTL_CBC; } /* Set the key */ bus_write_region_4(sc->sc_sr, SB_WKEY, key, 4); /* Ask the security block to do it */ bus_write_4(sc->sc_sr, SB_CTL_A, control | SB_CTL_WK | SB_CTL_DC | SB_CTL_SC | SB_CTL_ST); /* * Now wait until it is done. * * We do a busy wait. Obviously the number of iterations of * the loop required to perform the AES operation depends upon * the number of bytes to process. * * On a 500 MHz Geode LX we see * * length (bytes) typical max iterations * 16 12 * 64 22 * 256 59 * 1024 212 * 8192 1,537 * * Since we have a maximum size of operation defined in * GLXSB_MAX_AES_LEN, we use this constant to decide how long * to wait. Allow an order of magnitude longer than it should * really take, just in case. */ for (i = 0; i < GLXSB_MAX_AES_LEN * 10; i++) { status = bus_read_4(sc->sc_sr, SB_CTL_A); if ((status & SB_CTL_ST) == 0) /* Done */ return (0); } device_printf(sc->sc_dev, "operation failed to complete\n"); return (EIO); } static int glxsb_crypto_encdec(struct cryptop *crp, struct glxsb_session *ses, struct glxsb_softc *sc) { char *op_src, *op_dst; const void *key; uint32_t op_psrc, op_pdst; uint8_t op_iv[SB_AES_BLOCK_SIZE]; int error; int len, tlen, xlen; int offset; uint32_t control; if ((crp->crp_payload_length % SB_AES_BLOCK_SIZE) != 0) return (EINVAL); /* How much of our buffer will we need to use? */ xlen = crp->crp_payload_length > GLXSB_MAX_AES_LEN ? GLXSB_MAX_AES_LEN : crp->crp_payload_length; /* * XXX Check if we can have input == output on Geode LX. * XXX In the meantime, use two separate (adjacent) buffers. 
*/ op_src = sc->sc_dma.dma_vaddr; op_dst = (char *)sc->sc_dma.dma_vaddr + xlen; op_psrc = sc->sc_dma.dma_paddr; op_pdst = sc->sc_dma.dma_paddr + xlen; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) control = SB_CTL_ENC; else control = SB_CTL_DEC; - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(op_iv, sizeof(op_iv), 0); - crypto_copyback(crp, crp->crp_iv_start, sizeof(op_iv), op_iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(op_iv, crp->crp_iv, sizeof(op_iv)); - else - crypto_copydata(crp, crp->crp_iv_start, sizeof(op_iv), op_iv); + crypto_read_iv(crp, op_iv); offset = 0; tlen = crp->crp_payload_length; if (crp->crp_cipher_key != NULL) key = crp->crp_cipher_key; else key = ses->ses_key; /* Process the data in GLXSB_MAX_AES_LEN chunks */ while (tlen > 0) { len = (tlen > GLXSB_MAX_AES_LEN) ? GLXSB_MAX_AES_LEN : tlen; crypto_copydata(crp, crp->crp_payload_start + offset, len, op_src); glxsb_dma_pre_op(sc, &sc->sc_dma); error = glxsb_aes(sc, control, op_psrc, op_pdst, key, len, op_iv); glxsb_dma_post_op(sc, &sc->sc_dma); if (error != 0) return (error); crypto_copyback(crp, crp->crp_payload_start + offset, len, op_dst); offset += len; tlen -= len; /* * Copy out last block for use as next iteration IV. */ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) bcopy(op_dst + len - sizeof(op_iv), op_iv, sizeof(op_iv)); else bcopy(op_src + len - sizeof(op_iv), op_iv, sizeof(op_iv)); } /* while */ /* All AES processing has now been done. */ bzero(sc->sc_dma.dma_vaddr, xlen * 2); return (0); } static void glxsb_crypto_task(void *arg, int pending) { struct glxsb_softc *sc = arg; const struct crypto_session_params *csp; struct glxsb_session *ses; struct cryptop *crp; int error; crp = sc->sc_to.to_crp; ses = sc->sc_to.to_ses; csp = crypto_get_params(crp->crp_session); /* Perform data authentication if requested before encryption */ if (csp->csp_mode == CSP_MODE_ETA && !CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = glxsb_hash_process(ses, csp, crp); if (error != 0) goto out; } error = glxsb_crypto_encdec(crp, ses, sc); if (error != 0) goto out; /* Perform data authentication if requested after encryption */ if (csp->csp_mode == CSP_MODE_ETA && CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = glxsb_hash_process(ses, csp, crp); if (error != 0) goto out; } out: mtx_lock(&sc->sc_task_mtx); sc->sc_task_count--; mtx_unlock(&sc->sc_task_mtx); crp->crp_etype = error; crypto_unblock(sc->sc_cid, CRYPTO_SYMQ); crypto_done(crp); } static int glxsb_crypto_process(device_t dev, struct cryptop *crp, int hint) { struct glxsb_softc *sc = device_get_softc(dev); struct glxsb_session *ses; ses = crypto_get_driver_session(crp->crp_session); mtx_lock(&sc->sc_task_mtx); if (sc->sc_task_count != 0) { mtx_unlock(&sc->sc_task_mtx); return (ERESTART); } sc->sc_task_count++; sc->sc_to.to_crp = crp; sc->sc_to.to_ses = ses; mtx_unlock(&sc->sc_task_mtx); taskqueue_enqueue(sc->sc_tq, &sc->sc_cryptotask); return(0); } Index: head/sys/dev/hifn/hifn7751.c =================================================================== --- head/sys/dev/hifn/hifn7751.c (revision 360135) +++ head/sys/dev/hifn/hifn7751.c (revision 360136) @@ -1,2825 +1,2815 @@ /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ /*- * SPDX-License-Identifier: BSD-3-Clause * * Invertex AEON / Hifn 7751 driver * Copyright (c) 1999 Invertex Inc. All rights reserved. * Copyright (c) 1999 Theo de Raadt * Copyright (c) 2000-2001 Network Security Technologies, Inc. * http://www.netsec.net * Copyright (c) 2003 Hifn Inc. 
* * This driver is based on a previous driver by Invertex, for which they * requested: Please send any comments, feedback, bug-fixes, or feature * requests to software@invertex.com. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. */ #include __FBSDID("$FreeBSD$"); /* * Driver for various Hifn encryption processors. 
*/ #include "opt_hifn.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #ifdef HIFN_RNDTEST #include #endif #include #include #ifdef HIFN_VULCANDEV #include #include static struct cdevsw vulcanpk_cdevsw; /* forward declaration */ #endif /* * Prototypes and count for the pci_device structure */ static int hifn_probe(device_t); static int hifn_attach(device_t); static int hifn_detach(device_t); static int hifn_suspend(device_t); static int hifn_resume(device_t); static int hifn_shutdown(device_t); static int hifn_probesession(device_t, const struct crypto_session_params *); static int hifn_newsession(device_t, crypto_session_t, const struct crypto_session_params *); static int hifn_process(device_t, struct cryptop *, int); static device_method_t hifn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hifn_probe), DEVMETHOD(device_attach, hifn_attach), DEVMETHOD(device_detach, hifn_detach), DEVMETHOD(device_suspend, hifn_suspend), DEVMETHOD(device_resume, hifn_resume), DEVMETHOD(device_shutdown, hifn_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_probesession, hifn_probesession), DEVMETHOD(cryptodev_newsession, hifn_newsession), DEVMETHOD(cryptodev_process, hifn_process), DEVMETHOD_END }; static driver_t hifn_driver = { "hifn", hifn_methods, sizeof (struct hifn_softc) }; static devclass_t hifn_devclass; DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0); MODULE_DEPEND(hifn, crypto, 1, 1, 1); #ifdef HIFN_RNDTEST MODULE_DEPEND(hifn, rndtest, 1, 1, 1); #endif static void hifn_reset_board(struct hifn_softc *, int); static void hifn_reset_puc(struct hifn_softc *); static void hifn_puc_wait(struct hifn_softc *); static int hifn_enable_crypto(struct hifn_softc *); static void hifn_set_retry(struct hifn_softc *sc); static void hifn_init_dma(struct hifn_softc *); static void hifn_init_pci_registers(struct hifn_softc *); static int hifn_sramsize(struct hifn_softc *); static int hifn_dramsize(struct hifn_softc *); static int hifn_ramtype(struct hifn_softc *); static void hifn_sessions(struct hifn_softc *); static void hifn_intr(void *); static u_int hifn_write_command(struct hifn_command *, u_int8_t *); static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *); static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int); static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *); static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *); static int hifn_init_pubrng(struct hifn_softc *); static void hifn_rng(void *); static void hifn_tick(void *); static void hifn_abort(struct hifn_softc *); static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *); static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t); static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t); static __inline u_int32_t READ_REG_0(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg); sc->sc_bar0_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val) static __inline u_int32_t 
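/*
 * Register access convention: hifn_attach() records a bus_space
 * tag/handle pair for each BAR, and all register I/O goes through
 * READ_REG_0/READ_REG_1 and the WRITE_REG_0/WRITE_REG_1 macros.  The
 * read helpers reset sc_bar0_lastreg/sc_bar1_lastreg to -1, presumably
 * so hifn_write_reg_0()/hifn_write_reg_1() can track the last register
 * written and decide when a flushing read-back is needed.
 */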
READ_REG_1(struct hifn_softc *sc, bus_size_t reg) { u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg); sc->sc_bar1_lastreg = (bus_size_t) -1; return (v); } #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val) static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Hifn driver parameters"); #ifdef HIFN_DEBUG static int hifn_debug = 0; SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug, 0, "control debugging msgs"); #endif static struct hifn_stats hifnstats; SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats, hifn_stats, "driver statistics"); static int hifn_maxbatch = 1; SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch, 0, "max ops to batch w/o interrupt"); /* * Probe for a supported device. The PCI vendor and device * IDs are used to detect devices we know how to handle. */ static int hifn_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) return (BUS_PROBE_DEFAULT); return (ENXIO); } static void hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static const char* hifn_partname(struct hifn_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_HIFN: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; case PCI_PRODUCT_HIFN_7955: return "Hifn 7955"; case PCI_PRODUCT_HIFN_7956: return "Hifn 7956"; } return "Hifn unknown-part"; case PCI_VENDOR_INVERTEX: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; } return "Invertex unknown-part"; case PCI_VENDOR_NETSEC: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; } return "NetSec unknown-part"; } return "Unknown-vendor unknown-part"; } static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { /* MarkM: FIX!! Check that this does not swamp the harvester! */ random_harvest_queue(buf, count, RANDOM_PURE_HIFN); } static u_int checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max) { if (v > max) { device_printf(dev, "Warning, %s %u out of range, " "using max %u\n", what, v, max); v = max; } else if (v < min) { device_printf(dev, "Warning, %s %u out of range, " "using min %u\n", what, v, min); v = min; } return v; } /* * Select PLL configuration for 795x parts. This is complicated in * that we cannot determine the optimal parameters without user input. * The reference clock is derived from an external clock through a * multiplier. The external clock is either the host bus (i.e. PCI) * or an external clock generator. When using the PCI bus we assume * the clock is either 33 or 66 MHz; for an external source we cannot * tell the speed. 
* * PLL configuration is done with a string: "pci" for PCI bus, or "ext" * for an external source, followed by the frequency. We calculate * the appropriate multiplier and PLL register contents accordingly. * When no configuration is given we default to "pci66" since that * always will allow the card to work. If a card is using the PCI * bus clock and in a 33MHz slot then it will be operating at half * speed until the correct information is provided. * * We use a default setting of "ext66" because according to Mike Ham * of HiFn, almost every board in existence has an external crystal * populated at 66Mhz. Using PCI can be a problem on modern motherboards, * because PCI33 can have clocks from 0 to 33Mhz, and some have * non-PCI-compliant spread-spectrum clocks, which can confuse the pll. */ static void hifn_getpllconfig(device_t dev, u_int *pll) { const char *pllspec; u_int freq, mul, fl, fh; u_int32_t pllconfig; char *nxt; if (resource_string_value("hifn", device_get_unit(dev), "pllconfig", &pllspec)) pllspec = "ext66"; fl = 33, fh = 66; pllconfig = 0; if (strncmp(pllspec, "ext", 3) == 0) { pllspec += 3; pllconfig |= HIFN_PLL_REF_SEL; switch (pci_get_device(dev)) { case PCI_PRODUCT_HIFN_7955: case PCI_PRODUCT_HIFN_7956: fl = 20, fh = 100; break; #ifdef notyet case PCI_PRODUCT_HIFN_7954: fl = 20, fh = 66; break; #endif } } else if (strncmp(pllspec, "pci", 3) == 0) pllspec += 3; freq = strtoul(pllspec, &nxt, 10); if (nxt == pllspec) freq = 66; else freq = checkmaxmin(dev, "frequency", freq, fl, fh); /* * Calculate multiplier. We target a Fck of 266 MHz, * allowing only even values, possibly rounded down. * Multipliers > 8 must set the charge pump current. */ mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12); pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT; if (mul > 8) pllconfig |= HIFN_PLL_IS; *pll = pllconfig; } /* * Attach an interface that successfully probed. */ static int hifn_attach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); caddr_t kva; int rseg, rid; char rbase; uint16_t rev; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF); /* XXX handle power management */ /* * The 7951 and 795x have a random number generator and * public key support; note this. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC; /* * The 7811 has a random number generator and * we also note it's identity 'cuz of some quirks. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && pci_get_device(dev) == PCI_PRODUCT_HIFN_7811) sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG; /* * The 795x parts support AES. */ if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) { sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES; /* * Select PLL configuration. This depends on the * bus and board design and must be manually configured * if the default setting is unacceptable. */ hifn_getpllconfig(dev, &sc->sc_pllconfig); } /* * Setup PCI resources. Note that we record the bus * tag and handle for each register mapping, this is * used by the READ_REG_0, WRITE_REG_0, READ_REG_1, * and WRITE_REG_1 macros throughout the driver. 
*/ pci_enable_busmaster(dev); rid = HIFN_BAR0; sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar0res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 0); goto fail_pci; } sc->sc_st0 = rman_get_bustag(sc->sc_bar0res); sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res); sc->sc_bar0_lastreg = (bus_size_t) -1; rid = HIFN_BAR1; sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_bar1res == NULL) { device_printf(dev, "cannot map bar%d register space\n", 1); goto fail_io0; } sc->sc_st1 = rman_get_bustag(sc->sc_bar1res); sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res); sc->sc_bar1_lastreg = (bus_size_t) -1; hifn_set_retry(sc); /* * Setup the area where the Hifn DMA's descriptors * and associated data structures. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* PCI parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HIFN_MAX_DMALEN, /* maxsize */ MAX_SCATTER, /* nsegments */ HIFN_MAX_SEGLEN, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->sc_dmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto fail_io1; } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot create dma map\n"); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { device_printf(dev, "cannot alloc dma buffer\n"); bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva, sizeof (*sc->sc_dma), hifn_dmamap_cb, &sc->sc_dma_physaddr, BUS_DMA_NOWAIT)) { device_printf(dev, "cannot load dma map\n"); bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); goto fail_io1; } sc->sc_dma = (struct hifn_dma *)kva; bzero(sc->sc_dma, sizeof(*sc->sc_dma)); KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!")); KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!")); KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!")); KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!")); /* * Reset the board and do the ``secret handshake'' * to enable the crypto support. Then complete the * initialization procedure by setting up the interrupt * and hooking in to the system crypto support so we'll * get used for system services like the crypto device, * IPsec, RNG device, etc. */ hifn_reset_board(sc, 0); if (hifn_enable_crypto(sc) != 0) { device_printf(dev, "crypto enabling failed\n"); goto fail_mem; } hifn_reset_puc(sc); hifn_init_dma(sc); hifn_init_pci_registers(sc); /* XXX can't dynamically determine ram type for 795x; force dram */ if (sc->sc_flags & HIFN_IS_7956) sc->sc_drammodel = 1; else if (hifn_ramtype(sc)) goto fail_mem; if (sc->sc_drammodel == 0) hifn_sramsize(sc); else hifn_dramsize(sc); /* * Workaround for NetSec 7751 rev A: half ram size because two * of the address lines were left floating */ if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 && pci_get_revid(dev) == 0x61) /*XXX???*/ sc->sc_ramsize >>= 1; /* * Arrange the interrupt line. 
*/ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto fail_mem; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is marked appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, hifn_intr, sc, &sc->sc_intrhand)) { device_printf(dev, "could not setup interrupt\n"); goto fail_intr2; } hifn_sessions(sc); /* * NB: Keep only the low 16 bits; this masks the chip id * from the 7951. */ rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff; rseg = sc->sc_ramsize / 1024; rbase = 'K'; if (sc->sc_ramsize >= (1024 * 1024)) { rbase = 'M'; rseg /= 1024; } device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram", hifn_partname(sc), rev, rseg, rbase, sc->sc_drammodel ? 'd' : 's'); if (sc->sc_flags & HIFN_IS_7956) printf(", pll=0x%x<%s clk, %ux mult>", sc->sc_pllconfig, sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci", 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11)); printf("\n"); WRITE_REG_0(sc, HIFN_0_PUCNFG, READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); sc->sc_ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; switch (sc->sc_ena) { case HIFN_PUSTAT_ENA_2: case HIFN_PUSTAT_ENA_1: sc->sc_cid = crypto_get_driverid(dev, sizeof(struct hifn_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto fail_intr; } break; } bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) hifn_init_pubrng(sc); callout_init(&sc->sc_tickto, 1); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); return (0); fail_intr: bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); fail_intr2: /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); fail_mem: bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); fail_io1: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); fail_io0: bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); fail_pci: mtx_destroy(&sc->sc_mtx); return (ENXIO); } /* * Detach an interface that successfully probed. */ static int hifn_detach(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); KASSERT(sc != NULL, ("hifn_detach: null software carrier!")); /* disable interrupts */ WRITE_REG_1(sc, HIFN_1_DMA_IER, 0); /*XXX other resources */ callout_stop(&sc->sc_tickto); callout_stop(&sc->sc_rngto); #ifdef HIFN_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif /* Turn off DMA polling */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); crypto_unregister_all(sc->sc_cid); bus_generic_detach(dev); /*XXX should be no children, right? 
*/ bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); /* XXX don't store rid */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); bus_dma_tag_destroy(sc->sc_dmat); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); mtx_destroy(&sc->sc_mtx); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int hifn_shutdown(device_t dev) { #ifdef notyet hifn_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int hifn_suspend(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet hifn_stop(sc); #endif sc->sc_suspended = 1; return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int hifn_resume(device_t dev) { struct hifn_softc *sc = device_get_softc(dev); #ifdef notyet /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) rl_init(sc); #endif sc->sc_suspended = 0; return (0); } static int hifn_init_pubrng(struct hifn_softc *sc) { u_int32_t r; int i; #ifdef HIFN_RNDTEST sc->sc_rndtest = rndtest_attach(sc->sc_dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif if ((sc->sc_flags & HIFN_IS_7811) == 0) { /* Reset 7951 public key/rng engine */ WRITE_REG_1(sc, HIFN_1_PUB_RESET, READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); for (i = 0; i < 100; i++) { DELAY(1000); if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) break; } if (i == 100) { device_printf(sc->sc_dev, "public key init failed\n"); return (1); } } /* Enable the rng, if available */ if (sc->sc_flags & HIFN_HAS_RNG) { if (sc->sc_flags & HIFN_IS_7811) { r = READ_REG_1(sc, HIFN_1_7811_RNGENA); if (r & HIFN_7811_RNGENA_ENA) { r &= ~HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, HIFN_7811_RNGCFG_DEFL); r |= HIFN_7811_RNGENA_ENA; WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); } else WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, READ_REG_1(sc, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); sc->sc_rngfirst = 1; if (hz >= 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); } /* Enable public key engine, if available */ if (sc->sc_flags & HIFN_HAS_PUBLIC) { WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); sc->sc_dmaier |= HIFN_DMAIER_PUBDONE; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); #ifdef HIFN_VULCANDEV sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "vulcanpk"); sc->sc_pkdev->si_drv1 = sc; #endif } return (0); } static void hifn_rng(void *vsc) { #define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0 struct hifn_softc *sc = vsc; u_int32_t sts, num[2]; int i; if (sc->sc_flags & HIFN_IS_7811) { /* ONLY VALID ON 7811!!!! */ for (i = 0; i < 5; i++) { sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS); if (sts & HIFN_7811_RNGSTS_UFL) { device_printf(sc->sc_dev, "RNG underflow: disabling\n"); return; } if ((sts & HIFN_7811_RNGSTS_RDY) == 0) break; /* * There are at least two words in the RNG FIFO * at this point. 
*/ num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else (*sc->sc_harvest)(sc->sc_rndtest, num, sizeof (num)); } } else { num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA); /* NB: discard first data read */ if (sc->sc_rngfirst) sc->sc_rngfirst = 0; else (*sc->sc_harvest)(sc->sc_rndtest, num, sizeof (num[0])); } callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); #undef RANDOM_BITS } static void hifn_puc_wait(struct hifn_softc *sc) { int i; int reg = HIFN_0_PUCTRL; if (sc->sc_flags & HIFN_IS_7956) { reg = HIFN_0_PUCTRL2; } for (i = 5000; i > 0; i--) { DELAY(1); if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET)) break; } if (!i) device_printf(sc->sc_dev, "proc unit did not reset\n"); } /* * Reset the processing unit. */ static void hifn_reset_puc(struct hifn_softc *sc) { /* Reset processing unit */ int reg = HIFN_0_PUCTRL; if (sc->sc_flags & HIFN_IS_7956) { reg = HIFN_0_PUCTRL2; } WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA); hifn_puc_wait(sc); } /* * Set the Retry and TRDY registers; note that we set them to * zero because the 7811 locks up when forced to retry (section * 3.6 of "Specification Update SU-0014-04". Not clear if we * should do this for all Hifn parts, but it doesn't seem to hurt. */ static void hifn_set_retry(struct hifn_softc *sc) { /* NB: RETRY only responds to 8-bit reads/writes */ pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1); } /* * Resets the board. Values in the regesters are left as is * from the reset (i.e. initial values are assigned elsewhere). */ static void hifn_reset_board(struct hifn_softc *sc, int full) { u_int32_t reg; /* * Set polling in the DMA configuration register to zero. 0x7 avoids * resetting the board and zeros out the other fields. */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); /* * Now that polling has been disabled, we have to wait 1 ms * before resetting the board. 
*/ DELAY(1000); /* Reset the DMA unit */ if (full) { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); DELAY(1000); } else { WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); hifn_reset_puc(sc); } KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); bzero(sc->sc_dma, sizeof(*sc->sc_dma)); /* Bring dma unit out of reset */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); hifn_puc_wait(sc); hifn_set_retry(sc); if (sc->sc_flags & HIFN_IS_7811) { for (reg = 0; reg < 1000; reg++) { if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & HIFN_MIPSRST_CRAMINIT) break; DELAY(1000); } if (reg == 1000) printf(": cram init timeout\n"); } else { /* set up DMA configuration register #2 */ /* turn off all PK and BAR0 swaps */ WRITE_REG_1(sc, HIFN_1_DMA_CNFG2, (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)| (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)| (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)| (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT)); } } static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt) { int i; u_int32_t v; for (i = 0; i < cnt; i++) { /* get the parity */ v = a & 0x80080125; v ^= v >> 16; v ^= v >> 8; v ^= v >> 4; v ^= v >> 2; v ^= v >> 1; a = (v & 1) ^ (a << 1); } return a; } struct pci2id { u_short pci_vendor; u_short pci_prod; char card_id[13]; }; static struct pci2id pci2id[] = { { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, { /* * Other vendors share this PCI ID as well, such as * http://www.powercrypt.com, and obviously they also * use the same key. */ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }, }; /* * Checks to see if crypto is already enabled. If crypto isn't enable, * "hifn_enable_crypto" is called to enable it. The check is important, * as enabling crypto twice will lock the board. */ static int hifn_enable_crypto(struct hifn_softc *sc) { u_int32_t dmacfg, ramcfg, encl, addr, i; char *offtbl = NULL; for (i = 0; i < nitems(pci2id); i++) { if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) && pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) { offtbl = pci2id[i].card_id; break; } } if (offtbl == NULL) { device_printf(sc->sc_dev, "Unknown card!\n"); return (1); } ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); /* * The RAM config register's encrypt level bit needs to be set before * every read performed on the encryption level register. */ WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; /* * Make sure we don't re-unlock. Two unlocks kills chip until the * next reboot. 
*/ if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Strong crypto already enabled!\n"); #endif goto report; } if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "Unknown encryption level 0x%x\n", encl); #endif return 1; } WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); DELAY(1000); addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1); DELAY(1000); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0); DELAY(1000); for (i = 0; i <= 12; i++) { addr = hifn_next_signature(addr, offtbl[i] + 0x101); WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr); DELAY(1000); } WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; #ifdef HIFN_DEBUG if (hifn_debug) { if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) device_printf(sc->sc_dev, "Engine is permanently " "locked until next system reset!\n"); else device_printf(sc->sc_dev, "Engine enabled " "successfully!\n"); } #endif report: WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); switch (encl) { case HIFN_PUSTAT_ENA_1: case HIFN_PUSTAT_ENA_2: break; case HIFN_PUSTAT_ENA_0: default: device_printf(sc->sc_dev, "disabled"); break; } return 0; } /* * Give initial values to the registers listed in the "Register Space" * section of the HIFN Software Development reference manual. */ static void hifn_init_pci_registers(struct hifn_softc *sc) { /* write fixed values needed by the Initialization registers */ WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD); WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER); /* write all 4 ring address registers */ WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); DELAY(2000); /* write status register */ WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST | HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST | HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | HIFN_DMACSR_S_WAIT | HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | HIFN_DMACSR_C_WAIT | HIFN_DMACSR_ENGINE | ((sc->sc_flags & HIFN_HAS_PUBLIC) ? HIFN_DMACSR_PUBDONE : 0) | ((sc->sc_flags & HIFN_IS_7811) ? HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0)); sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0; sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | ((sc->sc_flags & HIFN_IS_7811) ? 
HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0); sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); if (sc->sc_flags & HIFN_IS_7956) { u_int32_t pll; WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | HIFN_PUCNFG_TCALLPHASES | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32); /* turn off the clocks and insure bypass is set */ pll = READ_REG_1(sc, HIFN_1_PLL); pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL)) | HIFN_PLL_BP | HIFN_PLL_MBSET; WRITE_REG_1(sc, HIFN_1_PLL, pll); DELAY(10*1000); /* 10ms */ /* change configuration */ pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig; WRITE_REG_1(sc, HIFN_1_PLL, pll); DELAY(10*1000); /* 10ms */ /* disable bypass */ pll &= ~HIFN_PLL_BP; WRITE_REG_1(sc, HIFN_1_PLL, pll); /* enable clocks with new configuration */ pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL; WRITE_REG_1(sc, HIFN_1_PLL, pll); } else { WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM)); } WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); } /* * The maximum number of sessions supported by the card * is dependent on the amount of context ram, which * encryption algorithms are enabled, and how compression * is configured. This should be configured before this * routine is called. */ static void hifn_sessions(struct hifn_softc *sc) { u_int32_t pucnfg; int ctxsize; pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG); if (pucnfg & HIFN_PUCNFG_COMPSING) { if (pucnfg & HIFN_PUCNFG_ENCCNFG) ctxsize = 128; else ctxsize = 512; /* * 7955/7956 has internal context memory of 32K */ if (sc->sc_flags & HIFN_IS_7956) sc->sc_maxses = 32768 / ctxsize; else sc->sc_maxses = 1 + ((sc->sc_ramsize - 32768) / ctxsize); } else sc->sc_maxses = sc->sc_ramsize / 16384; if (sc->sc_maxses > 2048) sc->sc_maxses = 2048; } /* * Determine ram type (sram or dram). Board should be just out of a reset * state when this is called. 
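hifn_sessions() above turns the context-RAM geometry into a session limit: 128- or 512-byte contexts when compression is single-pass, a fixed 32 KB of internal RAM on the 7955/7956, otherwise external RAM minus the first 32 KB, capped at 2048. A small user-space sketch of the same arithmetic; the flag arguments stand in for the HIFN_PUCNFG_COMPSING/ENCCNFG and HIFN_IS_7956 tests, and the numbers in main() are illustrative only:

#include <stdio.h>

/* Mirror of the driver's session-count calculation. */
static int
max_sessions(unsigned ramsize, int comp_single, int enc_small_ctx, int is_7956)
{
	int ctxsize, maxses;

	if (comp_single) {
		ctxsize = enc_small_ctx ? 128 : 512;
		if (is_7956)
			maxses = 32768 / ctxsize;	/* internal 32 KB */
		else
			maxses = 1 + ((ramsize - 32768) / ctxsize);
	} else
		maxses = ramsize / 16384;
	if (maxses > 2048)
		maxses = 2048;
	return (maxses);
}

int
main(void)
{
	/* e.g. a 7956 with 128-byte contexts: 32768 / 128 = 256 sessions */
	printf("%d\n", max_sessions(32768, 1, 1, 1));
	return (0);
}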
*/ static int hifn_ramtype(struct hifn_softc *sc) { u_int8_t data[8], dataexpect[8]; int i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0x55; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = 0xaa; if (hifn_writeramaddr(sc, 0, data)) return (-1); if (hifn_readramaddr(sc, 0, data)) return (-1); if (bcmp(data, dataexpect, sizeof(data)) != 0) { sc->sc_drammodel = 1; return (0); } return (0); } #define HIFN_SRAM_MAX (32 << 20) #define HIFN_SRAM_STEP_SIZE 16384 #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE) static int hifn_sramsize(struct hifn_softc *sc) { u_int32_t a; u_int8_t data[8]; u_int8_t dataexpect[sizeof(data)]; int32_t i; for (i = 0; i < sizeof(data); i++) data[i] = dataexpect[i] = i ^ 0x5a; for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, data, sizeof(i)); hifn_writeramaddr(sc, a, data); } for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) { a = i * HIFN_SRAM_STEP_SIZE; bcopy(&i, dataexpect, sizeof(i)); if (hifn_readramaddr(sc, a, data) < 0) return (0); if (bcmp(data, dataexpect, sizeof(data)) != 0) return (0); sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE; } return (0); } /* * XXX For dram boards, one should really try all of the * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG * is already set up correctly. */ static int hifn_dramsize(struct hifn_softc *sc) { u_int32_t cnfg; if (sc->sc_flags & HIFN_IS_7956) { /* * 7955/7956 have a fixed internal ram of only 32K. */ sc->sc_ramsize = 32768; } else { cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & HIFN_PUCNFG_DRAMMASK; sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); } return (0); } static void hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) { struct hifn_dma *dma = sc->sc_dma; if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { sc->sc_cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *cmdp = sc->sc_cmdi++; sc->sc_cmdk = sc->sc_cmdi; if (sc->sc_srci == HIFN_D_SRC_RSIZE) { sc->sc_srci = 0; dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *srcp = sc->sc_srci++; sc->sc_srck = sc->sc_srci; if (sc->sc_dsti == HIFN_D_DST_RSIZE) { sc->sc_dsti = 0; dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *dstp = sc->sc_dsti++; sc->sc_dstk = sc->sc_dsti; if (sc->sc_resi == HIFN_D_RES_RSIZE) { sc->sc_resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } *resp = sc->sc_resi++; sc->sc_resk = sc->sc_resi; } static int hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t wc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, resi, srci, dsti; wc.masks = htole16(3 << 13); wc.session_num = htole16(addr >> 14); wc.total_source_count = htole16(8); wc.total_dest_count = htole16(addr & 0x3fff); hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, 
HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); /* build write command */ bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; bcopy(data, &dma->test_src, sizeof(dma->test_src)); dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->cmdr[cmdi].l = htole32(16 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(4 | masks); dma->resr[resi].l = htole32(4 | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "writeramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; return (-1); } else r = 0; WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } static int hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) { struct hifn_dma *dma = sc->sc_dma; hifn_base_command_t rc; const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; int r, cmdi, srci, dsti, resi; rc.masks = htole16(2 << 13); rc.session_num = htole16(addr >> 14); rc.total_source_count = htole16(addr & 0x3fff); rc.total_dest_count = htole16(8); hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_src)); dma->test_src = 0; dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, test_dst)); dma->test_dst = 0; dma->cmdr[cmdi].l = htole32(8 | masks); dma->srcr[srci].l = htole32(8 | masks); dma->dstr[dsti].l = htole32(8 | masks); dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (r = 10000; r >= 0; r--) { DELAY(10); bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) break; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } if (r == 0) { device_printf(sc->sc_dev, "readramaddr -- " "result[%d](addr %d) still valid\n", resi, addr); r = -1; } else { r = 0; bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); } WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); return (r); } /* * Initialize the descriptor rings. 
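hifn_writeramaddr() and hifn_readramaddr() above both use the same completion idiom: post the descriptors, then poll the result descriptor until the hardware clears HIFN_D_VALID, with a short delay per try, a bounded retry count, and bus_dmamap_sync() bracketing every look at the shared memory. A stripped-down, runnable sketch of that bounded poll; the descriptor word, delay, and "device" are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define D_VALID		0x80000000u
#define POLL_TRIES	10000

/* Stand-in for DELAY(); in the driver this is a microsecond busy-wait. */
static void
short_delay(void)
{
}

/* Stand-in for the device: pretend it completes after a few polls. */
static uint32_t
read_descr_word(int *countdown)
{
	return (--(*countdown) > 0 ? D_VALID : 0);
}

/* Returns 0 once the hardware clears the VALID bit, -1 on timeout. */
static int
wait_descr_done(void)
{
	int countdown = 5;
	int r;

	for (r = POLL_TRIES; r >= 0; r--) {
		short_delay();
		/* the driver brackets this read with bus_dmamap_sync() */
		if ((read_descr_word(&countdown) & D_VALID) == 0)
			return (0);
	}
	return (-1);
}

int
main(void)
{
	printf("poll result: %d\n", wait_descr_done());
	return (0);
}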
*/ static void hifn_init_dma(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; int i; hifn_set_retry(sc); /* initialize static pointer values */ for (i = 0; i < HIFN_D_CMD_RSIZE; i++) dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, command_bufs[i][0])); for (i = 0; i < HIFN_D_RES_RSIZE; i++) dma->resr[i].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, result_bufs[i][0])); dma->cmdr[HIFN_D_CMD_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); dma->srcr[HIFN_D_SRC_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); dma->dstr[HIFN_D_DST_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); dma->resr[HIFN_D_RES_RSIZE].p = htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0; sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0; sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0; } /* * Writes out the raw command buffer space. Returns the * command buffer size. */ static u_int hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) { struct cryptop *crp; u_int8_t *buf_pos; hifn_base_command_t *base_cmd; hifn_mac_command_t *mac_cmd; hifn_crypt_command_t *cry_cmd; int using_mac, using_crypt, len, ivlen; u_int32_t dlen, slen; crp = cmd->crp; buf_pos = buf; using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; base_cmd = (hifn_base_command_t *)buf_pos; base_cmd->masks = htole16(cmd->base_masks); slen = cmd->src_mapsize; if (cmd->sloplen) dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t); else dlen = cmd->dst_mapsize; base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); dlen >>= 16; slen >>= 16; base_cmd->session_num = htole16( ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); buf_pos += sizeof(hifn_base_command_t); if (using_mac) { mac_cmd = (hifn_mac_command_t *)buf_pos; dlen = crp->crp_aad_length + crp->crp_payload_length; mac_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; mac_cmd->masks = htole16(cmd->mac_masks | ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); if (crp->crp_aad_length != 0) mac_cmd->header_skip = htole16(crp->crp_aad_start); else mac_cmd->header_skip = htole16(crp->crp_payload_start); mac_cmd->reserved = 0; buf_pos += sizeof(hifn_mac_command_t); } if (using_crypt) { cry_cmd = (hifn_crypt_command_t *)buf_pos; dlen = crp->crp_payload_length; cry_cmd->source_count = htole16(dlen & 0xffff); dlen >>= 16; cry_cmd->masks = htole16(cmd->cry_masks | ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); cry_cmd->header_skip = htole16(crp->crp_payload_length); cry_cmd->reserved = 0; buf_pos += sizeof(hifn_crypt_command_t); } if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH); buf_pos += HIFN_MAC_KEY_LENGTH; } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { case HIFN_CRYPT_CMD_ALG_3DES: bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH); buf_pos += HIFN_3DES_KEY_LENGTH; break; case HIFN_CRYPT_CMD_ALG_DES: bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH); buf_pos += HIFN_DES_KEY_LENGTH; break; case HIFN_CRYPT_CMD_ALG_RC4: len = 256; do { int clen; clen = MIN(cmd->cklen, len); bcopy(cmd->ck, buf_pos, clen); len -= clen; buf_pos 
+= clen; } while (len > 0); bzero(buf_pos, 4); buf_pos += 4; break; case HIFN_CRYPT_CMD_ALG_AES: /* * AES keys are variable 128, 192 and * 256 bits (16, 24 and 32 bytes). */ bcopy(cmd->ck, buf_pos, cmd->cklen); buf_pos += cmd->cklen; break; } } if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) { switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { case HIFN_CRYPT_CMD_ALG_AES: ivlen = HIFN_AES_IV_LENGTH; break; default: ivlen = HIFN_IV_LENGTH; break; } bcopy(cmd->iv, buf_pos, ivlen); buf_pos += ivlen; } if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) { bzero(buf_pos, 8); buf_pos += 8; } return (buf_pos - buf); } static int hifn_dmamap_aligned(struct hifn_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) return (0); } return (1); } static __inline int hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx) { struct hifn_dma *dma = sc->sc_dma; if (++idx == HIFN_D_DST_RSIZE) { dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); idx = 0; } return (idx); } static int hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *dst = &cmd->dst; u_int32_t p, l; int idx, used = 0, i; idx = sc->sc_dsti; for (i = 0; i < dst->nsegs - 1; i++) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); } if (cmd->sloplen == 0) { p = dst->segs[i].ds_addr; l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | dst->segs[i].ds_len; } else { p = sc->sc_dma_physaddr + offsetof(struct hifn_dma, slop[cmd->slopidx]); l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | sizeof(u_int32_t); if ((dst->segs[i].ds_len - cmd->sloplen) != 0) { dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_MASKDONEIRQ | (dst->segs[i].ds_len - cmd->sloplen)); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); } } dma->dstr[idx].p = htole32(p); dma->dstr[idx].l = htole32(l); HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); used++; idx = hifn_dmamap_dstwrap(sc, idx); sc->sc_dsti = idx; sc->sc_dstu += used; return (idx); } static __inline int hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx) { struct hifn_dma *dma = sc->sc_dma; if (++idx == HIFN_D_SRC_RSIZE) { dma->srcr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); idx = 0; } return (idx); } static int hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) { struct hifn_dma *dma = sc->sc_dma; struct hifn_operand *src = &cmd->src; int idx, i; u_int32_t last = 0; idx = sc->sc_srci; for (i = 0; i < src->nsegs; i++) { if (i == src->nsegs - 1) last = HIFN_D_LAST; dma->srcr[idx].p = htole32(src->segs[i].ds_addr); dma->srcr[idx].l = htole32(src->segs[i].ds_len | HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last); HIFN_SRCR_SYNC(sc, idx, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); idx = hifn_dmamap_srcwrap(sc, idx); } sc->sc_srci = idx; sc->sc_srcu += src->nsegs; return (idx); } static bus_size_t hifn_crp_length(struct cryptop *crp) { switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: 
return (crp->crp_mbuf->m_pkthdr.len); case CRYPTO_BUF_UIO: return (crp->crp_uio->uio_resid); case CRYPTO_BUF_CONTIG: return (crp->crp_ilen); default: panic("bad crp buffer type"); } } static void hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, int error) { struct hifn_operand *op = arg; KASSERT(nsegs <= MAX_SCATTER, ("hifn_op_cb: too many DMA segments (%u > %u) " "returned when mapping operand", nsegs, MAX_SCATTER)); op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int hifn_crypto( struct hifn_softc *sc, struct hifn_command *cmd, struct cryptop *crp, int hint) { struct hifn_dma *dma = sc->sc_dma; u_int32_t cmdlen, csr; int cmdi, resi, err = 0; /* * need 1 cmd, and 1 res * * NB: check this first since it's easy. */ HIFN_LOCK(sc); if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE || (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "cmd/result exhaustion, cmdu %u resu %u\n", sc->sc_cmdu, sc->sc_resu); } #endif hifnstats.hst_nomem_cr++; HIFN_UNLOCK(sc); return (ERESTART); } if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) { hifnstats.hst_nomem_map++; HIFN_UNLOCK(sc); return (ENOMEM); } if (bus_dmamap_load_crp(sc->sc_dmat, cmd->src_map, crp, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { hifnstats.hst_nomem_load++; err = ENOMEM; goto err_srcmap1; } cmd->src_mapsize = hifn_crp_length(crp); if (hifn_dmamap_aligned(&cmd->src)) { cmd->sloplen = cmd->src_mapsize & 3; cmd->dst = cmd->src; } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *m0, *mlast; KASSERT(cmd->dst_m == NULL, ("hifn_crypto: dst_m initialized improperly")); hifnstats.hst_unaligned++; /* * Source is not aligned on a longword boundary. * Copy the data to insure alignment. If we fail * to allocate mbufs or clusters while doing this * we return ERESTART so the operation is requeued * at the crypto later, but only if there are * ops already posted to the hardware; otherwise we * have no guarantee that we'll be re-entered. */ totlen = cmd->src_mapsize; if (crp->crp_mbuf->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 && !m_dup_pkthdr(m0, crp->crp_mbuf, M_NOWAIT)) { m_free(m0); m0 = NULL; } } else { len = MLEN; MGET(m0, M_NOWAIT, MT_DATA); } if (m0 == NULL) { hifnstats.hst_nomem_mbuf++; err = sc->sc_cmdu ? ERESTART : ENOMEM; goto err_srcmap; } if (totlen >= MINCLSIZE) { if (!(MCLGET(m0, M_NOWAIT))) { hifnstats.hst_nomem_mcl++; err = sc->sc_cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } totlen -= len; m0->m_pkthdr.len = m0->m_len = len; mlast = m0; while (totlen > 0) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) { hifnstats.hst_nomem_mbuf++; err = sc->sc_cmdu ? ERESTART : ENOMEM; m_freem(m0); goto err_srcmap; } len = MLEN; if (totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) { hifnstats.hst_nomem_mcl++; err = sc->sc_cmdu ? 
ERESTART : ENOMEM; mlast->m_next = m; m_freem(m0); goto err_srcmap; } len = MCLBYTES; } m->m_len = len; m0->m_pkthdr.len += len; totlen -= len; mlast->m_next = m; mlast = m; } cmd->dst_m = m0; if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_srcmap; } if (bus_dmamap_load_mbuf_sg(sc->sc_dmat, cmd->dst_map, m0, cmd->dst_segs, &cmd->dst_nsegs, 0)) { hifnstats.hst_nomem_map++; err = ENOMEM; goto err_dstmap1; } cmd->dst_mapsize = m0->m_pkthdr.len; } else { err = EINVAL; goto err_srcmap; } #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu, cmd->src_nsegs, cmd->dst_nsegs); } #endif if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_PREREAD); } /* * need N src, and N dst */ if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE || (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) { #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "src/dst exhaustion, srcu %u+%u dstu %u+%u\n", sc->sc_srcu, cmd->src_nsegs, sc->sc_dstu, cmd->dst_nsegs); } #endif hifnstats.hst_nomem_sd++; err = ERESTART; goto err_dstmap; } if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { sc->sc_cmdi = 0; dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); } cmdi = sc->sc_cmdi++; cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); /* .p for command/result already set */ dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); HIFN_CMDR_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); sc->sc_cmdu++; /* * We don't worry about missing an interrupt (which a "command wait" * interrupt salvages us from), unless there is more than one command * in the queue. */ if (sc->sc_cmdu > 1) { sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } hifnstats.hst_ipackets++; hifnstats.hst_ibytes += cmd->src_mapsize; hifn_dmamap_load_src(sc, cmd); /* * Unlike other descriptors, we don't mask done interrupt from * result descriptor. 
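A detail worth noting in the allocation failures above: hifn_crypto() only returns ERESTART (requeue and retry) when at least one command is already posted, because only a completion interrupt can unblock the queue; with nothing in flight it fails hard with ENOMEM instead. The decision in miniature, with local enum values standing in for the kernel's errno constants:

#include <stdio.h>

/* Stand-ins for ERESTART/ENOMEM so this builds in user space. */
enum shortage { RETRY_LATER, FAIL_NOW };

static enum shortage
shortage_action(unsigned cmds_in_flight)
{
	/*
	 * Requeueing is only safe when a command is already posted:
	 * its completion interrupt is what will restart the queue.
	 */
	return (cmds_in_flight ? RETRY_LATER : FAIL_NOW);
}

int
main(void)
{
	printf("busy ring -> %d, idle ring -> %d\n",
	    shortage_action(3), shortage_action(0));
	return (0);
}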
*/ #ifdef HIFN_DEBUG if (hifn_debug) printf("load res\n"); #endif if (sc->sc_resi == HIFN_D_RES_RSIZE) { sc->sc_resi = 0; dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } resi = sc->sc_resi++; KASSERT(sc->sc_hifn_commands[resi] == NULL, ("hifn_crypto: command slot %u busy", resi)); sc->sc_hifn_commands[resi] = cmd; HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); sc->sc_curbatch++; if (sc->sc_curbatch > hifnstats.hst_maxbatch) hifnstats.hst_maxbatch = sc->sc_curbatch; hifnstats.hst_totbatch++; } else { dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST); sc->sc_curbatch = 0; } HIFN_RESR_SYNC(sc, resi, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->sc_resu++; if (cmd->sloplen) cmd->slopidx = resi; hifn_dmamap_load_dst(sc, cmd); csr = 0; if (sc->sc_c_busy == 0) { csr |= HIFN_DMACSR_C_CTRL_ENA; sc->sc_c_busy = 1; } if (sc->sc_s_busy == 0) { csr |= HIFN_DMACSR_S_CTRL_ENA; sc->sc_s_busy = 1; } if (sc->sc_r_busy == 0) { csr |= HIFN_DMACSR_R_CTRL_ENA; sc->sc_r_busy = 1; } if (sc->sc_d_busy == 0) { csr |= HIFN_DMACSR_D_CTRL_ENA; sc->sc_d_busy = 1; } if (csr) WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr); #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "command: stat %8x ier %8x\n", READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER)); } #endif sc->sc_active = 5; HIFN_UNLOCK(sc); KASSERT(err == 0, ("hifn_crypto: success with error %u", err)); return (err); /* success */ err_dstmap: if (cmd->src_map != cmd->dst_map) bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); err_dstmap1: if (cmd->src_map != cmd->dst_map) bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); err_srcmap: if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { if (cmd->dst_m != NULL) m_freem(cmd->dst_m); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); err_srcmap1: bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); HIFN_UNLOCK(sc); return (err); } static void hifn_tick(void* vsc) { struct hifn_softc *sc = vsc; HIFN_LOCK(sc); if (sc->sc_active == 0) { u_int32_t r = 0; if (sc->sc_cmdu == 0 && sc->sc_c_busy) { sc->sc_c_busy = 0; r |= HIFN_DMACSR_C_CTRL_DIS; } if (sc->sc_srcu == 0 && sc->sc_s_busy) { sc->sc_s_busy = 0; r |= HIFN_DMACSR_S_CTRL_DIS; } if (sc->sc_dstu == 0 && sc->sc_d_busy) { sc->sc_d_busy = 0; r |= HIFN_DMACSR_D_CTRL_DIS; } if (sc->sc_resu == 0 && sc->sc_r_busy) { sc->sc_r_busy = 0; r |= HIFN_DMACSR_R_CTRL_DIS; } if (r) WRITE_REG_1(sc, HIFN_1_DMA_CSR, r); } else sc->sc_active--; HIFN_UNLOCK(sc); callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); } static void hifn_intr(void *arg) { struct hifn_softc *sc = arg; struct hifn_dma *dma; u_int32_t dmacsr, restart; int i, u; dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); /* Nothing in the DMA unit interrupted */ if ((dmacsr & sc->sc_dmaier) == 0) return; HIFN_LOCK(sc); dma = sc->sc_dma; #ifdef HIFN_DEBUG if (hifn_debug) { device_printf(sc->sc_dev, "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n", dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier, sc->sc_cmdi, sc->sc_srci, sc->sc_dsti, sc->sc_resi, sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk, sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu); } #endif WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier); if ((sc->sc_flags & HIFN_HAS_PUBLIC) && (dmacsr & HIFN_DMACSR_PUBDONE)) WRITE_REG_1(sc, 
HIFN_1_PUB_STATUS, READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER); if (restart) device_printf(sc->sc_dev, "overrun %x\n", dmacsr); if (sc->sc_flags & HIFN_IS_7811) { if (dmacsr & HIFN_DMACSR_ILLR) device_printf(sc->sc_dev, "illegal read\n"); if (dmacsr & HIFN_DMACSR_ILLW) device_printf(sc->sc_dev, "illegal write\n"); } restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); if (restart) { device_printf(sc->sc_dev, "abort, resetting.\n"); hifnstats.hst_abort++; hifn_abort(sc); HIFN_UNLOCK(sc); return; } if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) { /* * If no slots to process and we receive a "waiting on * command" interrupt, we disable the "waiting on command" * (by clearing it). */ sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); } /* clear the rings */ i = sc->sc_resk; u = sc->sc_resu; while (u != 0) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->resr[i].l & htole32(HIFN_D_VALID)) { HIFN_RESR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_RES_RSIZE) { struct hifn_command *cmd; u_int8_t *macbuf = NULL; HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); cmd = sc->sc_hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_intr: null command slot %u", i)); sc->sc_hifn_commands[i] = NULL; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } hifn_callback(sc, cmd, macbuf); hifnstats.hst_opackets++; u--; } if (++i == (HIFN_D_RES_RSIZE + 1)) i = 0; } sc->sc_resk = i; sc->sc_resu = u; i = sc->sc_srck; u = sc->sc_srcu; while (u != 0) { if (i == HIFN_D_SRC_RSIZE) i = 0; HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { HIFN_SRCR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } sc->sc_srck = i; sc->sc_srcu = u; i = sc->sc_cmdk; u = sc->sc_cmdu; while (u != 0) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { HIFN_CMDR_SYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } if (i != HIFN_D_CMD_RSIZE) { u--; HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); } if (++i == (HIFN_D_CMD_RSIZE + 1)) i = 0; } sc->sc_cmdk = i; sc->sc_cmdu = u; HIFN_UNLOCK(sc); if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "wakeup crypto (%x) u %d/%d/%d/%d\n", sc->sc_needwakeup, sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu); #endif sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } } static bool hifn_auth_supported(struct hifn_softc *sc, const struct crypto_session_params *csp) { switch (sc->sc_ena) { case HIFN_PUSTAT_ENA_2: case HIFN_PUSTAT_ENA_1: break; default: return (false); } switch (csp->csp_auth_alg) { case CRYPTO_MD5: case CRYPTO_SHA1: break; case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: if (csp->csp_auth_klen > HIFN_MAC_KEY_LENGTH) return (false); break; default: return (false); } return (true); } static bool hifn_cipher_supported(struct hifn_softc *sc, const struct crypto_session_params *csp) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen > HIFN_MAX_IV_LENGTH) return (false); switch (sc->sc_ena) { case HIFN_PUSTAT_ENA_2: switch (csp->csp_cipher_alg) { case CRYPTO_3DES_CBC: case CRYPTO_ARC4: break; case CRYPTO_AES_CBC: if 
((sc->sc_flags & HIFN_HAS_AES) == 0) return (false); switch (csp->csp_cipher_klen) { case 128: case 192: case 256: break; default: return (false); } return (true); } /*FALLTHROUGH*/ case HIFN_PUSTAT_ENA_1: switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: return (true); } break; } return (false); } static int hifn_probesession(device_t dev, const struct crypto_session_params *csp) { struct hifn_softc *sc; sc = device_get_softc(dev); if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!hifn_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!hifn_cipher_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!hifn_auth_supported(sc, csp) || !hifn_cipher_supported(sc, csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } /* * Allocate a new 'session'. */ static int hifn_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct hifn_session *ses; ses = crypto_get_driver_session(cses); if (csp->csp_auth_alg != 0) { if (csp->csp_auth_mlen == 0) ses->hs_mlen = crypto_auth_hash(csp)->hashsize; else ses->hs_mlen = csp->csp_auth_mlen; } return (0); } /* * XXX freesession routine should run a zero'd mac/encrypt key into context * ram. to blow away any keys already stored there. */ static int hifn_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct hifn_softc *sc = device_get_softc(dev); struct hifn_command *cmd = NULL; const void *mackey; - int err, ivlen, keylen; + int err, keylen; struct hifn_session *ses; ses = crypto_get_driver_session(crp->crp_session); cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO); if (cmd == NULL) { hifnstats.hst_nomem++; err = ENOMEM; goto errout; } csp = crypto_get_params(crp->crp_session); /* * The driver only supports ETA requests where there is no * gap between the AAD and payload. */ if (csp->csp_mode == CSP_MODE_ETA && crp->crp_aad_length != 0 && crp->crp_aad_start + crp->crp_aad_length != crp->crp_payload_start) { err = EINVAL; goto errout; } switch (csp->csp_mode) { case CSP_MODE_CIPHER: case CSP_MODE_ETA: if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) cmd->base_masks |= HIFN_BASE_CMD_DECODE; cmd->base_masks |= HIFN_BASE_CMD_CRYPT; switch (csp->csp_cipher_alg) { case CRYPTO_ARC4: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; break; case CRYPTO_DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; case CRYPTO_3DES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; case CRYPTO_AES_CBC: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | HIFN_CRYPT_CMD_MODE_CBC | HIFN_CRYPT_CMD_NEW_IV; break; default: err = EINVAL; goto errout; } - if (csp->csp_cipher_alg != CRYPTO_ARC4) { - ivlen = csp->csp_ivlen; - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(cmd->iv, ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, ivlen, - cmd->iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(cmd->iv, crp->crp_iv, ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, ivlen, - cmd->iv); - } + if (csp->csp_cipher_alg != CRYPTO_ARC4) + crypto_read_iv(crp, cmd->iv); if (crp->crp_cipher_key != NULL) cmd->ck = crp->crp_cipher_key; else cmd->ck = csp->csp_cipher_key; cmd->cklen = csp->csp_cipher_klen; cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; /* * Need to specify the size for the AES key in the masks. 
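The hunk above is the point of this revision for hifn(4): the hand-rolled IV handling is replaced by one crypto_read_iv() call. Judging from the branch it removes, the helper has to take the IV from the request when it is passed separately and otherwise copy it out of the data buffer (the generate-on-the-fly case goes away with it). A stand-alone restatement of that logic using hypothetical stand-in types (fake_crp, IV_SEPARATE) rather than the real opencrypto structures:

#include <string.h>
#include <stdio.h>

/* Illustrative stand-ins only; the real struct cryptop and
 * crypto_copydata() live in the opencrypto framework. */
#define IV_SEPARATE	0x1
struct fake_crp {
	int	flags;
	char	iv[16];		/* IV passed separately with the request */
	int	iv_start;	/* else: offset of the IV in the data buffer */
	char	buf[64];	/* the data buffer itself */
};

/*
 * Roughly what the crypto_read_iv() call used above has to do, judging
 * from the branch it replaces: take the IV from the request if it was
 * supplied separately, otherwise copy it out of the data buffer.
 */
static void
read_iv_sketch(const struct fake_crp *crp, void *iv, size_t ivlen)
{
	if (crp->flags & IV_SEPARATE)
		memcpy(iv, crp->iv, ivlen);
	else
		memcpy(iv, crp->buf + crp->iv_start, ivlen);
}

int
main(void)
{
	struct fake_crp crp = { .flags = IV_SEPARATE, .iv = "0123456789abcdef" };
	char iv[16];

	read_iv_sketch(&crp, iv, sizeof(iv));
	printf("%.16s\n", iv);
	return (0);
}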
*/ if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == HIFN_CRYPT_CMD_ALG_AES) { switch (cmd->cklen) { case 16: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; break; case 24: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; break; case 32: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; break; default: err = EINVAL; goto errout; } } break; } switch (csp->csp_mode) { case CSP_MODE_DIGEST: case CSP_MODE_ETA: cmd->base_masks |= HIFN_BASE_CMD_MAC; switch (csp->csp_auth_alg) { case CRYPTO_MD5: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_MD5_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; case CRYPTO_SHA1: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | HIFN_MAC_CMD_POS_IPSEC; break; case CRYPTO_SHA1_HMAC: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; break; } if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC || csp->csp_auth_alg == CRYPTO_MD5_HMAC) { cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; if (crp->crp_auth_key != NULL) mackey = crp->crp_auth_key; else mackey = csp->csp_auth_key; keylen = csp->csp_auth_klen; bcopy(mackey, cmd->mac, keylen); bzero(cmd->mac + keylen, HIFN_MAC_KEY_LENGTH - keylen); } } cmd->crp = crp; cmd->session = ses; cmd->softc = sc; err = hifn_crypto(sc, cmd, crp, hint); if (!err) { return 0; } else if (err == ERESTART) { /* * There weren't enough resources to dispatch the request * to the part. Notify the caller so they'll requeue this * request and resubmit it again soon. */ #ifdef HIFN_DEBUG if (hifn_debug) device_printf(sc->sc_dev, "requeue request\n"); #endif free(cmd, M_DEVBUF); sc->sc_needwakeup |= CRYPTO_SYMQ; return (err); } errout: if (cmd != NULL) free(cmd, M_DEVBUF); if (err == EINVAL) hifnstats.hst_invalid++; else hifnstats.hst_nomem++; crp->crp_etype = err; crypto_done(crp); return (err); } static void hifn_abort(struct hifn_softc *sc) { struct hifn_dma *dma = sc->sc_dma; struct hifn_command *cmd; struct cryptop *crp; int i, u; i = sc->sc_resk; u = sc->sc_resu; while (u != 0) { cmd = sc->sc_hifn_commands[i]; KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i)); sc->sc_hifn_commands[i] = NULL; crp = cmd->crp; if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { /* Salvage what we can. */ u_int8_t *macbuf; if (cmd->base_masks & HIFN_BASE_CMD_MAC) { macbuf = dma->result_bufs[i]; macbuf += 12; } else macbuf = NULL; hifnstats.hst_opackets++; hifn_callback(sc, cmd, macbuf); } else { if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (cmd->dst_m != NULL) { m_freem(cmd->dst_m); } /* non-shared buffers cannot be restarted */ if (cmd->src_map != cmd->dst_map) { /* * XXX should be EAGAIN, delayed until * after the reset. 
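For HMAC the command buffer above carries a fixed-width key block: the caller's key is copied in and the tail is zero-filled out to HIFN_MAC_KEY_LENGTH (hifn_auth_supported() already rejected anything longer). The copy-then-pad step in isolation; the 64-byte length here is a stand-in for the driver's constant:

#include <stdio.h>
#include <string.h>

#define MAC_KEY_LENGTH	64	/* stand-in for HIFN_MAC_KEY_LENGTH */

/* Copy a variable-length HMAC key into the fixed-size slot the
 * hardware expects and zero the unused tail. */
static void
load_mac_key(unsigned char *slot, const void *key, size_t keylen)
{
	memcpy(slot, key, keylen);
	memset(slot + keylen, 0, MAC_KEY_LENGTH - keylen);
}

int
main(void)
{
	unsigned char slot[MAC_KEY_LENGTH];

	load_mac_key(slot, "secret", 6);
	printf("%02x %02x\n", slot[5], slot[6]);	/* 74 00 */
	return (0);
}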
*/ crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } else crp->crp_etype = ENOMEM; bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); if (crp->crp_etype != EAGAIN) crypto_done(crp); } if (++i == HIFN_D_RES_RSIZE) i = 0; u--; } sc->sc_resk = i; sc->sc_resu = u; hifn_reset_board(sc, 1); hifn_init_dma(sc); hifn_init_pci_registers(sc); } static void hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf) { struct hifn_dma *dma = sc->sc_dma; struct cryptop *crp = cmd->crp; uint8_t macbuf2[SHA1_HASH_LEN]; struct mbuf *m; int totlen, i, u; if (cmd->src_map == cmd->dst_map) { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); } else { bus_dmamap_sync(sc->sc_dmat, cmd->src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, BUS_DMASYNC_POSTREAD); } if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { if (cmd->dst_m != NULL) { totlen = cmd->src_mapsize; for (m = cmd->dst_m; m != NULL; m = m->m_next) { if (totlen < m->m_len) { m->m_len = totlen; totlen = 0; } else totlen -= m->m_len; } cmd->dst_m->m_pkthdr.len = crp->crp_mbuf->m_pkthdr.len; m_freem(crp->crp_mbuf); crp->crp_mbuf = cmd->dst_m; } } if (cmd->sloplen != 0) { crypto_copyback(crp, cmd->src_mapsize - cmd->sloplen, cmd->sloplen, &dma->slop[cmd->slopidx]); } i = sc->sc_dstk; u = sc->sc_dstu; while (u != 0) { if (i == HIFN_D_DST_RSIZE) i = 0; bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); break; } i++, u--; } sc->sc_dstk = i; sc->sc_dstu = u; hifnstats.hst_obytes += cmd->dst_mapsize; if (macbuf != NULL) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, cmd->session->hs_mlen, macbuf2); if (timingsafe_bcmp(macbuf, macbuf2, cmd->session->hs_mlen) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, cmd->session->hs_mlen, macbuf); } if (cmd->src_map != cmd->dst_map) { bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); } bus_dmamap_unload(sc->sc_dmat, cmd->src_map); bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); free(cmd, M_DEVBUF); crypto_done(crp); } /* * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 * and Group 1 registers; avoid conditions that could create * burst writes by doing a read in between the writes. * * NB: The read we interpose is always to the same register; * we do this because reading from an arbitrary (e.g. last) * register may not always work. */ static void hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar0_lastreg == reg - 4) bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); sc->sc_bar0_lastreg = reg; } bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); } static void hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) { if (sc->sc_flags & HIFN_IS_7811) { if (sc->sc_bar1_lastreg == reg - 4) bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); sc->sc_bar1_lastreg = reg; } bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); } #ifdef HIFN_VULCANDEV /* * this code provides support for mapping the PK engine's register * into a userspace program. 
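The comment above hifn_write_reg_0()/hifn_write_reg_1() describes the 7811 erratum: back-to-back writes to adjacent registers can be merged into a burst and lock the part, so the driver remembers the last register written and interposes a harmless read when the next write lands one word above it. The bookkeeping reduced to a runnable sketch, with printf stand-ins for the bus_space accessors:

#include <stdint.h>
#include <stdio.h>

static uint32_t last_reg = 0xffffffffu;	/* "no previous write" */

/* Stand-ins for bus_space_read_4()/bus_space_write_4(). */
static uint32_t
reg_read(uint32_t reg)
{
	printf("read  %#x\n", (unsigned)reg);
	return (0);
}

static void
reg_write(uint32_t reg, uint32_t val)
{
	printf("write %#x = %#x\n", (unsigned)reg, (unsigned)val);
}

#define READBACK_REG	0x00	/* a register that is harmless to read */

/*
 * Avoid back-to-back writes to adjacent registers: if this write
 * immediately follows one to the previous word, interpose a read so
 * the two writes cannot be merged into a burst.
 */
static void
write_reg(uint32_t reg, uint32_t val)
{
	if (last_reg == reg - 4)
		(void)reg_read(READBACK_REG);
	last_reg = reg;
	reg_write(reg, val);
}

int
main(void)
{
	write_reg(0x10, 1);
	write_reg(0x14, 2);	/* adjacent: triggers the interposed read */
	return (0);
}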
* */ static int vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct hifn_softc *sc; vm_paddr_t pd; void *b; sc = dev->si_drv1; pd = rman_get_start(sc->sc_bar1res); b = rman_get_virtual(sc->sc_bar1res); #if 0 printf("vpk mmap: %p(%016llx) offset=%lld\n", b, (unsigned long long)pd, offset); hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0); #endif if (offset == 0) { *paddr = pd; return (0); } return (-1); } static struct cdevsw vulcanpk_cdevsw = { .d_version = D_VERSION, .d_mmap = vulcanpk_mmap, .d_name = "vulcanpk", }; #endif /* HIFN_VULCANDEV */ Index: head/sys/dev/safe/safe.c =================================================================== --- head/sys/dev/safe/safe.c (revision 360135) +++ head/sys/dev/safe/safe.c (revision 360136) @@ -1,2046 +1,2037 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Sam Leffler, Errno Consulting * Copyright (c) 2003 Global Technology Associates, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * SafeNet SafeXcel-1141 hardware crypto accelerator */ #include "opt_safe.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #ifdef SAFE_RNDTEST #include #endif #include #include #ifndef bswap32 #define bswap32 NTOHL #endif /* * Prototypes and count for the pci_device structure */ static int safe_probe(device_t); static int safe_attach(device_t); static int safe_detach(device_t); static int safe_suspend(device_t); static int safe_resume(device_t); static int safe_shutdown(device_t); static int safe_probesession(device_t, const struct crypto_session_params *); static int safe_newsession(device_t, crypto_session_t, const struct crypto_session_params *); static int safe_process(device_t, struct cryptop *, int); static device_method_t safe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, safe_probe), DEVMETHOD(device_attach, safe_attach), DEVMETHOD(device_detach, safe_detach), DEVMETHOD(device_suspend, safe_suspend), DEVMETHOD(device_resume, safe_resume), DEVMETHOD(device_shutdown, safe_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_probesession, safe_probesession), DEVMETHOD(cryptodev_newsession, safe_newsession), DEVMETHOD(cryptodev_process, safe_process), DEVMETHOD_END }; static driver_t safe_driver = { "safe", safe_methods, sizeof (struct safe_softc) }; static devclass_t safe_devclass; DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0); MODULE_DEPEND(safe, crypto, 1, 1, 1); #ifdef SAFE_RNDTEST MODULE_DEPEND(safe, rndtest, 1, 1, 1); #endif static void safe_intr(void *); static void safe_callback(struct safe_softc *, struct safe_ringentry *); static void safe_feed(struct safe_softc *, struct safe_ringentry *); static void safe_mcopy(struct mbuf *, struct mbuf *, u_int); #ifndef SAFE_NO_RNG static void safe_rng_init(struct safe_softc *); static void safe_rng(void *); #endif /* SAFE_NO_RNG */ static int safe_dma_malloc(struct safe_softc *, bus_size_t, struct safe_dma_alloc *, int); #define safe_dma_sync(_dma, _flags) \ bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *); static int safe_dmamap_aligned(const struct safe_operand *); static int safe_dmamap_uniform(const struct safe_operand *); static void safe_reset_board(struct safe_softc *); static void safe_init_board(struct safe_softc *); static void safe_init_pciregs(device_t dev); static void safe_cleanchip(struct safe_softc *); static void safe_totalreset(struct safe_softc *); static int safe_free_entry(struct safe_softc *, struct safe_ringentry *); static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "SafeNet driver parameters"); #ifdef SAFE_DEBUG static void safe_dump_dmastatus(struct safe_softc *, const char *); static void safe_dump_ringstate(struct safe_softc *, const char *); static void safe_dump_intrstate(struct safe_softc *, const char *); static void safe_dump_request(struct safe_softc *, const char *, struct safe_ringentry *); static struct safe_softc *safec; /* for use by hw.safe.dump */ static int safe_debug = 0; SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug, 0, "control debugging msgs"); #define DPRINTF(_x) if (safe_debug) printf _x #else #define DPRINTF(_x) #endif #define READ_REG(sc,r) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) #define 
WRITE_REG(sc,reg,val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) struct safe_stats safestats; SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats, safe_stats, "driver statistics"); #ifndef SAFE_NO_RNG static int safe_rnginterval = 1; /* poll once a second */ SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval, 0, "RNG polling interval (secs)"); static int safe_rngbufsize = 16; /* 64 bytes each poll */ SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize, 0, "RNG polling buffer size (32-bit words)"); static int safe_rngmaxalarm = 8; /* max alarms before reset */ SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm, 0, "RNG max alarms before reset"); #endif /* SAFE_NO_RNG */ static int safe_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET && pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL) return (BUS_PROBE_DEFAULT); return (ENXIO); } static const char* safe_partname(struct safe_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_SAFENET: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141"; } return "SafeNet unknown-part"; } return "Unknown-vendor unknown-part"; } #ifndef SAFE_NO_RNG static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { /* MarkM: FIX!! Check that this does not swamp the harvester! */ random_harvest_queue(buf, count, RANDOM_PURE_SAFE); } #endif /* SAFE_NO_RNG */ static int safe_attach(device_t dev) { struct safe_softc *sc = device_get_softc(dev); u_int32_t raddr; u_int32_t i; int rid; bzero(sc, sizeof (*sc)); sc->sc_dev = dev; /* XXX handle power management */ pci_enable_busmaster(dev); /* * Setup memory-mapping of PCI registers. */ rid = BS_BAR; sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_sr == NULL) { device_printf(dev, "cannot map register space\n"); goto bad; } sc->sc_st = rman_get_bustag(sc->sc_sr); sc->sc_sh = rman_get_bushandle(sc->sc_sr); /* * Arrange interrupt line. */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto bad1; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is mapped appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, safe_intr, sc, &sc->sc_ih)) { device_printf(dev, "could not establish interrupt\n"); goto bad2; } sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto bad3; } sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) & (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN); /* * Setup DMA descriptor area. 
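Around this point safe.c wires itself into both newbus and the crypto framework: a device method table that includes the three cryptodev entry points, DRIVER_MODULE()/MODULE_DEPEND() registration, and a crypto_get_driverid() call from attach. A condensed outline of that glue, kernel-only and not buildable on its own; every "mydrv" name is a placeholder for handlers assumed to exist elsewhere:

/* Device methods: standard newbus hooks plus the cryptodev interface. */
static device_method_t mydrv_methods[] = {
	DEVMETHOD(device_probe,		mydrv_probe),
	DEVMETHOD(device_attach,	mydrv_attach),
	DEVMETHOD(device_detach,	mydrv_detach),

	DEVMETHOD(cryptodev_probesession, mydrv_probesession),
	DEVMETHOD(cryptodev_newsession,	mydrv_newsession),
	DEVMETHOD(cryptodev_process,	mydrv_process),

	DEVMETHOD_END
};

static driver_t mydrv_driver = {
	"mydrv", mydrv_methods, sizeof(struct mydrv_softc)
};
static devclass_t mydrv_devclass;
DRIVER_MODULE(mydrv, pci, mydrv_driver, mydrv_devclass, 0, 0);
MODULE_DEPEND(mydrv, crypto, 1, 1, 1);

/* in mydrv_attach(), after resources are mapped: */
/* sc->sc_cid = crypto_get_driverid(dev, sizeof(struct mydrv_session), */
/*     CRYPTOCAP_F_HARDWARE); */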
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ SAFE_DMA_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_SSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_srcdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ SAFE_MAX_DSIZE, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SAFE_MAX_DMA, /* maxsize */ SAFE_MAX_PART, /* nsegments */ SAFE_MAX_DSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &sc->sc_dstdmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad4; } /* * Allocate packet engine descriptors. */ if (safe_dma_malloc(sc, SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry), &sc->sc_ringalloc, 0)) { device_printf(dev, "cannot allocate PE descriptor ring\n"); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } /* * Hookup the static portion of all our data structures. */ sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr; sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE; sc->sc_front = sc->sc_ring; sc->sc_back = sc->sc_ring; raddr = sc->sc_ringalloc.dma_paddr; bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry)); for (i = 0; i < SAFE_MAX_NQUEUE; i++) { struct safe_ringentry *re = &sc->sc_ring[i]; re->re_desc.d_sa = raddr + offsetof(struct safe_ringentry, re_sa); re->re_sa.sa_staterec = raddr + offsetof(struct safe_ringentry, re_sastate); raddr += sizeof (struct safe_ringentry); } mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev), "packet engine ring", MTX_DEF); /* * Allocate scatter and gather particle descriptors. 
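safe_attach() above hands the engine physical addresses that point back into the same contiguous DMA allocation: for each ring entry it adds offsetof() of the embedded member to the entry's base bus address, then advances by sizeof the entry. The pattern reduced to plain, runnable C; the structure and the starting "bus address" are invented for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct safe_ringentry: a descriptor that must refer
 * (by bus address) to other members of the same entry. */
struct entry {
	uint32_t desc_sa;	/* filled with the bus address of 'sa' */
	uint32_t sa_staterec;	/* filled with the bus address of 'state' */
	uint32_t sa[4];
	uint32_t state[4];
};

#define NENTRIES 4

int
main(void)
{
	struct entry ring[NENTRIES];
	uint64_t raddr = 0x10000000;	/* pretend base bus address */
	int i;

	for (i = 0; i < NENTRIES; i++) {
		ring[i].desc_sa =
		    (uint32_t)(raddr + offsetof(struct entry, sa));
		ring[i].sa_staterec =
		    (uint32_t)(raddr + offsetof(struct entry, state));
		raddr += sizeof(struct entry);
	}
	printf("entry 1 sa at %#x\n", (unsigned)ring[1].desc_sa);
	return (0);
}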
*/ if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc), &sc->sc_spalloc, 0)) { device_printf(dev, "cannot allocate source particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_srcdmat); goto bad4; } sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr; sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART; sc->sc_spfree = sc->sc_spring; bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc)); if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc), &sc->sc_dpalloc, 0)) { device_printf(dev, "cannot allocate destination particle " "descriptor ring\n"); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_spalloc); safe_dma_free(sc, &sc->sc_ringalloc); bus_dma_tag_destroy(sc->sc_dstdmat); goto bad4; } sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr; sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART; sc->sc_dpfree = sc->sc_dpring; bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc)); device_printf(sc->sc_dev, "%s", safe_partname(sc)); sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO); if (sc->sc_devinfo & SAFE_DEVINFO_RNG) { sc->sc_flags |= SAFE_FLAGS_RNG; printf(" rng"); } if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) { #if 0 printf(" key"); sc->sc_flags |= SAFE_FLAGS_KEY; crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0); crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0); #endif } if (sc->sc_devinfo & SAFE_DEVINFO_DES) { printf(" des/3des"); } if (sc->sc_devinfo & SAFE_DEVINFO_AES) { printf(" aes"); } if (sc->sc_devinfo & SAFE_DEVINFO_MD5) { printf(" md5"); } if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) { printf(" sha1"); } /* XXX other supported algorithms */ printf("\n"); safe_reset_board(sc); /* reset h/w */ safe_init_pciregs(dev); /* init pci settings */ safe_init_board(sc); /* init h/w */ #ifndef SAFE_NO_RNG if (sc->sc_flags & SAFE_FLAGS_RNG) { #ifdef SAFE_RNDTEST sc->sc_rndtest = rndtest_attach(dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif safe_rng_init(sc); callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc); } #endif /* SAFE_NO_RNG */ #ifdef SAFE_DEBUG safec = sc; /* for use by hw.safe.dump */ #endif return (0); bad4: crypto_unregister_all(sc->sc_cid); bad3: bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bad2: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bad1: bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); bad: return (ENXIO); } /* * Detach a device that successfully probed. */ static int safe_detach(device_t dev) { struct safe_softc *sc = device_get_softc(dev); /* XXX wait/abort active ops */ WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */ callout_stop(&sc->sc_rngto); crypto_unregister_all(sc->sc_cid); #ifdef SAFE_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif safe_cleanchip(sc); safe_dma_free(sc, &sc->sc_dpalloc); safe_dma_free(sc, &sc->sc_spalloc); mtx_destroy(&sc->sc_ringmtx); safe_dma_free(sc, &sc->sc_ringalloc); bus_generic_detach(dev); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dma_tag_destroy(sc->sc_srcdmat); bus_dma_tag_destroy(sc->sc_dstdmat); bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); return (0); } /* * Stop all chip i/o so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
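The RNG support above uses the usual self-rearming callout: attach arms it once, and the handler does its work and reschedules itself every interval (hifn_tick() earlier in this patch does the same at 1 Hz). The shape of that pattern as a kernel-only fragment, not buildable stand-alone; my_softc, my_poll() and the one-second period are illustrative:

/* The handler re-arms itself, so the poll keeps running until
 * callout_stop()/callout_drain() is called at detach time. */
static void
my_poll(void *arg)
{
	struct my_softc *sc = arg;

	/* ... drain the RNG FIFO / age the DMA engines ... */

	callout_reset(&sc->sc_poll_callout, hz, my_poll, sc);
}

/* at attach time: */
/* callout_init(&sc->sc_poll_callout, 1); */
/* callout_reset(&sc->sc_poll_callout, hz, my_poll, sc); */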
*/ static int safe_shutdown(device_t dev) { #ifdef notyet safe_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. */ static int safe_suspend(device_t dev) { struct safe_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX stop the device and save PCI settings */ #endif sc->sc_suspended = 1; return (0); } static int safe_resume(device_t dev) { struct safe_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX retore PCI settings and start the device */ #endif sc->sc_suspended = 0; return (0); } /* * SafeXcel Interrupt routine */ static void safe_intr(void *arg) { struct safe_softc *sc = arg; volatile u_int32_t stat; stat = READ_REG(sc, SAFE_HM_STAT); if (stat == 0) /* shared irq, not for us */ return; WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */ if ((stat & SAFE_INT_PE_DDONE)) { /* * Descriptor(s) done; scan the ring and * process completed operations. */ mtx_lock(&sc->sc_ringmtx); while (sc->sc_back != sc->sc_front) { struct safe_ringentry *re = sc->sc_back; #ifdef SAFE_DEBUG if (safe_debug) { safe_dump_ringstate(sc, __func__); safe_dump_request(sc, __func__, re); } #endif /* * safe_process marks ring entries that were allocated * but not used with a csr of zero. This insures the * ring front pointer never needs to be set backwards * in the event that an entry is allocated but not used * because of a setup error. */ if (re->re_desc.d_csr != 0) { if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) break; if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) break; sc->sc_nqchip--; safe_callback(sc, re); } if (++(sc->sc_back) == sc->sc_ringtop) sc->sc_back = sc->sc_ring; } mtx_unlock(&sc->sc_ringmtx); } /* * Check to see if we got any DMA Error */ if (stat & SAFE_INT_PE_ERROR) { DPRINTF(("dmaerr dmastat %08x\n", READ_REG(sc, SAFE_PE_DMASTAT))); safestats.st_dmaerr++; safe_totalreset(sc); #if 0 safe_feed(sc); #endif } if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); DPRINTF(("%s: wakeup crypto %x\n", __func__, sc->sc_needwakeup)); sc->sc_needwakeup &= ~wakeup; crypto_unblock(sc->sc_cid, wakeup); } } /* * safe_feed() - post a request to chip */ static void safe_feed(struct safe_softc *sc, struct safe_ringentry *re) { bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE); if (re->re_dst_map != NULL) bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, BUS_DMASYNC_PREREAD); /* XXX have no smaller granularity */ safe_dma_sync(&sc->sc_ringalloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE); safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE); #ifdef SAFE_DEBUG if (safe_debug) { safe_dump_ringstate(sc, __func__); safe_dump_request(sc, __func__, re); } #endif sc->sc_nqchip++; if (sc->sc_nqchip > safestats.st_maxqchip) safestats.st_maxqchip = sc->sc_nqchip; /* poke h/w to check descriptor ring, any value can be written */ WRITE_REG(sc, SAFE_HI_RD_DESCR, 0); } #define N(a) (sizeof(a) / sizeof (a[0])) static void safe_setup_enckey(struct safe_session *ses, const void *key) { int i; bcopy(key, ses->ses_key, ses->ses_klen); /* PE is little-endian, insure proper byte order */ for (i = 0; i < N(ses->ses_key); i++) ses->ses_key[i] = htole32(ses->ses_key[i]); } static void safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key, int klen) { MD5_CTX md5ctx; SHA1_CTX sha1ctx; int i; if (algo == CRYPTO_MD5_HMAC) { hmac_init_ipad(&auth_hash_hmac_md5, key, klen, &md5ctx); bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state)); 
hmac_init_opad(&auth_hash_hmac_md5, key, klen, &md5ctx); bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state)); explicit_bzero(&md5ctx, sizeof(md5ctx)); } else { hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx); bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32)); hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx); bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32)); explicit_bzero(&sha1ctx, sizeof(sha1ctx)); } /* PE is little-endian, insure proper byte order */ for (i = 0; i < N(ses->ses_hminner); i++) { ses->ses_hminner[i] = htole32(ses->ses_hminner[i]); ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]); } } #undef N static bool safe_auth_supported(struct safe_softc *sc, const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: if ((sc->sc_devinfo & SAFE_DEVINFO_MD5) == 0) return (false); break; case CRYPTO_SHA1_HMAC: if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0) return (false); break; default: return (false); } return (true); } static bool safe_cipher_supported(struct safe_softc *sc, const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: if ((sc->sc_devinfo & SAFE_DEVINFO_DES) == 0) return (false); if (csp->csp_ivlen != 8) return (false); if (csp->csp_cipher_alg == CRYPTO_DES_CBC) { if (csp->csp_cipher_klen != 8) return (false); } else { if (csp->csp_cipher_klen != 24) return (false); } break; case CRYPTO_AES_CBC: if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0) return (false); if (csp->csp_ivlen != 16) return (false); if (csp->csp_cipher_klen != 16 && csp->csp_cipher_klen != 24 && csp->csp_cipher_klen != 32) return (false); break; } return (true); } static int safe_probesession(device_t dev, const struct crypto_session_params *csp) { struct safe_softc *sc = device_get_softc(dev); if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!safe_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!safe_cipher_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!safe_auth_supported(sc, csp) || !safe_cipher_supported(sc, csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } /* * Allocate a new 'session'. 
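safe_setup_mackey() above precomputes the HMAC inner and outer hash states once per key (via hmac_init_ipad()/hmac_init_opad()) so the engine only has to continue the hash for each request. For reference, a generic runnable sketch of the ipad/opad blocks those states are derived from; hashing down over-long keys is omitted, and the link to the driver's stored states is my reading of the code above:

#include <stdio.h>
#include <string.h>

#define HMAC_BLOCK_LEN	64	/* MD5/SHA-1 block size */

/*
 * Build the ipad/opad blocks whose one-block hashes become the
 * precomputed inner/outer chaining values stored in the SA.
 */
static void
hmac_pads(const unsigned char *key, size_t klen,
    unsigned char ipad[HMAC_BLOCK_LEN], unsigned char opad[HMAC_BLOCK_LEN])
{
	size_t i;

	memset(ipad, 0, HMAC_BLOCK_LEN);
	memcpy(ipad, key, klen);
	memcpy(opad, ipad, HMAC_BLOCK_LEN);
	for (i = 0; i < HMAC_BLOCK_LEN; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
	/*
	 * The driver then runs one compression-function block over each
	 * pad and stores the resulting chaining values, byte-swapped to
	 * little-endian, in ses_hminner/ses_hmouter for the hardware.
	 */
}

int
main(void)
{
	unsigned char ipad[HMAC_BLOCK_LEN], opad[HMAC_BLOCK_LEN];

	hmac_pads((const unsigned char *)"key", 3, ipad, opad);
	printf("%02x %02x\n", ipad[0], opad[0]);	/* 5d 37 */
	return (0);
}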
*/ static int safe_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct safe_session *ses; ses = crypto_get_driver_session(cses); if (csp->csp_cipher_alg != 0) { ses->ses_klen = csp->csp_cipher_klen; if (csp->csp_cipher_key != NULL) safe_setup_enckey(ses, csp->csp_cipher_key); } if (csp->csp_auth_alg != 0) { ses->ses_mlen = csp->csp_auth_mlen; if (ses->ses_mlen == 0) { if (csp->csp_auth_alg == CRYPTO_MD5_HMAC) ses->ses_mlen = MD5_HASH_LEN; else ses->ses_mlen = SHA1_HASH_LEN; } if (csp->csp_auth_key != NULL) { safe_setup_mackey(ses, csp->csp_auth_alg, csp->csp_auth_key, csp->csp_auth_klen); } } return (0); } static bus_size_t safe_crp_length(struct cryptop *crp) { switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: return (crp->crp_mbuf->m_pkthdr.len); case CRYPTO_BUF_UIO: return (crp->crp_uio->uio_resid); case CRYPTO_BUF_CONTIG: return (crp->crp_ilen); default: panic("bad crp buffer type"); } } static void safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error) { struct safe_operand *op = arg; DPRINTF(("%s: nsegs %d error %d\n", __func__, nsegs, error)); if (error != 0) return; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int safe_process(device_t dev, struct cryptop *crp, int hint) { struct safe_softc *sc = device_get_softc(dev); const struct crypto_session_params *csp; int err = 0, i, nicealign, uniform; int bypass, oplen; int16_t coffset; struct safe_session *ses; struct safe_ringentry *re; struct safe_sarec *sa; struct safe_pdesc *pd; u_int32_t cmd0, cmd1, staterec; mtx_lock(&sc->sc_ringmtx); if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) { safestats.st_ringfull++; sc->sc_needwakeup |= CRYPTO_SYMQ; mtx_unlock(&sc->sc_ringmtx); return (ERESTART); } re = sc->sc_front; staterec = re->re_sa.sa_staterec; /* save */ /* NB: zero everything but the PE descriptor */ bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc)); re->re_sa.sa_staterec = staterec; /* restore */ re->re_crp = crp; sa = &re->re_sa; ses = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */ cmd1 = 0; switch (csp->csp_mode) { case CSP_MODE_DIGEST: cmd0 |= SAFE_SA_CMD0_OP_HASH; break; case CSP_MODE_CIPHER: cmd0 |= SAFE_SA_CMD0_OP_CRYPT; break; case CSP_MODE_ETA: cmd0 |= SAFE_SA_CMD0_OP_BOTH; break; } if (csp->csp_cipher_alg != 0) { if (crp->crp_cipher_key != NULL) safe_setup_enckey(ses, crp->crp_cipher_key); switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: cmd0 |= SAFE_SA_CMD0_DES; cmd1 |= SAFE_SA_CMD1_CBC; break; case CRYPTO_3DES_CBC: cmd0 |= SAFE_SA_CMD0_3DES; cmd1 |= SAFE_SA_CMD1_CBC; break; case CRYPTO_AES_CBC: cmd0 |= SAFE_SA_CMD0_AES; cmd1 |= SAFE_SA_CMD1_CBC; if (ses->ses_klen * 8 == 128) cmd1 |= SAFE_SA_CMD1_AES128; else if (ses->ses_klen * 8 == 192) cmd1 |= SAFE_SA_CMD1_AES192; else cmd1 |= SAFE_SA_CMD1_AES256; } /* * Setup encrypt/decrypt state. When using basic ops * we can't use an inline IV because hash/crypt offset * must be from the end of the IV to the start of the * crypt data and this leaves out the preceding header * from the hash calculation. Instead we place the IV * in the state record and set the hash/crypt offset to * copy both the header+IV. 
*/ - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(re->re_sastate.sa_saved_iv, csp->csp_ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, - re->re_sastate.sa_saved_iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(re->re_sastate.sa_saved_iv, crp->crp_iv, - csp->csp_ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, - re->re_sastate.sa_saved_iv); + crypto_read_iv(crp, re->re_sastate.sa_saved_iv); cmd0 |= SAFE_SA_CMD0_IVLD_STATE; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { cmd0 |= SAFE_SA_CMD0_OUTBOUND; /* * XXX: I suspect we don't need this since we * don't save the returned IV. */ cmd0 |= SAFE_SA_CMD0_SAVEIV; } else { cmd0 |= SAFE_SA_CMD0_INBOUND; } /* * For basic encryption use the zero pad algorithm. * This pads results to an 8-byte boundary and * suppresses padding verification for inbound (i.e. * decrypt) operations. * * NB: Not sure if the 8-byte pad boundary is a problem. */ cmd0 |= SAFE_SA_CMD0_PAD_ZERO; /* XXX assert key bufs have the same size */ bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key)); } if (csp->csp_auth_alg != 0) { if (crp->crp_auth_key != NULL) { safe_setup_mackey(ses, csp->csp_auth_alg, crp->crp_auth_key, csp->csp_auth_klen); } switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: cmd0 |= SAFE_SA_CMD0_MD5; cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ break; case CRYPTO_SHA1_HMAC: cmd0 |= SAFE_SA_CMD0_SHA1; cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ break; } /* * Digest data is loaded from the SA and the hash * result is saved to the state block where we * retrieve it for return to the caller. */ /* XXX assert digest bufs have the same size */ bcopy(ses->ses_hminner, sa->sa_indigest, sizeof(sa->sa_indigest)); bcopy(ses->ses_hmouter, sa->sa_outdigest, sizeof(sa->sa_outdigest)); cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH; re->re_flags |= SAFE_QFLAGS_COPYOUTICV; } if (csp->csp_mode == CSP_MODE_ETA) { /* * The driver only supports ETA requests where there * is no gap between the AAD and payload. */ if (crp->crp_aad_length != 0 && crp->crp_aad_start + crp->crp_aad_length != crp->crp_payload_start) { safestats.st_lenmismatch++; err = EINVAL; goto errout; } if (crp->crp_aad_length != 0) bypass = crp->crp_aad_start; else bypass = crp->crp_payload_start; coffset = crp->crp_aad_length; oplen = crp->crp_payload_start + crp->crp_payload_length; #ifdef SAFE_DEBUG if (safe_debug) { printf("AAD: skip %d, len %d, digest %d\n", crp->crp_aad_start, crp->crp_aad_length, crp->crp_digest_start); printf("payload: skip %d, len %d, IV %d\n", crp->crp_payload_start, crp->crp_payload_length, crp->crp_iv_start); printf("bypass %d coffset %d oplen %d\n", bypass, coffset, oplen); } #endif if (coffset & 3) { /* offset must be 32-bit aligned */ DPRINTF(("%s: coffset %u misaligned\n", __func__, coffset)); safestats.st_coffmisaligned++; err = EINVAL; goto errout; } coffset >>= 2; if (coffset > 255) { /* offset must be <256 dwords */ DPRINTF(("%s: coffset %u too big\n", __func__, coffset)); safestats.st_cofftoobig++; err = EINVAL; goto errout; } /* * Tell the hardware to copy the header to the output. * The header is defined as the data from the end of * the bypass to the start of data to be encrypted. * Typically this is the inline IV. Note that you need * to do this even if src+dst are the same; it appears * that w/o this bit the crypted data is written * immediately after the bypass data. */ cmd1 |= SAFE_SA_CMD1_HDRCOPY; /* * Disable IP header mutable bit handling. This is * needed to get correct HMAC calculations. 
*/ cmd1 |= SAFE_SA_CMD1_MUTABLE; } else { bypass = crp->crp_payload_start; oplen = bypass + crp->crp_payload_length; coffset = 0; } /* XXX verify multiple of 4 when using s/g */ if (bypass > 96) { /* bypass offset must be <= 96 bytes */ DPRINTF(("%s: bypass %u too big\n", __func__, bypass)); safestats.st_bypasstoobig++; err = EINVAL; goto errout; } if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb, &re->re_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); re->re_src_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } re->re_src_mapsize = safe_crp_length(crp); nicealign = safe_dmamap_aligned(&re->re_src); uniform = safe_dmamap_uniform(&re->re_src); DPRINTF(("src nicealign %u uniform %u nsegs %u\n", nicealign, uniform, re->re_src.nsegs)); if (re->re_src.nsegs > 1) { re->re_desc.d_src = sc->sc_spalloc.dma_paddr + ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring); for (i = 0; i < re->re_src_nsegs; i++) { /* NB: no need to check if there's space */ pd = sc->sc_spfree; if (++(sc->sc_spfree) == sc->sc_springtop) sc->sc_spfree = sc->sc_spring; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus source particle descriptor; flags %x", pd->pd_flags)); pd->pd_addr = re->re_src_segs[i].ds_addr; pd->pd_size = re->re_src_segs[i].ds_len; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_IGATHER; } else { /* * No need for gather, reference the operand directly. */ re->re_desc.d_src = re->re_src_segs[0].ds_addr; } if (csp->csp_mode == CSP_MODE_DIGEST) { /* * Hash op; no destination needed. */ } else { if (nicealign && uniform == 1) { /* * Source layout is suitable for direct * sharing of the DMA map and segment list. */ re->re_dst = re->re_src; } else if (nicealign && uniform == 2) { /* * The source is properly aligned but requires a * different particle list to handle DMA of the * result. Create a new map and do the load to * create the segment list. The particle * descriptor setup code below will handle the * rest. */ if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map)) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map, crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *top, **mp; /* * DMA constraints require that we allocate a * new mbuf chain for the destination. We * allocate an entire new set of mbufs of * optimal/required size and then tell the * hardware to copy any bits that are not * created as a byproduct of the operation. */ if (!nicealign) safestats.st_unaligned++; if (!uniform) safestats.st_notuniform++; totlen = re->re_src_mapsize; if (crp->crp_mbuf->m_flags & M_PKTHDR) { len = MHLEN; MGETHDR(m, M_NOWAIT, MT_DATA); if (m && !m_dup_pkthdr(m, crp->crp_mbuf, M_NOWAIT)) { m_free(m); m = NULL; } } else { len = MLEN; MGET(m, M_NOWAIT, MT_DATA); } if (m == NULL) { safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } if (totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) { m_free(m); safestats.st_nomcl++; err = sc->sc_nqchip ? 
ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len; top = NULL; mp = ⊤ while (totlen > 0) { if (top) { MGET(m, M_NOWAIT, MT_DATA); if (m == NULL) { m_freem(top); safestats.st_nombuf++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MLEN; } if (top && totlen >= MINCLSIZE) { if (!(MCLGET(m, M_NOWAIT))) { *mp = m; m_freem(top); safestats.st_nomcl++; err = sc->sc_nqchip ? ERESTART : ENOMEM; goto errout; } len = MCLBYTES; } m->m_len = len = min(totlen, len); totlen -= len; *mp = m; mp = &m->m_next; } re->re_dst_m = top; if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, &re->re_dst_map) != 0) { safestats.st_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat, re->re_dst_map, top, re->re_dst_segs, &re->re_dst_nsegs, 0) != 0) { bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); re->re_dst_map = NULL; safestats.st_noload++; err = ENOMEM; goto errout; } re->re_dst_mapsize = re->re_src_mapsize; if (re->re_src.mapsize > oplen) { /* * There's data following what the * hardware will copy for us. If this * isn't just the ICV (that's going to * be written on completion), copy it * to the new mbufs */ if (!(csp->csp_mode == CSP_MODE_ETA && (re->re_src.mapsize-oplen) == ses->ses_mlen && crp->crp_digest_start == oplen)) safe_mcopy(crp->crp_mbuf, re->re_dst_m, oplen); else safestats.st_noicvcopy++; } } else { if (!nicealign) { safestats.st_iovmisaligned++; err = EINVAL; goto errout; } else { /* * There's no way to handle the DMA * requirements with this uio. We * could create a separate DMA area for * the result and then copy it back, * but for now we just bail and return * an error. Note that uio requests * > SAFE_MAX_DSIZE are handled because * the DMA map and segment list for the * destination wil result in a * destination particle list that does * the necessary scatter DMA. */ safestats.st_iovnotuniform++; err = EINVAL; goto errout; } } if (re->re_dst.nsegs > 1) { re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr + ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring); for (i = 0; i < re->re_dst_nsegs; i++) { pd = sc->sc_dpfree; KASSERT((pd->pd_flags&3) == 0 || (pd->pd_flags&3) == SAFE_PD_DONE, ("bogus dest particle descriptor; flags %x", pd->pd_flags)); if (++(sc->sc_dpfree) == sc->sc_dpringtop) sc->sc_dpfree = sc->sc_dpring; pd->pd_addr = re->re_dst_segs[i].ds_addr; pd->pd_flags = SAFE_PD_READY; } cmd0 |= SAFE_SA_CMD0_OSCATTER; } else { /* * No need for scatter, reference the operand directly. */ re->re_desc.d_dst = re->re_dst_segs[0].ds_addr; } } /* * All done with setup; fillin the SA command words * and the packet engine descriptor. The operation * is now ready for submission to the hardware. */ sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI; sa->sa_cmd1 = cmd1 | (coffset << SAFE_SA_CMD1_OFFSET_S) | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */ | SAFE_SA_CMD1_SRPCI ; /* * NB: the order of writes is important here. In case the * chip is scanning the ring because of an outstanding request * it might nab this one too. In that case we need to make * sure the setup is complete before we write the length * field of the descriptor as it signals the descriptor is * ready for processing. 
*/ re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI; if (csp->csp_auth_alg != 0) re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL; re->re_desc.d_len = oplen | SAFE_PE_LEN_READY | (bypass << SAFE_PE_LEN_BYPASS_S) ; safestats.st_ipackets++; safestats.st_ibytes += oplen; if (++(sc->sc_front) == sc->sc_ringtop) sc->sc_front = sc->sc_ring; /* XXX honor batching */ safe_feed(sc, re); mtx_unlock(&sc->sc_ringmtx); return (0); errout: if (re->re_dst_m != NULL) m_freem(re->re_dst_m); if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } if (re->re_src_map != NULL) { bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); } mtx_unlock(&sc->sc_ringmtx); if (err != ERESTART) { crp->crp_etype = err; crypto_done(crp); } else { sc->sc_needwakeup |= CRYPTO_SYMQ; } return (err); } static void safe_callback(struct safe_softc *sc, struct safe_ringentry *re) { const struct crypto_session_params *csp; struct cryptop *crp = (struct cryptop *)re->re_crp; struct safe_session *ses; uint8_t hash[HASH_MAX_LEN]; ses = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); safestats.st_opackets++; safestats.st_obytes += re->re_dst.mapsize; safe_dma_sync(&sc->sc_ringalloc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) { device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n", re->re_desc.d_csr, re->re_sa.sa_cmd0, re->re_sa.sa_cmd1); safestats.st_peoperr++; crp->crp_etype = EIO; /* something more meaningful? */ } /* XXX: Should crp_mbuf be updated to re->re_dst_m if it is non-NULL? */ if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); } bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) { if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) { /* * SHA-1 ICV's are byte-swapped; fix 'em up * before copying them to their destination. */ re->re_sastate.sa_saved_indigest[0] = bswap32(re->re_sastate.sa_saved_indigest[0]); re->re_sastate.sa_saved_indigest[1] = bswap32(re->re_sastate.sa_saved_indigest[1]); re->re_sastate.sa_saved_indigest[2] = bswap32(re->re_sastate.sa_saved_indigest[2]); } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, ses->ses_mlen, hash); if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest, hash, ses->ses_mlen) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, ses->ses_mlen, re->re_sastate.sa_saved_indigest); } crypto_done(crp); } /* * Copy all data past offset from srcm to dstm. */ static void safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset) { u_int j, dlen, slen; caddr_t dptr, sptr; /* * Advance src and dst to offset. */ j = offset; while (j >= srcm->m_len) { j -= srcm->m_len; srcm = srcm->m_next; if (srcm == NULL) return; } sptr = mtod(srcm, caddr_t) + j; slen = srcm->m_len - j; j = offset; while (j >= dstm->m_len) { j -= dstm->m_len; dstm = dstm->m_next; if (dstm == NULL) return; } dptr = mtod(dstm, caddr_t) + j; dlen = dstm->m_len - j; /* * Copy everything that remains. 
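 * Each pass copies min(slen, dlen) bytes, then advances whichever
 * side was exhausted to its next mbuf; the loop terminates when the
 * source or destination chain runs out.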
*/ for (;;) { j = min(slen, dlen); bcopy(sptr, dptr, j); if (slen == j) { srcm = srcm->m_next; if (srcm == NULL) return; sptr = srcm->m_data; slen = srcm->m_len; } else sptr += j, slen -= j; if (dlen == j) { dstm = dstm->m_next; if (dstm == NULL) return; dptr = dstm->m_data; dlen = dstm->m_len; } else dptr += j, dlen -= j; } } #ifndef SAFE_NO_RNG #define SAFE_RNG_MAXWAIT 1000 static void safe_rng_init(struct safe_softc *sc) { u_int32_t w, v; int i; WRITE_REG(sc, SAFE_RNG_CTRL, 0); /* use default value according to the manual */ WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */ WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); /* * There is a bug in rev 1.0 of the 1140 that when the RNG * is brought out of reset the ready status flag does not * work until the RNG has finished its internal initialization. * * So in order to determine the device is through its * initialization we must read the data register, using the * status reg in the read in case it is initialized. Then read * the data register until it changes from the first read. * Once it changes read the data register until it changes * again. At this time the RNG is considered initialized. * This could take between 750ms - 1000ms in time. */ i = 0; w = READ_REG(sc, SAFE_RNG_OUT); do { v = READ_REG(sc, SAFE_RNG_OUT); if (v != w) { w = v; break; } DELAY(10); } while (++i < SAFE_RNG_MAXWAIT); /* Wait Until data changes again */ i = 0; do { v = READ_REG(sc, SAFE_RNG_OUT); if (v != w) break; DELAY(10); } while (++i < SAFE_RNG_MAXWAIT); } static __inline void safe_rng_disable_short_cycle(struct safe_softc *sc) { WRITE_REG(sc, SAFE_RNG_CTRL, READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN); } static __inline void safe_rng_enable_short_cycle(struct safe_softc *sc) { WRITE_REG(sc, SAFE_RNG_CTRL, READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN); } static __inline u_int32_t safe_rng_read(struct safe_softc *sc) { int i; i = 0; while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT) ; return READ_REG(sc, SAFE_RNG_OUT); } static void safe_rng(void *arg) { struct safe_softc *sc = arg; u_int32_t buf[SAFE_RNG_MAXBUFSIZ]; /* NB: maybe move to softc */ u_int maxwords; int i; safestats.st_rng++; /* * Fetch the next block of data. */ maxwords = safe_rngbufsize; if (maxwords > SAFE_RNG_MAXBUFSIZ) maxwords = SAFE_RNG_MAXBUFSIZ; retry: for (i = 0; i < maxwords; i++) buf[i] = safe_rng_read(sc); /* * Check the comparator alarm count and reset the h/w if * it exceeds our threshold. This guards against the * hardware oscillators resonating with external signals. */ if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) { u_int32_t freq_inc, w; DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__, READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm)); safestats.st_rngalarm++; safe_rng_enable_short_cycle(sc); freq_inc = 18; for (i = 0; i < 64; i++) { w = READ_REG(sc, SAFE_RNG_CNFG); freq_inc = ((w + freq_inc) & 0x3fL); w = ((w & ~0x3fL) | freq_inc); WRITE_REG(sc, SAFE_RNG_CNFG, w); WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); (void) safe_rng_read(sc); DELAY(25); if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) { safe_rng_disable_short_cycle(sc); goto retry; } freq_inc = 1; } safe_rng_disable_short_cycle(sc); } else WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); (*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t)); callout_reset(&sc->sc_rngto, hz * (safe_rnginterval ? 
safe_rnginterval : 1), safe_rng, sc); } #endif /* SAFE_NO_RNG */ static void safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static int safe_dma_malloc( struct safe_softc *sc, bus_size_t size, struct safe_dma_alloc *dma, int mapflags ) { int r; r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ sizeof(u_int32_t), 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* locking */ &dma->dma_tag); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dma_tag_create failed; error %u\n", r); goto fail_0; } r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dmammem_alloc failed; size %ju, error %u\n", (uintmax_t)size, r); goto fail_1; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, safe_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(sc->sc_dev, "safe_dma_malloc: " "bus_dmamap_load failed; error %u\n", r); goto fail_2; } dma->dma_size = size; return (0); bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (r); } static void safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /* * Resets the board. Values in the regesters are left as is * from the reset (i.e. initial values are assigned elsewhere). */ static void safe_reset_board(struct safe_softc *sc) { u_int32_t v; /* * Reset the device. The manual says no delay * is needed between marking and clearing reset. */ v = READ_REG(sc, SAFE_PE_DMACFG) &~ (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET | SAFE_PE_DMACFG_SGRESET); WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET | SAFE_PE_DMACFG_SGRESET); WRITE_REG(sc, SAFE_PE_DMACFG, v); } /* * Initialize registers we need to touch only once. */ static void safe_init_board(struct safe_softc *sc) { u_int32_t v, dwords; v = READ_REG(sc, SAFE_PE_DMACFG); v &=~ SAFE_PE_DMACFG_PEMODE; v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */ | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */ | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */ | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */ | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */ | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */ ; WRITE_REG(sc, SAFE_PE_DMACFG, v); #if 0 /* XXX select byte swap based on host byte order */ WRITE_REG(sc, SAFE_ENDIAN, 0x1b); #endif if (sc->sc_chiprev == SAFE_REV(1,0)) { /* * Avoid large PCI DMA transfers. Rev 1.0 has a bug where * "target mode transfers" done while the chip is DMA'ing * >1020 bytes cause the hardware to lockup. To avoid this * we reduce the max PCI transfer size and use small source * particle descriptors (<= 256 bytes). 
*/ WRITE_REG(sc, SAFE_DMA_CFG, 256); device_printf(sc->sc_dev, "Reduce max DMA size to %u words for rev %u.%u WAR\n", (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff, SAFE_REV_MAJ(sc->sc_chiprev), SAFE_REV_MIN(sc->sc_chiprev)); } /* NB: operands+results are overlaid */ WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr); /* * Configure ring entry size and number of items in the ring. */ KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0, ("PE ring entry not 32-bit aligned!")); dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t); WRITE_REG(sc, SAFE_PE_RINGCFG, (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE); WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */ WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr); WRITE_REG(sc, SAFE_PE_PARTSIZE, (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART); /* * NB: destination particles are fixed size. We use * an mbuf cluster and require all results go to * clusters or smaller. */ WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE); /* it's now safe to enable PE mode, do it */ WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE); /* * Configure hardware to use level-triggered interrupts and * to interrupt after each descriptor is processed. */ WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL); WRITE_REG(sc, SAFE_HI_DESC_CNT, 1); WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR); } /* * Init PCI registers */ static void safe_init_pciregs(device_t dev) { } /* * Clean up after a chip crash. * It is assumed that the caller in splimp() */ static void safe_cleanchip(struct safe_softc *sc) { if (sc->sc_nqchip != 0) { struct safe_ringentry *re = sc->sc_back; while (re != sc->sc_front) { if (re->re_desc.d_csr != 0) safe_free_entry(sc, re); if (++re == sc->sc_ringtop) re = sc->sc_ring; } sc->sc_back = re; sc->sc_nqchip = 0; } } /* * free a safe_q * It is assumed that the caller is within splimp(). */ static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re) { struct cryptop *crp; /* * Free header MCR */ if (re->re_dst_m != NULL) m_freem(re->re_dst_m); crp = (struct cryptop *)re->re_crp; re->re_desc.d_csr = 0; crp->crp_etype = EFAULT; crypto_done(crp); return(0); } /* * Routine to reset the chip and clean up. * It is assumed that the caller is in splimp() */ static void safe_totalreset(struct safe_softc *sc) { safe_reset_board(sc); safe_init_board(sc); safe_cleanchip(sc); } /* * Is the operand suitable aligned for direct DMA. Each * segment must be aligned on a 32-bit boundary and all * but the last segment must be a multiple of 4 bytes. */ static int safe_dmamap_aligned(const struct safe_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3)) return (0); } return (1); } /* * Is the operand suitable for direct DMA as the destination * of an operation. The hardware requires that each ``particle'' * but the last in an operation result have the same size. We * fix that size at SAFE_MAX_DSIZE bytes. This routine returns * 0 if some segment is not a multiple of of this size, 1 if all * segments are exactly this size, or 2 if segments are at worst * a multple of this size. 
*/ static int safe_dmamap_uniform(const struct safe_operand *op) { int result = 1; if (op->nsegs > 0) { int i; for (i = 0; i < op->nsegs-1; i++) { if (op->segs[i].ds_len % SAFE_MAX_DSIZE) return (0); if (op->segs[i].ds_len != SAFE_MAX_DSIZE) result = 2; } } return (result); } #ifdef SAFE_DEBUG static void safe_dump_dmastatus(struct safe_softc *sc, const char *tag) { printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n" , tag , READ_REG(sc, SAFE_DMA_ENDIAN) , READ_REG(sc, SAFE_DMA_SRCADDR) , READ_REG(sc, SAFE_DMA_DSTADDR) , READ_REG(sc, SAFE_DMA_STAT) ); } static void safe_dump_intrstate(struct safe_softc *sc, const char *tag) { printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n" , tag , READ_REG(sc, SAFE_HI_CFG) , READ_REG(sc, SAFE_HI_MASK) , READ_REG(sc, SAFE_HI_DESC_CNT) , READ_REG(sc, SAFE_HU_STAT) , READ_REG(sc, SAFE_HM_STAT) ); } static void safe_dump_ringstate(struct safe_softc *sc, const char *tag) { u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT); /* NB: assume caller has lock on ring */ printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n", tag, estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S), (unsigned long)(sc->sc_back - sc->sc_ring), (unsigned long)(sc->sc_front - sc->sc_ring)); } static void safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re) { int ix, nsegs; ix = re - sc->sc_ring; printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n" , tag , re, ix , re->re_desc.d_csr , re->re_desc.d_src , re->re_desc.d_dst , re->re_desc.d_sa , re->re_desc.d_len ); if (re->re_src.nsegs > 1) { ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) / sizeof(struct safe_pdesc); for (nsegs = re->re_src.nsegs; nsegs; nsegs--) { printf(" spd[%u] %p: %p size %u flags %x" , ix, &sc->sc_spring[ix] , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr , sc->sc_spring[ix].pd_size , sc->sc_spring[ix].pd_flags ); if (sc->sc_spring[ix].pd_size == 0) printf(" (zero!)"); printf("\n"); if (++ix == SAFE_TOTAL_SPART) ix = 0; } } if (re->re_dst.nsegs > 1) { ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) / sizeof(struct safe_pdesc); for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) { printf(" dpd[%u] %p: %p flags %x\n" , ix, &sc->sc_dpring[ix] , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr , sc->sc_dpring[ix].pd_flags ); if (++ix == SAFE_TOTAL_DPART) ix = 0; } } printf("sa: cmd0 %08x cmd1 %08x staterec %x\n", re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec); printf("sa: key %x %x %x %x %x %x %x %x\n" , re->re_sa.sa_key[0] , re->re_sa.sa_key[1] , re->re_sa.sa_key[2] , re->re_sa.sa_key[3] , re->re_sa.sa_key[4] , re->re_sa.sa_key[5] , re->re_sa.sa_key[6] , re->re_sa.sa_key[7] ); printf("sa: indigest %x %x %x %x %x\n" , re->re_sa.sa_indigest[0] , re->re_sa.sa_indigest[1] , re->re_sa.sa_indigest[2] , re->re_sa.sa_indigest[3] , re->re_sa.sa_indigest[4] ); printf("sa: outdigest %x %x %x %x %x\n" , re->re_sa.sa_outdigest[0] , re->re_sa.sa_outdigest[1] , re->re_sa.sa_outdigest[2] , re->re_sa.sa_outdigest[3] , re->re_sa.sa_outdigest[4] ); printf("sr: iv %x %x %x %x\n" , re->re_sastate.sa_saved_iv[0] , re->re_sastate.sa_saved_iv[1] , re->re_sastate.sa_saved_iv[2] , re->re_sastate.sa_saved_iv[3] ); printf("sr: hashbc %u indigest %x %x %x %x %x\n" , re->re_sastate.sa_saved_hashbc , re->re_sastate.sa_saved_indigest[0] , re->re_sastate.sa_saved_indigest[1] , re->re_sastate.sa_saved_indigest[2] , re->re_sastate.sa_saved_indigest[3] , re->re_sastate.sa_saved_indigest[4] ); } static void safe_dump_ring(struct safe_softc *sc, const char *tag) { 
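	/*
	 * Dump interrupt, DMA, and ring state, then walk the queued
	 * entries from sc_back to sc_front under sc_ringmtx and dump
	 * each outstanding request.
	 */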
mtx_lock(&sc->sc_ringmtx); printf("\nSafeNet Ring State:\n"); safe_dump_intrstate(sc, tag); safe_dump_dmastatus(sc, tag); safe_dump_ringstate(sc, tag); if (sc->sc_nqchip) { struct safe_ringentry *re = sc->sc_back; do { safe_dump_request(sc, tag, re); if (++re == sc->sc_ringtop) re = sc->sc_ring; } while (re != sc->sc_front); } mtx_unlock(&sc->sc_ringmtx); } static int sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS) { char dmode[64]; int error; strncpy(dmode, "", sizeof(dmode) - 1); dmode[sizeof(dmode) - 1] = '\0'; error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req); if (error == 0 && req->newptr != NULL) { struct safe_softc *sc = safec; if (!sc) return EINVAL; if (strncmp(dmode, "dma", 3) == 0) safe_dump_dmastatus(sc, "safe0"); else if (strncmp(dmode, "int", 3) == 0) safe_dump_intrstate(sc, "safe0"); else if (strncmp(dmode, "ring", 4) == 0) safe_dump_ring(sc, "safe0"); else return EINVAL; } return error; } SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, sysctl_hw_safe_dump, "A", "Dump driver state"); #endif /* SAFE_DEBUG */ Index: head/sys/dev/sec/sec.c =================================================================== --- head/sys/dev/sec/sec.c (revision 360135) +++ head/sys/dev/sec/sec.c (revision 360136) @@ -1,1671 +1,1661 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and * 3.0 are supported. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include static int sec_probe(device_t dev); static int sec_attach(device_t dev); static int sec_detach(device_t dev); static int sec_suspend(device_t dev); static int sec_resume(device_t dev); static int sec_shutdown(device_t dev); static void sec_primary_intr(void *arg); static void sec_secondary_intr(void *arg); static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand, int *irid, driver_intr_t handler, const char *iname); static void sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand, int irid, const char *iname); static int sec_controller_reset(struct sec_softc *sc); static int sec_channel_reset(struct sec_softc *sc, int channel, int full); static int sec_init(struct sec_softc *sc); static int sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem, bus_size_t size); static int sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi); static void sec_free_dma_mem(struct sec_dma_mem *dma_mem); static void sec_enqueue(struct sec_softc *sc); static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel); static int sec_eu_channel(struct sec_softc *sc, int eu); static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize); static int sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize); static int sec_probesession(device_t dev, const struct crypto_session_params *csp); static int sec_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp); static int sec_process(device_t dev, struct cryptop *crp, int hint); static int sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, const struct crypto_session_params *csp, struct cryptop *crp); static int sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, const struct crypto_session_params *csp, struct cryptop *crp); static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr); /* AESU */ static bool sec_aesu_newsession(const struct crypto_session_params *csp); static int sec_aesu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp); /* DEU */ static bool sec_deu_newsession(const struct crypto_session_params *csp); static int sec_deu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp); /* MDEU */ static bool sec_mdeu_can_handle(u_int alg); static int sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode, u_int *hashlen); static bool sec_mdeu_newsession(const struct crypto_session_params *csp); static int sec_mdeu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp); static device_method_t sec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sec_probe), DEVMETHOD(device_attach, sec_attach), DEVMETHOD(device_detach, sec_detach), DEVMETHOD(device_suspend, sec_suspend), DEVMETHOD(device_resume, sec_resume), DEVMETHOD(device_shutdown, sec_shutdown), /* Crypto methods */ DEVMETHOD(cryptodev_probesession, sec_probesession), 
DEVMETHOD(cryptodev_newsession, sec_newsession), DEVMETHOD(cryptodev_process, sec_process), DEVMETHOD_END }; static driver_t sec_driver = { "sec", sec_methods, sizeof(struct sec_softc), }; static devclass_t sec_devclass; DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0); MODULE_DEPEND(sec, crypto, 1, 1, 1); static struct sec_eu_methods sec_eus[] = { { sec_aesu_newsession, sec_aesu_make_desc, }, { sec_deu_newsession, sec_deu_make_desc, }, { sec_mdeu_newsession, sec_mdeu_make_desc, }, { NULL, NULL } }; static inline void sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op) { /* Sync only if dma memory is valid */ if (dma_mem->dma_vaddr != NULL) bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op); } static inline void * sec_get_pointer_data(struct sec_desc *desc, u_int n) { return (desc->sd_ptr_dmem[n].dma_vaddr); } static int sec_probe(device_t dev) { struct sec_softc *sc; uint64_t id; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "fsl,sec2.0")) return (ENXIO); sc = device_get_softc(dev); sc->sc_rrid = 0; sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); id = SEC_READ(sc, SEC_ID); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); switch (id) { case SEC_20_ID: device_set_desc(dev, "Freescale Security Engine 2.0"); sc->sc_version = 2; break; case SEC_30_ID: device_set_desc(dev, "Freescale Security Engine 3.0"); sc->sc_version = 3; break; case SEC_31_ID: device_set_desc(dev, "Freescale Security Engine 3.1"); sc->sc_version = 3; break; default: device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id); return (ENXIO); } return (0); } static int sec_attach(device_t dev) { struct sec_softc *sc; struct sec_hw_lt *lt; int error = 0; int i; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_blocked = 0; sc->sc_shutdown = 0; sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver ID!\n"); return (ENXIO); } /* Init locks */ mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev), "SEC Controller lock", MTX_DEF); mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev), "SEC Descriptors lock", MTX_DEF); /* Allocate I/O memory for SEC registers */ sc->sc_rrid = 0; sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) { device_printf(dev, "could not allocate I/O memory!\n"); goto fail1; } sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres); sc->sc_bas.bst = rman_get_bustag(sc->sc_rres); /* Setup interrupts */ sc->sc_pri_irid = 0; error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand, &sc->sc_pri_irid, sec_primary_intr, "primary"); if (error) goto fail2; if (sc->sc_version == 3) { sc->sc_sec_irid = 1; error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand, &sc->sc_sec_irid, sec_secondary_intr, "secondary"); if (error) goto fail3; } /* Alloc DMA memory for descriptors and link tables */ error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem), SEC_DESCRIPTORS * sizeof(struct sec_hw_desc)); if (error) goto fail4; error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem), (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt)); if (error) goto fail5; /* Fill in descriptors and link tables */ for (i = 0; i < SEC_DESCRIPTORS; i++) { sc->sc_desc[i].sd_desc = (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + 
i; sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr + (i * sizeof(struct sec_hw_desc)); } for (i = 0; i < SEC_LT_ENTRIES + 1; i++) { sc->sc_lt[i].sl_lt = (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i; sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr + (i * sizeof(struct sec_hw_lt)); } /* Last entry in link table is used to create a circle */ lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt; lt->shl_length = 0; lt->shl_r = 0; lt->shl_n = 1; lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr; /* Init descriptor and link table queues pointers */ SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS); SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES); SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES); /* Create masks for fast checks */ sc->sc_int_error_mask = 0; for (i = 0; i < SEC_CHANNELS; i++) sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i)); switch (sc->sc_version) { case 2: sc->sc_channel_idle_mask = (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) | (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) | (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) | (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S); break; case 3: sc->sc_channel_idle_mask = (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) | (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) | (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) | (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S); break; } /* Init hardware */ error = sec_init(sc); if (error) goto fail6; return (0); fail6: sec_free_dma_mem(&(sc->sc_lt_dmem)); fail5: sec_free_dma_mem(&(sc->sc_desc_dmem)); fail4: sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, sc->sc_sec_irid, "secondary"); fail3: sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, sc->sc_pri_irid, "primary"); fail2: bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); fail1: mtx_destroy(&sc->sc_controller_lock); mtx_destroy(&sc->sc_descriptors_lock); return (ENXIO); } static int sec_detach(device_t dev) { struct sec_softc *sc = device_get_softc(dev); int i, error, timeout = SEC_TIMEOUT; /* Prepare driver to shutdown */ SEC_LOCK(sc, descriptors); sc->sc_shutdown = 1; SEC_UNLOCK(sc, descriptors); /* Wait until all queued processing finishes */ while (1) { SEC_LOCK(sc, descriptors); i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc); SEC_UNLOCK(sc, descriptors); if (i == 0) break; if (timeout < 0) { device_printf(dev, "queue flush timeout!\n"); /* DMA can be still active - stop it */ for (i = 0; i < SEC_CHANNELS; i++) sec_channel_reset(sc, i, 1); break; } timeout -= 1000; DELAY(1000); } /* Disable interrupts */ SEC_WRITE(sc, SEC_IER, 0); /* Unregister from OCF */ crypto_unregister_all(sc->sc_cid); /* Free DMA memory */ for (i = 0; i < SEC_DESCRIPTORS; i++) SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i])); sec_free_dma_mem(&(sc->sc_lt_dmem)); sec_free_dma_mem(&(sc->sc_desc_dmem)); /* Release interrupts */ sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand, sc->sc_pri_irid, "primary"); sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand, sc->sc_sec_irid, "secondary"); /* Release memory */ if (sc->sc_rres) { error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres); if (error) device_printf(dev, "bus_release_resource() failed for" " I/O memory, error %d\n", error); sc->sc_rres = 
NULL; } mtx_destroy(&sc->sc_controller_lock); mtx_destroy(&sc->sc_descriptors_lock); return (0); } static int sec_suspend(device_t dev) { return (0); } static int sec_resume(device_t dev) { return (0); } static int sec_shutdown(device_t dev) { return (0); } static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand, int *irid, driver_intr_t handler, const char *iname) { int error; (*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid, RF_ACTIVE); if ((*ires) == NULL) { device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname); return (ENXIO); } error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET, NULL, handler, sc, ihand); if (error) { device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname); if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires)) device_printf(sc->sc_dev, "could not release %s IRQ\n", iname); (*ires) = NULL; return (error); } return (0); } static void sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand, int irid, const char *iname) { int error; if (ires == NULL) return; error = bus_teardown_intr(sc->sc_dev, ires, ihand); if (error) device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s" " IRQ, error %d\n", iname, error); error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires); if (error) device_printf(sc->sc_dev, "bus_release_resource() failed for %s" " IRQ, error %d\n", iname, error); } static void sec_primary_intr(void *arg) { struct sec_session *ses; struct sec_softc *sc = arg; struct sec_desc *desc; struct cryptop *crp; uint64_t isr; uint8_t hash[HASH_MAX_LEN]; int i, wakeup = 0; SEC_LOCK(sc, controller); /* Check for errors */ isr = SEC_READ(sc, SEC_ISR); if (isr & sc->sc_int_error_mask) { /* Check each channel for error */ for (i = 0; i < SEC_CHANNELS; i++) { if ((isr & SEC_INT_CH_ERR(i)) == 0) continue; device_printf(sc->sc_dev, "I/O error on channel %i!\n", i); /* Find and mark problematic descriptor */ desc = sec_find_desc(sc, SEC_READ(sc, SEC_CHAN_CDPR(i))); if (desc != NULL) desc->sd_error = EIO; /* Do partial channel reset */ sec_channel_reset(sc, i, 0); } } /* ACK interrupt */ SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL); SEC_UNLOCK(sc, controller); SEC_LOCK(sc, descriptors); /* Handle processed descriptors */ SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); while (SEC_QUEUED_DESC_CNT(sc) > 0) { desc = SEC_GET_QUEUED_DESC(sc); if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) { SEC_PUT_BACK_QUEUED_DESC(sc); break; } SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); crp = desc->sd_crp; crp->crp_etype = desc->sd_error; if (crp->crp_etype == 0) { ses = crypto_get_driver_session(crp->crp_session); if (ses->ss_mlen != 0) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, ses->ss_mlen, hash); if (timingsafe_bcmp( desc->sd_desc->shd_digest, hash, ses->ss_mlen) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, ses->ss_mlen, desc->sd_desc->shd_digest); } } crypto_done(desc->sd_crp); SEC_DESC_FREE_POINTERS(desc); SEC_DESC_FREE_LT(sc, desc); SEC_DESC_QUEUED2FREE(sc); } SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (!sc->sc_shutdown) { wakeup = sc->sc_blocked; sc->sc_blocked = 0; } SEC_UNLOCK(sc, descriptors); /* Enqueue ready descriptors in hardware */ sec_enqueue(sc); if (wakeup) crypto_unblock(sc->sc_cid, wakeup); } static void sec_secondary_intr(void *arg) { struct sec_softc *sc = arg; device_printf(sc->sc_dev, 
"spurious secondary interrupt!\n"); sec_primary_intr(arg); } static int sec_controller_reset(struct sec_softc *sc) { int timeout = SEC_TIMEOUT; /* Reset Controller */ SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR); while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) { DELAY(1000); timeout -= 1000; if (timeout < 0) { device_printf(sc->sc_dev, "timeout while waiting for " "device reset!\n"); return (ETIMEDOUT); } } return (0); } static int sec_channel_reset(struct sec_softc *sc, int channel, int full) { int timeout = SEC_TIMEOUT; uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON; uint64_t reg; /* Reset Channel */ reg = SEC_READ(sc, SEC_CHAN_CCR(channel)); SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit); while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) { DELAY(1000); timeout -= 1000; if (timeout < 0) { device_printf(sc->sc_dev, "timeout while waiting for " "channel reset!\n"); return (ETIMEDOUT); } } if (full) { reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS; switch(sc->sc_version) { case 2: reg |= SEC_CHAN_CCR_CDWE; break; case 3: reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN; break; } SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg); } return (0); } static int sec_init(struct sec_softc *sc) { uint64_t reg; int error, i; /* Reset controller twice to clear all pending interrupts */ error = sec_controller_reset(sc); if (error) return (error); error = sec_controller_reset(sc); if (error) return (error); /* Reset channels */ for (i = 0; i < SEC_CHANNELS; i++) { error = sec_channel_reset(sc, i, 1); if (error) return (error); } /* Enable Interrupts */ reg = SEC_INT_ITO; for (i = 0; i < SEC_CHANNELS; i++) reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i); SEC_WRITE(sc, SEC_IER, reg); return (error); } static void sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sec_dma_mem *dma_mem = arg; if (error) return; KASSERT(nseg == 1, ("Wrong number of segments, should be 1")); dma_mem->dma_paddr = segs->ds_addr; } static void sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sec_desc_map_info *sdmi = arg; struct sec_softc *sc = sdmi->sdmi_sc; struct sec_lt *lt = NULL; bus_addr_t addr; bus_size_t size; int i; SEC_LOCK_ASSERT(sc, descriptors); if (error) return; for (i = 0; i < nseg; i++) { addr = segs[i].ds_addr; size = segs[i].ds_len; /* Skip requested offset */ if (sdmi->sdmi_offset >= size) { sdmi->sdmi_offset -= size; continue; } addr += sdmi->sdmi_offset; size -= sdmi->sdmi_offset; sdmi->sdmi_offset = 0; /* Do not link more than requested */ if (sdmi->sdmi_size < size) size = sdmi->sdmi_size; lt = SEC_ALLOC_LT_ENTRY(sc); lt->sl_lt->shl_length = size; lt->sl_lt->shl_r = 0; lt->sl_lt->shl_n = 0; lt->sl_lt->shl_ptr = addr; if (sdmi->sdmi_lt_first == NULL) sdmi->sdmi_lt_first = lt; sdmi->sdmi_lt_used += 1; if ((sdmi->sdmi_size -= size) == 0) break; } sdmi->sdmi_lt_last = lt; } static int sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem, bus_size_t size) { int error; if (dma_mem->dma_vaddr != NULL) return (EBUSY); error = bus_dma_tag_create(NULL, /* parent */ SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, 1, /* maxsize, nsegments */ size, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &(dma_mem->dma_tag)); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); goto err1; } error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr), 
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map)); if (error) { device_printf(sc->sc_dev, "failed to allocate DMA safe" " memory, error %i!\n", error); goto err2; } error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map, dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i\n", error); goto err3; } dma_mem->dma_is_map = 0; return (0); err3: bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map); err2: bus_dma_tag_destroy(dma_mem->dma_tag); err1: dma_mem->dma_vaddr = NULL; return(error); } static int sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi) { int error; if (dma_mem->dma_vaddr != NULL) return (EBUSY); switch (crp->crp_buf_type) { case CRYPTO_BUF_CONTIG: break; case CRYPTO_BUF_UIO: size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE; break; case CRYPTO_BUF_MBUF: size = m_length(crp->crp_mbuf, NULL); break; default: return (EINVAL); } error = bus_dma_tag_create(NULL, /* parent */ SEC_DMA_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, /* maxsize */ SEC_FREE_LT_CNT(sc), /* nsegments */ SEC_MAX_DMA_BLOCK_SIZE, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &(dma_mem->dma_tag)); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); dma_mem->dma_vaddr = NULL; return (error); } error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map)); if (error) { device_printf(sc->sc_dev, "failed to create DMA map, error %i!" "\n", error); bus_dma_tag_destroy(dma_mem->dma_tag); return (error); } error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i!\n", error); bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); bus_dma_tag_destroy(dma_mem->dma_tag); return (error); } dma_mem->dma_is_map = 1; dma_mem->dma_vaddr = crp; return (0); } static void sec_free_dma_mem(struct sec_dma_mem *dma_mem) { /* Check for double free */ if (dma_mem->dma_vaddr == NULL) return; bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map); if (dma_mem->dma_is_map) bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); else bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map); bus_dma_tag_destroy(dma_mem->dma_tag); dma_mem->dma_vaddr = NULL; } static int sec_eu_channel(struct sec_softc *sc, int eu) { uint64_t reg; int channel = 0; SEC_LOCK_ASSERT(sc, controller); reg = SEC_READ(sc, SEC_EUASR); switch (eu) { case SEC_EU_AFEU: channel = SEC_EUASR_AFEU(reg); break; case SEC_EU_DEU: channel = SEC_EUASR_DEU(reg); break; case SEC_EU_MDEU_A: case SEC_EU_MDEU_B: channel = SEC_EUASR_MDEU(reg); break; case SEC_EU_RNGU: channel = SEC_EUASR_RNGU(reg); break; case SEC_EU_PKEU: channel = SEC_EUASR_PKEU(reg); break; case SEC_EU_AESU: channel = SEC_EUASR_AESU(reg); break; case SEC_EU_KEU: channel = SEC_EUASR_KEU(reg); break; case SEC_EU_CRCU: channel = SEC_EUASR_CRCU(reg); break; } return (channel - 1); } static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel) { u_int fflvl = SEC_MAX_FIFO_LEVEL; uint64_t reg; int i; SEC_LOCK_ASSERT(sc, controller); /* Find free channel if have not got one */ if (channel < 0) { for (i = 0; i < SEC_CHANNELS; i++) 
{ reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); if ((reg & sc->sc_channel_idle_mask) == 0) { channel = i; break; } } } /* There is no free channel */ if (channel < 0) return (-1); /* Check FIFO level on selected channel */ reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); switch(sc->sc_version) { case 2: fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M; break; case 3: fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M; break; } if (fflvl >= SEC_MAX_FIFO_LEVEL) return (-1); /* Enqueue descriptor in channel */ SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr); return (channel); } static void sec_enqueue(struct sec_softc *sc) { struct sec_desc *desc; int ch0, ch1; SEC_LOCK(sc, descriptors); SEC_LOCK(sc, controller); while (SEC_READY_DESC_CNT(sc) > 0) { desc = SEC_GET_READY_DESC(sc); ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0); ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1); /* * Both EU are used by the same channel. * Enqueue descriptor in channel used by busy EUs. */ if (ch0 >= 0 && ch0 == ch1) { if (sec_enqueue_desc(sc, desc, ch0) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* * Only one EU is free. * Enqueue descriptor in channel used by busy EU. */ if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) { if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* * Both EU are free. * Enqueue descriptor in first free channel. */ if (ch0 < 0 && ch1 < 0) { if (sec_enqueue_desc(sc, desc, -1) >= 0) { SEC_DESC_READY2QUEUED(sc); continue; } } /* Current descriptor can not be queued at the moment */ SEC_PUT_BACK_READY_DESC(sc); break; } SEC_UNLOCK(sc, controller); SEC_UNLOCK(sc, descriptors); } static struct sec_desc * sec_find_desc(struct sec_softc *sc, bus_addr_t paddr) { struct sec_desc *desc = NULL; int i; SEC_LOCK_ASSERT(sc, descriptors); for (i = 0; i < SEC_CHANNELS; i++) { if (sc->sc_desc[i].sd_desc_paddr == paddr) { desc = &(sc->sc_desc[i]); break; } } return (desc); } static int sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize) { struct sec_hw_desc_ptr *ptr; SEC_LOCK_ASSERT(sc, descriptors); ptr = &(desc->sd_desc->shd_pointer[n]); ptr->shdp_length = dsize; ptr->shdp_extent = 0; ptr->shdp_j = 0; ptr->shdp_ptr = data; return (0); } static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize) { struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 }; struct sec_hw_desc_ptr *ptr; int error; SEC_LOCK_ASSERT(sc, descriptors); error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize, &sdmi); if (error) return (error); sdmi.sdmi_lt_last->sl_lt->shl_r = 1; desc->sd_lt_used += sdmi.sdmi_lt_used; ptr = &(desc->sd_desc->shd_pointer[n]); ptr->shdp_length = dsize; ptr->shdp_extent = 0; ptr->shdp_j = 1; ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr; return (0); } static bool sec_cipher_supported(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_AES_CBC: /* AESU */ if (csp->csp_ivlen != AES_BLOCK_LEN) return (false); break; case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: /* DEU */ if (csp->csp_ivlen != DES_BLOCK_LEN) return (false); break; default: return (false); } if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN) return (false); return (true); } static bool sec_auth_supported(struct sec_softc *sc, const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_SHA2_384_HMAC: 
case CRYPTO_SHA2_512_HMAC: if (sc->sc_version < 3) return (false); /* FALLTHROUGH */ case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: if (csp->csp_auth_klen > SEC_MAX_KEY_LEN) return (false); break; case CRYPTO_MD5: case CRYPTO_SHA1: break; default: return (false); } return (true); } static int sec_probesession(device_t dev, const struct crypto_session_params *csp) { struct sec_softc *sc = device_get_softc(dev); if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!sec_auth_supported(sc, csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!sec_cipher_supported(csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } static int sec_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct sec_eu_methods *eu = sec_eus; struct sec_session *ses; ses = crypto_get_driver_session(cses); /* Find EU for this session */ while (eu->sem_make_desc != NULL) { if (eu->sem_newsession(csp)) break; eu++; } KASSERT(eu->sem_make_desc != NULL, ("failed to find eu for session")); /* Save cipher key */ if (csp->csp_cipher_key != NULL) memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen); /* Save digest key */ if (csp->csp_auth_key != NULL) memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_auth_alg != 0) { if (csp->csp_auth_mlen == 0) ses->ss_mlen = crypto_auth_hash(csp)->hashsize; else ses->ss_mlen = csp->csp_auth_mlen; } return (0); } static int sec_process(device_t dev, struct cryptop *crp, int hint) { struct sec_softc *sc = device_get_softc(dev); struct sec_desc *desc = NULL; const struct crypto_session_params *csp; struct sec_session *ses; int error = 0; ses = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); /* Check for input length */ if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) { crp->crp_etype = E2BIG; crypto_done(crp); return (0); } SEC_LOCK(sc, descriptors); SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Block driver if there is no free descriptors or we are going down */ if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) { sc->sc_blocked |= CRYPTO_SYMQ; SEC_UNLOCK(sc, descriptors); return (ERESTART); } /* Prepare descriptor */ desc = SEC_GET_FREE_DESC(sc); desc->sd_lt_used = 0; desc->sd_error = 0; desc->sd_crp = crp; - if (csp->csp_cipher_alg != 0) { - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(desc->sd_desc->shd_iv, csp->csp_ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, - desc->sd_desc->shd_iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(desc->sd_desc->shd_iv, crp->crp_iv, - csp->csp_ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, - desc->sd_desc->shd_iv); - } + if (csp->csp_cipher_alg != 0) + crypto_read_iv(crp, desc->sd_desc->shd_iv); if (crp->crp_cipher_key != NULL) memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen); if (crp->crp_auth_key != NULL) memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen); memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen); memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, csp->csp_auth_klen); error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp); if (error) { SEC_DESC_FREE_POINTERS(desc); SEC_DESC_PUT_BACK_LT(sc, desc); SEC_PUT_BACK_FREE_DESC(sc); SEC_UNLOCK(sc, descriptors); crp->crp_etype = error; crypto_done(crp); return 
(0); } /* * Skip DONE interrupt if this is not last request in burst, but only * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE * signaling on each descriptor. */ if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3) desc->sd_desc->shd_dn = 0; else desc->sd_desc->shd_dn = 1; SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); SEC_DESC_FREE2READY(sc); SEC_UNLOCK(sc, descriptors); /* Enqueue ready descriptors in hardware */ sec_enqueue(sc); return (0); } static int sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, const struct crypto_session_params *csp, struct cryptop *crp) { struct sec_hw_desc *hd = desc->sd_desc; int error; hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; hd->shd_eu_sel1 = SEC_EU_NONE; hd->shd_mode1 = 0; /* Pointer 0: NULL */ error = sec_make_pointer_direct(sc, desc, 0, 0, 0); if (error) return (error); /* Pointer 1: IV IN */ error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen); if (error) return (error); /* Pointer 2: Cipher Key */ error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen); if (error) return (error); /* Pointer 3: Data IN */ error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 4: Data OUT */ error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 5: IV OUT (Not used: NULL) */ error = sec_make_pointer_direct(sc, desc, 5, 0, 0); if (error) return (error); /* Pointer 6: NULL */ error = sec_make_pointer_direct(sc, desc, 6, 0, 0); return (error); } static int sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, const struct crypto_session_params *csp, struct cryptop *crp) { struct sec_hw_desc *hd = desc->sd_desc; u_int eu, mode, hashlen; int error; error = sec_mdeu_config(csp, &eu, &mode, &hashlen); if (error) return (error); hd->shd_desc_type = SEC_DT_HMAC_SNOOP; hd->shd_eu_sel1 = eu; hd->shd_mode1 = mode; /* Pointer 0: HMAC Key */ error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen); if (error) return (error); /* Pointer 1: HMAC-Only Data IN */ error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start, crp->crp_aad_length); if (error) return (error); /* Pointer 2: Cipher Key */ error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen); if (error) return (error); /* Pointer 3: IV IN */ error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen); if (error) return (error); /* Pointer 4: Data IN */ error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 5: Data OUT */ error = sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 6: HMAC OUT */ error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_digest), hashlen); return (error); } /* AESU */ static bool sec_aesu_newsession(const struct crypto_session_params *csp) { return (csp->csp_cipher_alg == CRYPTO_AES_CBC); } static int sec_aesu_make_desc(struct sec_softc *sc, const struct 
crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp) { struct sec_hw_desc *hd = desc->sd_desc; int error; hd->shd_eu_sel0 = SEC_EU_AESU; hd->shd_mode0 = SEC_AESU_MODE_CBC; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { hd->shd_mode0 |= SEC_AESU_MODE_ED; hd->shd_dir = 0; } else hd->shd_dir = 1; if (csp->csp_mode == CSP_MODE_ETA) error = sec_build_common_s_desc(sc, desc, csp, crp); else error = sec_build_common_ns_desc(sc, desc, csp, crp); return (error); } /* DEU */ static bool sec_deu_newsession(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: return (true); default: return (false); } } static int sec_deu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp) { struct sec_hw_desc *hd = desc->sd_desc; int error; hd->shd_eu_sel0 = SEC_EU_DEU; hd->shd_mode0 = SEC_DEU_MODE_CBC; switch (csp->csp_cipher_alg) { case CRYPTO_3DES_CBC: hd->shd_mode0 |= SEC_DEU_MODE_TS; break; case CRYPTO_DES_CBC: break; default: return (EINVAL); } if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { hd->shd_mode0 |= SEC_DEU_MODE_ED; hd->shd_dir = 0; } else hd->shd_dir = 1; if (csp->csp_mode == CSP_MODE_ETA) error = sec_build_common_s_desc(sc, desc, csp, crp); else error = sec_build_common_ns_desc(sc, desc, csp, crp); return (error); } /* MDEU */ static bool sec_mdeu_can_handle(u_int alg) { switch (alg) { case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: return (true); default: return (false); } } static int sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode, u_int *hashlen) { *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT; *eu = SEC_EU_NONE; switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: *mode |= SEC_MDEU_MODE_HMAC; /* FALLTHROUGH */ case CRYPTO_MD5: *eu = SEC_EU_MDEU_A; *mode |= SEC_MDEU_MODE_MD5; *hashlen = MD5_HASH_LEN; break; case CRYPTO_SHA1_HMAC: *mode |= SEC_MDEU_MODE_HMAC; /* FALLTHROUGH */ case CRYPTO_SHA1: *eu = SEC_EU_MDEU_A; *mode |= SEC_MDEU_MODE_SHA1; *hashlen = SHA1_HASH_LEN; break; case CRYPTO_SHA2_256_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256; *eu = SEC_EU_MDEU_A; break; case CRYPTO_SHA2_384_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384; *eu = SEC_EU_MDEU_B; break; case CRYPTO_SHA2_512_HMAC: *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512; *eu = SEC_EU_MDEU_B; break; default: return (EINVAL); } if (*mode & SEC_MDEU_MODE_HMAC) *hashlen = SEC_HMAC_HASH_LEN; return (0); } static bool sec_mdeu_newsession(const struct crypto_session_params *csp) { return (sec_mdeu_can_handle(csp->csp_auth_alg)); } static int sec_mdeu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp, struct sec_desc *desc, struct cryptop *crp) { struct sec_hw_desc *hd = desc->sd_desc; u_int eu, mode, hashlen; int error; error = sec_mdeu_config(csp, &eu, &mode, &hashlen); if (error) return (error); hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; hd->shd_eu_sel0 = eu; hd->shd_mode0 = mode; hd->shd_eu_sel1 = SEC_EU_NONE; hd->shd_mode1 = 0; /* Pointer 0: NULL */ error = sec_make_pointer_direct(sc, desc, 0, 0, 0); if (error) return (error); /* Pointer 1: Context In (Not used: NULL) */ error = sec_make_pointer_direct(sc, desc, 1, 0, 0); if (error) return (error); /* Pointer 2: HMAC Key (or NULL, depending on digest type) */ if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC) error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + 
offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen); else error = sec_make_pointer_direct(sc, desc, 2, 0, 0); if (error) return (error); /* Pointer 3: Input Data */ error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start, crp->crp_payload_length); if (error) return (error); /* Pointer 4: NULL */ error = sec_make_pointer_direct(sc, desc, 4, 0, 0); if (error) return (error); /* Pointer 5: Hash out */ error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr + offsetof(struct sec_hw_desc, shd_digest), hashlen); if (error) return (error); /* Pointer 6: NULL */ error = sec_make_pointer_direct(sc, desc, 6, 0, 0); return (0); } Index: head/sys/dev/ubsec/ubsec.c =================================================================== --- head/sys/dev/ubsec/ubsec.c (revision 360135) +++ head/sys/dev/ubsec/ubsec.c (revision 360136) @@ -1,2707 +1,2699 @@ /* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2000 Jason L. Wright (jason@thought.net) * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Jason L. Wright * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
*/ #include __FBSDID("$FreeBSD$"); /* * uBsec 5[56]01, 58xx hardware crypto accelerator */ #include "opt_ubsec.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include /* grr, #defines for gratuitous incompatibility in queue.h */ #define SIMPLEQ_HEAD STAILQ_HEAD #define SIMPLEQ_ENTRY STAILQ_ENTRY #define SIMPLEQ_INIT STAILQ_INIT #define SIMPLEQ_INSERT_TAIL STAILQ_INSERT_TAIL #define SIMPLEQ_EMPTY STAILQ_EMPTY #define SIMPLEQ_FIRST STAILQ_FIRST #define SIMPLEQ_REMOVE_HEAD STAILQ_REMOVE_HEAD #define SIMPLEQ_FOREACH STAILQ_FOREACH /* ditto for endian.h */ #define letoh16(x) le16toh(x) #define letoh32(x) le32toh(x) #ifdef UBSEC_RNDTEST #include #endif #include #include /* * Prototypes and count for the pci_device structure */ static int ubsec_probe(device_t); static int ubsec_attach(device_t); static int ubsec_detach(device_t); static int ubsec_suspend(device_t); static int ubsec_resume(device_t); static int ubsec_shutdown(device_t); static int ubsec_probesession(device_t, const struct crypto_session_params *); static int ubsec_newsession(device_t, crypto_session_t, const struct crypto_session_params *); static int ubsec_process(device_t, struct cryptop *, int); static int ubsec_kprocess(device_t, struct cryptkop *, int); static device_method_t ubsec_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ubsec_probe), DEVMETHOD(device_attach, ubsec_attach), DEVMETHOD(device_detach, ubsec_detach), DEVMETHOD(device_suspend, ubsec_suspend), DEVMETHOD(device_resume, ubsec_resume), DEVMETHOD(device_shutdown, ubsec_shutdown), /* crypto device methods */ DEVMETHOD(cryptodev_probesession, ubsec_probesession), DEVMETHOD(cryptodev_newsession, ubsec_newsession), DEVMETHOD(cryptodev_process, ubsec_process), DEVMETHOD(cryptodev_kprocess, ubsec_kprocess), DEVMETHOD_END }; static driver_t ubsec_driver = { "ubsec", ubsec_methods, sizeof (struct ubsec_softc) }; static devclass_t ubsec_devclass; DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0); MODULE_DEPEND(ubsec, crypto, 1, 1, 1); #ifdef UBSEC_RNDTEST MODULE_DEPEND(ubsec, rndtest, 1, 1, 1); #endif static void ubsec_intr(void *); static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *); static void ubsec_feed(struct ubsec_softc *); static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int); static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *); static int ubsec_feed2(struct ubsec_softc *); static void ubsec_rng(void *); static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t, struct ubsec_dma_alloc *, int); #define ubsec_dma_sync(_dma, _flags) \ bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *); static int ubsec_dmamap_aligned(struct ubsec_operand *op); static void ubsec_reset_board(struct ubsec_softc *sc); static void ubsec_init_board(struct ubsec_softc *sc); static void ubsec_init_pciregs(device_t dev); static void ubsec_totalreset(struct ubsec_softc *sc); static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q); static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int); static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int); static int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int); static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 
*); static int ubsec_ksigbits(struct crparam *); static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int); static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int); static SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Broadcom driver parameters"); #ifdef UBSEC_DEBUG static void ubsec_dump_pb(volatile struct ubsec_pktbuf *); static void ubsec_dump_mcr(struct ubsec_mcr *); static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *); static int ubsec_debug = 0; SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug, 0, "control debugging msgs"); #endif #define READ_REG(sc,r) \ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) #define WRITE_REG(sc,reg,val) \ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) #define SWAP32(x) (x) = htole32(ntohl((x))) #define HTOLE32(x) (x) = htole32(x) struct ubsec_stats ubsecstats; SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats, ubsec_stats, "driver statistics"); static int ubsec_probe(device_t dev) { if (pci_get_vendor(dev) == PCI_VENDOR_SUN && (pci_get_device(dev) == PCI_PRODUCT_SUN_5821 || pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K)) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 || pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)) return (BUS_PROBE_DEFAULT); if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825 )) return (BUS_PROBE_DEFAULT); return (ENXIO); } static const char* ubsec_partname(struct ubsec_softc *sc) { /* XXX sprintf numbers when not decoded */ switch (pci_get_vendor(sc->sc_dev)) { case PCI_VENDOR_BROADCOM: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_BROADCOM_5801: return "Broadcom 5801"; case PCI_PRODUCT_BROADCOM_5802: return "Broadcom 5802"; case PCI_PRODUCT_BROADCOM_5805: return "Broadcom 5805"; case PCI_PRODUCT_BROADCOM_5820: return "Broadcom 5820"; case PCI_PRODUCT_BROADCOM_5821: return "Broadcom 5821"; case PCI_PRODUCT_BROADCOM_5822: return "Broadcom 5822"; case PCI_PRODUCT_BROADCOM_5823: return "Broadcom 5823"; case PCI_PRODUCT_BROADCOM_5825: return "Broadcom 5825"; } return "Broadcom unknown-part"; case PCI_VENDOR_BLUESTEEL: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601"; } return "Bluesteel unknown-part"; case PCI_VENDOR_SUN: switch (pci_get_device(sc->sc_dev)) { case PCI_PRODUCT_SUN_5821: return "Sun Crypto 5821"; case PCI_PRODUCT_SUN_SCA1K: return "Sun Crypto 1K"; } return "Sun unknown-part"; } return "Unknown-vendor unknown-part"; } static void default_harvest(struct rndtest_state *rsp, void *buf, u_int count) { /* MarkM: FIX!! Check that this does not swamp the harvester! 
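The READ_REG()/WRITE_REG() macros above are thin bus_space accessors over the board's register window; most control-register updates in this driver are simple read-modify-write sequences built on them, as in the sketch below (the helper name is illustrative; BS_CTRL is the control register used later in ubsec_init_board()).

/*
 * Sketch: set or clear a bit in the board control register using the
 * READ_REG()/WRITE_REG() accessors defined above.
 */
static void
example_ctrl_setbit(struct ubsec_softc *sc, u_int32_t bit, int on)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	if (on)
		ctrl |= bit;
	else
		ctrl &= ~bit;
	WRITE_REG(sc, BS_CTRL, ctrl);
}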
*/ random_harvest_queue(buf, count, RANDOM_PURE_UBSEC); } static int ubsec_attach(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); struct ubsec_dma *dmap; u_int32_t i; int rid; bzero(sc, sizeof (*sc)); sc->sc_dev = dev; SIMPLEQ_INIT(&sc->sc_queue); SIMPLEQ_INIT(&sc->sc_qchip); SIMPLEQ_INIT(&sc->sc_queue2); SIMPLEQ_INIT(&sc->sc_qchip2); SIMPLEQ_INIT(&sc->sc_q2free); /* XXX handle power management */ sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR; if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601) sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805)) sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820) sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; if ((pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 || pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825)) || (pci_get_vendor(dev) == PCI_VENDOR_SUN && (pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K || pci_get_device(dev) == PCI_PRODUCT_SUN_5821))) { /* NB: the 5821/5822 defines some additional status bits */ sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY; sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; } pci_enable_busmaster(dev); /* * Setup memory-mapping of PCI registers. */ rid = BS_BAR; sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_sr == NULL) { device_printf(dev, "cannot map register space\n"); goto bad; } sc->sc_st = rman_get_bustag(sc->sc_sr); sc->sc_sh = rman_get_bushandle(sc->sc_sr); /* * Arrange interrupt line. */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE|RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "could not map interrupt\n"); goto bad1; } /* * NB: Network code assumes we are blocked with splimp() * so make sure the IRQ is mapped appropriately. */ if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ubsec_intr, sc, &sc->sc_ih)) { device_printf(dev, "could not establish interrupt\n"); goto bad2; } /* * Setup DMA descriptor area. 
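The attach path above follows the usual newbus shape: map the register BAR, allocate the interrupt, hook the handler, and unwind in reverse order on failure (the bad/bad1/bad2 labels further down). A condensed sketch of that pattern, with an illustrative function name and labels:

/*
 * Sketch of the allocate-and-unwind pattern used by ubsec_attach().
 */
static int
example_attach_resources(device_t dev, struct ubsec_softc *sc)
{
	int rid;

	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL)
		return (ENXIO);

	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq == NULL)
		goto fail_mem;

	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ubsec_intr, sc, &sc->sc_ih) != 0)
		goto fail_irq;

	return (0);

fail_irq:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
	return (ENXIO);
}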
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ 0x3ffff, /* maxsize */ UBS_MAX_SCATTER, /* nsegments */ 0xffff, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_dmat)) { device_printf(dev, "cannot allocate DMA tag\n"); goto bad3; } SIMPLEQ_INIT(&sc->sc_freequeue); dmap = sc->sc_dmaa; for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) { struct ubsec_q *q; q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q), M_DEVBUF, M_NOWAIT); if (q == NULL) { device_printf(dev, "cannot allocate queue buffers\n"); break; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk), &dmap->d_alloc, 0)) { device_printf(dev, "cannot allocate dma buffers\n"); free(q, M_DEVBUF); break; } dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr; q->q_dma = dmap; sc->sc_queuea[i] = q; SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); } mtx_init(&sc->sc_mcr1lock, device_get_nameunit(dev), "mcr1 operations", MTX_DEF); mtx_init(&sc->sc_freeqlock, device_get_nameunit(dev), "mcr1 free q", MTX_DEF); device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc)); /* * Reset Broadcom chip */ ubsec_reset_board(sc); /* * Init Broadcom specific PCI settings */ ubsec_init_pciregs(dev); /* * Init Broadcom chip */ ubsec_init_board(sc); sc->sc_cid = crypto_get_driverid(dev, sizeof(struct ubsec_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { device_printf(dev, "could not get crypto driver id\n"); goto bad4; } #ifndef UBSEC_NO_RNG if (sc->sc_flags & UBS_FLAGS_RNG) { sc->sc_statmask |= BS_STAT_MCR2_DONE; #ifdef UBSEC_RNDTEST sc->sc_rndtest = rndtest_attach(dev); if (sc->sc_rndtest) sc->sc_harvest = rndtest_harvest; else sc->sc_harvest = default_harvest; #else sc->sc_harvest = default_harvest; #endif if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &sc->sc_rng.rng_q.q_mcr, 0)) goto skip_rng; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass), &sc->sc_rng.rng_q.q_ctx, 0)) { ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); goto skip_rng; } if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) { ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); goto skip_rng; } if (hz >= 100) sc->sc_rnghz = hz / 100; else sc->sc_rnghz = 1; callout_init(&sc->sc_rngto, 1); callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); skip_rng: ; } #endif /* UBSEC_NO_RNG */ mtx_init(&sc->sc_mcr2lock, device_get_nameunit(dev), "mcr2 operations", MTX_DEF); if (sc->sc_flags & UBS_FLAGS_KEY) { sc->sc_statmask |= BS_STAT_MCR2_DONE; crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0); #if 0 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0); #endif } return (0); bad4: while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) { struct ubsec_q *q; q = SIMPLEQ_FIRST(&sc->sc_freequeue); SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next); ubsec_dma_free(sc, &q->q_dma->d_alloc); free(q, M_DEVBUF); } bus_dma_tag_destroy(sc->sc_dmat); bad3: bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); bad2: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bad1: bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); bad: return (ENXIO); } /* * Detach a device that successfully probed. 
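Each entry on sc_freequeue built in the loop above pairs a host-side struct ubsec_q with a DMA-visible struct ubsec_dmachunk obtained through ubsec_dma_malloc() (defined further down in this file). A minimal sketch of one such allocation, with an illustrative helper name:

/*
 * Sketch: allocate one queue entry plus its DMA-visible chunk, as in
 * the ubsec_attach() loop above.
 */
static struct ubsec_q *
example_alloc_queue_entry(struct ubsec_softc *sc, struct ubsec_dma *dmap)
{
	struct ubsec_q *q;

	q = malloc(sizeof(struct ubsec_q), M_DEVBUF, M_NOWAIT);
	if (q == NULL)
		return (NULL);
	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
	    &dmap->d_alloc, 0) != 0) {
		free(q, M_DEVBUF);
		return (NULL);
	}
	dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;
	q->q_dma = dmap;
	return (q);
}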
*/ static int ubsec_detach(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); /* XXX wait/abort active ops */ crypto_unregister_all(sc->sc_cid); /* disable interrupts */ WRITE_REG(sc, BS_CTRL, READ_REG(sc, BS_CTRL) &~ (BS_CTRL_MCR2INT | BS_CTRL_MCR1INT | BS_CTRL_DMAERR)); callout_stop(&sc->sc_rngto); bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); #ifdef UBSEC_RNDTEST if (sc->sc_rndtest) rndtest_detach(sc->sc_rndtest); #endif while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) { struct ubsec_q *q; q = SIMPLEQ_FIRST(&sc->sc_freequeue); SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next); ubsec_dma_free(sc, &q->q_dma->d_alloc); free(q, M_DEVBUF); } mtx_destroy(&sc->sc_mcr1lock); mtx_destroy(&sc->sc_freeqlock); #ifndef UBSEC_NO_RNG if (sc->sc_flags & UBS_FLAGS_RNG) { ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); ubsec_dma_free(sc, &sc->sc_rng.rng_buf); } #endif /* UBSEC_NO_RNG */ mtx_destroy(&sc->sc_mcr2lock); bus_generic_detach(dev); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); bus_dma_tag_destroy(sc->sc_dmat); bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); return (0); } /* * Stop all chip i/o so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int ubsec_shutdown(device_t dev) { #ifdef notyet ubsec_stop(device_get_softc(dev)); #endif return (0); } /* * Device suspend routine. */ static int ubsec_suspend(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX stop the device and save PCI settings */ #endif sc->sc_suspended = 1; return (0); } static int ubsec_resume(device_t dev) { struct ubsec_softc *sc = device_get_softc(dev); #ifdef notyet /* XXX retore PCI settings and start the device */ #endif sc->sc_suspended = 0; return (0); } /* * UBSEC Interrupt routine */ static void ubsec_intr(void *arg) { struct ubsec_softc *sc = arg; volatile u_int32_t stat; struct ubsec_q *q; struct ubsec_dma *dmap; int npkts = 0, i; stat = READ_REG(sc, BS_STAT); stat &= sc->sc_statmask; if (stat == 0) return; WRITE_REG(sc, BS_STAT, stat); /* IACK */ /* * Check to see if we have any packets waiting for us */ if ((stat & BS_STAT_MCR1_DONE)) { mtx_lock(&sc->sc_mcr1lock); while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { q = SIMPLEQ_FIRST(&sc->sc_qchip); dmap = q->q_dma; if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0) break; SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next); npkts = q->q_nstacked_mcrs; sc->sc_nqchip -= 1+npkts; /* * search for further sc_qchip ubsec_q's that share * the same MCR, and complete them too, they must be * at the top. */ for (i = 0; i < npkts; i++) { if(q->q_stacked_mcr[i]) { ubsec_callback(sc, q->q_stacked_mcr[i]); } else { break; } } ubsec_callback(sc, q); } /* * Don't send any more packet to chip if there has been * a DMAERR. 
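ubsec_intr() above acknowledges the interrupt by writing the masked status back to BS_STAT and then walks sc_qchip, stopping at the first MCR whose done bit the chip has not yet set. The completion test itself is just a flag check against a little-endian constant, roughly as sketched here (illustrative helper name):

/*
 * Sketch: has the chip finished this MCR?  The device writes the DONE
 * bit in little-endian form, hence htole16() on the host-side constant
 * instead of byte-swapping the descriptor field.
 */
static int
example_mcr_done(volatile struct ubsec_mcr *mcr)
{
	return ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) != 0);
}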
*/ if (!(stat & BS_STAT_DMAERR)) ubsec_feed(sc); mtx_unlock(&sc->sc_mcr1lock); } /* * Check to see if we have any key setups/rng's waiting for us */ if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) && (stat & BS_STAT_MCR2_DONE)) { struct ubsec_q2 *q2; struct ubsec_mcr *mcr; mtx_lock(&sc->sc_mcr2lock); while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) { q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); ubsec_dma_sync(&q2->q_mcr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr; if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) { ubsec_dma_sync(&q2->q_mcr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); break; } SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q_next); ubsec_callback2(sc, q2); /* * Don't send any more packet to chip if there has been * a DMAERR. */ if (!(stat & BS_STAT_DMAERR)) ubsec_feed2(sc); } mtx_unlock(&sc->sc_mcr2lock); } /* * Check to see if we got any DMA Error */ if (stat & BS_STAT_DMAERR) { #ifdef UBSEC_DEBUG if (ubsec_debug) { volatile u_int32_t a = READ_REG(sc, BS_ERR); printf("dmaerr %s@%08x\n", (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR); } #endif /* UBSEC_DEBUG */ ubsecstats.hst_dmaerr++; mtx_lock(&sc->sc_mcr1lock); ubsec_totalreset(sc); ubsec_feed(sc); mtx_unlock(&sc->sc_mcr1lock); } if (sc->sc_needwakeup) { /* XXX check high watermark */ int wakeup; mtx_lock(&sc->sc_freeqlock); wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); #ifdef UBSEC_DEBUG if (ubsec_debug) device_printf(sc->sc_dev, "wakeup crypto (%x)\n", sc->sc_needwakeup); #endif /* UBSEC_DEBUG */ sc->sc_needwakeup &= ~wakeup; mtx_unlock(&sc->sc_freeqlock); crypto_unblock(sc->sc_cid, wakeup); } } /* * ubsec_feed() - aggregate and post requests to chip */ static void ubsec_feed(struct ubsec_softc *sc) { struct ubsec_q *q, *q2; int npkts, i; void *v; u_int32_t stat; /* * Decide how many ops to combine in a single MCR. We cannot * aggregate more than UBS_MAX_AGGR because this is the number * of slots defined in the data structure. Note that * aggregation only happens if ops are marked batch'able. * Aggregating ops reduces the number of interrupts to the host * but also (potentially) increases the latency for processing * completed ops as we only get an interrupt when all aggregated * ops have completed. */ if (sc->sc_nqueue == 0) return; if (sc->sc_nqueue > 1) { npkts = 0; SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) { npkts++; if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0) break; } } else npkts = 1; /* * Check device status before going any further. 
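The aggregation logic at the top of ubsec_feed() reduces to the count sketched below: keep stacking queued requests into one MCR while they are marked CRYPTO_F_BATCH, and cap the result at the UBS_MAX_AGGR slots available in the command record. As the comment above notes, this trades fewer interrupts for higher completion latency of the stacked requests. (Illustrative helper name.)

/*
 * Sketch: decide how many queued requests to coalesce into a single
 * MCR.  Aggregation stops at the first request not marked batchable
 * and is bounded by the number of MCR slots.
 */
static int
example_batch_count(struct ubsec_softc *sc)
{
	struct ubsec_q *q;
	int npkts = 0;

	SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) {
		npkts++;
		if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0)
			break;
	}
	return (npkts > UBS_MAX_AGGR ? UBS_MAX_AGGR : npkts);
}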
*/ if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { if (stat & BS_STAT_DMAERR) { ubsec_totalreset(sc); ubsecstats.hst_dmaerr++; } else ubsecstats.hst_mcr1full++; return; } if (sc->sc_nqueue > ubsecstats.hst_maxqueue) ubsecstats.hst_maxqueue = sc->sc_nqueue; if (npkts > UBS_MAX_AGGR) npkts = UBS_MAX_AGGR; if (npkts < 2) /* special case 1 op */ goto feed1; ubsecstats.hst_totbatch += npkts-1; #ifdef UBSEC_DEBUG if (ubsec_debug) printf("merging %d records\n", npkts); #endif /* UBSEC_DEBUG */ q = SIMPLEQ_FIRST(&sc->sc_queue); SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next); --sc->sc_nqueue; bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); if (q->q_dst_map != NULL) bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */ for (i = 0; i < q->q_nstacked_mcrs; i++) { q2 = SIMPLEQ_FIRST(&sc->sc_queue); bus_dmamap_sync(sc->sc_dmat, q2->q_src_map, BUS_DMASYNC_PREWRITE); if (q2->q_dst_map != NULL) bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map, BUS_DMASYNC_PREREAD); SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next); --sc->sc_nqueue; v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) - sizeof(struct ubsec_mcr_add)); bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add)); q->q_stacked_mcr[i] = q2; } q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts); SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); sc->sc_nqchip += npkts; if (sc->sc_nqchip > ubsecstats.hst_maxqchip) ubsecstats.hst_maxqchip = sc->sc_nqchip; ubsec_dma_sync(&q->q_dma->d_alloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_mcr)); return; feed1: q = SIMPLEQ_FIRST(&sc->sc_queue); bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); if (q->q_dst_map != NULL) bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&q->q_dma->d_alloc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_mcr)); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("feed1: q->chip %p %08x stat %08x\n", q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr), stat); #endif /* UBSEC_DEBUG */ SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next); --sc->sc_nqueue; SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); sc->sc_nqchip++; if (sc->sc_nqchip > ubsecstats.hst_maxqchip) ubsecstats.hst_maxqchip = sc->sc_nqchip; return; } static void ubsec_setup_enckey(struct ubsec_session *ses, int algo, const void *key) { /* Go ahead and compute key in ubsec's byte order */ if (algo == CRYPTO_DES_CBC) { bcopy(key, &ses->ses_deskey[0], 8); bcopy(key, &ses->ses_deskey[2], 8); bcopy(key, &ses->ses_deskey[4], 8); } else bcopy(key, ses->ses_deskey, 24); SWAP32(ses->ses_deskey[0]); SWAP32(ses->ses_deskey[1]); SWAP32(ses->ses_deskey[2]); SWAP32(ses->ses_deskey[3]); SWAP32(ses->ses_deskey[4]); SWAP32(ses->ses_deskey[5]); } static void ubsec_setup_mackey(struct ubsec_session *ses, int algo, const char *key, int klen) { MD5_CTX md5ctx; SHA1_CTX sha1ctx; if (algo == CRYPTO_MD5_HMAC) { hmac_init_ipad(&auth_hash_hmac_md5, key, klen, &md5ctx); bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state)); hmac_init_opad(&auth_hash_hmac_md5, key, klen, &md5ctx); bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state)); explicit_bzero(&md5ctx, sizeof(md5ctx)); } else { hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx); bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32)); 
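A detail worth noting in ubsec_setup_enckey() above: ubsec_process() below always sets UBS_PKTCTX_ENC_3DES in the packet context, so plain DES is handled by replicating the 8-byte key into all three key slots; 3DES-EDE with K1 = K2 = K3 degenerates to single DES. The key loading, including the SWAP32() word swizzle, looks roughly like this sketch (illustrative helper name):

/*
 * Sketch: load the DES/3DES key the way ubsec_setup_enckey() does.
 * deskey[] holds six 32-bit words (three 8-byte DES keys); for single
 * DES the same key is copied into all three slots.
 */
static void
example_load_deskey(u_int32_t *deskey, const void *key, int algo)
{
	int i;

	if (algo == CRYPTO_DES_CBC) {
		bcopy(key, &deskey[0], 8);
		bcopy(key, &deskey[2], 8);
		bcopy(key, &deskey[4], 8);
	} else
		bcopy(key, deskey, 24);		/* CRYPTO_3DES_CBC */
	for (i = 0; i < 6; i++)
		deskey[i] = htole32(ntohl(deskey[i]));	/* SWAP32() */
}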
hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx); bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32)); explicit_bzero(&sha1ctx, sizeof(sha1ctx)); } } static bool ubsec_auth_supported(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: return (true); default: return (false); } } static bool ubsec_cipher_supported(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: return (csp->csp_ivlen == 8); default: return (false); } } static int ubsec_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!ubsec_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!ubsec_cipher_supported(csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!ubsec_auth_supported(csp) || !ubsec_cipher_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } /* * Allocate a new 'session'. */ static int ubsec_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct ubsec_session *ses; ses = crypto_get_driver_session(cses); if (csp->csp_cipher_alg != 0 && csp->csp_cipher_key != NULL) ubsec_setup_enckey(ses, csp->csp_cipher_alg, csp->csp_cipher_key); if (csp->csp_auth_alg != 0) { ses->ses_mlen = csp->csp_auth_mlen; if (ses->ses_mlen == 0) { if (csp->csp_auth_alg == CRYPTO_MD5_HMAC) ses->ses_mlen = MD5_HASH_LEN; else ses->ses_mlen = SHA1_HASH_LEN; } if (csp->csp_auth_key != NULL) { ubsec_setup_mackey(ses, csp->csp_auth_alg, csp->csp_auth_key, csp->csp_auth_klen); } } return (0); } static bus_size_t ubsec_crp_length(struct cryptop *crp) { switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: return (crp->crp_mbuf->m_pkthdr.len); case CRYPTO_BUF_UIO: return (crp->crp_uio->uio_resid); case CRYPTO_BUF_CONTIG: return (crp->crp_ilen); default: panic("bad crp buffer type"); } } static void ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error) { struct ubsec_operand *op = arg; KASSERT(nsegs <= UBS_MAX_SCATTER, ("Too many DMA segments returned when mapping operand")); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("ubsec_op_cb: nsegs %d error %d\n", nsegs, error); #endif if (error != 0) return; op->nsegs = nsegs; bcopy(seg, op->segs, nsegs * sizeof (seg[0])); } static int ubsec_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct ubsec_softc *sc = device_get_softc(dev); struct ubsec_q *q = NULL; int err = 0, i, j, nicealign; int cpskip, cpoffset; int sskip, dskip, stheend, dtheend; int16_t coffset; struct ubsec_session *ses; struct ubsec_pktctx ctx; struct ubsec_dma *dmap = NULL; mtx_lock(&sc->sc_freeqlock); if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) { ubsecstats.hst_queuefull++; sc->sc_needwakeup |= CRYPTO_SYMQ; mtx_unlock(&sc->sc_freeqlock); return (ERESTART); } q = SIMPLEQ_FIRST(&sc->sc_freequeue); SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next); mtx_unlock(&sc->sc_freeqlock); dmap = q->q_dma; /* Save dma pointer */ bzero(q, sizeof(struct ubsec_q)); bzero(&ctx, sizeof(ctx)); q->q_dma = dmap; ses = crypto_get_driver_session(crp->crp_session); bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr)); dmap->d_dma->d_mcr.mcr_pkts = htole16(1); dmap->d_dma->d_mcr.mcr_flags = 0; q->q_crp = crp; csp = crypto_get_params(crp->crp_session); if (csp->csp_cipher_alg != 0) { if (crp->crp_cipher_key != NULL) { ubsec_setup_enckey(ses, 
csp->csp_cipher_alg, crp->crp_cipher_key); } ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES); - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(ctx.pc_iv, csp->csp_ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, - csp->csp_ivlen, ctx.pc_iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - memcpy(ctx.pc_iv, crp->crp_iv, csp->csp_ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, - ctx.pc_iv); + crypto_read_iv(crp, ctx.pc_iv); if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND); } ctx.pc_deskey[0] = ses->ses_deskey[0]; ctx.pc_deskey[1] = ses->ses_deskey[1]; ctx.pc_deskey[2] = ses->ses_deskey[2]; ctx.pc_deskey[3] = ses->ses_deskey[3]; ctx.pc_deskey[4] = ses->ses_deskey[4]; ctx.pc_deskey[5] = ses->ses_deskey[5]; SWAP32(ctx.pc_iv[0]); SWAP32(ctx.pc_iv[1]); } if (csp->csp_auth_alg != 0) { if (crp->crp_auth_key != NULL) { ubsec_setup_mackey(ses, csp->csp_auth_alg, crp->crp_auth_key, csp->csp_auth_klen); } if (csp->csp_auth_alg == CRYPTO_MD5_HMAC) ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5); else ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1); for (i = 0; i < 5; i++) { ctx.pc_hminner[i] = ses->ses_hminner[i]; ctx.pc_hmouter[i] = ses->ses_hmouter[i]; HTOLE32(ctx.pc_hminner[i]); HTOLE32(ctx.pc_hmouter[i]); } } if (csp->csp_mode == CSP_MODE_ETA) { /* * ubsec only supports ETA requests where there is no * gap between the AAD and payload. */ if (crp->crp_aad_length != 0 && crp->crp_aad_start + crp->crp_aad_length != crp->crp_payload_start) { ubsecstats.hst_lenmismatch++; err = EINVAL; goto errout; } if (crp->crp_aad_length != 0) { sskip = crp->crp_aad_start; } else { sskip = crp->crp_payload_start; } cpskip = dskip = crp->crp_payload_start; stheend = crp->crp_aad_length + crp->crp_payload_length; dtheend = crp->crp_payload_length; coffset = crp->crp_aad_length; cpoffset = cpskip + dtheend; #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("AAD: start %d, len %d, digest %d\n", crp->crp_aad_start, crp->crp_aad_length, crp->crp_digest_start); printf("payload: start %d, len %d, IV %d\n", crp->crp_payload_start, crp->crp_payload_length, crp->crp_iv_start); printf("src: skip %d, len %d\n", sskip, stheend); printf("dst: skip %d, len %d\n", dskip, dtheend); printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n", coffset, stheend, cpskip, cpoffset); } #endif } else { cpskip = dskip = sskip = crp->crp_payload_start; dtheend = stheend = crp->crp_payload_length; cpoffset = cpskip + dtheend; coffset = 0; } ctx.pc_offset = htole16(coffset >> 2); if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) { ubsecstats.hst_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_crp(sc->sc_dmat, q->q_src_map, crp, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); q->q_src_map = NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } q->q_src_mapsize = ubsec_crp_length(crp); nicealign = ubsec_dmamap_aligned(&q->q_src); dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("src skip: %d nicealign: %u\n", sskip, nicealign); #endif for (i = j = 0; i < q->q_src_nsegs; i++) { struct ubsec_pktbuf *pb; bus_size_t packl = q->q_src_segs[i].ds_len; bus_addr_t packp = q->q_src_segs[i].ds_addr; if (sskip >= packl) { sskip -= packl; continue; } packl -= sskip; packp += sskip; sskip = 0; if (packl > 0xfffc) { err = EIO; goto errout; } if (j == 0) pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; else pb = &dmap->d_dma->d_sbuf[j - 1]; pb->pb_addr = htole32(packp); if (stheend) { 
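The hunks in sec_process() and in ubsec_process() above both replace the old three-way IV handling with a single crypto_read_iv() call; the CRYPTO_F_IV_GENERATE branch that used arc4rand() and crypto_copyback() is dropped entirely. Judging from the branches it replaces, the helper is expected to behave roughly as sketched below (the name example_read_iv and the inline definition are illustrative, not the framework's actual implementation):

/*
 * Sketch of the behaviour expected from crypto_read_iv(), inferred
 * from the driver branches it replaces here: copy the request IV
 * either from crp_iv (separate IV) or out of the data buffer at
 * crp_iv_start.
 */
static __inline void
example_read_iv(struct cryptop *crp, void *iv)
{
	const struct crypto_session_params *csp;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
		memcpy(iv, crp->crp_iv, csp->csp_ivlen);
	else
		crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
}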
if (packl > stheend) { pb->pb_len = htole32(stheend); stheend = 0; } else { pb->pb_len = htole32(packl); stheend -= packl; } } else pb->pb_len = htole32(packl); if ((i + 1) == q->q_src_nsegs) pb->pb_next = 0; else pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_sbuf[j])); j++; } if (csp->csp_mode == CSP_MODE_DIGEST) { dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_macbuf[0])); #ifdef UBSEC_DEBUG if (ubsec_debug) printf("opkt: %x %x %x\n", dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); #endif } else { if (nicealign) { q->q_dst = q->q_src; } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) { int totlen, len; struct mbuf *m, *top, **mp; ubsecstats.hst_unaligned++; totlen = q->q_src_mapsize; if (totlen >= MINCLSIZE) { m = m_getcl(M_NOWAIT, MT_DATA, crp->crp_mbuf->m_flags & M_PKTHDR); len = MCLBYTES; } else if (crp->crp_mbuf->m_flags & M_PKTHDR) { m = m_gethdr(M_NOWAIT, MT_DATA); len = MHLEN; } else { m = m_get(M_NOWAIT, MT_DATA); len = MLEN; } if (m && crp->crp_mbuf->m_flags & M_PKTHDR && !m_dup_pkthdr(m, crp->crp_mbuf, M_NOWAIT)) { m_free(m); m = NULL; } if (m == NULL) { ubsecstats.hst_nombuf++; err = sc->sc_nqueue ? ERESTART : ENOMEM; goto errout; } m->m_len = len = min(totlen, len); totlen -= len; top = m; mp = ⊤ while (totlen > 0) { if (totlen >= MINCLSIZE) { m = m_getcl(M_NOWAIT, MT_DATA, 0); len = MCLBYTES; } else { m = m_get(M_NOWAIT, MT_DATA); len = MLEN; } if (m == NULL) { m_freem(top); ubsecstats.hst_nombuf++; err = sc->sc_nqueue ? ERESTART : ENOMEM; goto errout; } m->m_len = len = min(totlen, len); totlen -= len; *mp = m; mp = &m->m_next; } q->q_dst_m = top; ubsec_mcopy(crp->crp_mbuf, q->q_dst_m, cpskip, cpoffset); if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_dst_map) != 0) { ubsecstats.hst_nomap++; err = ENOMEM; goto errout; } if (bus_dmamap_load_mbuf_sg(sc->sc_dmat, q->q_dst_map, q->q_dst_m, q->q_dst_segs, &q->q_dst_nsegs, 0) != 0) { bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); q->q_dst_map = NULL; ubsecstats.hst_noload++; err = ENOMEM; goto errout; } q->q_dst_mapsize = q->q_src_mapsize; } else { ubsecstats.hst_iovmisaligned++; err = EINVAL; goto errout; } #ifdef UBSEC_DEBUG if (ubsec_debug) printf("dst skip: %d\n", dskip); #endif for (i = j = 0; i < q->q_dst_nsegs; i++) { struct ubsec_pktbuf *pb; bus_size_t packl = q->q_dst_segs[i].ds_len; bus_addr_t packp = q->q_dst_segs[i].ds_addr; if (dskip >= packl) { dskip -= packl; continue; } packl -= dskip; packp += dskip; dskip = 0; if (packl > 0xfffc) { err = EIO; goto errout; } if (j == 0) pb = &dmap->d_dma->d_mcr.mcr_opktbuf; else pb = &dmap->d_dma->d_dbuf[j - 1]; pb->pb_addr = htole32(packp); if (dtheend) { if (packl > dtheend) { pb->pb_len = htole32(dtheend); dtheend = 0; } else { pb->pb_len = htole32(packl); dtheend -= packl; } } else pb->pb_len = htole32(packl); if ((i + 1) == q->q_dst_nsegs) { if (csp->csp_auth_alg != 0) pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_macbuf[0])); else pb->pb_next = 0; } else pb->pb_next = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_dbuf[j])); j++; } } dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr + offsetof(struct ubsec_dmachunk, d_ctx)); if (sc->sc_flags & UBS_FLAGS_LONGCTX) { struct ubsec_pktctx_long *ctxl; ctxl = (struct ubsec_pktctx_long 
*)(dmap->d_alloc.dma_vaddr + offsetof(struct ubsec_dmachunk, d_ctx)); /* transform small context into long context */ ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long)); ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC); ctxl->pc_flags = ctx.pc_flags; ctxl->pc_offset = ctx.pc_offset; for (i = 0; i < 6; i++) ctxl->pc_deskey[i] = ctx.pc_deskey[i]; for (i = 0; i < 5; i++) ctxl->pc_hminner[i] = ctx.pc_hminner[i]; for (i = 0; i < 5; i++) ctxl->pc_hmouter[i] = ctx.pc_hmouter[i]; ctxl->pc_iv[0] = ctx.pc_iv[0]; ctxl->pc_iv[1] = ctx.pc_iv[1]; } else bcopy(&ctx, dmap->d_alloc.dma_vaddr + offsetof(struct ubsec_dmachunk, d_ctx), sizeof(struct ubsec_pktctx)); mtx_lock(&sc->sc_mcr1lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next); sc->sc_nqueue++; ubsecstats.hst_ipackets++; ubsecstats.hst_ibytes += dmap->d_alloc.dma_size; if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR) ubsec_feed(sc); mtx_unlock(&sc->sc_mcr1lock); return (0); errout: if (q != NULL) { if (q->q_dst_m != NULL) m_freem(q->q_dst_m); if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); } if (q->q_src_map != NULL) { bus_dmamap_unload(sc->sc_dmat, q->q_src_map); bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); } } if (q != NULL || err == ERESTART) { mtx_lock(&sc->sc_freeqlock); if (q != NULL) SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); if (err == ERESTART) sc->sc_needwakeup |= CRYPTO_SYMQ; mtx_unlock(&sc->sc_freeqlock); } if (err != ERESTART) { crp->crp_etype = err; crypto_done(crp); } return (err); } static void ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q) { const struct crypto_session_params *csp; struct cryptop *crp = (struct cryptop *)q->q_crp; struct ubsec_session *ses; struct ubsec_dma *dmap = q->q_dma; char hash[SHA1_HASH_LEN]; ses = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); ubsecstats.hst_opackets++; ubsecstats.hst_obytes += dmap->d_alloc.dma_size; ubsec_dma_sync(&dmap->d_alloc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); } bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, q->q_src_map); bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); if (q->q_dst_m != NULL) { m_freem(crp->crp_mbuf); crp->crp_mbuf = q->q_dst_m; } if (csp->csp_auth_alg != 0) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, ses->ses_mlen, hash); if (timingsafe_bcmp(dmap->d_dma->d_macbuf, hash, ses->ses_mlen) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, ses->ses_mlen, dmap->d_dma->d_macbuf); } mtx_lock(&sc->sc_freeqlock); SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); mtx_unlock(&sc->sc_freeqlock); crypto_done(crp); } static void ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset) { int i, j, dlen, slen; caddr_t dptr, sptr; j = 0; sptr = srcm->m_data; slen = srcm->m_len; dptr = dstm->m_data; dlen = dstm->m_len; while (1) { for (i = 0; i < min(slen, dlen); i++) { if (j < hoffset || j >= toffset) *dptr++ = *sptr++; slen--; dlen--; j++; } if (slen == 0) { srcm = srcm->m_next; if (srcm == NULL) return; sptr = srcm->m_data; slen = srcm->m_len; } if (dlen == 0) { dstm = dstm->m_next; if (dstm == NULL) return; dptr = 
dstm->m_data; dlen = dstm->m_len; } } } /* * feed the key generator, must be called at splimp() or higher. */ static int ubsec_feed2(struct ubsec_softc *sc) { struct ubsec_q2 *q; while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) { if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL) break; q = SIMPLEQ_FIRST(&sc->sc_queue2); ubsec_dma_sync(&q->q_mcr, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE); WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr); SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q_next); --sc->sc_nqueue2; SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next); } return (0); } /* * Callback for handling random numbers */ static void ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q) { struct cryptkop *krp; struct ubsec_ctx_keyop *ctx; ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr; ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE); switch (q->q_type) { #ifndef UBSEC_NO_RNG case UBS_CTXOP_RNGBYPASS: { struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q; ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD); (*sc->sc_harvest)(sc->sc_rndtest, rng->rng_buf.dma_vaddr, UBSEC_RNG_BUFSIZ*sizeof (u_int32_t)); rng->rng_used = 0; callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); break; } #endif case UBS_CTXOP_MODEXP: { struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; u_int rlen, clen; krp = me->me_krp; rlen = (me->me_modbits + 7) / 8; clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8; ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE); if (clen < rlen) krp->krp_status = E2BIG; else { if (sc->sc_flags & UBS_FLAGS_HWNORM) { bzero(krp->krp_param[krp->krp_iparams].crp_p, (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8); bcopy(me->me_C.dma_vaddr, krp->krp_param[krp->krp_iparams].crp_p, (me->me_modbits + 7) / 8); } else ubsec_kshift_l(me->me_shiftbits, me->me_C.dma_vaddr, me->me_normbits, krp->krp_param[krp->krp_iparams].crp_p, krp->krp_param[krp->krp_iparams].crp_nbits); } crypto_kdone(krp); /* bzero all potentially sensitive data */ bzero(me->me_E.dma_vaddr, me->me_E.dma_size); bzero(me->me_M.dma_vaddr, me->me_M.dma_size); bzero(me->me_C.dma_vaddr, me->me_C.dma_size); bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); /* Can't free here, so put us on the free list. */ SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next); break; } case UBS_CTXOP_RSAPRIV: { struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; u_int len; krp = rp->rpr_krp; ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE); ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD); len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8; bcopy(rp->rpr_msgout.dma_vaddr, krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len); crypto_kdone(krp); bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size); /* Can't free here, so put us on the free list. 
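The "put us on the free list" comments in ubsec_callback2() above mark a deferred-free pattern: completion runs from the interrupt path, so the DMA buffers of finished asymmetric operations are only reclaimed later, by the drain loop at the top of ubsec_kprocess(). That drain is short enough to restate as a sketch (illustrative helper name):

/*
 * Sketch: drain the deferred-free list of completed asymmetric
 * operations, as done at the start of ubsec_kprocess().
 */
static void
example_drain_q2free(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
		q = SIMPLEQ_FIRST(&sc->sc_q2free);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q_next);
		ubsec_kfree(sc, q);
	}
}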
*/ SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next); break; } default: device_printf(sc->sc_dev, "unknown ctx op: %x\n", letoh16(ctx->ctx_op)); break; } } #ifndef UBSEC_NO_RNG static void ubsec_rng(void *vsc) { struct ubsec_softc *sc = vsc; struct ubsec_q2_rng *rng = &sc->sc_rng; struct ubsec_mcr *mcr; struct ubsec_ctx_rngbypass *ctx; mtx_lock(&sc->sc_mcr2lock); if (rng->rng_used) { mtx_unlock(&sc->sc_mcr2lock); return; } sc->sc_nqueue2++; if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE) goto out; mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr; ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr; mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr); mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0; mcr->mcr_ipktbuf.pb_len = 0; mcr->mcr_reserved = mcr->mcr_pktlen = 0; mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr); mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) & UBS_PKTBUF_LEN); mcr->mcr_opktbuf.pb_next = 0; ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS); rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS; ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); rng->rng_used = 1; ubsec_feed2(sc); ubsecstats.hst_rng++; mtx_unlock(&sc->sc_mcr2lock); return; out: /* * Something weird happened, generate our own call back. */ sc->sc_nqueue2--; mtx_unlock(&sc->sc_mcr2lock); callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); } #endif /* UBSEC_NO_RNG */ static void ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; *paddr = segs->ds_addr; } static int ubsec_dma_malloc( struct ubsec_softc *sc, bus_size_t size, struct ubsec_dma_alloc *dma, int mapflags ) { int r; /* XXX could specify sc_dmat as parent but that just adds overhead */ r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma->dma_tag); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dma_tag_create failed; error %u\n", r); goto fail_1; } r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmammem_alloc failed; size %ju, error %u\n", (intmax_t)size, r); goto fail_2; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, ubsec_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { device_printf(sc->sc_dev, "ubsec_dma_malloc: " "bus_dmamap_load failed; error %u\n", r); goto fail_3; } dma->dma_size = size; return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; return (r); } static void ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /* * Resets the board. Values in the regesters are left as is * from the reset (i.e. initial values are assigned elsewhere). 
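ubsec_dma_malloc() above uses the usual tag/alloc/load triple with a trivial load callback that records the single segment's bus address; because the tag is created with nsegments = 1, the callback never has to deal with scatter. The pairing is sketched below (illustrative callback name; the error check is an addition the original ubsec_dmamap_cb() omits):

/*
 * Sketch: single-segment bus_dmamap_load() callback.  With a tag
 * created for one segment, recording segs[0].ds_addr is all that is
 * needed.
 */
static void
example_single_seg_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	bus_addr_t *paddr = arg;

	if (error == 0)
		*paddr = segs[0].ds_addr;
}

/*
 * Typical use, after bus_dmamem_alloc():
 *
 *	error = bus_dmamap_load(dma->dma_tag, dma->dma_map,
 *	    dma->dma_vaddr, size, example_single_seg_cb,
 *	    &dma->dma_paddr, BUS_DMA_NOWAIT);
 */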
*/ static void ubsec_reset_board(struct ubsec_softc *sc) { volatile u_int32_t ctrl; ctrl = READ_REG(sc, BS_CTRL); ctrl |= BS_CTRL_RESET; WRITE_REG(sc, BS_CTRL, ctrl); /* * Wait aprox. 30 PCI clocks = 900 ns = 0.9 us */ DELAY(10); } /* * Init Broadcom registers */ static void ubsec_init_board(struct ubsec_softc *sc) { u_int32_t ctrl; ctrl = READ_REG(sc, BS_CTRL); ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64); ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT; if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) ctrl |= BS_CTRL_MCR2INT; else ctrl &= ~BS_CTRL_MCR2INT; if (sc->sc_flags & UBS_FLAGS_HWNORM) ctrl &= ~BS_CTRL_SWNORM; WRITE_REG(sc, BS_CTRL, ctrl); } /* * Init Broadcom PCI registers */ static void ubsec_init_pciregs(device_t dev) { #if 0 u_int32_t misc; misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT); misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT)) | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT); misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT)) | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT); pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc); #endif /* * This will set the cache line size to 1, this will * force the BCM58xx chip just to do burst read/writes. * Cache line read/writes are to slow */ pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1); } /* * Clean up after a chip crash. * It is assumed that the caller in splimp() */ static void ubsec_cleanchip(struct ubsec_softc *sc) { struct ubsec_q *q; while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { q = SIMPLEQ_FIRST(&sc->sc_qchip); SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next); ubsec_free_q(sc, q); } sc->sc_nqchip = 0; } /* * free a ubsec_q * It is assumed that the caller is within splimp(). */ static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q) { struct ubsec_q *q2; struct cryptop *crp; int npkts; int i; npkts = q->q_nstacked_mcrs; for (i = 0; i < npkts; i++) { if(q->q_stacked_mcr[i]) { q2 = q->q_stacked_mcr[i]; if (q2->q_dst_m != NULL) m_freem(q2->q_dst_m); crp = (struct cryptop *)q2->q_crp; SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next); crp->crp_etype = EFAULT; crypto_done(crp); } else { break; } } /* * Free header MCR */ if (q->q_dst_m != NULL) m_freem(q->q_dst_m); crp = (struct cryptop *)q->q_crp; SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); crp->crp_etype = EFAULT; crypto_done(crp); return(0); } /* * Routine to reset the chip and clean up. 
* It is assumed that the caller is in splimp() */ static void ubsec_totalreset(struct ubsec_softc *sc) { ubsec_reset_board(sc); ubsec_init_board(sc); ubsec_cleanchip(sc); } static int ubsec_dmamap_aligned(struct ubsec_operand *op) { int i; for (i = 0; i < op->nsegs; i++) { if (op->segs[i].ds_addr & 3) return (0); if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) return (0); } return (1); } static void ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q) { switch (q->q_type) { case UBS_CTXOP_MODEXP: { struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; ubsec_dma_free(sc, &me->me_q.q_mcr); ubsec_dma_free(sc, &me->me_q.q_ctx); ubsec_dma_free(sc, &me->me_M); ubsec_dma_free(sc, &me->me_E); ubsec_dma_free(sc, &me->me_C); ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); break; } case UBS_CTXOP_RSAPRIV: { struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; ubsec_dma_free(sc, &rp->rpr_q.q_mcr); ubsec_dma_free(sc, &rp->rpr_q.q_ctx); ubsec_dma_free(sc, &rp->rpr_msgin); ubsec_dma_free(sc, &rp->rpr_msgout); free(rp, M_DEVBUF); break; } default: device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type); break; } } static int ubsec_kprocess(device_t dev, struct cryptkop *krp, int hint) { struct ubsec_softc *sc = device_get_softc(dev); int r; if (krp == NULL || krp->krp_callback == NULL) return (EINVAL); while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) { struct ubsec_q2 *q; q = SIMPLEQ_FIRST(&sc->sc_q2free); SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q_next); ubsec_kfree(sc, q); } switch (krp->krp_op) { case CRK_MOD_EXP: if (sc->sc_flags & UBS_FLAGS_HWNORM) r = ubsec_kprocess_modexp_hw(sc, krp, hint); else r = ubsec_kprocess_modexp_sw(sc, krp, hint); break; case CRK_MOD_EXP_CRT: return (ubsec_kprocess_rsapriv(sc, krp, hint)); default: device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n", krp->krp_op); krp->krp_status = EOPNOTSUPP; crypto_kdone(krp); return (0); } return (0); /* silence compiler */ } /* * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization) */ static int ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_modexp *me; struct ubsec_mcr *mcr; struct ubsec_ctx_modexp *ctx; struct ubsec_pktbuf *epb; int err = 0; u_int nbits, normbits, mbits, shiftbits, ebits; me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); if (me == NULL) { err = ENOMEM; goto errout; } bzero(me, sizeof *me); me->me_krp = krp; me->me_q.q_type = UBS_CTXOP_MODEXP; nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); if (nbits <= 512) normbits = 512; else if (nbits <= 768) normbits = 768; else if (nbits <= 1024) normbits = 1024; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) normbits = 1536; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) normbits = 2048; else { err = E2BIG; goto errout; } shiftbits = normbits - nbits; me->me_modbits = nbits; me->me_shiftbits = shiftbits; me->me_normbits = normbits; /* Sanity check: result bits must be >= true modulus bits. 
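 * (For example, a 1000-bit modulus selects normbits = 1024, so M, E and
 * N are pre-shifted by shiftbits = 24 bits with ubsec_kshift_r() below,
 * and the caller's result parameter must advertise at least the 1000
 * significant bits checked here.)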
*/ if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { err = ERANGE; goto errout; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &me->me_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), &me->me_q.q_ctx, 0)) { err = ENOMEM; goto errout; } mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); if (mbits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { err = ENOMEM; goto errout; } ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, me->me_M.dma_vaddr, normbits); if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { err = ENOMEM; goto errout; } bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); if (ebits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { err = ENOMEM; goto errout; } ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, me->me_E.dma_vaddr, normbits); if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), &me->me_epb, 0)) { err = ENOMEM; goto errout; } epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; epb->pb_addr = htole32(me->me_E.dma_paddr); epb->pb_next = 0; epb->pb_len = htole32(normbits / 8); #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("Epb "); ubsec_dump_pb(epb); } #endif mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); mcr->mcr_reserved = 0; mcr->mcr_pktlen = 0; mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); #ifdef DIAGNOSTIC /* Misaligned output buffer will hang the chip. */ if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) panic("%s: modexp invalid addr 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr)); if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) panic("%s: modexp invalid len 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len)); #endif ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; bzero(ctx, sizeof(*ctx)); ubsec_kshift_r(shiftbits, krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, ctx->me_N, normbits); ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); ctx->me_op = htole16(UBS_CTXOP_MODEXP); ctx->me_E_len = htole16(nbits); ctx->me_N_len = htole16(nbits); #ifdef UBSEC_DEBUG if (ubsec_debug) { ubsec_dump_mcr(mcr); ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); } #endif /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); /* Enqueue and we're done... 
*/ mtx_lock(&sc->sc_mcr2lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); ubsec_feed2(sc); ubsecstats.hst_modexp++; mtx_unlock(&sc->sc_mcr2lock); return (0); errout: if (me != NULL) { if (me->me_q.q_mcr.dma_tag != NULL) ubsec_dma_free(sc, &me->me_q.q_mcr); if (me->me_q.q_ctx.dma_tag != NULL) { bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); ubsec_dma_free(sc, &me->me_q.q_ctx); } if (me->me_M.dma_tag != NULL) { bzero(me->me_M.dma_vaddr, me->me_M.dma_size); ubsec_dma_free(sc, &me->me_M); } if (me->me_E.dma_tag != NULL) { bzero(me->me_E.dma_vaddr, me->me_E.dma_size); ubsec_dma_free(sc, &me->me_E); } if (me->me_C.dma_tag != NULL) { bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ubsec_dma_free(sc, &me->me_C); } if (me->me_epb.dma_tag != NULL) ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } /* * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) */ static int ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_modexp *me; struct ubsec_mcr *mcr; struct ubsec_ctx_modexp *ctx; struct ubsec_pktbuf *epb; int err = 0; u_int nbits, normbits, mbits, shiftbits, ebits; me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); if (me == NULL) { err = ENOMEM; goto errout; } bzero(me, sizeof *me); me->me_krp = krp; me->me_q.q_type = UBS_CTXOP_MODEXP; nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); if (nbits <= 512) normbits = 512; else if (nbits <= 768) normbits = 768; else if (nbits <= 1024) normbits = 1024; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) normbits = 1536; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) normbits = 2048; else { err = E2BIG; goto errout; } shiftbits = normbits - nbits; /* XXX ??? */ me->me_modbits = nbits; me->me_shiftbits = shiftbits; me->me_normbits = normbits; /* Sanity check: result bits must be >= true modulus bits. 
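 * (For comparison with the software-normalized path above: a 1000-bit
 * modulus still selects a 1024-bit (128-byte) operand size here, but M,
 * E and N are simply copied into their buffers and zero-padded; the chip
 * performs the normalization, so no ubsec_kshift_r() pre-shift by
 * shiftbits is done and the value is only kept for bookkeeping.)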
*/ if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { err = ERANGE; goto errout; } if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &me->me_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), &me->me_q.q_ctx, 0)) { err = ENOMEM; goto errout; } mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); if (mbits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { err = ENOMEM; goto errout; } bzero(me->me_M.dma_vaddr, normbits / 8); bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, me->me_M.dma_vaddr, (mbits + 7) / 8); if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { err = ENOMEM; goto errout; } bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); if (ebits > nbits) { err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { err = ENOMEM; goto errout; } bzero(me->me_E.dma_vaddr, normbits / 8); bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, me->me_E.dma_vaddr, (ebits + 7) / 8); if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), &me->me_epb, 0)) { err = ENOMEM; goto errout; } epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; epb->pb_addr = htole32(me->me_E.dma_paddr); epb->pb_next = 0; epb->pb_len = htole32((ebits + 7) / 8); #ifdef UBSEC_DEBUG if (ubsec_debug) { printf("Epb "); ubsec_dump_pb(epb); } #endif mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); mcr->mcr_reserved = 0; mcr->mcr_pktlen = 0; mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); #ifdef DIAGNOSTIC /* Misaligned output buffer will hang the chip. */ if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) panic("%s: modexp invalid addr 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr)); if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) panic("%s: modexp invalid len 0x%x\n", device_get_nameunit(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len)); #endif ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; bzero(ctx, sizeof(*ctx)); bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N, (nbits + 7) / 8); ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); ctx->me_op = htole16(UBS_CTXOP_MODEXP); ctx->me_E_len = htole16(ebits); ctx->me_N_len = htole16(nbits); #ifdef UBSEC_DEBUG if (ubsec_debug) { ubsec_dump_mcr(mcr); ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); } #endif /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); /* Enqueue and we're done... 
*/ mtx_lock(&sc->sc_mcr2lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); ubsec_feed2(sc); mtx_unlock(&sc->sc_mcr2lock); return (0); errout: if (me != NULL) { if (me->me_q.q_mcr.dma_tag != NULL) ubsec_dma_free(sc, &me->me_q.q_mcr); if (me->me_q.q_ctx.dma_tag != NULL) { bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); ubsec_dma_free(sc, &me->me_q.q_ctx); } if (me->me_M.dma_tag != NULL) { bzero(me->me_M.dma_vaddr, me->me_M.dma_size); ubsec_dma_free(sc, &me->me_M); } if (me->me_E.dma_tag != NULL) { bzero(me->me_E.dma_vaddr, me->me_E.dma_size); ubsec_dma_free(sc, &me->me_E); } if (me->me_C.dma_tag != NULL) { bzero(me->me_C.dma_vaddr, me->me_C.dma_size); ubsec_dma_free(sc, &me->me_C); } if (me->me_epb.dma_tag != NULL) ubsec_dma_free(sc, &me->me_epb); free(me, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } static int ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint) { struct ubsec_q2_rsapriv *rp = NULL; struct ubsec_mcr *mcr; struct ubsec_ctx_rsapriv *ctx; int err = 0; u_int padlen, msglen; msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); if (msglen > padlen) padlen = msglen; if (padlen <= 256) padlen = 256; else if (padlen <= 384) padlen = 384; else if (padlen <= 512) padlen = 512; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) padlen = 768; else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) padlen = 1024; else { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { err = E2BIG; goto errout; } if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { err = E2BIG; goto errout; } rp = (struct ubsec_q2_rsapriv *)malloc(sizeof *rp, M_DEVBUF, M_NOWAIT); if (rp == NULL) return (ENOMEM); bzero(rp, sizeof *rp); rp->rpr_krp = krp; rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), &rp->rpr_q.q_mcr, 0)) { err = ENOMEM; goto errout; } mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), &rp->rpr_q.q_ctx, 0)) { err = ENOMEM; goto errout; } ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; bzero(ctx, sizeof *ctx); /* Copy in p */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, &ctx->rpr_buf[0 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); /* Copy in q */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, &ctx->rpr_buf[1 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); /* Copy in dp */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, &ctx->rpr_buf[2 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); /* Copy in dq */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, &ctx->rpr_buf[3 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); /* Copy in pinv */ bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, &ctx->rpr_buf[4 * (padlen / 8)], (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); msglen = padlen * 2; /* Copy in input message (aligned buffer/length). */ if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { /* Is this likely? 
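 * (For scale: with a 2048-bit key on a part with UBS_FLAGS_BIGKEY, p and
 * q are about 1024 bits each, so padlen rounds up to 1024; rpr_buf then
 * carries five 128-byte fields (p, q, dp, dq, pinv) and msglen is
 * 2 * padlen = 2048 bits, i.e. 256-byte message-in/message-out buffers.)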
*/ err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { err = ENOMEM; goto errout; } bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8); bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, rp->rpr_msgin.dma_vaddr, (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); /* Prepare space for output message (aligned buffer/length). */ if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { /* Is this likely? */ err = E2BIG; goto errout; } if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { err = ENOMEM; goto errout; } bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8); mcr->mcr_pkts = htole16(1); mcr->mcr_flags = 0; mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); mcr->mcr_ipktbuf.pb_next = 0; mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); mcr->mcr_reserved = 0; mcr->mcr_pktlen = htole16(msglen); mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); mcr->mcr_opktbuf.pb_next = 0; mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); #ifdef DIAGNOSTIC if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { panic("%s: rsapriv: invalid msgin %x(0x%jx)", device_get_nameunit(sc->sc_dev), rp->rpr_msgin.dma_paddr, (uintmax_t)rp->rpr_msgin.dma_size); } if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { panic("%s: rsapriv: invalid msgout %x(0x%jx)", device_get_nameunit(sc->sc_dev), rp->rpr_msgout.dma_paddr, (uintmax_t)rp->rpr_msgout.dma_size); } #endif ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8)); ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); ctx->rpr_q_len = htole16(padlen); ctx->rpr_p_len = htole16(padlen); /* * ubsec_feed2 will sync mcr and ctx, we just need to sync * everything else. */ ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE); ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD); /* Enqueue and we're done... */ mtx_lock(&sc->sc_mcr2lock); SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); ubsec_feed2(sc); ubsecstats.hst_modexpcrt++; mtx_unlock(&sc->sc_mcr2lock); return (0); errout: if (rp != NULL) { if (rp->rpr_q.q_mcr.dma_tag != NULL) ubsec_dma_free(sc, &rp->rpr_q.q_mcr); if (rp->rpr_msgin.dma_tag != NULL) { bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); ubsec_dma_free(sc, &rp->rpr_msgin); } if (rp->rpr_msgout.dma_tag != NULL) { bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); ubsec_dma_free(sc, &rp->rpr_msgout); } free(rp, M_DEVBUF); } krp->krp_status = err; crypto_kdone(krp); return (0); } #ifdef UBSEC_DEBUG static void ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) { printf("addr 0x%x (0x%x) next 0x%x\n", pb->pb_addr, pb->pb_len, pb->pb_next); } static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *c) { printf("CTX (0x%x):\n", c->ctx_len); switch (letoh16(c->ctx_op)) { case UBS_CTXOP_RNGBYPASS: case UBS_CTXOP_RNGSHA1: break; case UBS_CTXOP_MODEXP: { struct ubsec_ctx_modexp *cx = (void *)c; int i, len; printf(" Elen %u, Nlen %u\n", letoh16(cx->me_E_len), letoh16(cx->me_N_len)); len = (cx->me_N_len + 7)/8; for (i = 0; i < len; i++) printf("%s%02x", (i == 0) ? 
" N: " : ":", cx->me_N[i]); printf("\n"); break; } default: printf("unknown context: %x\n", c->ctx_op); } printf("END CTX\n"); } static void ubsec_dump_mcr(struct ubsec_mcr *mcr) { volatile struct ubsec_mcr_add *ma; int i; printf("MCR:\n"); printf(" pkts: %u, flags 0x%x\n", letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), letoh16(ma->mcr_reserved)); printf(" %d: ipkt ", i); ubsec_dump_pb(&ma->mcr_ipktbuf); printf(" %d: opkt ", i); ubsec_dump_pb(&ma->mcr_opktbuf); ma++; } printf("END MCR\n"); } #endif /* UBSEC_DEBUG */ /* * Return the number of significant bits of a big number. */ static int ubsec_ksigbits(struct crparam *cr) { u_int plen = (cr->crp_nbits + 7) / 8; int i, sig = plen * 8; u_int8_t c, *p = cr->crp_p; for (i = plen - 1; i >= 0; i--) { c = p[i]; if (c != 0) { while ((c & 0x80) == 0) { sig--; c <<= 1; } break; } sig -= 8; } return (sig); } static void ubsec_kshift_r( u_int shiftbits, u_int8_t *src, u_int srcbits, u_int8_t *dst, u_int dstbits) { u_int slen, dlen; int i, si, di, n; slen = (srcbits + 7) / 8; dlen = (dstbits + 7) / 8; for (i = 0; i < slen; i++) dst[i] = src[i]; for (i = 0; i < dlen - slen; i++) dst[slen + i] = 0; n = shiftbits / 8; if (n != 0) { si = dlen - n - 1; di = dlen - 1; while (si >= 0) dst[di--] = dst[si--]; while (di >= 0) dst[di--] = 0; } n = shiftbits % 8; if (n != 0) { for (i = dlen - 1; i > 0; i--) dst[i] = (dst[i] << n) | (dst[i - 1] >> (8 - n)); dst[0] = dst[0] << n; } } static void ubsec_kshift_l( u_int shiftbits, u_int8_t *src, u_int srcbits, u_int8_t *dst, u_int dstbits) { int slen, dlen, i, n; slen = (srcbits + 7) / 8; dlen = (dstbits + 7) / 8; n = shiftbits / 8; for (i = 0; i < slen; i++) dst[i] = src[i + n]; for (i = 0; i < dlen - slen; i++) dst[slen + i] = 0; n = shiftbits % 8; if (n != 0) { for (i = 0; i < (dlen - 1); i++) dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); dst[dlen - 1] = dst[dlen - 1] >> n; } } Index: head/sys/mips/cavium/cryptocteon/cryptocteon.c =================================================================== --- head/sys/mips/cavium/cryptocteon/cryptocteon.c (revision 360135) +++ head/sys/mips/cavium/cryptocteon/cryptocteon.c (revision 360136) @@ -1,469 +1,464 @@ /* * Octeon Crypto for OCF * * Written by David McCullough * Copyright (C) 2009 David McCullough * * LICENSE TERMS * * The free distribution and use of this software in both source and binary * form is allowed (with or without changes) provided that: * * 1. distributions of this source code include the above copyright * notice, this list of conditions and the following disclaimer; * * 2. distributions in binary form include the above copyright * notice, this list of conditions and the following disclaimer * in the documentation and/or other associated materials; * * 3. the copyright holder's name is not used to endorse products * built using this software without specific written permission. * * DISCLAIMER * * This software is provided 'as is' with no explicit or implied warranties * in respect of its properties, including, but not limited to, correctness * and/or fitness for purpose. 
* --------------------------------------------------------------------------- */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct cryptocteon_softc { int32_t sc_cid; /* opencrypto id */ }; int cryptocteon_debug = 0; TUNABLE_INT("hw.cryptocteon.debug", &cryptocteon_debug); static void cryptocteon_identify(driver_t *, device_t); static int cryptocteon_probe(device_t); static int cryptocteon_attach(device_t); static int cryptocteon_process(device_t, struct cryptop *, int); static int cryptocteon_probesession(device_t, const struct crypto_session_params *); static int cryptocteon_newsession(device_t, crypto_session_t, const struct crypto_session_params *); static void cryptocteon_identify(driver_t *drv, device_t parent) { if (octeon_has_feature(OCTEON_FEATURE_CRYPTO)) BUS_ADD_CHILD(parent, 0, "cryptocteon", 0); } static int cryptocteon_probe(device_t dev) { device_set_desc(dev, "Octeon Secure Coprocessor"); return (0); } static int cryptocteon_attach(device_t dev) { struct cryptocteon_softc *sc; sc = device_get_softc(dev); sc->sc_cid = crypto_get_driverid(dev, sizeof(struct octo_sess), CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC); if (sc->sc_cid < 0) { device_printf(dev, "crypto_get_driverid ret %d\n", sc->sc_cid); return (ENXIO); } return (0); } static bool cryptocteon_auth_supported(const struct crypto_session_params *csp) { u_int hash_len; switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: hash_len = MD5_HASH_LEN; break; case CRYPTO_SHA1_HMAC: hash_len = SHA1_HASH_LEN; break; default: return (false); } if (csp->csp_auth_klen > hash_len) return (false); return (true); } static bool cryptocteon_cipher_supported(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: if (csp->csp_ivlen != 8) return (false); if (csp->csp_cipher_klen != 8 && csp->csp_cipher_klen != 24) return (false); break; case CRYPTO_AES_CBC: if (csp->csp_ivlen != 16) return (false); if (csp->csp_cipher_klen != 16 && csp->csp_cipher_klen != 24 && csp->csp_cipher_klen != 32) return (false); break; default: return (false); } return (true); } static int cryptocteon_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!cryptocteon_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!cryptocteon_cipher_supported(csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!cryptocteon_auth_supported(csp) || !cryptocteon_cipher_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_ACCEL_SOFTWARE); } static void cryptocteon_calc_hash(const struct crypto_session_params *csp, const char *key, struct octo_sess *ocd) { char hash_key[SHA1_HASH_LEN]; memset(hash_key, 0, sizeof(hash_key)); memcpy(hash_key, key, csp->csp_auth_klen); octo_calc_hash(csp->csp_auth_alg == CRYPTO_SHA1_HMAC, hash_key, ocd->octo_hminner, ocd->octo_hmouter); } /* Generate a new octo session. 
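 *
 * An illustrative sketch, not taken from this driver, of session
 * parameters the checks above accept (AES-128-CBC with HMAC-SHA1 in
 * encrypt-then-authenticate mode), with every other field left at zero:
 *
 *	csp.csp_mode = CSP_MODE_ETA;
 *	csp.csp_cipher_alg = CRYPTO_AES_CBC;
 *	csp.csp_cipher_klen = 16;	(AES-128 key)
 *	csp.csp_ivlen = 16;
 *	csp.csp_auth_alg = CRYPTO_SHA1_HMAC;
 *	csp.csp_auth_klen = SHA1_HASH_LEN;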
*/ static int cryptocteon_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct cryptocteon_softc *sc; struct octo_sess *ocd; sc = device_get_softc(dev); ocd = crypto_get_driver_session(cses); ocd->octo_encklen = csp->csp_cipher_klen; if (csp->csp_cipher_key != NULL) memcpy(ocd->octo_enckey, csp->csp_cipher_key, ocd->octo_encklen); if (csp->csp_auth_key != NULL) cryptocteon_calc_hash(csp, csp->csp_auth_key, ocd); ocd->octo_mlen = csp->csp_auth_mlen; if (csp->csp_auth_mlen == 0) { switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: ocd->octo_mlen = MD5_HASH_LEN; break; case CRYPTO_SHA1_HMAC: ocd->octo_mlen = SHA1_HASH_LEN; break; } } switch (csp->csp_mode) { case CSP_MODE_DIGEST: switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: ocd->octo_encrypt = octo_null_md5_encrypt; ocd->octo_decrypt = octo_null_md5_encrypt; break; case CRYPTO_SHA1_HMAC: ocd->octo_encrypt = octo_null_sha1_encrypt; ocd->octo_decrypt = octo_null_sha1_encrypt; break; } break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: ocd->octo_encrypt = octo_des_cbc_encrypt; ocd->octo_decrypt = octo_des_cbc_decrypt; break; case CRYPTO_AES_CBC: ocd->octo_encrypt = octo_aes_cbc_encrypt; ocd->octo_decrypt = octo_aes_cbc_decrypt; break; } break; case CSP_MODE_ETA: switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: ocd->octo_encrypt = octo_des_cbc_md5_encrypt; ocd->octo_decrypt = octo_des_cbc_md5_decrypt; break; case CRYPTO_SHA1_HMAC: ocd->octo_encrypt = octo_des_cbc_sha1_encrypt; ocd->octo_decrypt = octo_des_cbc_sha1_encrypt; break; } break; case CRYPTO_AES_CBC: switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: ocd->octo_encrypt = octo_aes_cbc_md5_encrypt; ocd->octo_decrypt = octo_aes_cbc_md5_decrypt; break; case CRYPTO_SHA1_HMAC: ocd->octo_encrypt = octo_aes_cbc_sha1_encrypt; ocd->octo_decrypt = octo_aes_cbc_sha1_decrypt; break; } break; } break; } KASSERT(ocd->octo_encrypt != NULL && ocd->octo_decrypt != NULL, ("%s: missing function pointers", __func__)); return (0); } /* * Process a request. */ static int cryptocteon_process(device_t dev, struct cryptop *crp, int hint) { const struct crypto_session_params *csp; struct octo_sess *od; size_t iovcnt, iovlen; struct mbuf *m = NULL; struct uio *uiop = NULL; unsigned char *ivp = NULL; unsigned char iv_data[16]; unsigned char icv[SHA1_HASH_LEN], icv2[SHA1_HASH_LEN]; int auth_off, auth_len, crypt_off, crypt_len; struct cryptocteon_softc *sc; sc = device_get_softc(dev); crp->crp_etype = 0; od = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); /* * The crypto routines assume that the regions to auth and * cipher are exactly 8 byte multiples and aligned on 8 * byte logical boundaries within the iovecs. */ if (crp->crp_aad_length % 8 != 0 || crp->crp_payload_length % 8 != 0) { crp->crp_etype = EFBIG; goto done; } /* * As currently written, the crypto routines assume the AAD and * payload are adjacent. 
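 * (For example, a request with crp_aad_start = 0, crp_aad_length = 8 and
 * crp_payload_start = 8 is accepted; one whose payload does not begin at
 * crp_aad_start + crp_aad_length is rejected with EFBIG just below.)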
*/ if (crp->crp_aad_length != 0 && crp->crp_payload_start != crp->crp_aad_start + crp->crp_aad_length) { crp->crp_etype = EFBIG; goto done; } crypt_off = crp->crp_payload_start; crypt_len = crp->crp_payload_length; if (crp->crp_aad_length != 0) { auth_off = crp->crp_aad_start; auth_len = crp->crp_aad_length + crp->crp_payload_length; } else { auth_off = crypt_off; auth_len = crypt_len; } /* * do some error checking outside of the loop for m and IOV processing * this leaves us with valid m or uiop pointers for later */ switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: { unsigned frags; m = crp->crp_mbuf; for (frags = 0; m != NULL; frags++) m = m->m_next; if (frags >= UIO_MAXIOV) { printf("%s,%d: %d frags > UIO_MAXIOV", __FILE__, __LINE__, frags); crp->crp_etype = EFBIG; goto done; } m = crp->crp_mbuf; break; } case CRYPTO_BUF_UIO: uiop = crp->crp_uio; if (uiop->uio_iovcnt > UIO_MAXIOV) { printf("%s,%d: %d uio_iovcnt > UIO_MAXIOV", __FILE__, __LINE__, uiop->uio_iovcnt); crp->crp_etype = EFBIG; goto done; } break; } if (csp->csp_cipher_alg != 0) { - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(iv_data, csp->csp_ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, - iv_data); - ivp = iv_data; - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) + if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) ivp = crp->crp_iv; else { crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv_data); ivp = iv_data; } } /* * setup the I/O vector to cover the buffer */ switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: iovcnt = 0; iovlen = 0; while (m != NULL) { od->octo_iov[iovcnt].iov_base = mtod(m, void *); od->octo_iov[iovcnt].iov_len = m->m_len; m = m->m_next; iovlen += od->octo_iov[iovcnt++].iov_len; } break; case CRYPTO_BUF_UIO: iovlen = 0; for (iovcnt = 0; iovcnt < uiop->uio_iovcnt; iovcnt++) { od->octo_iov[iovcnt].iov_base = uiop->uio_iov[iovcnt].iov_base; od->octo_iov[iovcnt].iov_len = uiop->uio_iov[iovcnt].iov_len; iovlen += od->octo_iov[iovcnt].iov_len; } break; case CRYPTO_BUF_CONTIG: iovlen = crp->crp_ilen; od->octo_iov[0].iov_base = crp->crp_buf; od->octo_iov[0].iov_len = crp->crp_ilen; iovcnt = 1; break; default: panic("can't happen"); } /* * setup a new explicit key */ if (crp->crp_cipher_key != NULL) memcpy(od->octo_enckey, crp->crp_cipher_key, od->octo_encklen); if (crp->crp_auth_key != NULL) cryptocteon_calc_hash(csp, crp->crp_auth_key, od); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) (*od->octo_encrypt)(od, od->octo_iov, iovcnt, iovlen, auth_off, auth_len, crypt_off, crypt_len, icv, ivp); else (*od->octo_decrypt)(od, od->octo_iov, iovcnt, iovlen, auth_off, auth_len, crypt_off, crypt_len, icv, ivp); if (csp->csp_auth_alg != 0) { if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, od->octo_mlen, icv2); if (timingsafe_bcmp(icv, icv2, od->octo_mlen) != 0) crp->crp_etype = EBADMSG; } else crypto_copyback(crp, crp->crp_digest_start, od->octo_mlen, icv); } done: crypto_done(crp); return (0); } static device_method_t cryptocteon_methods[] = { /* device methods */ DEVMETHOD(device_identify, cryptocteon_identify), DEVMETHOD(device_probe, cryptocteon_probe), DEVMETHOD(device_attach, cryptocteon_attach), /* crypto device methods */ DEVMETHOD(cryptodev_probesession, cryptocteon_probesession), DEVMETHOD(cryptodev_newsession, cryptocteon_newsession), DEVMETHOD(cryptodev_process, cryptocteon_process), { 0, 0 } }; static driver_t cryptocteon_driver = { "cryptocteon", cryptocteon_methods, sizeof (struct cryptocteon_softc), }; static devclass_t 
cryptocteon_devclass; DRIVER_MODULE(cryptocteon, nexus, cryptocteon_driver, cryptocteon_devclass, 0, 0); Index: head/sys/mips/nlm/dev/sec/nlmsec.c =================================================================== --- head/sys/mips/nlm/dev/sec/nlmsec.c (revision 360135) +++ head/sys/mips/nlm/dev/sec/nlmsec.c (revision 360136) @@ -1,706 +1,702 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #include #include #include #include #include #include #include #include #include #include unsigned int creditleft; static int xlp_sec_init(struct xlp_sec_softc *sc); static int xlp_sec_probesession(device_t, const struct crypto_session_params *); static int xlp_sec_newsession(device_t , crypto_session_t, const struct crypto_session_params *); static int xlp_sec_process(device_t , struct cryptop *, int); static void xlp_copyiv(struct xlp_sec_softc *, struct xlp_sec_command *, const struct crypto_session_params *); static int xlp_get_nsegs(struct cryptop *, unsigned int *); static int xlp_alloc_cmd_params(struct xlp_sec_command *, unsigned int); static void xlp_free_cmd_params(struct xlp_sec_command *); static int xlp_sec_probe(device_t); static int xlp_sec_attach(device_t); static int xlp_sec_detach(device_t); static device_method_t xlp_sec_methods[] = { /* device interface */ DEVMETHOD(device_probe, xlp_sec_probe), DEVMETHOD(device_attach, xlp_sec_attach), DEVMETHOD(device_detach, xlp_sec_detach), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* crypto device methods */ DEVMETHOD(cryptodev_probesession, xlp_sec_probesession), DEVMETHOD(cryptodev_newsession, xlp_sec_newsession), DEVMETHOD(cryptodev_process, xlp_sec_process), DEVMETHOD_END }; static driver_t xlp_sec_driver = { "nlmsec", xlp_sec_methods, sizeof(struct xlp_sec_softc) }; static devclass_t xlp_sec_devclass; DRIVER_MODULE(nlmsec, pci, xlp_sec_driver, xlp_sec_devclass, 0, 0); MODULE_DEPEND(nlmsec, crypto, 1, 1, 1); 
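/*
 * (The MODULE_DEPEND() above ties nlmsec to the opencrypto module; the
 * three numbers are the minimum, preferred and maximum acceptable module
 * versions, in that order.)
 */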
void nlm_xlpsec_msgring_handler(int vc, int size, int code, int src_id, struct nlm_fmn_msg *msg, void *data); #ifdef NLM_SEC_DEBUG #define extract_bits(x, bitshift, bitcnt) \ (((unsigned long long)x >> bitshift) & ((1ULL << bitcnt) - 1)) void print_crypto_params(struct xlp_sec_command *cmd, struct nlm_fmn_msg m) { unsigned long long msg0,msg1,msg2,msg3,msg4,msg5,msg6,msg7,msg8; msg0 = cmd->ctrlp->desc0; msg1 = cmd->paramp->desc0; msg2 = cmd->paramp->desc1; msg3 = cmd->paramp->desc2; msg4 = cmd->paramp->desc3; msg5 = cmd->paramp->segment[0][0]; msg6 = cmd->paramp->segment[0][1]; msg7 = m.msg[0]; msg8 = m.msg[1]; printf("msg0 %llx msg1 %llx msg2 %llx msg3 %llx msg4 %llx msg5 %llx" "msg6 %llx msg7 %llx msg8 %llx\n", msg0, msg1, msg2, msg3, msg4, msg5, msg6, msg7, msg8); printf("c0: hmac %d htype %d hmode %d ctype %d cmode %d arc4 %x\n", (unsigned int)extract_bits(msg0, 61, 1), (unsigned int)extract_bits(msg0, 52, 8), (unsigned int)extract_bits(msg0, 43, 8), (unsigned int)extract_bits(msg0, 34, 8), (unsigned int)extract_bits(msg0, 25, 8), (unsigned int)extract_bits(msg0, 0, 23)); printf("p0: tls %d hsrc %d hl3 %d enc %d ivl %d hd %llx\n", (unsigned int)extract_bits(msg1, 63, 1), (unsigned int)extract_bits(msg1,62,1), (unsigned int)extract_bits(msg1,60,1), (unsigned int)extract_bits(msg1,59,1), (unsigned int)extract_bits(msg1,41,16), extract_bits(msg1,0,40)); printf("p1: clen %u hl %u\n", (unsigned int)extract_bits(msg2, 32, 32), (unsigned int)extract_bits(msg2,0,32)); printf("p2: ivoff %d cbit %d coff %d hbit %d hclb %d hoff %d\n", (unsigned int)extract_bits(msg3, 45, 17), (unsigned int)extract_bits(msg3, 42,3), (unsigned int)extract_bits(msg3, 22,16), (unsigned int)extract_bits(msg3, 19,3), (unsigned int)extract_bits(msg3, 18,1), (unsigned int)extract_bits(msg3, 0, 16)); printf("p3: desfbid %d tlen %d arc4 %x hmacpad %d\n", (unsigned int)extract_bits(msg4, 48,16), (unsigned int)extract_bits(msg4,11,16), (unsigned int)extract_bits(msg4,6,3), (unsigned int)extract_bits(msg4,5,1)); printf("p4: sflen %d sddr %llx \n", (unsigned int)extract_bits(msg5, 48, 16),extract_bits(msg5, 0, 40)); printf("p5: dflen %d cl3 %d cclob %d cdest %llx \n", (unsigned int)extract_bits(msg6, 48, 16), (unsigned int)extract_bits(msg6, 46, 1), (unsigned int)extract_bits(msg6, 41, 1), extract_bits(msg6, 0, 40)); printf("fmn0: fbid %d dfrlen %d dfrv %d cklen %d cdescaddr %llx\n", (unsigned int)extract_bits(msg7, 48, 16), (unsigned int)extract_bits(msg7,46,2), (unsigned int)extract_bits(msg7,45,1), (unsigned int)extract_bits(msg7,40,5), (extract_bits(msg7,0,34)<< 6)); printf("fmn1: arc4 %d hklen %d pdesclen %d pktdescad %llx\n", (unsigned int)extract_bits(msg8, 63, 1), (unsigned int)extract_bits(msg8,56,5), (unsigned int)extract_bits(msg8,43,12), (extract_bits(msg8,0,34) << 6)); return; } void print_cmd(struct xlp_sec_command *cmd) { printf("session_num :%d\n",cmd->session_num); printf("crp :0x%x\n",(uint32_t)cmd->crp); printf("enccrd :0x%x\n",(uint32_t)cmd->enccrd); printf("maccrd :0x%x\n",(uint32_t)cmd->maccrd); printf("ses :%d\n",(uint32_t)cmd->ses); printf("ctrlp :0x%x\n",(uint32_t)cmd->ctrlp); printf("paramp :0x%x\n",(uint32_t)cmd->paramp); printf("hashdest :0x%x\n",(uint32_t)cmd->hashdest); printf("hashsrc :%d\n",cmd->hashsrc); printf("hmacpad :%d\n",cmd->hmacpad); printf("hashoff :%d\n",cmd->hashoff); printf("hashlen :%d\n",cmd->hashlen); printf("cipheroff :%d\n",cmd->cipheroff); printf("cipherlen :%d\n",cmd->cipherlen); printf("ivoff :%d\n",cmd->ivoff); printf("ivlen :%d\n",cmd->ivlen); printf("hashalg 
:%d\n",cmd->hashalg); printf("hashmode :%d\n",cmd->hashmode); printf("cipheralg :%d\n",cmd->cipheralg); printf("ciphermode :%d\n",cmd->ciphermode); printf("nsegs :%d\n",cmd->nsegs); printf("hash_dst_len :%d\n",cmd->hash_dst_len); } #endif /* NLM_SEC_DEBUG */ static int xlp_sec_init(struct xlp_sec_softc *sc) { /* Register interrupt handler for the SEC CMS messages */ if (register_msgring_handler(sc->sec_vc_start, sc->sec_vc_end, nlm_xlpsec_msgring_handler, sc) != 0) { printf("Couldn't register sec msgring handler\n"); return (-1); } /* Do the CMS credit initialization */ /* Currently it is configured by default to 50 when kernel comes up */ return (0); } /* This function is called from an interrupt handler */ void nlm_xlpsec_msgring_handler(int vc, int size, int code, int src_id, struct nlm_fmn_msg *msg, void *data) { struct xlp_sec_command *cmd = NULL; struct xlp_sec_softc *sc = NULL; uint8_t hash[HASH_MAX_LEN]; KASSERT(code == FMN_SWCODE_CRYPTO, ("%s: bad code = %d, expected code = %d\n", __FUNCTION__, code, FMN_SWCODE_CRYPTO)); sc = (struct xlp_sec_softc *)data; KASSERT(src_id >= sc->sec_vc_start && src_id <= sc->sec_vc_end, ("%s: bad src_id = %d, expect %d - %d\n", __FUNCTION__, src_id, sc->sec_vc_start, sc->sec_vc_end)); cmd = (struct xlp_sec_command *)(uintptr_t)msg->msg[0]; KASSERT(cmd != NULL && cmd->crp != NULL, ("%s :cmd not received properly\n",__FUNCTION__)); KASSERT(CRYPTO_ERROR(msg->msg[1]) == 0, ("%s: Message rcv msg0 %llx msg1 %llx err %x \n", __FUNCTION__, (unsigned long long)msg->msg[0], (unsigned long long)msg->msg[1], (int)CRYPTO_ERROR(msg->msg[1]))); /* If there are not enough credits to send, then send request * will fail with ERESTART and the driver will be blocked until it is * unblocked here after knowing that there are sufficient credits to * send the request again. */ if (sc->sc_needwakeup) { atomic_add_int(&creditleft, sc->sec_msgsz); if (creditleft >= (NLM_CRYPTO_LEFT_REQS)) { crypto_unblock(sc->sc_cid, sc->sc_needwakeup); sc->sc_needwakeup &= (~(CRYPTO_SYMQ | CRYPTO_ASYMQ)); } } if (cmd->hash_dst_len != 0) { if (cmd->crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(cmd->crp, cmd->crp->crp_digest_start, cmd->hash_dst_len, hash); if (timingsafe_bcmp(cmd->hashdest, hash, cmd->hash_dst_len) != 0) cmd->crp->crp_etype = EBADMSG; } else crypto_copyback(cmd->crp, cmd->crp->crp_digest_start, cmd->hash_dst_len, cmd->hashdest); } /* This indicates completion of the crypto operation */ crypto_done(cmd->crp); xlp_free_cmd_params(cmd); return; } static int xlp_sec_probe(device_t dev) { struct xlp_sec_softc *sc; if (pci_get_vendor(dev) == PCI_VENDOR_NETLOGIC && pci_get_device(dev) == PCI_DEVICE_ID_NLM_SAE) { sc = device_get_softc(dev); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Attach an interface that successfully probed. 
*/ static int xlp_sec_attach(device_t dev) { struct xlp_sec_softc *sc = device_get_softc(dev); uint64_t base; int qstart, qnum; int freq, node; sc->sc_dev = dev; node = nlm_get_device_node(pci_get_slot(dev)); freq = nlm_set_device_frequency(node, DFS_DEVICE_SAE, 250); if (bootverbose) device_printf(dev, "SAE Freq: %dMHz\n", freq); if(pci_get_device(dev) == PCI_DEVICE_ID_NLM_SAE) { device_set_desc(dev, "XLP Security Accelerator"); sc->sc_cid = crypto_get_driverid(dev, sizeof(struct xlp_sec_session), CRYPTOCAP_F_HARDWARE); if (sc->sc_cid < 0) { printf("xlp_sec - error : could not get the driver" " id\n"); goto error_exit; } base = nlm_get_sec_pcibase(node); qstart = nlm_qidstart(base); qnum = nlm_qnum(base); sc->sec_vc_start = qstart; sc->sec_vc_end = qstart + qnum - 1; } if (xlp_sec_init(sc) != 0) goto error_exit; if (bootverbose) device_printf(dev, "SEC Initialization complete!\n"); return (0); error_exit: return (ENXIO); } /* * Detach an interface that successfully probed. */ static int xlp_sec_detach(device_t dev) { return (0); } static bool xlp_sec_auth_supported(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_MD5: case CRYPTO_SHA1: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: break; default: return (false); } return (true); } static bool xlp_sec_cipher_supported(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: case CRYPTO_3DES_CBC: if (csp->csp_ivlen != XLP_SEC_DES_IV_LENGTH) return (false); break; case CRYPTO_AES_CBC: if (csp->csp_ivlen != XLP_SEC_AES_IV_LENGTH) return (false); break; case CRYPTO_ARC4: if (csp->csp_ivlen != XLP_SEC_ARC4_IV_LENGTH) return (false); break; default: return (false); } return (true); } static int xlp_sec_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_DIGEST: if (!xlp_sec_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_CIPHER: if (!xlp_sec_cipher_supported(csp)) return (EINVAL); break; case CSP_MODE_ETA: if (!xlp_sec_auth_supported(csp) || !xlp_sec_cipher_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_HARDWARE); } static int xlp_sec_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct xlp_sec_session *ses; ses = crypto_get_driver_session(cses); if (csp->csp_auth_alg != 0) { if (csp->csp_auth_mlen == 0) ses->hs_mlen = crypto_auth_hash(csp)->hashsize; else ses->hs_mlen = csp->csp_auth_mlen; } return (0); } /* * XXX freesession routine should run a zero'd mac/encrypt key into context * ram. to blow away any keys already stored there. 
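 */

/*
 * An illustrative sketch, not part of this driver: with the
 * CRYPTO_F_IV_GENERATE path removed by this change, a request's IV is
 * either passed out of line (CRYPTO_F_IV_SEPARATE, read from crp_iv) or
 * left in the data buffer at crp_iv_start, which this driver points the
 * chip at directly.  The hypothetical helper below shows a generic way
 * to materialize it either way.
 */
static void
example_fetch_iv(struct cryptop *crp, const struct crypto_session_params *csp,
    uint8_t *iv)
{

        if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
                memcpy(iv, crp->crp_iv, csp->csp_ivlen);
        else
                crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
}

/*
 * Copy the caller-supplied IV, if any, into the command.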
*/ static void xlp_copyiv(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd, const struct crypto_session_params *csp) { struct cryptop *crp = NULL; crp = cmd->crp; if (csp->csp_cipher_alg != CRYPTO_ARC4) { - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(cmd->iv, csp->csp_ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, - cmd->iv); - } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) + if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) memcpy(cmd->iv, crp->crp_iv, csp->csp_ivlen); } } static int xlp_get_nsegs(struct cryptop *crp, unsigned int *nsegs) { switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: { struct mbuf *m = NULL; m = crp->crp_mbuf; while (m != NULL) { *nsegs += NLM_CRYPTO_NUM_SEGS_REQD(m->m_len); m = m->m_next; } break; } case CRYPTO_BUF_UIO: { struct uio *uio = NULL; struct iovec *iov = NULL; int iol = 0; uio = (struct uio *)crp->crp_buf; iov = (struct iovec *)uio->uio_iov; iol = uio->uio_iovcnt; while (iol > 0) { *nsegs += NLM_CRYPTO_NUM_SEGS_REQD(iov->iov_len); iol--; iov++; } break; } case CRYPTO_BUF_CONTIG: *nsegs = NLM_CRYPTO_NUM_SEGS_REQD(crp->crp_ilen); break; default: return (EINVAL); } return (0); } static int xlp_alloc_cmd_params(struct xlp_sec_command *cmd, unsigned int nsegs) { int err = 0; if(cmd == NULL) { err = EINVAL; goto error; } if ((cmd->ctrlp = malloc(sizeof(struct nlm_crypto_pkt_ctrl), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto error; } if (((uintptr_t)cmd->ctrlp & (XLP_L2L3_CACHELINE_SIZE - 1))) { err = EINVAL; goto error; } /* (nsegs - 1) because one seg is part of the structure already */ if ((cmd->paramp = malloc(sizeof(struct nlm_crypto_pkt_param) + (16 * (nsegs - 1)), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto error; } if (((uintptr_t)cmd->paramp & (XLP_L2L3_CACHELINE_SIZE - 1))) { err = EINVAL; goto error; } if ((cmd->iv = malloc(EALG_MAX_BLOCK_LEN, M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto error; } if ((cmd->hashdest = malloc(HASH_MAX_LEN, M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto error; } error: return (err); } static void xlp_free_cmd_params(struct xlp_sec_command *cmd) { if (cmd->ctrlp != NULL) free(cmd->ctrlp, M_DEVBUF); if (cmd->paramp != NULL) free(cmd->paramp, M_DEVBUF); if (cmd->iv != NULL) free(cmd->iv, M_DEVBUF); if (cmd->hashdest != NULL) free(cmd->hashdest, M_DEVBUF); if (cmd != NULL) free(cmd, M_DEVBUF); return; } static int xlp_sec_process(device_t dev, struct cryptop *crp, int hint) { struct xlp_sec_softc *sc = device_get_softc(dev); const struct crypto_session_params *csp; struct xlp_sec_command *cmd = NULL; int err = -1, ret = 0; struct xlp_sec_session *ses; unsigned int nsegs = 0; ses = crypto_get_driver_session(crp->crp_session); csp = crypto_get_params(crp->crp_session); /* * This device only support AAD requests where the AAD is * adjacent to the payload. 
*/ if (crp->crp_aad_length != 0 && crp->crp_payload_start != crp->crp_aad_start + crp->crp_aad_length) { err = EFBIG; goto errout; } if ((cmd = malloc(sizeof(struct xlp_sec_command), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { err = ENOMEM; goto errout; } cmd->crp = crp; cmd->ses = ses; cmd->hash_dst_len = ses->hs_mlen; if ((ret = xlp_get_nsegs(crp, &nsegs)) != 0) { err = EINVAL; goto errout; } if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { /* Since IV is given as separate segment to avoid copy */ nsegs += 1; } cmd->nsegs = nsegs; if ((err = xlp_alloc_cmd_params(cmd, nsegs)) != 0) goto errout; switch (csp->csp_mode) { case CSP_MODE_CIPHER: if ((ret = nlm_get_cipher_param(cmd, csp)) != 0) { err = EINVAL; goto errout; } cmd->cipheroff = crp->crp_payload_start; cmd->cipherlen = crp->crp_payload_length; if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { cmd->cipheroff += cmd->ivlen; cmd->ivoff = 0; } else cmd->ivoff = crp->crp_iv_start; xlp_copyiv(sc, cmd, csp); if ((err = nlm_crypto_do_cipher(sc, cmd, csp)) != 0) goto errout; break; case CSP_MODE_DIGEST: if ((ret = nlm_get_digest_param(cmd, csp)) != 0) { err = EINVAL; goto errout; } cmd->hashoff = crp->crp_payload_start; cmd->hashlen = crp->crp_payload_length; cmd->hmacpad = 0; cmd->hashsrc = 0; if ((err = nlm_crypto_do_digest(sc, cmd, csp)) != 0) goto errout; break; case CSP_MODE_ETA: if ((ret = nlm_get_cipher_param(cmd, csp)) != 0) { err = EINVAL; goto errout; } if ((ret = nlm_get_digest_param(cmd, csp)) != 0) { err = EINVAL; goto errout; } if (crp->crp_aad_length != 0) { cmd->hashoff = crp->crp_aad_start; cmd->hashlen = crp->crp_aad_length + crp->crp_payload_length; } else { cmd->hashoff = crp->crp_payload_start; cmd->hashlen = crp->crp_payload_length; } cmd->hmacpad = 0; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) cmd->hashsrc = 1; else cmd->hashsrc = 0; cmd->cipheroff = crp->crp_payload_start; cmd->cipherlen = crp->crp_payload_length; if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { cmd->hashoff += cmd->ivlen; cmd->cipheroff += cmd->ivlen; cmd->ivoff = 0; } else cmd->ivoff = crp->crp_iv_start; xlp_copyiv(sc, cmd, csp); if ((err = nlm_crypto_do_cipher_digest(sc, cmd, csp)) != 0) goto errout; break; default: err = EINVAL; goto errout; } return (0); errout: xlp_free_cmd_params(cmd); if (err == ERESTART) { sc->sc_needwakeup |= CRYPTO_SYMQ; creditleft = 0; return (err); } crp->crp_etype = err; crypto_done(crp); return (err); } Index: head/sys/opencrypto/crypto.c =================================================================== --- head/sys/opencrypto/crypto.c (revision 360135) +++ head/sys/opencrypto/crypto.c (revision 360136) @@ -1,2272 +1,2261 @@ /*- * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Cryptographic Subsystem. * * This code is derived from the Openbsd Cryptographic Framework (OCF) * that has the copyright shown below. Very little of the original * code remains. */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #define CRYPTO_TIMING /* enable timing support */ #include "opt_compat.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) #include #endif SDT_PROVIDER_DEFINE(opencrypto); /* * Crypto drivers register themselves by allocating a slot in the * crypto_drivers table with crypto_get_driverid() and then registering * each asym algorithm they support with crypto_kregister(). */ static struct mtx crypto_drivers_mtx; /* lock on driver table */ #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) /* * Crypto device/driver capabilities structure. * * Synchronization: * (d) - protected by CRYPTO_DRIVER_LOCK() * (q) - protected by CRYPTO_Q_LOCK() * Not tagged fields are read-only. */ struct cryptocap { device_t cc_dev; uint32_t cc_hid; u_int32_t cc_sessions; /* (d) # of sessions */ u_int32_t cc_koperations; /* (d) # os asym operations */ u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1]; int cc_flags; /* (d) flags */ #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ int cc_qblocked; /* (q) symmetric q blocked */ int cc_kqblocked; /* (q) asymmetric q blocked */ size_t cc_session_size; volatile int cc_refs; }; static struct cryptocap **crypto_drivers = NULL; static int crypto_drivers_size = 0; struct crypto_session { struct cryptocap *cap; void *softc; struct crypto_session_params csp; }; /* * There are two queues for crypto requests; one for symmetric (e.g. * cipher) operations and one for asymmetric (e.g. MOD)operations. * A single mutex is used to lock access to both queues. 
We could * have one per-queue but having one simplifies handling of block/unblock * operations. */ static int crp_sleep = 0; static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */ static TAILQ_HEAD(,cryptkop) crp_kq; static struct mtx crypto_q_mtx; #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) static SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0, "In-kernel cryptography"); /* * Taskqueue used to dispatch the crypto requests * that have the CRYPTO_F_ASYNC flag */ static struct taskqueue *crypto_tq; /* * Crypto seq numbers are operated on with modular arithmetic */ #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0) struct crypto_ret_worker { struct mtx crypto_ret_mtx; TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symetric jobs */ TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symetric jobs */ TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */ u_int32_t reorder_ops; /* total ordered sym jobs received */ u_int32_t reorder_cur_seq; /* current sym job dispatched */ struct proc *cryptoretproc; }; static struct crypto_ret_worker *crypto_ret_workers = NULL; #define CRYPTO_RETW(i) (&crypto_ret_workers[i]) #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers) #define FOREACH_CRYPTO_RETW(w) \ for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx) #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx) #define CRYPTO_RETW_EMPTY(w) \ (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q)) static int crypto_workers_num = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN, &crypto_workers_num, 0, "Number of crypto workers used to dispatch crypto jobs"); #endif static uma_zone_t cryptop_zone; static uma_zone_t cryptoses_zone; int crypto_userasymcrypto = 1; SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable user-mode access to asymmetric crypto support"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, &crypto_userasymcrypto, 0, "Enable/disable user-mode access to asymmetric crypto support"); #endif int crypto_devallowsoft = 0; SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable use of software crypto by /dev/crypto"); #ifdef COMPAT_FREEBSD12 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, &crypto_devallowsoft, 0, "Enable/disable use of software crypto by /dev/crypto"); #endif MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); static void crypto_proc(void); static struct proc *cryptoproc; static void crypto_ret_proc(struct crypto_ret_worker *ret_worker); static void crypto_destroy(void); static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); static int crypto_kinvoke(struct cryptkop *krp); static void crypto_task_invoke(void *ctx, int pending); static void crypto_batch_enqueue(struct cryptop *crp); static struct cryptostats cryptostats; SYSCTL_STRUCT(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, &cryptostats, cryptostats, "Crypto system statistics"); #ifdef CRYPTO_TIMING static int crypto_timing = 0; SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW, &crypto_timing, 0, "Enable/disable crypto timing support"); #endif /* Try 
to avoid directly exposing the key buffer as a symbol */ static struct keybuf *keybuf; static struct keybuf empty_keybuf = { .kb_nents = 0 }; /* Obtain the key buffer from boot metadata */ static void keybuf_init(void) { caddr_t kmdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); keybuf = (struct keybuf *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_KEYBUF); if (keybuf == NULL) keybuf = &empty_keybuf; } /* It'd be nice if we could store these in some kind of secure memory... */ struct keybuf * get_keybuf(void) { return (keybuf); } static struct cryptocap * cap_ref(struct cryptocap *cap) { refcount_acquire(&cap->cc_refs); return (cap); } static void cap_rele(struct cryptocap *cap) { if (refcount_release(&cap->cc_refs) == 0) return; KASSERT(cap->cc_sessions == 0, ("freeing crypto driver with active sessions")); KASSERT(cap->cc_koperations == 0, ("freeing crypto driver with active key operations")); free(cap, M_CRYPTO_DATA); } static int crypto_init(void) { struct crypto_ret_worker *ret_worker; int error; mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", MTX_DEF|MTX_QUIET); TAILQ_INIT(&crp_q); TAILQ_INIT(&crp_kq); mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop), 0, 0, 0, 0, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); cryptoses_zone = uma_zcreate("crypto_session", sizeof(struct crypto_session), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); if (cryptop_zone == NULL || cryptoses_zone == NULL) { printf("crypto_init: cannot setup crypto zones\n"); error = ENOMEM; goto bad; } crypto_drivers_size = CRYPTO_DRIVERS_INITIAL; crypto_drivers = malloc(crypto_drivers_size * sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO); if (crypto_drivers == NULL) { printf("crypto_init: cannot setup crypto drivers\n"); error = ENOMEM; goto bad; } if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) crypto_workers_num = mp_ncpus; crypto_tq = taskqueue_create("crypto", M_WAITOK|M_ZERO, taskqueue_thread_enqueue, &crypto_tq); if (crypto_tq == NULL) { printf("crypto init: cannot setup crypto taskqueue\n"); error = ENOMEM; goto bad; } taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, "crypto"); error = kproc_create((void (*)(void *)) crypto_proc, NULL, &cryptoproc, 0, 0, "crypto"); if (error) { printf("crypto_init: cannot start crypto thread; error %d", error); goto bad; } crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); if (crypto_ret_workers == NULL) { error = ENOMEM; printf("crypto_init: cannot allocate ret workers\n"); goto bad; } FOREACH_CRYPTO_RETW(ret_worker) { TAILQ_INIT(&ret_worker->crp_ordered_ret_q); TAILQ_INIT(&ret_worker->crp_ret_q); TAILQ_INIT(&ret_worker->crp_ret_kq); ret_worker->reorder_ops = 0; ret_worker->reorder_cur_seq = 0; mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF); error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker)); if (error) { printf("crypto_init: cannot start cryptoret thread; error %d", error); goto bad; } } keybuf_init(); return 0; bad: crypto_destroy(); return error; } /* * Signal a crypto thread to terminate. We use the driver * table lock to synchronize the sleep/wakeups so that we * are sure the threads have terminated before we release * the data structures they use. 
See crypto_finis below * for the other half of this song-and-dance. */ static void crypto_terminate(struct proc **pp, void *q) { struct proc *p; mtx_assert(&crypto_drivers_mtx, MA_OWNED); p = *pp; *pp = NULL; if (p) { wakeup_one(q); PROC_LOCK(p); /* NB: insure we don't miss wakeup */ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); PROC_UNLOCK(p); CRYPTO_DRIVER_LOCK(); } } static void hmac_init_pad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx, uint8_t padval) { uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; u_int i; KASSERT(axf->blocksize <= sizeof(hmac_key), ("Invalid HMAC block size %d", axf->blocksize)); /* * If the key is larger than the block size, use the digest of * the key as the key instead. */ memset(hmac_key, 0, sizeof(hmac_key)); if (klen > axf->blocksize) { axf->Init(auth_ctx); axf->Update(auth_ctx, key, klen); axf->Final(hmac_key, auth_ctx); klen = axf->hashsize; } else memcpy(hmac_key, key, klen); for (i = 0; i < axf->blocksize; i++) hmac_key[i] ^= padval; axf->Init(auth_ctx); axf->Update(auth_ctx, hmac_key, axf->blocksize); } void hmac_init_ipad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL); } void hmac_init_opad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx) { hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL); } static void crypto_destroy(void) { struct crypto_ret_worker *ret_worker; int i; /* * Terminate any crypto threads. */ if (crypto_tq != NULL) taskqueue_drain_all(crypto_tq); CRYPTO_DRIVER_LOCK(); crypto_terminate(&cryptoproc, &crp_q); FOREACH_CRYPTO_RETW(ret_worker) crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); CRYPTO_DRIVER_UNLOCK(); /* XXX flush queues??? */ /* * Reclaim dynamically allocated resources. 
*/ for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] != NULL) cap_rele(crypto_drivers[i]); } free(crypto_drivers, M_CRYPTO_DATA); if (cryptoses_zone != NULL) uma_zdestroy(cryptoses_zone); if (cryptop_zone != NULL) uma_zdestroy(cryptop_zone); mtx_destroy(&crypto_q_mtx); FOREACH_CRYPTO_RETW(ret_worker) mtx_destroy(&ret_worker->crypto_ret_mtx); free(crypto_ret_workers, M_CRYPTO_DATA); if (crypto_tq != NULL) taskqueue_free(crypto_tq); mtx_destroy(&crypto_drivers_mtx); } uint32_t crypto_ses2hid(crypto_session_t crypto_session) { return (crypto_session->cap->cc_hid); } uint32_t crypto_ses2caps(crypto_session_t crypto_session) { return (crypto_session->cap->cc_flags & 0xff000000); } void * crypto_get_driver_session(crypto_session_t crypto_session) { return (crypto_session->softc); } const struct crypto_session_params * crypto_get_params(crypto_session_t crypto_session) { return (&crypto_session->csp); } struct auth_hash * crypto_auth_hash(const struct crypto_session_params *csp) { switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: return (&auth_hash_hmac_md5); case CRYPTO_SHA1_HMAC: return (&auth_hash_hmac_sha1); case CRYPTO_SHA2_224_HMAC: return (&auth_hash_hmac_sha2_224); case CRYPTO_SHA2_256_HMAC: return (&auth_hash_hmac_sha2_256); case CRYPTO_SHA2_384_HMAC: return (&auth_hash_hmac_sha2_384); case CRYPTO_SHA2_512_HMAC: return (&auth_hash_hmac_sha2_512); case CRYPTO_NULL_HMAC: return (&auth_hash_null); case CRYPTO_RIPEMD160_HMAC: return (&auth_hash_hmac_ripemd_160); case CRYPTO_MD5_KPDK: return (&auth_hash_key_md5); case CRYPTO_SHA1_KPDK: return (&auth_hash_key_sha1); #ifdef notyet case CRYPTO_MD5: return (&auth_hash_md5); #endif case CRYPTO_SHA1: return (&auth_hash_sha1); case CRYPTO_SHA2_224: return (&auth_hash_sha2_224); case CRYPTO_SHA2_256: return (&auth_hash_sha2_256); case CRYPTO_SHA2_384: return (&auth_hash_sha2_384); case CRYPTO_SHA2_512: return (&auth_hash_sha2_512); case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_nist_gmac_aes_128); case 192 / 8: return (&auth_hash_nist_gmac_aes_192); case 256 / 8: return (&auth_hash_nist_gmac_aes_256); default: return (NULL); } case CRYPTO_BLAKE2B: return (&auth_hash_blake2b); case CRYPTO_BLAKE2S: return (&auth_hash_blake2s); case CRYPTO_POLY1305: return (&auth_hash_poly1305); case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen) { case 128 / 8: return (&auth_hash_ccm_cbc_mac_128); case 192 / 8: return (&auth_hash_ccm_cbc_mac_192); case 256 / 8: return (&auth_hash_ccm_cbc_mac_256); default: return (NULL); } default: return (NULL); } } struct enc_xform * crypto_cipher(const struct crypto_session_params *csp) { switch (csp->csp_cipher_alg) { case CRYPTO_DES_CBC: return (&enc_xform_des); case CRYPTO_3DES_CBC: return (&enc_xform_3des); case CRYPTO_BLF_CBC: return (&enc_xform_blf); case CRYPTO_CAST_CBC: return (&enc_xform_cast5); case CRYPTO_SKIPJACK_CBC: return (&enc_xform_skipjack); case CRYPTO_RIJNDAEL128_CBC: return (&enc_xform_rijndael128); case CRYPTO_AES_XTS: return (&enc_xform_aes_xts); case CRYPTO_AES_ICM: return (&enc_xform_aes_icm); case CRYPTO_AES_NIST_GCM_16: return (&enc_xform_aes_nist_gcm); case CRYPTO_CAMELLIA_CBC: return (&enc_xform_camellia); case CRYPTO_NULL_CBC: return (&enc_xform_null); case CRYPTO_CHACHA20: return (&enc_xform_chacha20); case CRYPTO_AES_CCM_16: return (&enc_xform_ccm); default: return (NULL); } } static struct cryptocap * crypto_checkdriver(u_int32_t hid) { return (hid >= crypto_drivers_size ? 
NULL : crypto_drivers[hid]); } /* * Select a driver for a new session that supports the specified * algorithms and, optionally, is constrained according to the flags. */ static struct cryptocap * crypto_select_driver(const struct crypto_session_params *csp, int flags) { struct cryptocap *cap, *best; int best_match, error, hid; CRYPTO_DRIVER_ASSERT(); best = NULL; for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap == NULL || (cap->cc_flags & flags) == 0) continue; error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp); if (error >= 0) continue; /* * Use the driver with the highest probe value. * Hardware drivers use a higher probe value than * software. In case of a tie, prefer the driver with * the fewest active sessions. */ if (best == NULL || error > best_match || (error == best_match && cap->cc_sessions < best->cc_sessions)) { best = cap; best_match = error; } } return best; } static bool alg_is_compression(int alg) { if (alg == CRYPTO_DEFLATE_COMP) return (true); return (false); } static bool alg_is_cipher(int alg) { if (alg >= CRYPTO_DES_CBC && alg <= CRYPTO_SKIPJACK_CBC) return (true); if (alg >= CRYPTO_AES_CBC && alg <= CRYPTO_ARC4) return (true); if (alg == CRYPTO_NULL_CBC) return (true); if (alg >= CRYPTO_CAMELLIA_CBC && alg <= CRYPTO_AES_ICM) return (true); if (alg == CRYPTO_CHACHA20) return (true); return (false); } static bool alg_is_digest(int alg) { if (alg >= CRYPTO_MD5_HMAC && alg <= CRYPTO_SHA1_KPDK) return (true); if (alg >= CRYPTO_MD5 && alg <= CRYPTO_SHA1) return (true); if (alg == CRYPTO_NULL_HMAC) return (true); if (alg >= CRYPTO_SHA2_256_HMAC && alg <= CRYPTO_SHA2_512_HMAC) return (true); if (alg == CRYPTO_AES_NIST_GMAC) return (true); if (alg >= CRYPTO_BLAKE2B && alg <= CRYPTO_BLAKE2S) return (true); if (alg >= CRYPTO_SHA2_224_HMAC && alg <= CRYPTO_POLY1305) return (true); if (alg == CRYPTO_AES_CCM_CBC_MAC) return (true); return (false); } static bool alg_is_keyed_digest(int alg) { if (alg >= CRYPTO_MD5_HMAC && alg <= CRYPTO_SHA1_KPDK) return (true); if (alg >= CRYPTO_SHA2_256_HMAC && alg <= CRYPTO_SHA2_512_HMAC) return (true); if (alg == CRYPTO_AES_NIST_GMAC) return (true); if (alg >= CRYPTO_BLAKE2B && alg <= CRYPTO_BLAKE2S) return (true); if (alg == CRYPTO_SHA2_224_HMAC) return (true); if (alg == CRYPTO_POLY1305) return (true); if (alg == CRYPTO_AES_CCM_CBC_MAC) return (true); return (false); } static bool alg_is_aead(int alg) { if (alg == CRYPTO_AES_NIST_GCM_16) return (true); if (alg == CRYPTO_AES_CCM_16) return (true); return (false); } /* Various sanity checks on crypto session parameters. */ static bool check_csp(const struct crypto_session_params *csp) { struct auth_hash *axf; /* Mode-independent checks. 
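 *
 * (Editor's illustration.)  A parameter block that passes these checks,
 * describing an AES-256-GCM AEAD session; "key" stands in for a
 * caller-supplied 32-byte key:
 *
 *	struct crypto_session_params csp;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_AEAD;
 *	csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
 *	csp.csp_cipher_key = key;
 *	csp.csp_cipher_klen = 32;
 *	csp.csp_ivlen = AES_GCM_IV_LEN;
 *	csp.csp_auth_mlen = AES_GMAC_HASH_LEN;
 *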
*/ if (csp->csp_flags != 0) return (false); if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) return (false); if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0) return (false); if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0) return (false); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: if (!alg_is_compression(csp->csp_cipher_alg)) return (false); if (csp->csp_flags != 0) return (false); if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_CIPHER: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_cipher_alg != CRYPTO_ARC4) { if (csp->csp_ivlen == 0) return (false); } } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || csp->csp_auth_mlen != 0) return (false); break; case CSP_MODE_DIGEST: if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0) return (false); /* IV is optional for digests (e.g. GMAC). */ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; case CSP_MODE_AEAD: if (!alg_is_aead(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_ivlen == 0 || csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0) return (false); /* * XXX: Would be nice to have a better way to get this * value. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: if (csp->csp_auth_mlen > 16) return (false); break; } break; case CSP_MODE_ETA: if (!alg_is_cipher(csp->csp_cipher_alg)) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { if (csp->csp_cipher_klen == 0) return (false); if (csp->csp_cipher_alg != CRYPTO_ARC4) { if (csp->csp_ivlen == 0) return (false); } } if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) return (false); if (!alg_is_digest(csp->csp_auth_alg)) return (false); /* Key is optional for BLAKE2 digests. */ if (csp->csp_auth_alg == CRYPTO_BLAKE2B || csp->csp_auth_alg == CRYPTO_BLAKE2S) ; else if (alg_is_keyed_digest(csp->csp_auth_alg)) { if (csp->csp_auth_klen == 0) return (false); } else { if (csp->csp_auth_klen != 0) return (false); } if (csp->csp_auth_mlen != 0) { axf = crypto_auth_hash(csp); if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) return (false); } break; default: return (false); } return (true); } /* * Delete a session after it has been detached from its driver. */ static void crypto_deletesession(crypto_session_t cses) { struct cryptocap *cap; cap = cses->cap; explicit_bzero(cses->softc, cap->cc_session_size); free(cses->softc, M_CRYPTO_DATA); uma_zfree(cryptoses_zone, cses); CRYPTO_DRIVER_LOCK(); cap->cc_sessions--; if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); } /* * Create a new session. 
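 *
 * (Editor's sketch.)  A typical call, reusing the parameter block from
 * the check_csp() example above and letting the framework pick any
 * capable driver:
 *
 *	crypto_session_t ses;
 *	int error;
 *
 *	error = crypto_newsession(&ses, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *	if (error != 0)
 *		return (error);
 *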
The crid argument specifies a crypto * driver to use or constraints on a driver to select (hardware * only, software only, either). Whatever driver is selected * must be capable of the requested crypto algorithms. */ int crypto_newsession(crypto_session_t *cses, const struct crypto_session_params *csp, int crid) { crypto_session_t res; struct cryptocap *cap; int err; if (!check_csp(csp)) return (EINVAL); res = NULL; CRYPTO_DRIVER_LOCK(); if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { /* * Use specified driver; verify it is capable. */ cap = crypto_checkdriver(crid); if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0) cap = NULL; } else { /* * No requested driver; select based on crid flags. */ cap = crypto_select_driver(csp, crid); } if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); CRYPTDEB("no driver"); return (EOPNOTSUPP); } cap_ref(cap); cap->cc_sessions++; CRYPTO_DRIVER_UNLOCK(); res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO); res->cap = cap; res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO); res->csp = *csp; /* Call the driver initialization routine. */ err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp); if (err != 0) { CRYPTDEB("dev newsession failed: %d", err); crypto_deletesession(res); return (err); } *cses = res; return (0); } /* * Delete an existing session (or a reserved session on an unregistered * driver). */ void crypto_freesession(crypto_session_t cses) { struct cryptocap *cap; if (cses == NULL) return; cap = cses->cap; /* Call the driver cleanup routine, if available. */ CRYPTODEV_FREESESSION(cap->cc_dev, cses); crypto_deletesession(cses); } /* * Return a new driver id. Registers a driver with the system so that * it can be probed by subsequent sessions. */ int32_t crypto_get_driverid(device_t dev, size_t sessionsize, int flags) { struct cryptocap *cap, **newdrv; int i; if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { device_printf(dev, "no flags specified when registering driver\n"); return -1; } cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); cap->cc_dev = dev; cap->cc_session_size = sessionsize; cap->cc_flags = flags; refcount_init(&cap->cc_refs, 1); CRYPTO_DRIVER_LOCK(); for (;;) { for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) break; } if (i < crypto_drivers_size) break; /* Out of entries, allocate some more. */ if (2 * crypto_drivers_size <= crypto_drivers_size) { CRYPTO_DRIVER_UNLOCK(); printf("crypto: driver count wraparound!\n"); cap_rele(cap); return (-1); } CRYPTO_DRIVER_UNLOCK(); newdrv = malloc(2 * crypto_drivers_size * sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO); CRYPTO_DRIVER_LOCK(); memcpy(newdrv, crypto_drivers, crypto_drivers_size * sizeof(*crypto_drivers)); crypto_drivers_size *= 2; free(crypto_drivers, M_CRYPTO_DATA); crypto_drivers = newdrv; } cap->cc_hid = i; crypto_drivers[i] = cap; CRYPTO_DRIVER_UNLOCK(); if (bootverbose) printf("crypto: assign %s driver id %u, flags 0x%x\n", device_get_nameunit(dev), i, flags); return i; } /* * Lookup a driver by name. We match against the full device * name and unit, and against just the name. The latter gives * us a simple widlcarding by device name. On success return the * driver/hardware identifier; otherwise return -1. 
*/ int crypto_find_driver(const char *match) { struct cryptocap *cap; int i, len = strlen(match); CRYPTO_DRIVER_LOCK(); for (i = 0; i < crypto_drivers_size; i++) { if (crypto_drivers[i] == NULL) continue; cap = crypto_drivers[i]; if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 || strncmp(match, device_get_name(cap->cc_dev), len) == 0) { CRYPTO_DRIVER_UNLOCK(); return (i); } } CRYPTO_DRIVER_UNLOCK(); return (-1); } /* * Return the device_t for the specified driver or NULL * if the driver identifier is invalid. */ device_t crypto_find_device_byhid(int hid) { struct cryptocap *cap; device_t dev; dev = NULL; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) dev = cap->cc_dev; CRYPTO_DRIVER_UNLOCK(); return (dev); } /* * Return the device/driver capabilities. */ int crypto_getcaps(int hid) { struct cryptocap *cap; int flags; flags = 0; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(hid); if (cap != NULL) flags = cap->cc_flags; CRYPTO_DRIVER_UNLOCK(); return (flags); } /* * Register support for a key-related algorithm. This routine * is called once for each algorithm supported a driver. */ int crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags) { struct cryptocap *cap; int err; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL && (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { /* * XXX Do some performance testing to determine placing. * XXX We probably need an auxiliary data structure that * XXX describes relative performances. */ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; if (bootverbose) printf("crypto: %s registers key alg %u flags %u\n" , device_get_nameunit(cap->cc_dev) , kalg , flags ); err = 0; } else err = EINVAL; CRYPTO_DRIVER_UNLOCK(); return err; } /* * Unregister all algorithms associated with a crypto driver. * If there are pending sessions using it, leave enough information * around so that subsequent calls using those sessions will * correctly detect the driver has been unregistered and reroute * requests. */ int crypto_unregister_all(u_int32_t driverid) { struct cryptocap *cap; CRYPTO_DRIVER_LOCK(); cap = crypto_checkdriver(driverid); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); return (EINVAL); } cap->cc_flags |= CRYPTOCAP_F_CLEANUP; crypto_drivers[driverid] = NULL; /* * XXX: This doesn't do anything to kick sessions that * have no pending operations. */ while (cap->cc_sessions != 0 || cap->cc_koperations != 0) mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0); CRYPTO_DRIVER_UNLOCK(); cap_rele(cap); return (0); } /* * Clear blockage on a driver. The what parameter indicates whether * the driver is now ready for cryptop's and/or cryptokop's. */ int crypto_unblock(u_int32_t driverid, int what) { struct cryptocap *cap; int err; CRYPTO_Q_LOCK(); cap = crypto_checkdriver(driverid); if (cap != NULL) { if (what & CRYPTO_SYMQ) cap->cc_qblocked = 0; if (what & CRYPTO_ASYMQ) cap->cc_kqblocked = 0; if (crp_sleep) wakeup_one(&crp_q); err = 0; } else err = EINVAL; CRYPTO_Q_UNLOCK(); return err; } #ifdef INVARIANTS /* Various sanity checks on crypto requests. 
*/ static void crp_sanity(struct cryptop *crp) { struct crypto_session_params *csp; KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); KASSERT(crp->crp_ilen >= 0, ("incoming crp with -ve input length")); KASSERT(crp->crp_etype == 0, ("incoming crp with error")); KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), ("incoming crp already done")); csp = &crp->crp_session->csp; switch (csp->csp_mode) { case CSP_MODE_COMPRESS: KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || crp->crp_op == CRYPTO_OP_DECOMPRESS, ("invalid compression op %x", crp->crp_op)); break; case CSP_MODE_CIPHER: KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || crp->crp_op == CRYPTO_OP_DECRYPT, ("invalid cipher op %x", crp->crp_op)); break; case CSP_MODE_DIGEST: KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, ("invalid digest op %x", crp->crp_op)); break; case CSP_MODE_AEAD: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid AEAD op %x", crp->crp_op)); if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("GCM without a separate IV")); if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, ("CCM without a separate IV")); break; case CSP_MODE_ETA: KASSERT(crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || crp->crp_op == (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), ("invalid ETA op %x", crp->crp_op)); break; } - KASSERT((crp->crp_flags & CRYPTO_F_IV_GENERATE) == 0 || - crp->crp_op == CRYPTO_OP_ENCRYPT || - crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST), - ("IV_GENERATE set for non-encryption operation %x", crp->crp_op)); - KASSERT((crp->crp_flags & - (CRYPTO_F_IV_SEPARATE | CRYPTO_F_IV_GENERATE)) != - (CRYPTO_F_IV_SEPARATE | CRYPTO_F_IV_GENERATE), - ("crp with both IV_SEPARATE and IV_GENERATE set")); KASSERT(crp->crp_buf_type >= CRYPTO_BUF_CONTIG && crp->crp_buf_type <= CRYPTO_BUF_MBUF, ("invalid crp buffer type %d", crp->crp_buf_type)); if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { KASSERT(crp->crp_aad_start == 0 || crp->crp_aad_start < crp->crp_ilen, ("invalid AAD start")); KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0, ("AAD with zero length and non-zero start")); KASSERT(crp->crp_aad_length == 0 || crp->crp_aad_start + crp->crp_aad_length <= crp->crp_ilen, ("AAD outside input length")); } else { KASSERT(crp->crp_aad_start == 0 && crp->crp_aad_length == 0, ("AAD region in request not supporting AAD")); } if (csp->csp_ivlen == 0) { - KASSERT((crp->crp_flags & - (CRYPTO_F_IV_SEPARATE | CRYPTO_F_IV_GENERATE)) == 0, - ("IV_GENERATE or IV_SEPARATE set when IV isn't used")); + KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, + ("IV_SEPARATE set when IV isn't used")); KASSERT(crp->crp_iv_start == 0, ("crp_iv_start set when IV isn't used")); } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { KASSERT(crp->crp_iv_start == 0, ("IV_SEPARATE used with non-zero IV start")); } else { KASSERT(crp->crp_iv_start < crp->crp_ilen, ("invalid IV start")); KASSERT(crp->crp_iv_start + csp->csp_ivlen <= crp->crp_ilen, ("IV outside input length")); } KASSERT(crp->crp_payload_start == 0 || crp->crp_payload_start < crp->crp_ilen, ("invalid payload start")); KASSERT(crp->crp_payload_start + crp->crp_payload_length <= crp->crp_ilen, ("payload outside input length")); if (csp->csp_mode == CSP_MODE_DIGEST || csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == 
CSP_MODE_ETA) { KASSERT(crp->crp_digest_start == 0 || crp->crp_digest_start < crp->crp_ilen, ("invalid digest start")); /* XXX: For the mlen == 0 case this check isn't perfect. */ KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= crp->crp_ilen, ("digest outside input length")); } else { KASSERT(crp->crp_digest_start == 0, ("non-zero digest start for request without a digest")); } if (csp->csp_cipher_klen != 0) KASSERT(csp->csp_cipher_key != NULL || crp->crp_cipher_key != NULL, ("cipher request without a key")); if (csp->csp_auth_klen != 0) KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, ("auth request without a key")); KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); } #endif /* * Add a crypto request to a queue, to be processed by the kernel thread. */ int crypto_dispatch(struct cryptop *crp) { struct cryptocap *cap; int result; #ifdef INVARIANTS crp_sanity(crp); #endif - - /* TODO: Handle CRYPTO_F_IV_GENERATE so drivers don't have to. */ cryptostats.cs_ops++; #ifdef CRYPTO_TIMING if (crypto_timing) binuptime(&crp->crp_tstamp); #endif crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num; if (CRYPTOP_ASYNC(crp)) { if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { struct crypto_ret_worker *ret_worker; ret_worker = CRYPTO_RETW(crp->crp_retw_id); CRYPTO_RETW_LOCK(ret_worker); crp->crp_seq = ret_worker->reorder_ops++; CRYPTO_RETW_UNLOCK(ret_worker); } TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); taskqueue_enqueue(crypto_tq, &crp->crp_task); return (0); } if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { /* * Caller marked the request to be processed * immediately; dispatch it directly to the * driver unless the driver is currently blocked. */ cap = crp->crp_session->cap; if (!cap->cc_qblocked) { result = crypto_invoke(cap, crp, 0); if (result != ERESTART) return (result); /* * The driver ran out of resources, put the request on * the queue. */ } } crypto_batch_enqueue(crp); return 0; } void crypto_batch_enqueue(struct cryptop *crp) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); } /* * Add an asymetric crypto request to a queue, * to be processed by the kernel thread. */ int crypto_kdispatch(struct cryptkop *krp) { int error; cryptostats.cs_kops++; krp->krp_cap = NULL; error = crypto_kinvoke(krp); if (error == ERESTART) { CRYPTO_Q_LOCK(); TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); if (crp_sleep) wakeup_one(&crp_q); CRYPTO_Q_UNLOCK(); error = 0; } return error; } /* * Verify a driver is suitable for the specified operation. */ static __inline int kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) { return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; } /* * Select a driver for an asym operation. The driver must * support the necessary algorithm. The caller can constrain * which device is selected with the flags parameter. The * algorithm we use here is pretty stupid; just use the first * driver that supports the algorithms we need. If there are * multiple suitable drivers we choose the driver with the * fewest active operations. We prefer hardware-backed * drivers to software ones when either may be used. */ static struct cryptocap * crypto_select_kdriver(const struct cryptkop *krp, int flags) { struct cryptocap *cap, *best; int match, hid; CRYPTO_DRIVER_ASSERT(); /* * Look first for hardware crypto devices if permitted. 
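 *
 * (Editor's note.)  "flags" here comes from the submitter's krp_crid,
 * so a caller willing to use either kind of driver sets, before
 * calling crypto_kdispatch():
 *
 *	krp->krp_crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
 *
 * crypto_lookup_kdriver() strips CRYPTOCAP_F_SOFTWARE again when
 * kern.crypto.allow_soft is zero.
 *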
*/ if (flags & CRYPTOCAP_F_HARDWARE) match = CRYPTOCAP_F_HARDWARE; else match = CRYPTOCAP_F_SOFTWARE; best = NULL; again: for (hid = 0; hid < crypto_drivers_size; hid++) { /* * If there is no driver for this slot, or the driver * is not appropriate (hardware or software based on * match), then skip. */ cap = crypto_drivers[hid]; if (cap->cc_dev == NULL || (cap->cc_flags & match) == 0) continue; /* verify all the algorithms are supported. */ if (kdriver_suitable(cap, krp)) { if (best == NULL || cap->cc_koperations < best->cc_koperations) best = cap; } } if (best != NULL) return best; if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { /* sort of an Algol 68-style for loop */ match = CRYPTOCAP_F_SOFTWARE; goto again; } return best; } /* * Choose a driver for an asymmetric crypto request. */ static struct cryptocap * crypto_lookup_kdriver(struct cryptkop *krp) { struct cryptocap *cap; uint32_t crid; /* If this request is requeued, it might already have a driver. */ cap = krp->krp_cap; if (cap != NULL) return (cap); /* Use krp_crid to choose a driver. */ crid = krp->krp_crid; if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { cap = crypto_checkdriver(crid); if (cap != NULL) { /* * Driver present, it must support the * necessary algorithm and, if s/w drivers are * excluded, it must be registered as * hardware-backed. */ if (!kdriver_suitable(cap, krp) || (!crypto_devallowsoft && (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) cap = NULL; } } else { /* * No requested driver; select based on crid flags. */ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ crid &= ~CRYPTOCAP_F_SOFTWARE; cap = crypto_select_kdriver(krp, crid); } if (cap != NULL) { krp->krp_cap = cap_ref(cap); krp->krp_hid = cap->cc_hid; } return (cap); } /* * Dispatch an asymmetric crypto request. */ static int crypto_kinvoke(struct cryptkop *krp) { struct cryptocap *cap = NULL; int error; KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); KASSERT(krp->krp_callback != NULL, ("%s: krp->crp_callback == NULL", __func__)); CRYPTO_DRIVER_LOCK(); cap = crypto_lookup_kdriver(krp); if (cap == NULL) { CRYPTO_DRIVER_UNLOCK(); krp->krp_status = ENODEV; crypto_kdone(krp); return (0); } /* * If the device is blocked, return ERESTART to requeue it. */ if (cap->cc_kqblocked) { /* * XXX: Previously this set krp_status to ERESTART and * invoked crypto_kdone but the caller would still * requeue it. 
*/ CRYPTO_DRIVER_UNLOCK(); return (ERESTART); } cap->cc_koperations++; CRYPTO_DRIVER_UNLOCK(); error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); if (error == ERESTART) { CRYPTO_DRIVER_LOCK(); cap->cc_koperations--; CRYPTO_DRIVER_UNLOCK(); return (error); } KASSERT(error == 0, ("error %d returned from crypto_kprocess", error)); return (0); } #ifdef CRYPTO_TIMING static void crypto_tstat(struct cryptotstat *ts, struct bintime *bt) { struct bintime now, delta; struct timespec t; uint64_t u; binuptime(&now); u = now.frac; delta.frac = now.frac - bt->frac; delta.sec = now.sec - bt->sec; if (u < delta.frac) delta.sec--; bintime2timespec(&delta, &t); timespecadd(&ts->acc, &t, &ts->acc); if (timespeccmp(&t, &ts->min, <)) ts->min = t; if (timespeccmp(&t, &ts->max, >)) ts->max = t; ts->count++; *bt = now; } #endif static void crypto_task_invoke(void *ctx, int pending) { struct cryptocap *cap; struct cryptop *crp; int result; crp = (struct cryptop *)ctx; cap = crp->crp_session->cap; result = crypto_invoke(cap, crp, 0); if (result == ERESTART) crypto_batch_enqueue(crp); } /* * Dispatch a crypto request to the appropriate crypto devices. */ static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) { KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); KASSERT(crp->crp_callback != NULL, ("%s: crp->crp_callback == NULL", __func__)); KASSERT(crp->crp_session != NULL, ("%s: crp->crp_session == NULL", __func__)); #ifdef CRYPTO_TIMING if (crypto_timing) crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp); #endif if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { struct crypto_session_params csp; crypto_session_t nses; /* * Driver has unregistered; migrate the session and return * an error to the caller so they'll resubmit the op. * * XXX: What if there are more already queued requests for this * session? * * XXX: Real solution is to make sessions refcounted * and force callers to hold a reference when * assigning to crp_session. Could maybe change * crypto_getreq to accept a session pointer to make * that work. Alternatively, we could abandon the * notion of rewriting crp_session in requests forcing * the caller to deal with allocating a new session. * Perhaps provide a method to allow a crp's session to * be swapped that callers could use. */ csp = crp->crp_session->csp; crypto_freesession(crp->crp_session); /* * XXX: Key pointers may no longer be valid. If we * really want to support this we need to define the * KPI such that 'csp' is required to be valid for the * duration of a session by the caller perhaps. * * XXX: If the keys have been changed this will reuse * the old keys. This probably suggests making * rekeying more explicit and updating the key * pointers in 'csp' when the keys change. */ if (crypto_newsession(&nses, &csp, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) crp->crp_session = nses; crp->crp_etype = EAGAIN; crypto_done(crp); return 0; } else { /* * Invoke the driver to process the request. 
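 *
 * (Editor's sketch of the driver side; the "mydrv" names are
 * hypothetical.)  A CRYPTODEV_PROCESS implementation either accepts
 * the request, later completing it with crypto_done(), or returns
 * ERESTART so the request is requeued and the driver marked blocked:
 *
 *	static int
 *	mydrv_process(device_t dev, struct cryptop *crp, int hint)
 *	{
 *		struct mydrv_softc *sc = device_get_softc(dev);
 *
 *		if (mydrv_hw_enqueue(sc, crp) != 0)
 *			return (ERESTART);
 *		return (0);
 *	}
 *
 * On completion (e.g. in the interrupt handler) the driver sets
 * crp->crp_etype and calls crypto_done(crp).
 *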
*/ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); } } void crypto_freereq(struct cryptop *crp) { if (crp == NULL) return; #ifdef DIAGNOSTIC { struct cryptop *crp2; struct crypto_ret_worker *ret_worker; CRYPTO_Q_LOCK(); TAILQ_FOREACH(crp2, &crp_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the crypto queue (%p).", crp)); } CRYPTO_Q_UNLOCK(); FOREACH_CRYPTO_RETW(ret_worker) { CRYPTO_RETW_LOCK(ret_worker); TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { KASSERT(crp2 != crp, ("Freeing cryptop from the return queue (%p).", crp)); } CRYPTO_RETW_UNLOCK(ret_worker); } } #endif uma_zfree(cryptop_zone, crp); } struct cryptop * crypto_getreq(crypto_session_t cses, int how) { struct cryptop *crp; MPASS(how == M_WAITOK || how == M_NOWAIT); crp = uma_zalloc(cryptop_zone, how | M_ZERO); crp->crp_session = cses; return (crp); } /* * Invoke the callback on behalf of the driver. */ void crypto_done(struct cryptop *crp) { KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); crp->crp_flags |= CRYPTO_F_DONE; if (crp->crp_etype != 0) cryptostats.cs_errs++; #ifdef CRYPTO_TIMING if (crypto_timing) crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp); #endif /* * CBIMM means unconditionally do the callback immediately; * CBIFSYNC means do the callback immediately only if the * operation was done synchronously. Both are used to avoid * doing extraneous context switches; the latter is mostly * used with the software crypto driver. */ if (!CRYPTOP_ASYNC_KEEPORDER(crp) && ((crp->crp_flags & CRYPTO_F_CBIMM) || ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { /* * Do the callback directly. This is ok when the * callback routine does very little (e.g. the * /dev/crypto callback method just does a wakeup). */ #ifdef CRYPTO_TIMING if (crypto_timing) { /* * NB: We must copy the timestamp before * doing the callback as the cryptop is * likely to be reclaimed. */ struct bintime t = crp->crp_tstamp; crypto_tstat(&cryptostats.cs_cb, &t); crp->crp_callback(crp); crypto_tstat(&cryptostats.cs_finis, &t); } else #endif crp->crp_callback(crp); } else { struct crypto_ret_worker *ret_worker; bool wake; ret_worker = CRYPTO_RETW(crp->crp_retw_id); wake = false; /* * Normal case; queue the callback for the thread. */ CRYPTO_RETW_LOCK(ret_worker); if (CRYPTOP_ASYNC_KEEPORDER(crp)) { struct cryptop *tmp; TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, cryptop_q, crp_next) { if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, tmp, crp, crp_next); break; } } if (tmp == NULL) { TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, crp, crp_next); } if (crp->crp_seq == ret_worker->reorder_cur_seq) wake = true; } else { if (CRYPTO_RETW_EMPTY(ret_worker)) wake = true; TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); } if (wake) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ CRYPTO_RETW_UNLOCK(ret_worker); } } /* * Invoke the callback on behalf of the driver. 
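 *
 * (Editor's sketch.)  A minimal consumer callback as invoked by
 * crypto_done() above; on EAGAIN the session was migrated, so the
 * request is reset and resubmitted.  my_record_error() and
 * my_consume_result() stand in for the caller's own handling:
 *
 *	static int
 *	my_callback(struct cryptop *crp)
 *	{
 *
 *		if (crp->crp_etype == EAGAIN) {
 *			crp->crp_etype = 0;
 *			crp->crp_flags &= ~CRYPTO_F_DONE;
 *			return (crypto_dispatch(crp));
 *		}
 *		if (crp->crp_etype != 0)
 *			my_record_error(crp->crp_etype);
 *		else
 *			my_consume_result(crp);
 *		crypto_freereq(crp);
 *		return (0);
 *	}
 *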
*/ void crypto_kdone(struct cryptkop *krp) { struct crypto_ret_worker *ret_worker; struct cryptocap *cap; if (krp->krp_status != 0) cryptostats.cs_kerrs++; CRYPTO_DRIVER_LOCK(); cap = krp->krp_cap; KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); cap->cc_koperations--; if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) wakeup(cap); CRYPTO_DRIVER_UNLOCK(); krp->krp_cap = NULL; cap_rele(cap); ret_worker = CRYPTO_RETW(0); CRYPTO_RETW_LOCK(ret_worker); if (CRYPTO_RETW_EMPTY(ret_worker)) wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); CRYPTO_RETW_UNLOCK(ret_worker); } int crypto_getfeat(int *featp) { int hid, kalg, feat = 0; CRYPTO_DRIVER_LOCK(); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL || ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && !crypto_devallowsoft)) { continue; } for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) feat |= 1 << kalg; } CRYPTO_DRIVER_UNLOCK(); *featp = feat; return (0); } /* * Terminate a thread at module unload. The process that * initiated this is waiting for us to signal that we're gone; * wake it up and exit. We use the driver table lock to insure * we don't do the wakeup before they're waiting. There is no * race here because the waiter sleeps on the proc lock for the * thread so it gets notified at the right time because of an * extra wakeup that's done in exit1(). */ static void crypto_finis(void *chan) { CRYPTO_DRIVER_LOCK(); wakeup_one(chan); CRYPTO_DRIVER_UNLOCK(); kproc_exit(0); } /* * Crypto thread, dispatches crypto requests. */ static void crypto_proc(void) { struct cryptop *crp, *submit; struct cryptkop *krp; struct cryptocap *cap; int result, hint; #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) fpu_kern_thread(FPU_KERN_NORMAL); #endif CRYPTO_Q_LOCK(); for (;;) { /* * Find the first element in the queue that can be * processed and look-ahead to see if multiple ops * are ready for the same driver. */ submit = NULL; hint = 0; TAILQ_FOREACH(crp, &crp_q, crp_next) { cap = crp->crp_session->cap; /* * Driver cannot disappeared when there is an active * session. */ KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* Op needs to be migrated, process it. */ if (submit == NULL) submit = crp; break; } if (!cap->cc_qblocked) { if (submit != NULL) { /* * We stop on finding another op, * regardless whether its for the same * driver or not. We could keep * searching the queue but it might be * better to just use a per-driver * queue instead. */ if (submit->crp_session->cap == cap) hint = CRYPTO_HINT_MORE; break; } else { submit = crp; if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) break; /* keep scanning for more are q'd */ } } } if (submit != NULL) { TAILQ_REMOVE(&crp_q, submit, crp_next); cap = submit->crp_session->cap; KASSERT(cap != NULL, ("%s:%u Driver disappeared.", __func__, __LINE__)); CRYPTO_Q_UNLOCK(); result = crypto_invoke(cap, submit, hint); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. 
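 *
 * (Editor's note.)  The driver stays "blocked" until it calls
 * crypto_unblock() once resources are available again, typically from
 * its completion path, e.g. (sketch, sc_cid being the id returned by
 * crypto_get_driverid()):
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 *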
*/ cap->cc_qblocked = 1; TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); cryptostats.cs_blocks++; } } /* As above, but for key ops */ TAILQ_FOREACH(krp, &crp_kq, krp_next) { cap = krp->krp_cap; if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { /* * Operation needs to be migrated, * clear krp_cap so a new driver is * selected. */ krp->krp_cap = NULL; cap_rele(cap); break; } if (!cap->cc_kqblocked) break; } if (krp != NULL) { TAILQ_REMOVE(&crp_kq, krp, krp_next); CRYPTO_Q_UNLOCK(); result = crypto_kinvoke(krp); CRYPTO_Q_LOCK(); if (result == ERESTART) { /* * The driver ran out of resources, mark the * driver ``blocked'' for cryptkop's and put * the request back in the queue. It would * best to put the request back where we got * it but that's hard so for now we put it * at the front. This should be ok; putting * it at the end does not work. */ krp->krp_cap->cc_kqblocked = 1; TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); cryptostats.cs_kblocks++; } } if (submit == NULL && krp == NULL) { /* * Nothing more to be processed. Sleep until we're * woken because there are more ops to process. * This happens either by submission or by a driver * becoming unblocked and notifying us through * crypto_unblock. Note that when we wakeup we * start processing each queue again from the * front. It's not clear that it's important to * preserve this ordering since ops may finish * out of order if dispatched to different devices * and some become blocked while others do not. */ crp_sleep = 1; msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); crp_sleep = 0; if (cryptoproc == NULL) break; cryptostats.cs_intrs++; } } CRYPTO_Q_UNLOCK(); crypto_finis(&crp_q); } /* * Crypto returns thread, does callbacks for processed crypto requests. * Callbacks are done here, rather than in the crypto drivers, because * callbacks typically are expensive and would slow interrupt handling. */ static void crypto_ret_proc(struct crypto_ret_worker *ret_worker) { struct cryptop *crpt; struct cryptkop *krpt; CRYPTO_RETW_LOCK(ret_worker); for (;;) { /* Harvest return q's for completed ops */ crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); if (crpt != NULL) { if (crpt->crp_seq == ret_worker->reorder_cur_seq) { TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); ret_worker->reorder_cur_seq++; } else { crpt = NULL; } } if (crpt == NULL) { crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); if (crpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); } krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); if (krpt != NULL) TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); if (crpt != NULL || krpt != NULL) { CRYPTO_RETW_UNLOCK(ret_worker); /* * Run callbacks unlocked. */ if (crpt != NULL) { #ifdef CRYPTO_TIMING if (crypto_timing) { /* * NB: We must copy the timestamp before * doing the callback as the cryptop is * likely to be reclaimed. */ struct bintime t = crpt->crp_tstamp; crypto_tstat(&cryptostats.cs_cb, &t); crpt->crp_callback(crpt); crypto_tstat(&cryptostats.cs_finis, &t); } else #endif crpt->crp_callback(crpt); } if (krpt != NULL) krpt->krp_callback(krpt); CRYPTO_RETW_LOCK(ret_worker); } else { /* * Nothing more to be processed. Sleep until we're * woken because there are more returns to process. 
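 *
 * (Editor's note.)  Consumers whose callback is cheap can bypass this
 * return thread entirely by marking the request, e.g.:
 *
 *	crp->crp_flags |= CRYPTO_F_CBIMM;
 *
 * or with CRYPTO_F_CBIFSYNC to do so only for synchronous drivers, as
 * described above crypto_done().
 *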
*/ msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, "crypto_ret_wait", 0); if (ret_worker->cryptoretproc == NULL) break; cryptostats.cs_rets++; } } CRYPTO_RETW_UNLOCK(ret_worker); crypto_finis(&ret_worker->crp_ret_q); } #ifdef DDB static void db_show_drivers(void) { int hid; db_printf("%12s %4s %4s %8s %2s %2s\n" , "Device" , "Ses" , "Kops" , "Flags" , "QB" , "KB" ); for (hid = 0; hid < crypto_drivers_size; hid++) { const struct cryptocap *cap = crypto_drivers[hid]; if (cap == NULL) continue; db_printf("%-12s %4u %4u %08x %2u %2u\n" , device_get_nameunit(cap->cc_dev) , cap->cc_sessions , cap->cc_koperations , cap->cc_flags , cap->cc_qblocked , cap->cc_kqblocked ); } } DB_SHOW_COMMAND(crypto, db_show_crypto) { struct cryptop *crp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", "Device", "Callback"); TAILQ_FOREACH(crp, &crp_q, crp_next) { db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" , crp->crp_session->cap->cc_hid , (int) crypto_ses2caps(crp->crp_session) , crp->crp_ilen, crp->crp_olen , crp->crp_etype , crp->crp_flags , device_get_nameunit(crp->crp_session->cap->cc_dev) , crp->crp_callback ); } FOREACH_CRYPTO_RETW(ret_worker) { db_printf("\n%8s %4s %4s %4s %8s\n", "ret_worker", "HID", "Etype", "Flags", "Callback"); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { db_printf("%8td %4u %4u %04x %8p\n" , CRYPTO_RETW_ID(ret_worker) , crp->crp_session->cap->cc_hid , crp->crp_etype , crp->crp_flags , crp->crp_callback ); } } } } DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) { struct cryptkop *krp; struct crypto_ret_worker *ret_worker; db_show_drivers(); db_printf("\n"); db_printf("%4s %5s %4s %4s %8s %4s %8s\n", "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &crp_kq, krp_next) { db_printf("%4u %5u %4u %4u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_iparams, krp->krp_oparams , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } ret_worker = CRYPTO_RETW(0); if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { db_printf("%4s %5s %8s %4s %8s\n", "Op", "Status", "CRID", "HID", "Callback"); TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { db_printf("%4u %5u %08x %4u %8p\n" , krp->krp_op , krp->krp_status , krp->krp_crid, krp->krp_hid , krp->krp_callback ); } } } #endif int crypto_modevent(module_t mod, int type, void *unused); /* * Initialization code, both for static and dynamic loading. * Note this is not invoked with the usual MODULE_DECLARE * mechanism but instead is listed as a dependency by the * cryptosoft driver. This guarantees proper ordering of * calls on module load/unload. */ int crypto_modevent(module_t mod, int type, void *unused) { int error = EINVAL; switch (type) { case MOD_LOAD: error = crypto_init(); if (error == 0 && bootverbose) printf("crypto: \n"); break; case MOD_UNLOAD: /*XXX disallow if active sessions */ error = 0; crypto_destroy(); return 0; } return error; } MODULE_VERSION(crypto, 1); MODULE_DEPEND(crypto, zlib, 1, 1, 1); Index: head/sys/opencrypto/cryptodev.h =================================================================== --- head/sys/opencrypto/cryptodev.h (revision 360135) +++ head/sys/opencrypto/cryptodev.h (revision 360136) @@ -1,615 +1,626 @@ /* $FreeBSD$ */ /* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */ /*- * The author of this code is Angelos D. 
Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000 Angelos D. Keromytis * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. * * Copyright (c) 2001 Theo de Raadt * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
* */ #ifndef _CRYPTO_CRYPTO_H_ #define _CRYPTO_CRYPTO_H_ #include #ifdef _KERNEL #include #include #endif /* Some initial values */ #define CRYPTO_DRIVERS_INITIAL 4 /* Hash values */ #define NULL_HASH_LEN 16 #define MD5_HASH_LEN 16 #define SHA1_HASH_LEN 20 #define RIPEMD160_HASH_LEN 20 #define SHA2_224_HASH_LEN 28 #define SHA2_256_HASH_LEN 32 #define SHA2_384_HASH_LEN 48 #define SHA2_512_HASH_LEN 64 #define MD5_KPDK_HASH_LEN 16 #define SHA1_KPDK_HASH_LEN 20 #define AES_GMAC_HASH_LEN 16 #define POLY1305_HASH_LEN 16 #define AES_CBC_MAC_HASH_LEN 16 /* Maximum hash algorithm result length */ #define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */ #define MD5_BLOCK_LEN 64 #define SHA1_BLOCK_LEN 64 #define RIPEMD160_BLOCK_LEN 64 #define SHA2_224_BLOCK_LEN 64 #define SHA2_256_BLOCK_LEN 64 #define SHA2_384_BLOCK_LEN 128 #define SHA2_512_BLOCK_LEN 128 /* HMAC values */ #define NULL_HMAC_BLOCK_LEN 64 /* Maximum HMAC block length */ #define HMAC_MAX_BLOCK_LEN SHA2_512_BLOCK_LEN /* Keep this updated */ #define HMAC_IPAD_VAL 0x36 #define HMAC_OPAD_VAL 0x5C /* HMAC Key Length */ #define AES_128_GMAC_KEY_LEN 16 #define AES_192_GMAC_KEY_LEN 24 #define AES_256_GMAC_KEY_LEN 32 #define AES_128_CBC_MAC_KEY_LEN 16 #define AES_192_CBC_MAC_KEY_LEN 24 #define AES_256_CBC_MAC_KEY_LEN 32 #define POLY1305_KEY_LEN 32 /* Encryption algorithm block sizes */ #define NULL_BLOCK_LEN 4 /* IPsec to maintain alignment */ #define DES_BLOCK_LEN 8 #define DES3_BLOCK_LEN 8 #define BLOWFISH_BLOCK_LEN 8 #define SKIPJACK_BLOCK_LEN 8 #define CAST128_BLOCK_LEN 8 #define RIJNDAEL128_BLOCK_LEN 16 #define AES_BLOCK_LEN 16 #define AES_ICM_BLOCK_LEN 1 #define ARC4_BLOCK_LEN 1 #define CAMELLIA_BLOCK_LEN 16 #define CHACHA20_NATIVE_BLOCK_LEN 64 #define EALG_MAX_BLOCK_LEN CHACHA20_NATIVE_BLOCK_LEN /* Keep this updated */ /* IV Lengths */ #define ARC4_IV_LEN 1 #define AES_GCM_IV_LEN 12 #define AES_CCM_IV_LEN 12 #define AES_XTS_IV_LEN 8 #define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */ /* Min and Max Encryption Key Sizes */ #define NULL_MIN_KEY 0 #define NULL_MAX_KEY 256 /* 2048 bits, max key */ #define DES_MIN_KEY 8 #define DES_MAX_KEY DES_MIN_KEY #define TRIPLE_DES_MIN_KEY 24 #define TRIPLE_DES_MAX_KEY TRIPLE_DES_MIN_KEY #define BLOWFISH_MIN_KEY 5 #define BLOWFISH_MAX_KEY 56 /* 448 bits, max key */ #define CAST_MIN_KEY 5 #define CAST_MAX_KEY 16 #define SKIPJACK_MIN_KEY 10 #define SKIPJACK_MAX_KEY SKIPJACK_MIN_KEY #define RIJNDAEL_MIN_KEY 16 #define RIJNDAEL_MAX_KEY 32 #define AES_MIN_KEY RIJNDAEL_MIN_KEY #define AES_MAX_KEY RIJNDAEL_MAX_KEY #define AES_XTS_MIN_KEY (2 * AES_MIN_KEY) #define AES_XTS_MAX_KEY (2 * AES_MAX_KEY) #define ARC4_MIN_KEY 1 #define ARC4_MAX_KEY 32 #define CAMELLIA_MIN_KEY 8 #define CAMELLIA_MAX_KEY 32 /* Maximum hash algorithm result length */ #define AALG_MAX_RESULT_LEN 64 /* Keep this updated */ #define CRYPTO_ALGORITHM_MIN 1 #define CRYPTO_DES_CBC 1 #define CRYPTO_3DES_CBC 2 #define CRYPTO_BLF_CBC 3 #define CRYPTO_CAST_CBC 4 #define CRYPTO_SKIPJACK_CBC 5 #define CRYPTO_MD5_HMAC 6 #define CRYPTO_SHA1_HMAC 7 #define CRYPTO_RIPEMD160_HMAC 8 #define CRYPTO_MD5_KPDK 9 #define CRYPTO_SHA1_KPDK 10 #define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */ #define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */ #define CRYPTO_ARC4 12 #define CRYPTO_MD5 13 #define CRYPTO_SHA1 14 #define CRYPTO_NULL_HMAC 15 #define CRYPTO_NULL_CBC 16 #define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */ #define CRYPTO_SHA2_256_HMAC 18 #define CRYPTO_SHA2_384_HMAC 19 #define CRYPTO_SHA2_512_HMAC 20 
#define CRYPTO_CAMELLIA_CBC 21 #define CRYPTO_AES_XTS 22 #define CRYPTO_AES_ICM 23 /* commonly known as CTR mode */ #define CRYPTO_AES_NIST_GMAC 24 /* GMAC only */ #define CRYPTO_AES_NIST_GCM_16 25 /* 16 byte ICV */ #ifdef _KERNEL #define CRYPTO_AES_128_NIST_GMAC 26 /* auth side */ #define CRYPTO_AES_192_NIST_GMAC 27 /* auth side */ #define CRYPTO_AES_256_NIST_GMAC 28 /* auth side */ #endif #define CRYPTO_BLAKE2B 29 /* Blake2b hash */ #define CRYPTO_BLAKE2S 30 /* Blake2s hash */ #define CRYPTO_CHACHA20 31 /* Chacha20 stream cipher */ #define CRYPTO_SHA2_224_HMAC 32 #define CRYPTO_RIPEMD160 33 #define CRYPTO_SHA2_224 34 #define CRYPTO_SHA2_256 35 #define CRYPTO_SHA2_384 36 #define CRYPTO_SHA2_512 37 #define CRYPTO_POLY1305 38 #define CRYPTO_AES_CCM_CBC_MAC 39 /* auth side */ #define CRYPTO_AES_CCM_16 40 /* cipher side */ #define CRYPTO_ALGORITHM_MAX 40 /* Keep updated - see below */ #define CRYPTO_ALGO_VALID(x) ((x) >= CRYPTO_ALGORITHM_MIN && \ (x) <= CRYPTO_ALGORITHM_MAX) /* Algorithm flags */ #define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */ #define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */ #define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */ /* * Crypto driver/device flags. They can set in the crid * parameter when creating a session or submitting a key * op to affect the device/driver assigned. If neither * of these are specified then the crid is assumed to hold * the driver id of an existing (and suitable) device that * must be used to satisfy the request. */ #define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */ #define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */ /* NB: deprecated */ struct session_op { u_int32_t cipher; /* ie. CRYPTO_DES_CBC */ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */ u_int32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; u_int32_t ses; /* returns: session # */ }; /* * session and crypt _op structs are used by userspace programs to interact * with /dev/crypto. Confusingly, the internal kernel interface is named * "cryptop" (no underscore). */ struct session2_op { u_int32_t cipher; /* ie. CRYPTO_DES_CBC */ u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */ u_int32_t keylen; /* cipher key */ c_caddr_t key; int mackeylen; /* mac key */ c_caddr_t mackey; u_int32_t ses; /* returns: session # */ int crid; /* driver id + flags (rw) */ int pad[4]; /* for future expansion */ }; struct crypt_op { u_int32_t ses; u_int16_t op; /* i.e. COP_ENCRYPT */ #define COP_ENCRYPT 1 #define COP_DECRYPT 2 u_int16_t flags; #define COP_F_CIPHER_FIRST 0x0001 /* Cipher before MAC. */ #define COP_F_BATCH 0x0008 /* Batch op if possible */ u_int len; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; caddr_t mac; /* must be big enough for chosen MAC */ c_caddr_t iv; }; /* op and flags the same as crypt_op */ struct crypt_aead { u_int32_t ses; u_int16_t op; /* i.e. COP_ENCRYPT */ u_int16_t flags; u_int len; u_int aadlen; u_int ivlen; c_caddr_t src; /* become iov[] inside kernel */ caddr_t dst; c_caddr_t aad; /* additional authenticated data */ caddr_t tag; /* must fit for chosen TAG length */ c_caddr_t iv; }; /* * Parameters for looking up a crypto driver/device by * device name or by id. The latter are returned for * created sessions (crid) and completed key operations. */ struct crypt_find_op { int crid; /* driver id + flags */ char name[32]; /* device/driver name */ }; /* bignum parameter, in packed bytes, ... 
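 *
 * (Editor's sketch of a userland key operation using the structures
 * declared below; "fd" is a cloned /dev/crypto descriptor obtained via
 * CRIOGET, and the parameters are assumed to follow the usual
 * CRK_MOD_EXP layout of base, exponent, modulus in and one result out.)
 *
 *	struct crypt_kop kop;
 *
 *	memset(&kop, 0, sizeof(kop));
 *	kop.crk_op = CRK_MOD_EXP;
 *	kop.crk_crid = CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE;
 *	kop.crk_iparams = 3;
 *	kop.crk_oparams = 1;
 *	kop.crk_param[0].crp_p = base;   kop.crk_param[0].crp_nbits = base_bits;
 *	kop.crk_param[1].crp_p = exp;    kop.crk_param[1].crp_nbits = exp_bits;
 *	kop.crk_param[2].crp_p = mod;    kop.crk_param[2].crp_nbits = mod_bits;
 *	kop.crk_param[3].crp_p = result; kop.crk_param[3].crp_nbits = mod_bits;
 *	if (ioctl(fd, CIOCKEY2, &kop) == -1)
 *		err(1, "CIOCKEY2");
 *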
*/ struct crparam { caddr_t crp_p; u_int crp_nbits; }; #define CRK_MAXPARAM 8 struct crypt_kop { u_int crk_op; /* ie. CRK_MOD_EXP or other */ u_int crk_status; /* return status */ u_short crk_iparams; /* # of input parameters */ u_short crk_oparams; /* # of output parameters */ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */ struct crparam crk_param[CRK_MAXPARAM]; }; #define CRK_ALGORITM_MIN 0 #define CRK_MOD_EXP 0 #define CRK_MOD_EXP_CRT 1 #define CRK_DSA_SIGN 2 #define CRK_DSA_VERIFY 3 #define CRK_DH_COMPUTE_KEY 4 #define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */ #define CRF_MOD_EXP (1 << CRK_MOD_EXP) #define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT) #define CRF_DSA_SIGN (1 << CRK_DSA_SIGN) #define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY) #define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY) /* * done against open of /dev/crypto, to get a cloned descriptor. * Please use F_SETFD against the cloned descriptor. */ #define CRIOGET _IOWR('c', 100, u_int32_t) #define CRIOASYMFEAT CIOCASYMFEAT #define CRIOFINDDEV CIOCFINDDEV /* the following are done against the cloned descriptor */ #define CIOCGSESSION _IOWR('c', 101, struct session_op) #define CIOCFSESSION _IOW('c', 102, u_int32_t) #define CIOCCRYPT _IOWR('c', 103, struct crypt_op) #define CIOCKEY _IOWR('c', 104, struct crypt_kop) #define CIOCASYMFEAT _IOR('c', 105, u_int32_t) #define CIOCGSESSION2 _IOWR('c', 106, struct session2_op) #define CIOCKEY2 _IOWR('c', 107, struct crypt_kop) #define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op) #define CIOCCRYPTAEAD _IOWR('c', 109, struct crypt_aead) struct cryptotstat { struct timespec acc; /* total accumulated time */ struct timespec min; /* min time */ struct timespec max; /* max time */ u_int32_t count; /* number of observations */ }; struct cryptostats { u_int32_t cs_ops; /* symmetric crypto ops submitted */ u_int32_t cs_errs; /* symmetric crypto ops that failed */ u_int32_t cs_kops; /* asymetric/key ops submitted */ u_int32_t cs_kerrs; /* asymetric/key ops that failed */ u_int32_t cs_intrs; /* crypto swi thread activations */ u_int32_t cs_rets; /* crypto return thread activations */ u_int32_t cs_blocks; /* symmetric op driver block */ u_int32_t cs_kblocks; /* symmetric op driver block */ /* * When CRYPTO_TIMING is defined at compile time and the * sysctl debug.crypto is set to 1, the crypto system will * accumulate statistics about how long it takes to process * crypto requests at various points during processing. */ struct cryptotstat cs_invoke; /* crypto_dipsatch -> crypto_invoke */ struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */ struct cryptotstat cs_cb; /* crypto_done -> callback */ struct cryptotstat cs_finis; /* callback -> callback return */ }; #ifdef _KERNEL /* * Return values for cryptodev_probesession methods. */ #define CRYPTODEV_PROBE_HARDWARE (-100) #define CRYPTODEV_PROBE_ACCEL_SOFTWARE (-200) #define CRYPTODEV_PROBE_SOFTWARE (-500) #if 0 #define CRYPTDEB(s, ...) do { \ printf("%s:%d: " s "\n", __FILE__, __LINE__, ## __VA_ARGS__); \ } while (0) #else #define CRYPTDEB(...) do { } while (0) #endif struct crypto_session_params { int csp_mode; /* Type of operations to perform. */ #define CSP_MODE_NONE 0 #define CSP_MODE_COMPRESS 1 /* Compression/decompression. */ #define CSP_MODE_CIPHER 2 /* Encrypt/decrypt. */ #define CSP_MODE_DIGEST 3 /* Compute/verify digest. */ #define CSP_MODE_AEAD 4 /* Combined auth/encryption. */ #define CSP_MODE_ETA 5 /* IPsec style encrypt-then-auth */ int csp_flags; int csp_ivlen; /* IV length in bytes. 
*/ int csp_cipher_alg; int csp_cipher_klen; /* Key length in bytes. */ const void *csp_cipher_key; int csp_auth_alg; int csp_auth_klen; /* Key length in bytes. */ const void *csp_auth_key; int csp_auth_mlen; /* Number of digest bytes to use. 0 means all. */ }; /* Structure describing complete operation */ struct cryptop { TAILQ_ENTRY(cryptop) crp_next; struct task crp_task; crypto_session_t crp_session; /* Session */ int crp_ilen; /* Input data total length */ int crp_olen; /* Result total length */ int crp_etype; /* * Error type (zero means no error). * All error codes except EAGAIN * indicate possible data corruption (as in, * the data have been touched). On all * errors, the crp_session may have changed * (reset to a new one), so the caller * should always check and use the new * value on future requests. */ int crp_flags; #define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */ #define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */ #define CRYPTO_F_DONE 0x0020 /* Operation completed */ #define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */ #define CRYPTO_F_ASYNC 0x0080 /* Dispatch crypto jobs on several threads * if op is synchronous */ #define CRYPTO_F_ASYNC_KEEPORDER 0x0100 /* * Dispatch the crypto jobs in the same * order there are submitted. Applied only * if CRYPTO_F_ASYNC flags is set */ #define CRYPTO_F_IV_SEPARATE 0x0200 /* Use crp_iv[] as IV. */ -#define CRYPTO_F_IV_GENERATE 0x0400 /* Generate a random IV and store. */ int crp_op; union { caddr_t crp_buf; /* Data to be processed */ struct mbuf *crp_mbuf; struct uio *crp_uio; }; int crp_buf_type; /* Which union member describes data. */ int crp_aad_start; /* Location of AAD. */ int crp_aad_length; /* 0 => no AAD. */ int crp_iv_start; /* Location of IV. IV length is from * the session. */ int crp_payload_start; /* Location of ciphertext. */ int crp_payload_length; int crp_digest_start; /* Location of MAC/tag. Length is * from the session. */ uint8_t crp_iv[EALG_MAX_BLOCK_LEN]; /* IV if IV_SEPARATE. */ const void *crp_cipher_key; /* New cipher key if non-NULL. */ const void *crp_auth_key; /* New auth key if non-NULL. */ void *crp_opaque; /* Opaque pointer, passed along */ int (*crp_callback)(struct cryptop *); /* Callback function */ struct bintime crp_tstamp; /* performance time stamp */ uint32_t crp_seq; /* used for ordered dispatch */ uint32_t crp_retw_id; /* * the return worker to be used, * used for ordered dispatch */ }; #define CRYPTOP_ASYNC(crp) \ (((crp)->crp_flags & CRYPTO_F_ASYNC) && \ crypto_ses2caps((crp)->crp_session) & CRYPTOCAP_F_SYNC) #define CRYPTOP_ASYNC_KEEPORDER(crp) \ (CRYPTOP_ASYNC(crp) && \ (crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) #define CRYPTO_BUF_CONTIG 0x0 #define CRYPTO_BUF_UIO 0x1 #define CRYPTO_BUF_MBUF 0x2 /* Flags in crp_op. */ #define CRYPTO_OP_DECRYPT 0x0 #define CRYPTO_OP_ENCRYPT 0x1 #define CRYPTO_OP_IS_ENCRYPT(op) ((op) & CRYPTO_OP_ENCRYPT) #define CRYPTO_OP_COMPUTE_DIGEST 0x0 #define CRYPTO_OP_VERIFY_DIGEST 0x2 #define CRYPTO_OP_DECOMPRESS CRYPTO_OP_DECRYPT #define CRYPTO_OP_COMPRESS CRYPTO_OP_ENCRYPT #define CRYPTO_OP_IS_COMPRESS(op) ((op) & CRYPTO_OP_COMPRESS) /* * Hints passed to process methods. */ #define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */ struct cryptkop { TAILQ_ENTRY(cryptkop) krp_next; u_int krp_op; /* ie. CRK_MOD_EXP or other */ u_int krp_status; /* return status */ u_short krp_iparams; /* # of input parameters */ u_short krp_oparams; /* # of output parameters */ u_int krp_crid; /* desired device, etc. 
*/ uint32_t krp_hid; /* device used */ struct crparam krp_param[CRK_MAXPARAM]; /* kvm */ void (*krp_callback)(struct cryptkop *); struct cryptocap *krp_cap; }; uint32_t crypto_ses2hid(crypto_session_t crypto_session); uint32_t crypto_ses2caps(crypto_session_t crypto_session); void *crypto_get_driver_session(crypto_session_t crypto_session); const struct crypto_session_params *crypto_get_params( crypto_session_t crypto_session); struct auth_hash *crypto_auth_hash(const struct crypto_session_params *csp); struct enc_xform *crypto_cipher(const struct crypto_session_params *csp); MALLOC_DECLARE(M_CRYPTO_DATA); extern int crypto_newsession(crypto_session_t *cses, const struct crypto_session_params *params, int hard); extern void crypto_freesession(crypto_session_t cses); #define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE #define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE #define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */ extern int32_t crypto_get_driverid(device_t dev, size_t session_size, int flags); extern int crypto_find_driver(const char *); extern device_t crypto_find_device_byhid(int hid); extern int crypto_getcaps(int hid); extern int crypto_kregister(u_int32_t, int, u_int32_t); extern int crypto_unregister_all(u_int32_t driverid); extern int crypto_dispatch(struct cryptop *crp); extern int crypto_kdispatch(struct cryptkop *); #define CRYPTO_SYMQ 0x1 #define CRYPTO_ASYMQ 0x2 extern int crypto_unblock(u_int32_t, int); extern void crypto_done(struct cryptop *crp); extern void crypto_kdone(struct cryptkop *); extern int crypto_getfeat(int *); extern void crypto_freereq(struct cryptop *crp); extern struct cryptop *crypto_getreq(crypto_session_t cses, int how); extern int crypto_usercrypto; /* userland may do crypto requests */ extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */ extern int crypto_devallowsoft; /* only use hardware crypto */ /* Helper routines for drivers to initialize auth contexts for HMAC. */ struct auth_hash; void hmac_init_ipad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx); void hmac_init_opad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx); /* * Crypto-related utility routines used mainly by drivers. * * XXX these don't really belong here; but for now they're * kept apart from the rest of the system. * * Similar to m_copyback/data, *_copyback copy data from the 'src' * buffer into the crypto request's data buffer while *_copydata copy * data from the crypto request's data buffer into the the 'dst' * buffer. 
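 *
 * Editorial aside (not part of the original header): the consumer-facing
 * functions declared above (crypto_newsession, crypto_getreq,
 * crypto_dispatch, crypto_freereq) are normally used together.  A minimal,
 * hedged sketch of a kernel consumer encrypting one contiguous buffer with
 * an AES-CBC session; everything outside the opencrypto API is
 * hypothetical and error handling is trimmed:
 *
 *	static int
 *	example_done(struct cryptop *crp)
 *	{
 *		if (crp->crp_etype != 0)
 *			printf("crypto request failed: %d\n", crp->crp_etype);
 *		crypto_freereq(crp);
 *		return (0);
 *	}
 *
 *	static int
 *	example_encrypt(crypto_session_t *csesp, void *buf, int len,
 *	    const void *key, int klen)
 *	{
 *		struct crypto_session_params csp;
 *		struct cryptop *crp;
 *		int error;
 *
 *		memset(&csp, 0, sizeof(csp));
 *		csp.csp_mode = CSP_MODE_CIPHER;
 *		csp.csp_cipher_alg = CRYPTO_AES_CBC;
 *		csp.csp_cipher_key = key;
 *		csp.csp_cipher_klen = klen;
 *		csp.csp_ivlen = 16;
 *		error = crypto_newsession(csesp, &csp,
 *		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *		if (error != 0)
 *			return (error);
 *
 *		crp = crypto_getreq(*csesp, M_WAITOK);
 *		crp->crp_buf = buf;
 *		crp->crp_buf_type = CRYPTO_BUF_CONTIG;
 *		crp->crp_ilen = len;
 *		crp->crp_payload_start = 0;
 *		crp->crp_payload_length = len;
 *		crp->crp_op = CRYPTO_OP_ENCRYPT;
 *		crp->crp_flags = CRYPTO_F_IV_SEPARATE;
 *		arc4rand(crp->crp_iv, 16, 0);
 *		crp->crp_callback = example_done;
 *		return (crypto_dispatch(crp));
 *	}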
*/ struct uio; extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp); extern void cuio_copyback(struct uio* uio, int off, int len, c_caddr_t cp); extern int cuio_getptr(struct uio *uio, int loc, int *off); extern int cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, void *, u_int), void *arg); struct mbuf; struct iovec; extern int crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr, int *cnt, int *allocated); void crypto_copyback(struct cryptop *crp, int off, int size, const void *src); void crypto_copydata(struct cryptop *crp, int off, int size, void *dst); int crypto_apply(struct cryptop *crp, int off, int len, int (*f)(void *, void *, u_int), void *arg); void *crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len); + +static __inline void +crypto_read_iv(struct cryptop *crp, void *iv) +{ + const struct crypto_session_params *csp; + + csp = crypto_get_params(crp->crp_session); + if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) + memcpy(iv, crp->crp_iv, csp->csp_ivlen); + else + crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv); +} #endif /* _KERNEL */ #endif /* _CRYPTO_CRYPTO_H_ */ Index: head/sys/opencrypto/cryptosoft.c =================================================================== --- head/sys/opencrypto/cryptosoft.c (revision 360135) +++ head/sys/opencrypto/cryptosoft.c (revision 360136) @@ -1,1519 +1,1500 @@ /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ /*- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting * * This code was written by Angelos D. Keromytis in Athens, Greece, in * February 2000. Network Security Technologies Inc. (NSTI) kindly * supported the development of this code. * * Copyright (c) 2000, 2001 Angelos D. Keromytis * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by John-Mark Gurney * under sponsorship of the FreeBSD Foundation and * Rubicon Communications, LLC (Netgate). * * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all source code copies of any software which is or includes a copy or * modification of this software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR * PURPOSE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" struct swcr_auth { void *sw_ictx; void *sw_octx; struct auth_hash *sw_axf; uint16_t sw_mlen; uint16_t sw_octx_len; }; struct swcr_encdec { uint8_t *sw_kschedule; struct enc_xform *sw_exf; }; struct swcr_compdec { struct comp_algo *sw_cxf; }; struct swcr_session { struct mtx swcr_lock; int (*swcr_process)(struct swcr_session *, struct cryptop *); struct swcr_auth swcr_auth; struct swcr_encdec swcr_encdec; struct swcr_compdec swcr_compdec; }; static int32_t swcr_id; static void swcr_freesession(device_t dev, crypto_session_t cses); /* Used for CRYPTO_NULL_CBC. 
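 *
 * Editorial aside (not part of the original file): apart from this null
 * transform, the handlers below share a common driver-side pattern: fetch
 * the IV with the crypto_read_iv() helper introduced in this revision,
 * pull request bytes in with crypto_copydata(), push results out with
 * crypto_copyback(), and report status through the value that
 * swcr_process() stores in crp_etype.  A hedged sketch of that skeleton
 * for a hypothetical driver that transforms 16-byte blocks in place
 * ("mydrv_transform" and "mydrv_session" are assumed names, not real APIs):
 *
 *	static int
 *	mydrv_handle(struct mydrv_session *s, struct cryptop *crp)
 *	{
 *		const struct crypto_session_params *csp;
 *		uint8_t iv[EALG_MAX_BLOCK_LEN], block[16];
 *		int i, len;
 *
 *		csp = crypto_get_params(crp->crp_session);
 *		crypto_read_iv(crp, iv);
 *		for (i = 0; i < crp->crp_payload_length; i += 16) {
 *			len = MIN(crp->crp_payload_length - i, 16);
 *			crypto_copydata(crp, crp->crp_payload_start + i,
 *			    len, block);
 *			mydrv_transform(s, csp, iv, block, len);
 *			crypto_copyback(crp, crp->crp_payload_start + i,
 *			    len, block);
 *		}
 *		return (0);
 *	}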
*/ static int swcr_null(struct swcr_session *ses, struct cryptop *crp) { return (0); } /* * Apply a symmetric encryption/decryption algorithm. */ static int swcr_encdec(struct swcr_session *ses, struct cryptop *crp) { unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN]; unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN]; const struct crypto_session_params *csp; struct swcr_encdec *sw; struct enc_xform *exf; int i, j, k, blks, ind, count, ivlen; struct uio *uio, uiolcl; struct iovec iovlcl[4]; struct iovec *iov; int iovcnt, iovalloc; int error; bool encrypting; error = 0; sw = &ses->swcr_encdec; exf = sw->sw_exf; blks = exf->blocksize; ivlen = exf->ivsize; /* Check for non-padded data */ if ((crp->crp_payload_length % blks) != 0) return EINVAL; if (exf == &enc_xform_aes_icm && (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); - /* IV explicitly provided ? */ - if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - bcopy(crp->crp_iv, iv, ivlen); - else if (crp->crp_flags & CRYPTO_F_IV_GENERATE) { - arc4rand(iv, ivlen, 0); - crypto_copyback(crp, crp->crp_iv_start, ivlen, iv); - } else - crypto_copydata(crp, crp->crp_iv_start, ivlen, iv); + crypto_read_iv(crp, iv); if (crp->crp_cipher_key != NULL) { if (sw->sw_kschedule) exf->zerokey(&(sw->sw_kschedule)); csp = crypto_get_params(crp->crp_session); error = exf->setkey(&sw->sw_kschedule, crp->crp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } iov = iovlcl; iovcnt = nitems(iovlcl); iovalloc = 0; uio = &uiolcl; switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt, &iovalloc); if (error) return (error); uio->uio_iov = iov; uio->uio_iovcnt = iovcnt; break; case CRYPTO_BUF_UIO: uio = crp->crp_uio; break; case CRYPTO_BUF_CONTIG: iov[0].iov_base = crp->crp_buf; iov[0].iov_len = crp->crp_ilen; uio->uio_iov = iov; uio->uio_iovcnt = 1; break; } ivp = iv; if (exf->reinit) { /* * xforms that provide a reinit method perform all IV * handling themselves. */ exf->reinit(sw->sw_kschedule, iv); } count = crp->crp_payload_start; ind = cuio_getptr(uio, count, &k); if (ind == -1) { error = EINVAL; goto out; } i = crp->crp_payload_length; encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op); while (i > 0) { /* * If there's insufficient data at the end of * an iovec, we have to do some copying. */ if (uio->uio_iov[ind].iov_len < k + blks && uio->uio_iov[ind].iov_len != k) { cuio_copydata(uio, count, blks, blk); /* Actual encryption/decryption */ if (exf->reinit) { if (encrypting) { exf->encrypt(sw->sw_kschedule, blk); } else { exf->decrypt(sw->sw_kschedule, blk); } } else if (encrypting) { /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, blk); /* * Keep encrypted block for XOR'ing * with next block */ bcopy(blk, iv, blks); ivp = iv; } else { /* decrypt */ /* * Keep encrypted block for XOR'ing * with next block */ nivp = (ivp == iv) ? iv2 : iv; bcopy(blk, nivp, blks); exf->decrypt(sw->sw_kschedule, blk); /* XOR with previous block */ for (j = 0; j < blks; j++) blk[j] ^= ivp[j]; ivp = nivp; } /* Copy back decrypted block */ cuio_copyback(uio, count, blks, blk); count += blks; /* Advance pointer */ ind = cuio_getptr(uio, count, &k); if (ind == -1) { error = EINVAL; goto out; } i -= blks; /* Could be done... 
*/ if (i == 0) break; } while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) { uint8_t *idat; size_t nb, rem; nb = blks; rem = MIN((size_t)i, uio->uio_iov[ind].iov_len - (size_t)k); idat = (uint8_t *)uio->uio_iov[ind].iov_base + k; if (exf->reinit) { if (encrypting && exf->encrypt_multi == NULL) exf->encrypt(sw->sw_kschedule, idat); else if (encrypting) { nb = rounddown(rem, blks); exf->encrypt_multi(sw->sw_kschedule, idat, nb); } else if (exf->decrypt_multi == NULL) exf->decrypt(sw->sw_kschedule, idat); else { nb = rounddown(rem, blks); exf->decrypt_multi(sw->sw_kschedule, idat, nb); } } else if (encrypting) { /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; exf->encrypt(sw->sw_kschedule, idat); ivp = idat; } else { /* decrypt */ /* * Keep encrypted block to be used * in next block's processing. */ nivp = (ivp == iv) ? iv2 : iv; bcopy(idat, nivp, blks); exf->decrypt(sw->sw_kschedule, idat); /* XOR with previous block/IV */ for (j = 0; j < blks; j++) idat[j] ^= ivp[j]; ivp = nivp; } count += nb; k += nb; i -= nb; } /* * Advance to the next iov if the end of the current iov * is aligned with the end of a cipher block. * Note that the code is equivalent to calling: * ind = cuio_getptr(uio, count, &k); */ if (i > 0 && k == uio->uio_iov[ind].iov_len) { k = 0; ind++; if (ind >= uio->uio_iovcnt) { error = EINVAL; goto out; } } } out: if (iovalloc) free(iov, M_CRYPTO_DATA); return (error); } static void swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw, const uint8_t *key, int klen) { switch (axf->type) { case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: hmac_init_ipad(axf, key, klen, sw->sw_ictx); hmac_init_opad(axf, key, klen, sw->sw_octx); break; case CRYPTO_MD5_KPDK: case CRYPTO_SHA1_KPDK: { /* * We need a buffer that can hold an md5 and a sha1 result * just to throw it away. * What we do here is the initial part of: * ALGO( key, keyfill, .. ) * adding the key to sw_ictx and abusing Final() to get the * "keyfill" padding. * In addition we abuse the sw_octx to save the key to have * it to be able to append it at the end in swcr_authcompute(). */ u_char buf[SHA1_RESULTLEN]; bcopy(key, sw->sw_octx, klen); axf->Init(sw->sw_ictx); axf->Update(sw->sw_ictx, key, klen); axf->Final(buf, sw->sw_ictx); break; } case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: axf->Setkey(sw->sw_ictx, key, klen); axf->Init(sw->sw_ictx); break; default: panic("%s: algorithm %d doesn't use keys", __func__, axf->type); } } /* * Compute or verify hash. 
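 *
 * Editorial aside (not part of the original file): for the HMAC cases,
 * swcr_authprepare() above left two precomputed contexts behind: sw_ictx
 * holds the hash state after absorbing (key XOR ipad) and sw_octx the
 * state after absorbing (key XOR opad), so the function below only has to
 * finish HMAC(k, m) = H((k ^ opad) || H((k ^ ipad) || m)).  A hedged
 * sketch of the standard precomputation provided by hmac_init_ipad() and
 * hmac_init_opad(), assuming the key has already been reduced to at most
 * axf->blocksize bytes ("hmac_precompute" is an illustrative name):
 *
 *	static void
 *	hmac_precompute(struct auth_hash *axf, const uint8_t *key, int klen,
 *	    void *ictx, void *octx)
 *	{
 *		uint8_t pad[HMAC_MAX_BLOCK_LEN];
 *		int i;
 *
 *		for (i = 0; i < axf->blocksize; i++)
 *			pad[i] = (i < klen ? key[i] : 0) ^ HMAC_IPAD_VAL;
 *		axf->Init(ictx);
 *		axf->Update(ictx, pad, axf->blocksize);
 *
 *		for (i = 0; i < axf->blocksize; i++)
 *			pad[i] = (i < klen ? key[i] : 0) ^ HMAC_OPAD_VAL;
 *		axf->Init(octx);
 *		axf->Update(octx, pad, axf->blocksize);
 *
 *		explicit_bzero(pad, sizeof(pad));
 *	}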
*/ static int swcr_authcompute(struct swcr_session *ses, struct cryptop *crp) { u_char aalg[HASH_MAX_LEN]; u_char uaalg[HASH_MAX_LEN]; const struct crypto_session_params *csp; struct swcr_auth *sw; struct auth_hash *axf; union authctx ctx; int err; sw = &ses->swcr_auth; axf = sw->sw_axf; if (crp->crp_auth_key != NULL) { csp = crypto_get_params(crp->crp_session); swcr_authprepare(axf, sw, crp->crp_auth_key, csp->csp_auth_klen); } bcopy(sw->sw_ictx, &ctx, axf->ctxsize); err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length, (int (*)(void *, void *, unsigned int))axf->Update, &ctx); if (err) return err; err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length, (int (*)(void *, void *, unsigned int))axf->Update, &ctx); if (err) return err; switch (axf->type) { case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Final(aalg, &ctx); break; case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_RIPEMD160_HMAC: if (sw->sw_octx == NULL) return EINVAL; axf->Final(aalg, &ctx); bcopy(sw->sw_octx, &ctx, axf->ctxsize); axf->Update(&ctx, aalg, axf->hashsize); axf->Final(aalg, &ctx); break; case CRYPTO_MD5_KPDK: case CRYPTO_SHA1_KPDK: /* If we have no key saved, return error. */ if (sw->sw_octx == NULL) return EINVAL; /* * Add the trailing copy of the key (see comment in * swcr_authprepare()) after the data: * ALGO( .., key, algofill ) * and let Final() do the proper, natural "algofill" * padding. */ axf->Update(&ctx, sw->sw_octx, sw->sw_octx_len); axf->Final(aalg, &ctx); break; case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: case CRYPTO_NULL_HMAC: case CRYPTO_POLY1305: axf->Final(aalg, &ctx); break; } if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg); if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0) return (EBADMSG); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg); } return (0); } CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */ CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */ static int swcr_gmac(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; uint32_t *blkp; int blksz, i, ivlen, len; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = axf->blocksize; - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) - return (EINVAL); - /* Initialize the IV */ ivlen = AES_GCM_IV_LEN; - if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - bcopy(crp->crp_iv, iv, ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, ivlen, iv); + crypto_read_iv(crp, iv); axf->Reinit(&ctx, iv, ivlen); for (i = 0; i < crp->crp_payload_length; i += blksz) { len = MIN(crp->crp_payload_length - i, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } /* length block */ bzero(blk, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(aalg, &ctx); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, 
uaalg); if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0) return (EBADMSG); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg); } return (0); } static int swcr_gcm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; uint32_t *blkp; int blksz, i, ivlen, len, r; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = axf->blocksize; swe = &ses->swcr_encdec; exf = swe->sw_exf; if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* Initialize the IV */ ivlen = AES_GCM_IV_LEN; bcopy(crp->crp_iv, iv, ivlen); /* Supply MAC with IV */ axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ for (i = 0; i < crp->crp_aad_length; i += blksz) { len = MIN(crp->crp_aad_length - i, blksz); crypto_copydata(crp, crp->crp_aad_start + i, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } exf->reinit(swe->sw_kschedule, iv); /* Do encryption with MAC */ for (i = 0; i < crp->crp_payload_length; i += len) { len = MIN(crp->crp_payload_length - i, blksz); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { exf->encrypt(swe->sw_kschedule, blk); axf->Update(&ctx, blk, len); crypto_copyback(crp, crp->crp_payload_start + i, len, blk); } else { axf->Update(&ctx, blk, len); } } /* length block */ bzero(blk, blksz); blkp = (uint32_t *)blk + 1; *blkp = htobe32(crp->crp_aad_length * 8); blkp = (uint32_t *)blk + 3; *blkp = htobe32(crp->crp_payload_length * 8); axf->Update(&ctx, blk, blksz); /* Finalize MAC */ axf->Final(aalg, &ctx); /* Validate tag */ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, uaalg); r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen); if (r != 0) return (EBADMSG); /* tag matches, decrypt data */ for (i = 0; i < crp->crp_payload_length; i += blksz) { len = MIN(crp->crp_payload_length - i, blksz); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); exf->decrypt(swe->sw_kschedule, blk); crypto_copyback(crp, crp->crp_payload_start + i, len, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg); } return (0); } static int swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct auth_hash *axf; int blksz, i, ivlen, len; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = axf->blocksize; - if (crp->crp_flags & CRYPTO_F_IV_GENERATE) - return (EINVAL); - /* Initialize the IV */ ivlen = AES_CCM_IV_LEN; - if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) - bcopy(crp->crp_iv, iv, ivlen); - else - crypto_copydata(crp, crp->crp_iv_start, ivlen, iv); + crypto_read_iv(crp, iv); /* * AES CCM-CBC-MAC needs to know the length of both the auth * data and payload data before doing the auth computation. 
*/ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length; ctx.aes_cbc_mac_ctx.cryptDataLength = 0; axf->Reinit(&ctx, iv, ivlen); for (i = 0; i < crp->crp_payload_length; i += blksz) { len = MIN(crp->crp_payload_length - i, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } /* Finalize MAC */ axf->Final(aalg, &ctx); if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, uaalg); if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0) return (EBADMSG); } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg); } return (0); } static int swcr_ccm(struct swcr_session *ses, struct cryptop *crp) { uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; u_char *blk = (u_char *)blkbuf; u_char aalg[AALG_MAX_RESULT_LEN]; u_char uaalg[AALG_MAX_RESULT_LEN]; u_char iv[EALG_MAX_BLOCK_LEN]; union authctx ctx; struct swcr_auth *swa; struct swcr_encdec *swe; struct auth_hash *axf; struct enc_xform *exf; int blksz, i, ivlen, len, r; swa = &ses->swcr_auth; axf = swa->sw_axf; bcopy(swa->sw_ictx, &ctx, axf->ctxsize); blksz = axf->blocksize; swe = &ses->swcr_encdec; exf = swe->sw_exf; if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) return (EINVAL); /* Initialize the IV */ ivlen = AES_CCM_IV_LEN; bcopy(crp->crp_iv, iv, ivlen); /* * AES CCM-CBC-MAC needs to know the length of both the auth * data and payload data before doing the auth computation. */ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length; ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length; /* Supply MAC with IV */ axf->Reinit(&ctx, iv, ivlen); /* Supply MAC with AAD */ for (i = 0; i < crp->crp_aad_length; i += blksz) { len = MIN(crp->crp_aad_length - i, blksz); crypto_copydata(crp, crp->crp_aad_start + i, len, blk); bzero(blk + len, blksz - len); axf->Update(&ctx, blk, blksz); } exf->reinit(swe->sw_kschedule, iv); /* Do encryption/decryption with MAC */ for (i = 0; i < crp->crp_payload_length; i += len) { len = MIN(crp->crp_payload_length - i, blksz); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { axf->Update(&ctx, blk, len); exf->encrypt(swe->sw_kschedule, blk); crypto_copyback(crp, crp->crp_payload_start + i, len, blk); } else { /* * One of the problems with CCM+CBC is that * the authentication is done on the * unecncrypted data. As a result, we have to * decrypt the data twice: once to generate * the tag and a second time after the tag is * verified. */ exf->decrypt(swe->sw_kschedule, blk); axf->Update(&ctx, blk, len); } } /* Finalize MAC */ axf->Final(aalg, &ctx); /* Validate tag */ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, uaalg); r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen); if (r != 0) return (EBADMSG); /* tag matches, decrypt data */ exf->reinit(swe->sw_kschedule, iv); for (i = 0; i < crp->crp_payload_length; i += blksz) { len = MIN(crp->crp_payload_length - i, blksz); if (len < blksz) bzero(blk, blksz); crypto_copydata(crp, crp->crp_payload_start + i, len, blk); exf->decrypt(swe->sw_kschedule, blk); crypto_copyback(crp, crp->crp_payload_start + i, len, blk); } } else { /* Inject the authentication data */ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg); } return (0); } /* * Apply a cipher and a digest to perform EtA. 
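 *
 * Editorial aside (not part of the original file): in ETA mode the digest
 * covers the AAD region plus the payload region, while only the payload is
 * ciphered; on encryption the cipher pass runs first so the MAC is taken
 * over ciphertext, and on decryption the MAC is verified before anything
 * is decrypted.  A hedged sketch of how an IPsec-style consumer might lay
 * out such a request over an mbuf chain (helper name, offsets and the
 * choice to authenticate the in-buffer IV as part of the AAD are
 * assumptions for illustration):
 *
 *	static void
 *	fill_eta_request(struct cryptop *crp, struct mbuf *m, int hdrlen,
 *	    int ivlen, int payload_len, int icv_len, bool encrypt)
 *	{
 *		crp->crp_mbuf = m;
 *		crp->crp_buf_type = CRYPTO_BUF_MBUF;
 *		crp->crp_ilen = hdrlen + ivlen + payload_len + icv_len;
 *
 *		crp->crp_aad_start = 0;
 *		crp->crp_aad_length = hdrlen + ivlen;
 *		crp->crp_iv_start = hdrlen;
 *		crp->crp_payload_start = hdrlen + ivlen;
 *		crp->crp_payload_length = payload_len;
 *		crp->crp_digest_start = hdrlen + ivlen + payload_len;
 *
 *		crp->crp_op = encrypt ?
 *		    CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST :
 *		    CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
 *	}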
*/ static int swcr_eta(struct swcr_session *ses, struct cryptop *crp) { int error; if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { error = swcr_encdec(ses, crp); if (error == 0) error = swcr_authcompute(ses, crp); } else { error = swcr_authcompute(ses, crp); if (error == 0) error = swcr_encdec(ses, crp); } return (error); } /* * Apply a compression/decompression algorithm */ static int swcr_compdec(struct swcr_session *ses, struct cryptop *crp) { u_int8_t *data, *out; struct comp_algo *cxf; int adj; u_int32_t result; cxf = ses->swcr_compdec.sw_cxf; /* We must handle the whole buffer of data in one time * then if there is not all the data in the mbuf, we must * copy in a buffer. */ data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT); if (data == NULL) return (EINVAL); crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length, data); if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) result = cxf->compress(data, crp->crp_payload_length, &out); else result = cxf->decompress(data, crp->crp_payload_length, &out); free(data, M_CRYPTO_DATA); if (result == 0) return (EINVAL); crp->crp_olen = result; /* Check the compressed size when doing compression */ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) { if (result >= crp->crp_payload_length) { /* Compression was useless, we lost time */ free(out, M_CRYPTO_DATA); return (0); } } /* Copy back the (de)compressed data. m_copyback is * extending the mbuf as necessary. */ crypto_copyback(crp, crp->crp_payload_start, result, out); if (result < crp->crp_payload_length) { switch (crp->crp_buf_type) { case CRYPTO_BUF_MBUF: adj = result - crp->crp_payload_length; m_adj(crp->crp_mbuf, adj); break; case CRYPTO_BUF_UIO: { struct uio *uio = crp->crp_uio; int ind; adj = crp->crp_payload_length - result; ind = uio->uio_iovcnt - 1; while (adj > 0 && ind >= 0) { if (adj < uio->uio_iov[ind].iov_len) { uio->uio_iov[ind].iov_len -= adj; break; } adj -= uio->uio_iov[ind].iov_len; uio->uio_iov[ind].iov_len = 0; ind--; uio->uio_iovcnt--; } } break; } } free(out, M_CRYPTO_DATA); return 0; } static int swcr_setup_encdec(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_encdec *swe; struct enc_xform *txf; int error; swe = &ses->swcr_encdec; txf = crypto_cipher(csp); MPASS(txf->ivsize == csp->csp_ivlen); if (csp->csp_cipher_key != NULL) { error = txf->setkey(&swe->sw_kschedule, csp->csp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } swe->sw_exf = txf; return (0); } static int swcr_setup_auth(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_auth *swa; struct auth_hash *axf; swa = &ses->swcr_auth; axf = crypto_auth_hash(csp); swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: swa->sw_octx_len = axf->ctxsize; swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_octx == NULL) return (ENOBUFS); if (csp->csp_auth_key != NULL) { swcr_authprepare(axf, swa, csp->csp_auth_key, csp->csp_auth_klen); } if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_MD5_KPDK: case 
CRYPTO_SHA1_KPDK: swa->sw_octx_len = csp->csp_auth_klen; swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_octx == NULL) return (ENOBUFS); /* Store the key so we can "append" it to the payload */ if (csp->csp_auth_key != NULL) { swcr_authprepare(axf, swa, csp->csp_auth_key, csp->csp_auth_klen); } if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; #ifdef notdef case CRYPTO_MD5: #endif case CRYPTO_SHA1: case CRYPTO_SHA2_224: case CRYPTO_SHA2_256: case CRYPTO_SHA2_384: case CRYPTO_SHA2_512: axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_NIST_GMAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_gmac; break; case CRYPTO_POLY1305: case CRYPTO_BLAKE2B: case CRYPTO_BLAKE2S: /* * Blake2b and Blake2s support an optional key but do * not require one. */ if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); axf->Init(swa->sw_ictx); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_authcompute; break; case CRYPTO_AES_CCM_CBC_MAC: axf->Init(swa->sw_ictx); axf->Setkey(swa->sw_ictx, csp->csp_auth_key, csp->csp_auth_klen); if (csp->csp_mode == CSP_MODE_DIGEST) ses->swcr_process = swcr_ccm_cbc_mac; break; } return (0); } static int swcr_setup_gcm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_encdec *swe; struct swcr_auth *swa; struct enc_xform *txf; struct auth_hash *axf; int error; if (csp->csp_ivlen != AES_GCM_IV_LEN) return (EINVAL); /* First, setup the auth side. */ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_nist_gmac_aes_128; break; case 192: axf = &auth_hash_nist_gmac_aes_192; break; case 256: axf = &auth_hash_nist_gmac_aes_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. */ swe = &ses->swcr_encdec; txf = &enc_xform_aes_nist_gcm; if (csp->csp_cipher_key != NULL) { error = txf->setkey(&swe->sw_kschedule, csp->csp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } swe->sw_exf = txf; return (0); } static int swcr_setup_ccm(struct swcr_session *ses, const struct crypto_session_params *csp) { struct swcr_encdec *swe; struct swcr_auth *swa; struct enc_xform *txf; struct auth_hash *axf; int error; if (csp->csp_ivlen != AES_CCM_IV_LEN) return (EINVAL); /* First, setup the auth side. 
*/ swa = &ses->swcr_auth; switch (csp->csp_cipher_klen * 8) { case 128: axf = &auth_hash_ccm_cbc_mac_128; break; case 192: axf = &auth_hash_ccm_cbc_mac_192; break; case 256: axf = &auth_hash_ccm_cbc_mac_256; break; default: return (EINVAL); } swa->sw_axf = axf; if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize) return (EINVAL); if (csp->csp_auth_mlen == 0) swa->sw_mlen = axf->hashsize; else swa->sw_mlen = csp->csp_auth_mlen; swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT); if (swa->sw_ictx == NULL) return (ENOBUFS); axf->Init(swa->sw_ictx); if (csp->csp_cipher_key != NULL) axf->Setkey(swa->sw_ictx, csp->csp_cipher_key, csp->csp_cipher_klen); /* Second, setup the cipher side. */ swe = &ses->swcr_encdec; txf = &enc_xform_ccm; if (csp->csp_cipher_key != NULL) { error = txf->setkey(&swe->sw_kschedule, csp->csp_cipher_key, csp->csp_cipher_klen); if (error) return (error); } swe->sw_exf = txf; return (0); } static bool swcr_auth_supported(const struct crypto_session_params *csp) { struct auth_hash *axf; axf = crypto_auth_hash(csp); if (axf == NULL) return (false); switch (csp->csp_auth_alg) { case CRYPTO_MD5_HMAC: case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_224_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_NULL_HMAC: case CRYPTO_RIPEMD160_HMAC: case CRYPTO_MD5_KPDK: case CRYPTO_SHA1_KPDK: break; case CRYPTO_AES_NIST_GMAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_GCM_IV_LEN) return (false); break; case CRYPTO_POLY1305: if (csp->csp_auth_klen != POLY1305_KEY_LEN) return (false); break; case CRYPTO_AES_CCM_CBC_MAC: switch (csp->csp_auth_klen * 8) { case 128: case 192: case 256: break; default: return (false); } if (csp->csp_auth_key == NULL) return (false); if (csp->csp_ivlen != AES_CCM_IV_LEN) return (false); break; } return (true); } static bool swcr_cipher_supported(const struct crypto_session_params *csp) { struct enc_xform *txf; txf = crypto_cipher(csp); if (txf == NULL) return (false); if (csp->csp_cipher_alg != CRYPTO_NULL_CBC && txf->ivsize != csp->csp_ivlen) return (false); return (true); } static int swcr_probesession(device_t dev, const struct crypto_session_params *csp) { if (csp->csp_flags != 0) return (EINVAL); switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: break; default: return (EINVAL); } break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: return (EINVAL); default: if (!swcr_cipher_supported(csp)) return (EINVAL); break; } break; case CSP_MODE_DIGEST: if (!swcr_auth_supported(csp)) return (EINVAL); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: break; default: return (EINVAL); } break; case CSP_MODE_ETA: /* AEAD algorithms cannot be used for EtA. */ switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: return (EINVAL); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: return (EINVAL); } if (!swcr_cipher_supported(csp) || !swcr_auth_supported(csp)) return (EINVAL); break; default: return (EINVAL); } return (CRYPTODEV_PROBE_SOFTWARE); } /* * Generate a new software session. 
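 *
 * Editorial aside (not part of the original file): a hedged sketch of the
 * session parameters a consumer would hand to crypto_newsession() to reach
 * the AEAD branch below with AES-256-GCM ("key" is hypothetical; the
 * 16-byte tag is requested explicitly, though csp_auth_mlen == 0 would
 * likewise select the full hash size):
 *
 *	struct crypto_session_params csp;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_AEAD;
 *	csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
 *	csp.csp_cipher_key = key;
 *	csp.csp_cipher_klen = 32;
 *	csp.csp_ivlen = AES_GCM_IV_LEN;
 *	csp.csp_auth_mlen = 16;
 *	error = crypto_newsession(&cses, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *
 * Note that swcr_gcm() above insists on CRYPTO_F_IV_SEPARATE, so requests
 * made against such a session must carry their nonce in crp_iv.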
*/ static int swcr_newsession(device_t dev, crypto_session_t cses, const struct crypto_session_params *csp) { struct swcr_session *ses; struct swcr_encdec *swe; struct swcr_auth *swa; struct comp_algo *cxf; int error; ses = crypto_get_driver_session(cses); mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF); error = 0; swe = &ses->swcr_encdec; swa = &ses->swcr_auth; switch (csp->csp_mode) { case CSP_MODE_COMPRESS: switch (csp->csp_cipher_alg) { case CRYPTO_DEFLATE_COMP: cxf = &comp_algo_deflate; break; #ifdef INVARIANTS default: panic("bad compression algo"); #endif } ses->swcr_compdec.sw_cxf = cxf; ses->swcr_process = swcr_compdec; break; case CSP_MODE_CIPHER: switch (csp->csp_cipher_alg) { case CRYPTO_NULL_CBC: ses->swcr_process = swcr_null; break; #ifdef INVARIANTS case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: panic("bad cipher algo"); #endif default: error = swcr_setup_encdec(ses, csp); if (error == 0) ses->swcr_process = swcr_encdec; } break; case CSP_MODE_DIGEST: error = swcr_setup_auth(ses, csp); break; case CSP_MODE_AEAD: switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: error = swcr_setup_gcm(ses, csp); if (error == 0) ses->swcr_process = swcr_gcm; break; case CRYPTO_AES_CCM_16: error = swcr_setup_ccm(ses, csp); if (error == 0) ses->swcr_process = swcr_ccm; break; #ifdef INVARIANTS default: panic("bad aead algo"); #endif } break; case CSP_MODE_ETA: #ifdef INVARIANTS switch (csp->csp_cipher_alg) { case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_CCM_16: panic("bad eta cipher algo"); } switch (csp->csp_auth_alg) { case CRYPTO_AES_NIST_GMAC: case CRYPTO_AES_CCM_CBC_MAC: panic("bad eta auth algo"); } #endif error = swcr_setup_auth(ses, csp); if (error) break; if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) { /* Effectively degrade to digest mode. */ ses->swcr_process = swcr_authcompute; break; } error = swcr_setup_encdec(ses, csp); if (error == 0) ses->swcr_process = swcr_eta; break; default: error = EINVAL; } if (error) swcr_freesession(dev, cses); return (error); } static void swcr_freesession(device_t dev, crypto_session_t cses) { struct swcr_session *ses; struct swcr_auth *swa; struct enc_xform *txf; struct auth_hash *axf; ses = crypto_get_driver_session(cses); mtx_destroy(&ses->swcr_lock); txf = ses->swcr_encdec.sw_exf; if (txf != NULL) { if (ses->swcr_encdec.sw_kschedule != NULL) txf->zerokey(&(ses->swcr_encdec.sw_kschedule)); } axf = ses->swcr_auth.sw_axf; if (axf != NULL) { swa = &ses->swcr_auth; if (swa->sw_ictx != NULL) { explicit_bzero(swa->sw_ictx, axf->ctxsize); free(swa->sw_ictx, M_CRYPTO_DATA); } if (swa->sw_octx != NULL) { explicit_bzero(swa->sw_octx, swa->sw_octx_len); free(swa->sw_octx, M_CRYPTO_DATA); } } } /* * Process a software request. 
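 *
 * Editorial aside (not part of the original file): because this driver
 * registers with CRYPTOCAP_F_SYNC, swcr_process() normally runs in the
 * context of the thread that called crypto_dispatch() (unless the caller
 * requested CRYPTO_F_ASYNC).  A consumer that wants to block until a
 * request completes typically pairs dispatch with a callback along these
 * hedged, hypothetical lines ("mymtx" is the caller's mutex; the otherwise
 * empty lock/unlock pair in the callback closes the race between the
 * waiter's CRYPTO_F_DONE check and its sleep):
 *
 *	static int
 *	example_sleep_done(struct cryptop *crp)
 *	{
 *		struct mtx *mtxp = crp->crp_opaque;
 *
 *		mtx_lock(mtxp);
 *		mtx_unlock(mtxp);
 *		wakeup_one(crp);
 *		return (0);
 *	}
 *
 *	crp->crp_opaque = &mymtx;
 *	crp->crp_callback = example_sleep_done;
 *	error = crypto_dispatch(crp);
 *	if (error == 0) {
 *		mtx_lock(&mymtx);
 *		while ((crp->crp_flags & CRYPTO_F_DONE) == 0)
 *			msleep(crp, &mymtx, PWAIT, "crpwait", 0);
 *		mtx_unlock(&mymtx);
 *		error = crp->crp_etype;
 *	}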
*/ static int swcr_process(device_t dev, struct cryptop *crp, int hint) { struct swcr_session *ses; ses = crypto_get_driver_session(crp->crp_session); mtx_lock(&ses->swcr_lock); crp->crp_etype = ses->swcr_process(ses, crp); mtx_unlock(&ses->swcr_lock); crypto_done(crp); return (0); } static void swcr_identify(driver_t *drv, device_t parent) { /* NB: order 10 is so we get attached after h/w devices */ if (device_find_child(parent, "cryptosoft", -1) == NULL && BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0) panic("cryptosoft: could not attach"); } static int swcr_probe(device_t dev) { device_set_desc(dev, "software crypto"); return (BUS_PROBE_NOWILDCARD); } static int swcr_attach(device_t dev) { swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session), CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); if (swcr_id < 0) { device_printf(dev, "cannot initialize!"); return (ENXIO); } return (0); } static int swcr_detach(device_t dev) { crypto_unregister_all(swcr_id); return 0; } static device_method_t swcr_methods[] = { DEVMETHOD(device_identify, swcr_identify), DEVMETHOD(device_probe, swcr_probe), DEVMETHOD(device_attach, swcr_attach), DEVMETHOD(device_detach, swcr_detach), DEVMETHOD(cryptodev_probesession, swcr_probesession), DEVMETHOD(cryptodev_newsession, swcr_newsession), DEVMETHOD(cryptodev_freesession,swcr_freesession), DEVMETHOD(cryptodev_process, swcr_process), {0, 0}, }; static driver_t swcr_driver = { "cryptosoft", swcr_methods, 0, /* NB: no softc */ }; static devclass_t swcr_devclass; /* * NB: We explicitly reference the crypto module so we * get the necessary ordering when built as a loadable * module. This is required because we bundle the crypto * module code together with the cryptosoft driver (otherwise * normal module dependencies would handle things). */ extern int crypto_modevent(struct module *, int, void *); /* XXX where to attach */ DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); MODULE_VERSION(cryptosoft, 1); MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
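
For reference (an editorial addition, not part of this revision): the userspace side of the interface patched above is exercised through /dev/crypto with the session_op and crypt_op structures from cryptodev.h. A minimal, hedged sketch, assuming an AES-CBC key of klen bytes and a buffer whose length is a multiple of the AES block size; error reporting is reduced to the return value:

#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Encrypt "len" bytes of "buf" in place with AES-CBC via /dev/crypto. */
static int
encrypt_once(const char *key, int klen, char *buf, size_t len, const char *iv)
{
	struct session_op sop;
	struct crypt_op cop;
	int fd, cfd, error = -1;

	if ((fd = open("/dev/crypto", O_RDWR)) == -1)
		return (-1);
	/* Clone a private descriptor, as the header comment recommends. */
	if (ioctl(fd, CRIOGET, &cfd) == -1)
		goto out;
	(void)fcntl(cfd, F_SETFD, FD_CLOEXEC);

	memset(&sop, 0, sizeof(sop));
	sop.cipher = CRYPTO_AES_CBC;
	sop.keylen = klen;
	sop.key = key;
	if (ioctl(cfd, CIOCGSESSION, &sop) == -1)
		goto out2;

	memset(&cop, 0, sizeof(cop));
	cop.ses = sop.ses;
	cop.op = COP_ENCRYPT;
	cop.len = len;
	cop.src = buf;
	cop.dst = buf;
	cop.iv = iv;
	if (ioctl(cfd, CIOCCRYPT, &cop) == 0)
		error = 0;

	(void)ioctl(cfd, CIOCFSESSION, &sop.ses);
out2:
	close(cfd);
out:
	close(fd);
	return (error);
}

A session created this way is served by the cryptosoft driver patched above when no hardware driver claims it; session2_op and CIOCGSESSION2 additionally let the caller steer placement with the CRYPTO_FLAG_HARDWARE/CRYPTO_FLAG_SOFTWARE crid flags.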