Index: head/share/man/man4/Makefile =================================================================== --- head/share/man/man4/Makefile (revision 367385) +++ head/share/man/man4/Makefile (revision 367386) @@ -1,1033 +1,1035 @@ # @(#)Makefile 8.1 (Berkeley) 6/18/93 # $FreeBSD$ .include <src.opts.mk> MAN= aac.4 \ aacraid.4 \ acpi.4 \ ${_acpi_asus.4} \ ${_acpi_asus_wmi.4} \ ${_acpi_dock.4} \ ${_acpi_fujitsu.4} \ ${_acpi_hp.4} \ ${_acpi_ibm.4} \ ${_acpi_panasonic.4} \ ${_acpi_rapidstart.4} \ ${_acpi_sony.4} \ acpi_battery.4 \ acpi_thermal.4 \ ${_acpi_toshiba.4} \ acpi_video.4 \ ${_acpi_wmi.4} \ ada.4 \ adm6996fc.4 \ ads111x.4 \ ae.4 \ ${_aesni.4} \ age.4 \ agp.4 \ ahc.4 \ ahci.4 \ ahd.4 \ ${_aibs.4} \ aio.4 \ alc.4 \ ale.4 \ alpm.4 \ altera_atse.4 \ altera_avgen.4 \ altera_jtag_uart.4 \ altera_sdcard.4 \ altq.4 \ amdpm.4 \ ${_amdsbwd.4} \ ${_amdsmb.4} \ ${_amdsmn.4} \ ${_amdtemp.4} \ amr.4 \ an.4 \ ${_aout.4} \ ${_apic.4} \ arcmsr.4 \ ${_asmc.4} \ at45d.4 \ ata.4 \ ath.4 \ ath_ahb.4 \ ath_hal.4 \ ath_pci.4 \ atkbd.4 \ atkbdc.4 \ atp.4 \ ${_atf_test_case.4} \ ${_atrtc.4} \ ${_attimer.4} \ audit.4 \ auditpipe.4 \ aue.4 \ axe.4 \ axge.4 \ bce.4 \ bcma.4 \ bfe.4 \ bge.4 \ ${_bhyve.4} \ bhnd.4 \ bhnd_chipc.4 \ bhnd_pmu.4 \ bhndb.4 \ bhndb_pci.4 \ blackhole.4 \ bnxt.4 \ bpf.4 \ bridge.4 \ bt.4 \ bwi.4 \ bwn.4 \ ${_bxe.4} \ ${_bytgpio.4} \ capsicum.4 \ cardbus.4 \ carp.4 \ cas.4 \ cc_cdg.4 \ cc_chd.4 \ cc_cubic.4 \ cc_dctcp.4 \ cc_hd.4 \ cc_htcp.4 \ cc_newreno.4 \ cc_vegas.4 \ ${_ccd.4} \ ccr.4 \ cd.4 \ cdce.4 \ cdceem.4 \ cfi.4 \ cfumass.4 \ ch.4 \ chromebook_platform.4 \ ${_chvgpio.4} \ ciss.4 \ cloudabi.4 \ cmx.4 \ ${_coretemp.4} \ cp2112.4 \ ${_cpuctl.4} \ cpufreq.4 \ crypto.4 \ ctl.4 \ cue.4 \ cxgb.4 \ cxgbe.4 \ cxgbev.4 \ cyapa.4 \ da.4 \ dc.4 \ dcons.4 \ dcons_crom.4 \ ddb.4 \ devctl.4 \ disc.4 \ divert.4 \ ${_dpms.4} \ ds1307.4 \ ds3231.4 \ ${_dtrace_provs} \ dummynet.4 \ edsc.4 \ ehci.4 \ em.4 \ ena.4 \ enc.4 \ epair.4 \ esp.4 \ est.4 \ et.4 \ etherswitch.4 \ eventtimers.4 \ exca.4 \ e6060sw.4 \ fd.4 \ fdc.4 \ fdt.4 \ fdt_pinctrl.4 \ fdtbus.4 \ ffclock.4 \ filemon.4 \ firewire.4 \ full.4 \ fwe.4 \ fwip.4 \ fwohci.4 \ fxp.4 \ gbde.4 \ gdb.4 \ gem.4 \ geom.4 \ geom_linux_lvm.4 \ geom_map.4 \ geom_uzip.4 \ gif.4 \ gpio.4 \ gpioiic.4 \ gpiokeys.4 \ gpioled.4 \ gpioths.4 \ gre.4 \ h_ertt.4 \ hifn.4 \ hme.4 \ hpet.4 \ ${_hpt27xx.4} \ ${_hptiop.4} \ ${_hptmv.4} \ ${_hptnr.4} \ ${_hptrr.4} \ ${_hv_kvp.4} \ ${_hv_netvsc.4} \ ${_hv_storvsc.4} \ ${_hv_utils.4} \ ${_hv_vmbus.4} \ ${_hv_vss.4} \ hwpmc.4 \ ${_hwpstate_intel.4} \ iavf.4 \ ichsmb.4 \ ${_ichwd.4} \ icmp.4 \ icmp6.4 \ ida.4 \ if_ipsec.4 \ iflib.4 \ ifmib.4 \ ig4.4 \ igmp.4 \ iic.4 \ iic_gpiomux.4 \ iicbb.4 \ iicbus.4 \ iicmux.4 \ iicsmb.4 \ iir.4 \ ${_imcsmb.4} \ inet.4 \ inet6.4 \ intpm.4 \ intro.4 \ ${_io.4} \ ${_ioat.4} \ ip.4 \ ip6.4 \ ipfirewall.4 \ ipheth.4 \ ${_ipmi.4} \ ips.4 \ ipsec.4 \ ipw.4 \ ipwfw.4 \ isci.4 \ isl.4 \ ismt.4 \ isp.4 \ ispfw.4 \ ${_itwd.4} \ iwi.4 \ iwifw.4 \ iwm.4 \ iwmfw.4 \ iwn.4 \ iwnfw.4 \ ixgbe.4 \ ixl.4 \ jedec_dimm.4 \ jme.4 \ kbdmux.4 \ kcov.4 \ keyboard.4 \ kld.4 \ ksyms.4 \ ksz8995ma.4 \ ktr.4 \ kue.4 \ lagg.4 \ le.4 \ led.4 \ lge.4 \ ${_linux.4} \ liquidio.4 \ lm75.4 \ lo.4 \ lp.4 \ lpbb.4 \ lpt.4 \ ltc430x.4 \ mac.4 \ mac_biba.4 \ mac_bsdextended.4 \ mac_ifoff.4 \ mac_lomac.4 \ mac_mls.4 \ mac_none.4 \ mac_ntpd.4 \ mac_partition.4 \ mac_portacl.4 \ mac_seeotheruids.4 \ mac_stub.4 \ mac_test.4 \ malo.4 \ md.4 \ mdio.4 \ me.4 \ mem.4 \ meteor.4 \ mfi.4 \ miibus.4 \ mk48txx.4 \ mld.4 \ mlx.4 \ mlx4en.4 \ mlx5en.4 \ 
mly.4 \ mmc.4 \ mmcsd.4 \ mn.4 \ mod_cc.4 \ mos.4 \ mouse.4 \ mpr.4 \ mps.4 \ mpt.4 \ mrsas.4 \ msk.4 \ mtio.4 \ multicast.4 \ muge.4 \ mvs.4 \ mwl.4 \ mwlfw.4 \ mx25l.4 \ mxge.4 \ my.4 \ ${_ndis.4} \ net80211.4 \ netdump.4 \ netfpga10g_nf10bmac.4 \ netgdb.4 \ netgraph.4 \ netintro.4 \ netmap.4 \ ${_nfe.4} \ ${_nfsmb.4} \ ng_async.4 \ ngatmbase.4 \ ng_atmllc.4 \ ng_bpf.4 \ ng_bridge.4 \ ng_bt3c.4 \ ng_btsocket.4 \ ng_car.4 \ ng_ccatm.4 \ ng_checksum.4 \ ng_cisco.4 \ ng_deflate.4 \ ng_device.4 \ nge.4 \ ng_echo.4 \ ng_eiface.4 \ ng_etf.4 \ ng_ether.4 \ ng_ether_echo.4 \ ng_frame_relay.4 \ ng_gif.4 \ ng_gif_demux.4 \ ng_h4.4 \ ng_hci.4 \ ng_hole.4 \ ng_hub.4 \ ng_iface.4 \ ng_ipfw.4 \ ng_ip_input.4 \ ng_ksocket.4 \ ng_l2cap.4 \ ng_l2tp.4 \ ng_lmi.4 \ ng_mppc.4 \ ng_nat.4 \ ng_netflow.4 \ ng_one2many.4 \ ng_patch.4 \ ng_pipe.4 \ ng_ppp.4 \ ng_pppoe.4 \ ng_pptpgre.4 \ ng_pred1.4 \ ng_rfc1490.4 \ ng_socket.4 \ ng_source.4 \ ng_split.4 \ ng_sppp.4 \ ng_sscfu.4 \ ng_sscop.4 \ ng_tag.4 \ ng_tcpmss.4 \ ng_tee.4 \ ng_tty.4 \ ng_ubt.4 \ ng_UI.4 \ ng_uni.4 \ ng_vjc.4 \ ng_vlan.4 \ nmdm.4 \ ${_ntb.4} \ ${_ntb_hw_amd.4} \ ${_ntb_hw_intel.4} \ ${_ntb_hw_plx.4} \ ${_ntb_transport.4} \ ${_nda.4} \ ${_if_ntb.4} \ null.4 \ numa.4 \ ${_nvd.4} \ ${_nvdimm.4} \ ${_nvme.4} \ ${_nvram.4} \ ${_nvram2env.4} \ oce.4 \ ocs_fc.4 \ ohci.4 \ orm.4 \ ${_ossl.4} \ ow.4 \ ow_temp.4 \ owc.4 \ ${_padlock.4} \ pass.4 \ pccard.4 \ pccbb.4 \ pcf.4 \ ${_pchtherm.4} \ pci.4 \ pcib.4 \ pcic.4 \ pcm.4 \ ${_pf.4} \ ${_pflog.4} \ ${_pfsync.4} \ pim.4 \ pms.4 \ polling.4 \ ppbus.4 \ ppc.4 \ ppi.4 \ procdesc.4 \ proto.4 \ psm.4 \ pst.4 \ pt.4 \ ptnet.4 \ pts.4 \ pty.4 \ puc.4 \ pwmc.4 \ + ${_qat.4} \ ${_qlxge.4} \ ${_qlxgb.4} \ ${_qlxgbe.4} \ ${_qlnxe.4} \ ral.4 \ random.4 \ rctl.4 \ re.4 \ rgephy.4 \ rights.4 \ rl.4 \ rndtest.4 \ route.4 \ rtwn.4 \ rtwnfw.4 \ rtwn_pci.4 \ rue.4 \ sa.4 \ safe.4 \ safexcel.4 \ sbp.4 \ sbp_targ.4 \ scc.4 \ sched_4bsd.4 \ sched_ule.4 \ screen.4 \ scsi.4 \ sctp.4 \ sdhci.4 \ sem.4 \ send.4 \ ses.4 \ ${_sfxge.4} \ sge.4 \ siba.4 \ siftr.4 \ siis.4 \ simplebus.4 \ sis.4 \ sk.4 \ ${_smartpqi.4} \ smb.4 \ smbios.4 \ smbus.4 \ smp.4 \ smsc.4 \ snd_ad1816.4 \ snd_als4000.4 \ snd_atiixp.4 \ snd_cmi.4 \ snd_cs4281.4 \ snd_csa.4 \ snd_ds1.4 \ snd_emu10k1.4 \ snd_emu10kx.4 \ snd_envy24.4 \ snd_envy24ht.4 \ snd_es137x.4 \ snd_ess.4 \ snd_fm801.4 \ snd_gusc.4 \ snd_hda.4 \ snd_hdspe.4 \ snd_ich.4 \ snd_maestro3.4 \ snd_maestro.4 \ snd_mss.4 \ snd_neomagic.4 \ snd_sbc.4 \ snd_solo.4 \ snd_spicds.4 \ snd_t4dwave.4 \ snd_uaudio.4 \ snd_via8233.4 \ snd_via82c686.4 \ snd_vibes.4 \ snp.4 \ spigen.4 \ ${_spkr.4} \ splash.4 \ sppp.4 \ ste.4 \ stf.4 \ stge.4 \ ${_sume.4} \ ${_superio.4} \ sym.4 \ syncache.4 \ syncer.4 \ syscons.4 \ sysmouse.4 \ tap.4 \ targ.4 \ tcp.4 \ tcp_bbr.4 \ tdfx.4 \ terasic_mtl.4 \ termios.4 \ textdump.4 \ ti.4 \ timecounters.4 \ ${_tpm.4} \ tty.4 \ tun.4 \ twa.4 \ twe.4 \ tws.4 \ udp.4 \ udplite.4 \ unix.4 \ ure.4 \ vale.4 \ vga.4 \ vge.4 \ viapm.4 \ ${_viawd.4} \ ${_virtio.4} \ ${_virtio_balloon.4} \ ${_virtio_blk.4} \ ${_virtio_console.4} \ ${_virtio_random.4} \ ${_virtio_scsi.4} \ ${_vmci.4} \ vkbd.4 \ vlan.4 \ vxlan.4 \ ${_vmd.4} \ ${_vmm.4} \ ${_vmx.4} \ vr.4 \ vt.4 \ vte.4 \ ${_vtnet.4} \ watchdog.4 \ ${_wbwd.4} \ wi.4 \ witness.4 \ wlan.4 \ wlan_acl.4 \ wlan_amrr.4 \ wlan_ccmp.4 \ wlan_tkip.4 \ wlan_wep.4 \ wlan_xauth.4 \ wmt.4 \ ${_wpi.4} \ wsp.4 \ ${_xen.4} \ xhci.4 \ xl.4 \ ${_xnb.4} \ xpt.4 \ zero.4 MLINKS= ads111x.4 ads1013.4 \ ads111x.4 ads1014.4 \ ads111x.4 ads1015.4 \ ads111x.4 ads1113.4 \ 
ads111x.4 ads1114.4 \ ads111x.4 ads1115.4 MLINKS+=ae.4 if_ae.4 MLINKS+=age.4 if_age.4 MLINKS+=agp.4 agpgart.4 MLINKS+=alc.4 if_alc.4 MLINKS+=ale.4 if_ale.4 MLINKS+=altera_atse.4 atse.4 MLINKS+=altera_sdcard.4 altera_sdcardc.4 MLINKS+=altq.4 ALTQ.4 MLINKS+=ath.4 if_ath.4 MLINKS+=ath_pci.4 if_ath_pci.4 MLINKS+=an.4 if_an.4 MLINKS+=aue.4 if_aue.4 MLINKS+=axe.4 if_axe.4 MLINKS+=bce.4 if_bce.4 MLINKS+=bfe.4 if_bfe.4 MLINKS+=bge.4 if_bge.4 MLINKS+=bnxt.4 if_bnxt.4 MLINKS+=bridge.4 if_bridge.4 MLINKS+=bwi.4 if_bwi.4 MLINKS+=bwn.4 if_bwn.4 MLINKS+=${_bxe.4} ${_if_bxe.4} MLINKS+=cas.4 if_cas.4 MLINKS+=cdce.4 if_cdce.4 MLINKS+=cfi.4 cfid.4 MLINKS+=cloudabi.4 cloudabi32.4 \ cloudabi.4 cloudabi64.4 MLINKS+=crypto.4 cryptodev.4 MLINKS+=cue.4 if_cue.4 MLINKS+=cxgb.4 if_cxgb.4 MLINKS+=cxgbe.4 if_cxgbe.4 \ cxgbe.4 vcxgbe.4 \ cxgbe.4 if_vcxgbe.4 \ cxgbe.4 cxl.4 \ cxgbe.4 if_cxl.4 \ cxgbe.4 vcxl.4 \ cxgbe.4 if_vcxl.4 \ cxgbe.4 cc.4 \ cxgbe.4 if_cc.4 \ cxgbe.4 vcc.4 \ cxgbe.4 if_vcc.4 MLINKS+=cxgbev.4 if_cxgbev.4 \ cxgbev.4 cxlv.4 \ cxgbev.4 if_cxlv.4 \ cxgbev.4 ccv.4 \ cxgbev.4 if_ccv.4 MLINKS+=dc.4 if_dc.4 MLINKS+=disc.4 if_disc.4 MLINKS+=edsc.4 if_edsc.4 MLINKS+=em.4 if_em.4 \ em.4 igb.4 \ em.4 if_igb.4 MLINKS+=enc.4 if_enc.4 MLINKS+=epair.4 if_epair.4 MLINKS+=et.4 if_et.4 MLINKS+=fd.4 stderr.4 \ fd.4 stdin.4 \ fd.4 stdout.4 MLINKS+=fdt.4 FDT.4 MLINKS+=firewire.4 ieee1394.4 MLINKS+=fwe.4 if_fwe.4 MLINKS+=fwip.4 if_fwip.4 MLINKS+=fxp.4 if_fxp.4 MLINKS+=gem.4 if_gem.4 MLINKS+=geom.4 GEOM.4 MLINKS+=gif.4 if_gif.4 MLINKS+=gpio.4 gpiobus.4 MLINKS+=gpioths.4 dht11.4 MLINKS+=gpioths.4 dht22.4 MLINKS+=gre.4 if_gre.4 MLINKS+=hme.4 if_hme.4 MLINKS+=hpet.4 acpi_hpet.4 MLINKS+=${_hptrr.4} ${_rr232x.4} MLINKS+=${_attimer.4} ${_i8254.4} MLINKS+=ip.4 rawip.4 MLINKS+=ipfirewall.4 ipaccounting.4 \ ipfirewall.4 ipacct.4 \ ipfirewall.4 ipfw.4 MLINKS+=ipheth.4 if_ipheth.4 MLINKS+=ipw.4 if_ipw.4 MLINKS+=iwi.4 if_iwi.4 MLINKS+=iwm.4 if_iwm.4 MLINKS+=iwn.4 if_iwn.4 MLINKS+=ixgbe.4 ix.4 MLINKS+=ixgbe.4 if_ix.4 MLINKS+=ixgbe.4 if_ixgbe.4 MLINKS+=ixl.4 if_ixl.4 MLINKS+=iavf.4 if_iavf.4 MLINKS+=jme.4 if_jme.4 MLINKS+=kue.4 if_kue.4 MLINKS+=lagg.4 trunk.4 MLINKS+=lagg.4 if_lagg.4 MLINKS+=le.4 if_le.4 MLINKS+=lge.4 if_lge.4 MLINKS+=lo.4 loop.4 MLINKS+=lp.4 plip.4 MLINKS+=malo.4 if_malo.4 MLINKS+=md.4 vn.4 MLINKS+=mem.4 kmem.4 MLINKS+=mfi.4 mfi_linux.4 \ mfi.4 mfip.4 MLINKS+=mlx5en.4 mce.4 MLINKS+=mn.4 if_mn.4 MLINKS+=mos.4 if_mos.4 MLINKS+=msk.4 if_msk.4 MLINKS+=mwl.4 if_mwl.4 MLINKS+=mxge.4 if_mxge.4 MLINKS+=my.4 if_my.4 MLINKS+=${_ndis.4} ${_if_ndis.4} MLINKS+=netfpga10g_nf10bmac.4 if_nf10bmac.4 MLINKS+=netintro.4 net.4 \ netintro.4 networking.4 MLINKS+=${_nfe.4} ${_if_nfe.4} MLINKS+=nge.4 if_nge.4 MLINKS+=ow.4 onewire.4 MLINKS+=pccbb.4 cbb.4 MLINKS+=pcm.4 snd.4 \ pcm.4 sound.4 MLINKS+=pms.4 pmspcv.4 MLINKS+=ptnet.4 if_ptnet.4 MLINKS+=ral.4 if_ral.4 MLINKS+=re.4 if_re.4 MLINKS+=rl.4 if_rl.4 MLINKS+=rtwn_pci.4 if_rtwn_pci.4 MLINKS+=rue.4 if_rue.4 MLINKS+=scsi.4 CAM.4 \ scsi.4 cam.4 \ scsi.4 scbus.4 \ scsi.4 SCSI.4 MLINKS+=sge.4 if_sge.4 MLINKS+=sis.4 if_sis.4 MLINKS+=sk.4 if_sk.4 MLINKS+=smp.4 SMP.4 MLINKS+=smsc.4 if_smsc.4 MLINKS+=snd_envy24.4 snd_ak452x.4 MLINKS+=snd_sbc.4 snd_sb16.4 \ snd_sbc.4 snd_sb8.4 MLINKS+=${_spkr.4} ${_speaker.4} MLINKS+=splash.4 screensaver.4 MLINKS+=ste.4 if_ste.4 MLINKS+=stf.4 if_stf.4 MLINKS+=stge.4 if_stge.4 MLINKS+=syncache.4 syncookies.4 MLINKS+=syscons.4 sc.4 MLINKS+=tap.4 if_tap.4 \ tap.4 vmnet.4 \ tap.4 if_vmnet.4 MLINKS+=tdfx.4 tdfx_linux.4 MLINKS+=ti.4 if_ti.4 MLINKS+=tun.4 if_tun.4 
MLINKS+=ure.4 if_ure.4 MLINKS+=vge.4 if_vge.4 MLINKS+=vlan.4 if_vlan.4 MLINKS+=vxlan.4 if_vxlan.4 MLINKS+=${_vmx.4} ${_if_vmx.4} MLINKS+=vr.4 if_vr.4 MLINKS+=vte.4 if_vte.4 MLINKS+=${_vtnet.4} ${_if_vtnet.4} MLINKS+=watchdog.4 SW_WATCHDOG.4 MLINKS+=wi.4 if_wi.4 MLINKS+=${_wpi.4} ${_if_wpi.4} MLINKS+=xl.4 if_xl.4 .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" _acpi_asus.4= acpi_asus.4 _acpi_asus_wmi.4= acpi_asus_wmi.4 _acpi_dock.4= acpi_dock.4 _acpi_fujitsu.4=acpi_fujitsu.4 _acpi_hp.4= acpi_hp.4 _acpi_ibm.4= acpi_ibm.4 _acpi_panasonic.4=acpi_panasonic.4 _acpi_rapidstart.4=acpi_rapidstart.4 _acpi_sony.4= acpi_sony.4 _acpi_toshiba.4=acpi_toshiba.4 _acpi_wmi.4= acpi_wmi.4 _aesni.4= aesni.4 _aout.4= aout.4 _apic.4= apic.4 _atrtc.4= atrtc.4 _attimer.4= attimer.4 _aibs.4= aibs.4 _amdsbwd.4= amdsbwd.4 _amdsmb.4= amdsmb.4 _amdsmn.4= amdsmn.4 _amdtemp.4= amdtemp.4 _asmc.4= asmc.4 _bxe.4= bxe.4 _bytgpio.4= bytgpio.4 _chvgpio.4= chvgpio.4 _coretemp.4= coretemp.4 _cpuctl.4= cpuctl.4 _dpms.4= dpms.4 _hpt27xx.4= hpt27xx.4 _hptiop.4= hptiop.4 _hptmv.4= hptmv.4 _hptnr.4= hptnr.4 _hptrr.4= hptrr.4 _hv_kvp.4= hv_kvp.4 _hv_netvsc.4= hv_netvsc.4 _hv_storvsc.4= hv_storvsc.4 _hv_utils.4= hv_utils.4 _hv_vmbus.4= hv_vmbus.4 _hv_vss.4= hv_vss.4 _hwpstate_intel.4= hwpstate_intel.4 _i8254.4= i8254.4 _ichwd.4= ichwd.4 _if_bxe.4= if_bxe.4 _if_ndis.4= if_ndis.4 _if_nfe.4= if_nfe.4 _if_urtw.4= if_urtw.4 _if_vmx.4= if_vmx.4 _if_vtnet.4= if_vtnet.4 _if_wpi.4= if_wpi.4 _imcsmb.4= imcsmb.4 _ipmi.4= ipmi.4 _io.4= io.4 _itwd.4= itwd.4 _linux.4= linux.4 _nda.4= nda.4 _ndis.4= ndis.4 _nfe.4= nfe.4 _nfsmb.4= nfsmb.4 _if_ntb.4= if_ntb.4 _ntb.4= ntb.4 _ntb_hw_amd.4= ntb_hw_amd.4 _ntb_hw_intel.4= ntb_hw_intel.4 _ntb_hw_plx.4= ntb_hw_plx.4 _ntb_transport.4=ntb_transport.4 _nvd.4= nvd.4 _nvme.4= nvme.4 _nvram.4= nvram.4 _ossl.4= ossl.4 _padlock.4= padlock.4 _pchtherm.4= pchtherm.4 +_qat.4= qat.4 _rr232x.4= rr232x.4 _speaker.4= speaker.4 _spkr.4= spkr.4 _superio.4= superio.4 _tpm.4= tpm.4 _urtw.4= urtw.4 _viawd.4= viawd.4 _virtio.4= virtio.4 _virtio_balloon.4=virtio_balloon.4 _virtio_blk.4= virtio_blk.4 _virtio_console.4=virtio_console.4 _virtio_random.4= virtio_random.4 _virtio_scsi.4= virtio_scsi.4 _vmci.4= vmci.4 _vmx.4= vmx.4 _vtnet.4= vtnet.4 _wbwd.4= wbwd.4 _wpi.4= wpi.4 _xen.4= xen.4 _xnb.4= xnb.4 .endif .if ${MACHINE_CPUARCH} == "amd64" _ioat.4= ioat.4 _nvdimm.4= nvdimm.4 _qlxge.4= qlxge.4 _qlxgb.4= qlxgb.4 _qlxgbe.4= qlxgbe.4 _qlnxe.4= qlnxe.4 _sfxge.4= sfxge.4 _smartpqi.4= smartpqi.4 _sume.4= sume.4 _vmd.4= vmd.4 MLINKS+=qlxge.4 if_qlxge.4 MLINKS+=qlxgb.4 if_qlxgb.4 MLINKS+=qlxgbe.4 if_qlxgbe.4 MLINKS+=qlnxe.4 if_qlnxe.4 MLINKS+=sfxge.4 if_sfxge.4 MLINKS+=sume.4 if_sume.4 .if ${MK_BHYVE} != "no" _bhyve.4= bhyve.4 _vmm.4= vmm.4 .endif .endif .if ${MACHINE_CPUARCH} == "mips" _nvram2env.4= nvram2env.4 .endif .if ${MACHINE_CPUARCH} == "powerpc" _if_vtnet.4= if_vtnet.4 _nvd.4= nvd.4 _nvme.4= nvme.4 _virtio.4= virtio.4 _virtio_balloon.4=virtio_balloon.4 _virtio_blk.4= virtio_blk.4 _virtio_console.4=virtio_console.4 _virtio_random.4= virtio_random.4 _virtio_scsi.4= virtio_scsi.4 _vtnet.4= vtnet.4 .endif .if empty(MAN_ARCH) __arches= ${MACHINE} ${MACHINE_ARCH} ${MACHINE_CPUARCH} .elif ${MAN_ARCH} == "all" __arches= ${:!/bin/sh -c "/bin/ls -d ${.CURDIR}/man4.*"!:E} .else __arches= ${MAN_ARCH} .endif .for __arch in ${__arches:O:u} .if exists(${.CURDIR}/man4.${__arch}) SUBDIR+= man4.${__arch} .endif .endfor .if ${MK_BLUETOOTH} != "no" MAN+= ng_bluetooth.4 .endif .if ${MK_CCD} != "no" _ccd.4= ccd.4 .endif .if 
${MK_CDDL} != "no" _dtrace_provs= dtrace_audit.4 \ dtrace_io.4 \ dtrace_ip.4 \ dtrace_lockstat.4 \ dtrace_proc.4 \ dtrace_sched.4 \ dtrace_sctp.4 \ dtrace_tcp.4 \ dtrace_udp.4 \ dtrace_udplite.4 MLINKS+= dtrace_audit.4 dtaudit.4 .endif .if ${MK_EFI} != "no" MAN+= efidev.4 MLINKS+= efidev.4 efirtc.4 .endif .if ${MK_ISCSI} != "no" MAN+= cfiscsi.4 MAN+= iscsi.4 MAN+= iscsi_initiator.4 MAN+= iser.4 .endif .if ${MK_OFED} != "no" MAN+= mlx4ib.4 MAN+= mlx5ib.4 .endif .if ${MK_MLX5TOOL} != "no" MAN+= mlx5io.4 .endif .if ${MK_TESTS} != "no" ATF= ${SRCTOP}/contrib/atf .PATH: ${ATF}/doc _atf_test_case.4= atf-test-case.4 .endif .if ${MK_PF} != "no" _pf.4= pf.4 _pflog.4= pflog.4 _pfsync.4= pfsync.4 .endif .if ${MK_USB} != "no" MAN+= \ otus.4 \ otusfw.4 \ rsu.4 \ rsufw.4 \ rtwn_usb.4 \ rum.4 \ run.4 \ runfw.4 \ u3g.4 \ uark.4 \ uart.4 \ uath.4 \ ubsa.4 \ ubser.4 \ ubtbcmfw.4 \ uchcom.4 \ ucom.4 \ ucycom.4 \ udav.4 \ udbp.4 \ udl.4 \ uep.4 \ ufoma.4 \ uftdi.4 \ ugen.4 \ ugold.4 \ uhci.4 \ uhid.4 \ uhso.4 \ uipaq.4 \ ukbd.4 \ uled.4 \ ulpt.4 \ umass.4 \ umcs.4 \ umct.4 \ umodem.4 \ umoscom.4 \ ums.4 \ unix.4 \ upgt.4 \ uplcom.4 \ ural.4 \ urio.4 \ urndis.4 \ ${_urtw.4} \ usb.4 \ usb_quirk.4 \ usb_template.4 \ usfs.4 \ uslcom.4 \ uvisor.4 \ uvscom.4 \ zyd.4 MLINKS+=otus.4 if_otus.4 MLINKS+=rsu.4 if_rsu.4 MLINKS+=rtwn_usb.4 if_rtwn_usb.4 MLINKS+=rum.4 if_rum.4 MLINKS+=run.4 if_run.4 MLINKS+=u3g.4 u3gstub.4 MLINKS+=uath.4 if_uath.4 MLINKS+=udav.4 if_udav.4 MLINKS+=upgt.4 if_upgt.4 MLINKS+=ural.4 if_ural.4 MLINKS+=urndis.4 if_urndis.4 MLINKS+=${_urtw.4} ${_if_urtw.4} MLINKS+=zyd.4 if_zyd.4 .endif .include Index: head/share/man/man4/qat.4 =================================================================== --- head/share/man/man4/qat.4 (nonexistent) +++ head/share/man/man4/qat.4 (revision 367386) @@ -0,0 +1,99 @@ +.\"- +.\" Copyright (c) 2020 Rubicon Communications, LLC (Netgate) +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. 
+.\" +.\" $FreeBSD$ +.\" +.Dd November 5, 2020 +.Dt QAT 4 +.Os +.Sh NAME +.Nm qat +.Nd Intel QuickAssist Technology (QAT) driver +.Sh SYNOPSIS +To compile this driver into the kernel, +place the following lines in your +kernel configuration file: +.Bd -ragged -offset indent +.Cd "device crypto" +.Cd "device cryptodev" +.Cd "device qat" +.Ed +.Pp +Alternatively, to load the driver as a +module at boot time, place the following lines in +.Xr loader.conf 5 : +.Bd -literal -offset indent +qat_load="YES" +qat_c2xxxfw_load="YES" +qat_c3xxxfw_load="YES" +qat_c63xfw_load="YES" +qat_d15xxfw_load="YES" +qat_dh895xcc_load="YES" +.Ed +.Sh DESCRIPTION +The +.Nm +driver implements +.Xr crypto 4 +support for some of the cryptographic acceleration functions of the Intel +QuickAssist device. +The +.Nm +driver supports the QAT devices integrated with Atom C2000 and C3000 and Xeon +C620 and D-1500 chipsets, and the Intel QAT Adapter 8950. +It can accelerate AES in CBC, CTR, XTS (except for the C2000) and GCM modes, +and can perform authenticated encryption combining the CBC, CTR and XTS modes +with SHA1-HMAC and SHA2-HMAC. +The +.Nm +driver can also compute SHA1 and SHA2 digests. +.Sh SEE ALSO +.Xr crypto 4 , +.Xr ipsec 4 , +.Xr pci 4 , +.Xr random 4 , +.Xr crypto 7 , +.Xr crypto 9 +.Sh HISTORY +The +.Nm +driver first appeared in +.Fx 13.0 . +.Sh AUTHORS +The +.Nm +driver was written for +.Nx +by +.An Hikaru Abe Aq Mt hikaru@iij.ad.jp +and ported to +.Fx +by +.An Mark Johnston Aq Mt markj@FreeBSD.org . +.Sh BUGS +Some Atom C2000 QAT devices have two acceleration engines instead of one. +The +.Nm +driver currently misbehaves when both are enabled and thus does not enable +the second acceleration engine if one is present. Property changes on: head/share/man/man4/qat.4 ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/amd64/conf/NOTES =================================================================== --- head/sys/amd64/conf/NOTES (revision 367385) +++ head/sys/amd64/conf/NOTES (revision 367386) @@ -1,684 +1,688 @@ # # NOTES -- Lines that can be cut/pasted into kernel and hints configs. # # This file contains machine dependent kernel configuration notes. For # machine independent notes, look in /sys/conf/NOTES. # # $FreeBSD$ # # # We want LINT to cover profiling as well. profile 2 # # Enable the kernel DTrace hooks which are required to load the DTrace # kernel modules. # options KDTRACE_HOOKS # DTrace core # NOTE: introduces CDDL-licensed components into the kernel #device dtrace # DTrace modules #device dtrace_profile #device dtrace_sdt #device dtrace_fbt #device dtrace_systrace #device dtrace_prototype #device dtnfscl #device dtmalloc # Alternatively include all the DTrace modules #device dtraceall ##################################################################### # SMP OPTIONS: # # Notes: # # IPI_PREEMPTION instructs the kernel to preempt threads running on other # CPUS if needed. Relies on the PREEMPTION option # Optional: options IPI_PREEMPTION device atpic # Optional legacy pic support device mptable # Optional MPSPEC mptable support # # Watchdog routines. # options MP_WATCHDOG # Debugging options. 
# options COUNT_XINVLTLB_HITS # Counters for TLB events options COUNT_IPIS # Per-CPU IPI interrupt counters ##################################################################### # CPU OPTIONS # # You must specify at least one CPU (the one you intend to run on); # deleting the specification for CPUs you don't need to use may make # parts of the system run faster. # cpu HAMMER # aka K8, aka Opteron & Athlon64 # # Options for CPU features. # ##################################################################### # NETWORKING OPTIONS # # DEVICE_POLLING adds support for mixed interrupt-polling handling # of network device drivers, which has significant benefits in terms # of robustness to overloads and responsivity, as well as permitting # accurate scheduling of the CPU time between kernel network processing # and other activities. The drawback is a moderate (up to 1/HZ seconds) # potential increase in response times. # It is strongly recommended to use HZ=1000 or 2000 with DEVICE_POLLING # to achieve smoother behaviour. # Additionally, you can enable/disable polling at runtime with help of # the ifconfig(8) utility, and select the CPU fraction reserved to # userland with the sysctl variable kern.polling.user_frac # (default 50, range 0..100). # # Not all device drivers support this mode of operation at the time of # this writing. See polling(4) for more details. options DEVICE_POLLING # BPF_JITTER adds support for BPF just-in-time compiler. options BPF_JITTER # OpenFabrics Enterprise Distribution (Infiniband). options OFED options OFED_DEBUG_INIT # Sockets Direct Protocol options SDP options SDP_DEBUG # IP over Infiniband options IPOIB options IPOIB_DEBUG options IPOIB_CM ##################################################################### # CLOCK OPTIONS # Provide read/write access to the memory in the clock chip. device nvram # Access to rtc cmos via /dev/nvram ##################################################################### # MISCELLANEOUS DEVICES AND OPTIONS device speaker #Play IBM BASIC-style noises out your speaker envvar hint.speaker.0.at="isa" envvar hint.speaker.0.port="0x61" ##################################################################### # HARDWARE BUS CONFIGURATION # # ISA bus # device isa # # Options for `isa': # # AUTO_EOI_1 enables the `automatic EOI' feature for the master 8259A # interrupt controller. This saves about 0.7-1.25 usec for each interrupt. # This option breaks suspend/resume on some portables. # # AUTO_EOI_2 enables the `automatic EOI' feature for the slave 8259A # interrupt controller. This saves about 0.7-1.25 usec for each interrupt. # Automatic EOI is documented not to work for the slave with the # original i8259A, but it works for some clones and some integrated # versions. # # MAXMEM specifies the amount of RAM on the machine; if this is not # specified, FreeBSD will first read the amount of memory from the CMOS # RAM, so the amount of memory will initially be limited to 64MB or 16MB # depending on the BIOS. If the BIOS reports 64MB, a memory probe will # then attempt to detect the installed amount of RAM. If this probe # fails to detect >64MB RAM you will have to use the MAXMEM option. # The amount is in kilobytes, so for a machine with 128MB of RAM, it would # be 131072 (128 * 1024). # # BROKEN_KEYBOARD_RESET disables the use of the keyboard controller to # reset the CPU for reboot. This is needed on some systems with broken # keyboard controllers. 
options AUTO_EOI_1 #options AUTO_EOI_2 options MAXMEM=(128*1024) #options BROKEN_KEYBOARD_RESET # # AGP GART support device agp # # AGP debugging. # options AGP_DEBUG ##################################################################### # HARDWARE DEVICE CONFIGURATION # To include support for VGA VESA video modes options VESA # Turn on extra debugging checks and output for VESA support. options VESA_DEBUG device dpms # DPMS suspend & resume via VESA BIOS # x86 real mode BIOS emulator, required by atkbdc/dpms/vesa options X86BIOS # # Optional devices: # # PS/2 mouse device psm envvar hint.psm.0.at="atkbdc" envvar hint.psm.0.irq="12" # Options for psm: options PSM_HOOKRESUME #hook the system resume event, useful #for some laptops options PSM_RESETAFTERSUSPEND #reset the device at the resume event # The keyboard controller; it controls the keyboard and the PS/2 mouse. device atkbdc envvar hint.atkbdc.0.at="isa" envvar hint.atkbdc.0.port="0x060" # The AT keyboard device atkbd envvar hint.atkbd.0.at="atkbdc" envvar hint.atkbd.0.irq="1" # Options for atkbd: options ATKBD_DFLT_KEYMAP # specify the built-in keymap makeoptions ATKBD_DFLT_KEYMAP=fr.dvorak # `flags' for atkbd: # 0x01 Force detection of keyboard, else we always assume a keyboard # 0x02 Don't reset keyboard, useful for some newer ThinkPads # 0x03 Force detection and avoid reset, might help with certain # dockingstations # 0x04 Old-style (XT) keyboard support, useful for older ThinkPads # Video card driver for VGA adapters. device vga envvar hint.vga.0.at="isa" # Options for vga: # Try the following option if the mouse pointer is not drawn correctly # or font does not seem to be loaded properly. May cause flicker on # some systems. options VGA_ALT_SEQACCESS # If you can dispense with some vga driver features, you may want to # use the following options to save some memory. #options VGA_NO_FONT_LOADING # don't save/load font #options VGA_NO_MODE_CHANGE # don't change video modes # Older video cards may require this option for proper operation. options VGA_SLOW_IOACCESS # do byte-wide i/o's to TS and GDC regs # The following option probably won't work with the LCD displays. options VGA_WIDTH90 # support 90 column modes # Debugging. options VGA_DEBUG # vt(4) drivers. device vt_vga # VGA device vt_efifb # EFI framebuffer # Linear framebuffer driver for S3 VESA 1.2 cards. Works on top of VESA. device s3pci # 3Dfx Voodoo Graphics, Voodoo II /dev/3dfx CDEV support. This will create # the /dev/3dfx0 device to work with glide implementations. This should get # linked to /dev/3dfx and /dev/voodoo. Note that this is not the same as # the tdfx DRI module from XFree86 and is completely unrelated. # # To enable Linuxulator support, one must also include COMPAT_LINUX in the # config as well. The other option is to load both as modules. device tdfx # Enable 3Dfx Voodoo support #XXX#device tdfx_linux # Enable Linuxulator support # # ACPI support using the Intel ACPI Component Architecture reference # implementation. # # ACPI_DEBUG enables the use of the debug.acpi.level and debug.acpi.layer # kernel environment variables to select initial debugging levels for the # Intel ACPICA code. (Note that the Intel code must also have USE_DEBUGGER # defined when it is built). device acpi options ACPI_DEBUG # The cpufreq(4) driver provides support for non-ACPI CPU frequency control device cpufreq # # Network interfaces: # # bxe: Broadcom NetXtreme II (BCM5771X/BCM578XX) PCIe 10Gb Ethernet # adapters. 
# ice: Intel 800 Series Physical Function # Requires the ice_ddp module for full functionality # ipw: Intel PRO/Wireless 2100 IEEE 802.11 adapter # Requires the ipw firmware module # iwi: Intel PRO/Wireless 2200BG/2225BG/2915ABG IEEE 802.11 adapters # Requires the iwi firmware module # iwn: Intel Wireless WiFi Link 1000/105/135/2000/4965/5000/6000/6050 abgn # 802.11 network adapters # Requires the iwn firmware module # mthca: Mellanox HCA InfiniBand # mlx4ib: Mellanox ConnectX HCA InfiniBand # mlx4en: Mellanox ConnectX HCA Ethernet # nfe: nVidia nForce MCP on-board Ethernet Networking (BSD open source) # sfxge: Solarflare SFC9000 family 10Gb Ethernet adapters # vmx: VMware VMXNET3 Ethernet (BSD open source) # wpi: Intel 3945ABG Wireless LAN controller # Requires the wpi firmware module device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE options ED_3C503 options ED_HPP options ED_SIC device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. device iwn # Intel 4965/1000/5000/6000 wireless NICs. device ixl # Intel 700 Series Physical Function device iavf # Intel Adaptive Virtual Function device ice # Intel 800 Series Physical Function device ice_ddp # Intel 800 Series DDP Package device mthca # Mellanox HCA InfiniBand device mlx4 # Shared code module between IB and Ethernet device mlx4ib # Mellanox ConnectX HCA InfiniBand device mlx4en # Mellanox ConnectX HCA Ethernet device nfe # nVidia nForce MCP on-board Ethernet device sfxge # Solarflare SFC9000 10Gb Ethernet device vmx # VMware VMXNET3 Ethernet device wpi # Intel 3945ABG wireless NICs. device axp # AMD EPYC integrated NIC # IEEE 802.11 adapter firmware modules # Intel PRO/Wireless 2100 firmware: # ipwfw: BSS/IBSS/monitor mode firmware # ipwbssfw: BSS mode firmware # ipwibssfw: IBSS mode firmware # ipwmonitorfw: Monitor mode firmware # Intel PRO/Wireless 2200BG/2225BG/2915ABG firmware: # iwifw: BSS/IBSS/monitor mode firmware # iwibssfw: BSS mode firmware # iwiibssfw: IBSS mode firmware # iwimonitorfw: Monitor mode firmware # Intel Wireless WiFi Link 4965/1000/5000/6000 series firmware: # iwnfw: Single module to support all devices # iwn1000fw: Specific module for the 1000 only # iwn105fw: Specific module for the 105 only # iwn135fw: Specific module for the 135 only # iwn2000fw: Specific module for the 2000 only # iwn2030fw: Specific module for the 2030 only # iwn4965fw: Specific module for the 4965 only # iwn5000fw: Specific module for the 5000 only # iwn5150fw: Specific module for the 5150 only # iwn6000fw: Specific module for the 6000 only # iwn6000g2afw: Specific module for the 6000g2a only # iwn6000g2bfw: Specific module for the 6000g2b only # iwn6050fw: Specific module for the 6050 only # wpifw: Intel 3945ABG Wireless LAN Controller firmware device iwifw device iwibssfw device iwiibssfw device iwimonitorfw device ipwfw device ipwbssfw device ipwibssfw device ipwmonitorfw device iwnfw device iwn1000fw device iwn105fw device iwn135fw device iwn2000fw device iwn2030fw device iwn4965fw device iwn5000fw device iwn5150fw device iwn6000fw device iwn6000g2afw device iwn6000g2bfw device iwn6050fw device wpifw # # Non-Transparent Bridge (NTB) drivers # device if_ntb # Virtual NTB network interface device ntb_transport # NTB packet transport driver device ntb # NTB hardware interface device ntb_hw_amd # AMD NTB hardware driver device ntb_hw_intel # Intel NTB hardware driver device ntb_hw_plx # PLX NTB hardware driver # #XXX this stores pointers in a 32bit field that is defined by the hardware #device 
pst # # Areca 11xx and 12xx series of SATA II RAID controllers. # CAM is required. # device arcmsr # Areca SATA II RAID # # Microsemi smartpqi controllers. # These controllers have a SCSI-like interface, and require the # CAM infrastructure. # device smartpqi # # 3ware 9000 series PATA/SATA RAID controller driver and options. # The driver is implemented as a SIM, and so, needs the CAM infrastructure. # options TWA_DEBUG # 0-10; 10 prints the most messages. device twa # 3ware 9000 series PATA/SATA RAID # # Adaptec FSA RAID controllers, including integrated DELL controllers, # the Dell PERC 2/QC and the HP NetRAID-4M device aac device aacp # SCSI Passthrough interface (optional, CAM required) # # Highpoint RocketRAID 27xx. device hpt27xx # # Highpoint RocketRAID 182x. device hptmv # # Highpoint DC7280 and R750. device hptnr # # Highpoint RocketRAID. Supports RR172x, RR222x, RR2240, RR232x, RR2340, # RR2210, RR174x, RR2522, RR231x, RR230x. device hptrr # # Highpoint RocketRaid 3xxx series SATA RAID device hptiop # # IBM (now Adaptec) ServeRAID controllers device ips # # Intel integrated Memory Controller (iMC) SMBus controller # Sandybridge-Xeon, Ivybridge-Xeon, Haswell-Xeon, Broadwell-Xeon device imcsmb # # Intel C600 (Patsburg) integrated SAS controller device isci options ISCI_LOGGING # enable debugging in isci HAL # # NVM Express (NVMe) support device nvme # base NVMe driver device nvd # expose NVMe namespaces as disks, depends on nvme # # Intel Volume Management Device (VMD) support device vmd # base VMD device device vmd_bus # bus for VMD children # # PMC-Sierra SAS/SATA controller device pmspcv # +# Intel QuickAssist +device qat + +# # SafeNet crypto driver: can be moved to the MI NOTES as soon as # it's tested on a big-endian machine # device safe # SafeNet 1141 options SAFE_DEBUG # enable debugging support: hw.safe.debug options SAFE_RNDTEST # enable rndtest support # # VirtIO support # # The virtio entry provides a generic bus for use by the device drivers. # It must be combined with an interface that communicates with the host. # Multiple such interfaces are defined by the VirtIO specification. FreeBSD # only has support for PCI. Therefore, virtio_pci must be statically # compiled in or loaded as a module for the device drivers to function. # device virtio # Generic VirtIO bus (required) device virtio_pci # VirtIO PCI Interface device vtnet # VirtIO Ethernet device device virtio_blk # VirtIO Block device device virtio_scsi # VirtIO SCSI device device virtio_balloon # VirtIO Memory Balloon device device virtio_random # VirtIO Entropy device device virtio_console # VirtIO Console device # Microsoft Hyper-V enhancement support device hyperv # HyperV drivers # Xen HVM Guest Optimizations options XENHVM # Xen HVM kernel infrastructure device xenpci # Xen HVM Hypervisor services driver ##################################################################### # # Miscellaneous hardware: # # ipmi: Intelligent Platform Management Interface # pbio: Parallel (8255 PPI) basic I/O (mode 0) port (e.g. Advantech PCL-724) # smbios: DMI/SMBIOS entry point # vpd: Vital Product Data kernel interface # asmc: Apple System Management Controller # si: Specialix International SI/XIO or SX intelligent serial card # tpm: Trusted Platform Module # Notes on the Specialix SI/XIO driver: # The host card is memory, not IO mapped. # The Rev 1 host cards use a 64K chunk, on a 32K boundary. # The Rev 2 host cards use a 32K chunk, on a 32K boundary. # The cards can use an IRQ of 11, 12 or 15. 
device ipmi device pbio envvar hint.pbio.0.at="isa" envvar hint.pbio.0.port="0x360" device smbios device vpd device asmc device tpm device padlock_rng # VIA Padlock RNG device rdrand_rng # Intel Bull Mountain RNG device aesni # AES-NI OpenCrypto module device ossl # OpenSSL OpenCrypto module device ioat # Intel I/OAT DMA engine # # Laptop/Notebook options: # device backlight # # I2C Bus # # # Hardware watchdog timers: # # ichwd: Intel ICH watchdog timer # amdsbwd: AMD SB7xx watchdog timer # viawd: VIA south bridge watchdog timer # wbwd: Winbond watchdog timer # itwd: ITE Super I/O watchdog timer # device ichwd device amdsbwd device viawd device wbwd device itwd # # Temperature sensors: # # coretemp: on-die sensor on Intel Core and newer CPUs # amdtemp: on-die sensor on AMD K8/K10/K11 CPUs # device coretemp device amdtemp # # CPU control pseudo-device. Provides access to MSRs, CPUID info and # microcode update feature. # device cpuctl # # SuperIO driver. # device superio # # System Management Bus (SMB) # options ENABLE_ALART # Control alarm on Intel intpm driver # # AMD System Management Network (SMN) # device amdsmn # # Number of initial kernel page table pages used for early bootstrap. # This number should include enough pages to map the kernel and any # modules or other data loaded with the kernel by the loader. Each # page table page maps 2MB. # options NKPT=31 # EFI Runtime Services support options EFIRT ##################################################################### # ABI Emulation #XXX keep these here for now and reactivate when support for emulating #XXX these 32 bit binaries is added. # Enable 32-bit runtime support for FreeBSD/i386 binaries. options COMPAT_FREEBSD32 # Enable (32-bit) a.out binary support options COMPAT_AOUT # Enable 32-bit runtime support for CloudABI binaries. options COMPAT_CLOUDABI32 # Enable 64-bit runtime support for CloudABI binaries. options COMPAT_CLOUDABI64 # Enable Linux ABI emulation #XXX#options COMPAT_LINUX # Enable 32-bit Linux ABI emulation (requires COMPAT_FREEBSD32). options COMPAT_LINUX32 # Enable the linux-like proc filesystem support (requires COMPAT_LINUX32 # and PSEUDOFS) options LINPROCFS #Enable the linux-like sys filesystem support (requires COMPAT_LINUX32 # and PSEUDOFS) options LINSYSFS ##################################################################### # ZFS support # NB: This depends on crypto, cryptodev and ZSTDIO options ZFS ##################################################################### # VM OPTIONS # KSTACK_PAGES is the number of memory pages to assign to the kernel # stack of each thread. options KSTACK_PAGES=5 # Enable detailed accounting by the PV entry allocator. options PV_STATS ##################################################################### # More undocumented options for linting. # Note that documenting these is not considered an affront. 
options FB_INSTALL_CDEV # install a CDEV entry in /dev options KBDIO_DEBUG=2 options KBD_MAXRETRY=4 options KBD_MAXWAIT=6 options KBD_RESETDELAY=201 options PSM_DEBUG=1 options TIMER_FREQ=((14318182+6)/12) options VM_KMEM_SIZE options VM_KMEM_SIZE_MAX options VM_KMEM_SIZE_SCALE # Enable NDIS binary driver support options NDISAPI device ndis # GCOV (code coverage) support options LINDEBUGFS options GCOV Index: head/sys/conf/files.x86 =================================================================== --- head/sys/conf/files.x86 (revision 367385) +++ head/sys/conf/files.x86 (revision 367386) @@ -1,350 +1,359 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # This file contains all the x86 devices and such that are # common between i386 and amd64, but aren't applicable to # any other architecture we support. # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. # atkbdmap.h optional atkbd_dflt_keymap \ compile-with "${KEYMAP} -L ${ATKBD_DFLT_KEYMAP} | ${KEYMAP_FIX} > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "atkbdmap.h" cddl/dev/fbt/x86/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/dtrace/x86/dis_tables.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" cddl/dev/dtrace/x86/instr_size.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" compat/ndis/kern_ndis.c optional ndisapi pci compat/ndis/kern_windrv.c optional ndisapi pci compat/ndis/subr_hal.c optional ndisapi pci compat/ndis/subr_ndis.c optional ndisapi pci compat/ndis/subr_ntoskrnl.c optional ndisapi pci compat/ndis/subr_pe.c optional ndisapi pci compat/ndis/subr_usbd.c optional ndisapi pci crypto/aesni/aesni.c optional aesni aesni_ghash.o optional aesni \ dependency "$S/crypto/aesni/aesni_ghash.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_ghash.o" aesni_ccm.o optional aesni \ dependency "$S/crypto/aesni/aesni_ccm.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_ccm.o" aesni_wrap.o optional aesni \ dependency "$S/crypto/aesni/aesni_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_wrap.o" intel_sha1.o optional aesni \ dependency "$S/crypto/aesni/intel_sha1.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -msha ${.IMPSRC}" \ no-implicit-rule \ clean "intel_sha1.o" intel_sha256.o optional aesni \ dependency "$S/crypto/aesni/intel_sha256.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -msha ${.IMPSRC}" \ no-implicit-rule \ clean "intel_sha256.o" crypto/openssl/ossl.c optional ossl crypto/openssl/ossl_sha1.c optional ossl crypto/openssl/ossl_sha256.c optional ossl crypto/openssl/ossl_sha512.c optional ossl crypto/via/padlock.c optional padlock crypto/via/padlock_cipher.c optional padlock crypto/via/padlock_hash.c optional padlock dev/acpica/acpi_hpet.c optional acpi dev/acpica/acpi_if.m standard dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi 
pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pcib_acpi.c optional acpi pci dev/acpica/acpi_pcib_pci.c optional acpi pci dev/acpica/acpi_pxm.c optional acpi dev/acpica/acpi_timer.c optional acpi dev/amdsbwd/amdsbwd.c optional amdsbwd dev/amdsmn/amdsmn.c optional amdsmn | amdtemp dev/amdtemp/amdtemp.c optional amdtemp dev/arcmsr/arcmsr.c optional arcmsr pci dev/asmc/asmc.c optional asmc isa dev/atkbdc/atkbd.c optional atkbd atkbdc dev/atkbdc/atkbd_atkbdc.c optional atkbd atkbdc dev/atkbdc/atkbdc.c optional atkbdc dev/atkbdc/atkbdc_isa.c optional atkbdc isa dev/atkbdc/atkbdc_subr.c optional atkbdc dev/atkbdc/psm.c optional psm atkbdc dev/bxe/bxe.c optional bxe pci dev/bxe/bxe_stats.c optional bxe pci dev/bxe/bxe_debug.c optional bxe pci dev/bxe/ecore_sp.c optional bxe pci dev/bxe/bxe_elink.c optional bxe pci dev/bxe/57710_init_values.c optional bxe pci dev/bxe/57711_init_values.c optional bxe pci dev/bxe/57712_init_values.c optional bxe pci dev/coretemp/coretemp.c optional coretemp dev/cpuctl/cpuctl.c optional cpuctl dev/dpms/dpms.c optional dpms dev/fb/fb.c optional fb | vga dev/fb/s3_pci.c optional s3pci dev/fb/vesa.c optional vga vesa dev/fb/vga.c optional vga dev/fdc/fdc.c optional fdc dev/fdc/fdc_acpi.c optional fdc dev/fdc/fdc_isa.c optional fdc isa dev/fdc/fdc_pccard.c optional fdc pccard dev/gpio/bytgpio.c optional bytgpio dev/gpio/chvgpio.c optional chvgpio dev/hpt27xx/hpt27xx_os_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_osm_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_config.c optional hpt27xx hpt27xx_lib.o optional hpt27xx \ dependency "$S/dev/hpt27xx/$M-elf.hpt27xx_lib.o.uu" \ compile-with "uudecode < $S/dev/hpt27xx/$M-elf.hpt27xx_lib.o.uu" \ no-implicit-rule dev/hptmv/entry.c optional hptmv dev/hptmv/mv.c optional hptmv dev/hptmv/gui_lib.c optional hptmv dev/hptmv/hptproc.c optional hptmv dev/hptmv/ioctl.c optional hptmv hptmvraid.o optional hptmv \ dependency "$S/dev/hptmv/$M-elf.raid.o.uu" \ compile-with "uudecode < $S/dev/hptmv/$M-elf.raid.o.uu" \ no-implicit-rule dev/hptnr/hptnr_os_bsd.c optional hptnr dev/hptnr/hptnr_osm_bsd.c optional hptnr dev/hptnr/hptnr_config.c optional hptnr hptnr_lib.o optional hptnr \ dependency "$S/dev/hptnr/$M-elf.hptnr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptnr/$M-elf.hptnr_lib.o.uu" \ no-implicit-rule dev/hptrr/hptrr_os_bsd.c optional hptrr dev/hptrr/hptrr_osm_bsd.c optional hptrr dev/hptrr/hptrr_config.c optional hptrr hptrr_lib.o optional hptrr \ dependency "$S/dev/hptrr/$M-elf.hptrr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptrr/$M-elf.hptrr_lib.o.uu" \ no-implicit-rule dev/hwpmc/hwpmc_amd.c optional hwpmc dev/hwpmc/hwpmc_intel.c optional hwpmc dev/hwpmc/hwpmc_core.c optional hwpmc dev/hwpmc/hwpmc_uncore.c optional hwpmc dev/hwpmc/hwpmc_tsc.c optional hwpmc dev/hwpmc/hwpmc_x86.c optional hwpmc dev/hyperv/hvsock/hv_sock.c optional hyperv dev/hyperv/input/hv_kbd.c optional hyperv dev/hyperv/input/hv_kbdc.c optional hyperv dev/hyperv/pcib/vmbus_pcib.c optional hyperv pci dev/hyperv/netvsc/hn_nvs.c optional hyperv dev/hyperv/netvsc/hn_rndis.c optional hyperv dev/hyperv/netvsc/if_hn.c optional hyperv dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c optional hyperv dev/hyperv/utilities/hv_kvp.c optional hyperv dev/hyperv/utilities/hv_snapshot.c optional hyperv dev/hyperv/utilities/vmbus_heartbeat.c optional hyperv dev/hyperv/utilities/vmbus_ic.c optional hyperv dev/hyperv/utilities/vmbus_shutdown.c optional hyperv dev/hyperv/utilities/vmbus_timesync.c optional hyperv dev/hyperv/vmbus/hyperv.c optional hyperv 
dev/hyperv/vmbus/hyperv_busdma.c optional hyperv dev/hyperv/vmbus/vmbus.c optional hyperv pci dev/hyperv/vmbus/vmbus_br.c optional hyperv dev/hyperv/vmbus/vmbus_chan.c optional hyperv dev/hyperv/vmbus/vmbus_et.c optional hyperv dev/hyperv/vmbus/vmbus_if.m optional hyperv dev/hyperv/vmbus/vmbus_res.c optional hyperv dev/hyperv/vmbus/vmbus_xact.c optional hyperv dev/ichwd/ichwd.c optional ichwd dev/if_ndis/if_ndis.c optional ndis dev/if_ndis/if_ndis_pccard.c optional ndis pccard dev/if_ndis/if_ndis_pci.c optional ndis cardbus | ndis pci dev/if_ndis/if_ndis_usb.c optional ndis usb dev/imcsmb/imcsmb.c optional imcsmb dev/imcsmb/imcsmb_pci.c optional imcsmb pci dev/intel/spi.c optional intelspi dev/io/iodev.c optional io dev/iommu/busdma_iommu.c optional acpi iommu pci dev/iommu/iommu_gas.c optional acpi iommu pci dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_isa.c optional ipmi isa dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_smic.c optional ipmi dev/ipmi/ipmi_smbus.c optional ipmi smbus dev/ipmi/ipmi_smbios.c optional ipmi dev/ipmi/ipmi_ssif.c optional ipmi smbus dev/ipmi/ipmi_pci.c optional ipmi pci dev/ipmi/ipmi_linux.c optional ipmi compat_linux32 dev/isci/isci.c optional isci dev/isci/isci_controller.c optional isci dev/isci/isci_domain.c optional isci dev/isci/isci_interrupt.c optional isci dev/isci/isci_io_request.c optional isci dev/isci/isci_logger.c optional isci dev/isci/isci_oem_parameters.c optional isci dev/isci/isci_remote_device.c optional isci dev/isci/isci_sysctl.c optional isci dev/isci/isci_task_request.c optional isci dev/isci/isci_timer.c optional isci dev/isci/scil/sati.c optional isci dev/isci/scil/sati_abort_task_set.c optional isci dev/isci/scil/sati_atapi.c optional isci dev/isci/scil/sati_device.c optional isci dev/isci/scil/sati_inquiry.c optional isci dev/isci/scil/sati_log_sense.c optional isci dev/isci/scil/sati_lun_reset.c optional isci dev/isci/scil/sati_mode_pages.c optional isci dev/isci/scil/sati_mode_select.c optional isci dev/isci/scil/sati_mode_sense.c optional isci dev/isci/scil/sati_mode_sense_10.c optional isci dev/isci/scil/sati_mode_sense_6.c optional isci dev/isci/scil/sati_move.c optional isci dev/isci/scil/sati_passthrough.c optional isci dev/isci/scil/sati_read.c optional isci dev/isci/scil/sati_read_buffer.c optional isci dev/isci/scil/sati_read_capacity.c optional isci dev/isci/scil/sati_reassign_blocks.c optional isci dev/isci/scil/sati_report_luns.c optional isci dev/isci/scil/sati_request_sense.c optional isci dev/isci/scil/sati_start_stop_unit.c optional isci dev/isci/scil/sati_synchronize_cache.c optional isci dev/isci/scil/sati_test_unit_ready.c optional isci dev/isci/scil/sati_unmap.c optional isci dev/isci/scil/sati_util.c optional isci dev/isci/scil/sati_verify.c optional isci dev/isci/scil/sati_write.c optional isci dev/isci/scil/sati_write_and_verify.c optional isci dev/isci/scil/sati_write_buffer.c optional isci dev/isci/scil/sati_write_long.c optional isci dev/isci/scil/sci_abstract_list.c optional isci dev/isci/scil/sci_base_controller.c optional isci dev/isci/scil/sci_base_domain.c optional isci dev/isci/scil/sci_base_iterator.c optional isci dev/isci/scil/sci_base_library.c optional isci dev/isci/scil/sci_base_logger.c optional isci dev/isci/scil/sci_base_memory_descriptor_list.c optional isci dev/isci/scil/sci_base_memory_descriptor_list_decorator.c optional isci dev/isci/scil/sci_base_object.c optional isci dev/isci/scil/sci_base_observer.c optional isci dev/isci/scil/sci_base_phy.c 
optional isci dev/isci/scil/sci_base_port.c optional isci dev/isci/scil/sci_base_remote_device.c optional isci dev/isci/scil/sci_base_request.c optional isci dev/isci/scil/sci_base_state_machine.c optional isci dev/isci/scil/sci_base_state_machine_logger.c optional isci dev/isci/scil/sci_base_state_machine_observer.c optional isci dev/isci/scil/sci_base_subject.c optional isci dev/isci/scil/sci_util.c optional isci dev/isci/scil/scic_sds_controller.c optional isci dev/isci/scil/scic_sds_library.c optional isci dev/isci/scil/scic_sds_pci.c optional isci dev/isci/scil/scic_sds_phy.c optional isci dev/isci/scil/scic_sds_port.c optional isci dev/isci/scil/scic_sds_port_configuration_agent.c optional isci dev/isci/scil/scic_sds_remote_device.c optional isci dev/isci/scil/scic_sds_remote_node_context.c optional isci dev/isci/scil/scic_sds_remote_node_table.c optional isci dev/isci/scil/scic_sds_request.c optional isci dev/isci/scil/scic_sds_sgpio.c optional isci dev/isci/scil/scic_sds_smp_remote_device.c optional isci dev/isci/scil/scic_sds_smp_request.c optional isci dev/isci/scil/scic_sds_ssp_request.c optional isci dev/isci/scil/scic_sds_stp_packet_request.c optional isci dev/isci/scil/scic_sds_stp_remote_device.c optional isci dev/isci/scil/scic_sds_stp_request.c optional isci dev/isci/scil/scic_sds_unsolicited_frame_control.c optional isci dev/isci/scil/scif_sas_controller.c optional isci dev/isci/scil/scif_sas_controller_state_handlers.c optional isci dev/isci/scil/scif_sas_controller_states.c optional isci dev/isci/scil/scif_sas_domain.c optional isci dev/isci/scil/scif_sas_domain_state_handlers.c optional isci dev/isci/scil/scif_sas_domain_states.c optional isci dev/isci/scil/scif_sas_high_priority_request_queue.c optional isci dev/isci/scil/scif_sas_internal_io_request.c optional isci dev/isci/scil/scif_sas_io_request.c optional isci dev/isci/scil/scif_sas_io_request_state_handlers.c optional isci dev/isci/scil/scif_sas_io_request_states.c optional isci dev/isci/scil/scif_sas_library.c optional isci dev/isci/scil/scif_sas_remote_device.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substates.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substates.c optional isci dev/isci/scil/scif_sas_remote_device_state_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_states.c optional isci dev/isci/scil/scif_sas_request.c optional isci dev/isci/scil/scif_sas_smp_activity_clear_affiliation.c optional isci dev/isci/scil/scif_sas_smp_io_request.c optional isci dev/isci/scil/scif_sas_smp_phy.c optional isci dev/isci/scil/scif_sas_smp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_io_request.c optional isci dev/isci/scil/scif_sas_stp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_task_request.c optional isci dev/isci/scil/scif_sas_task_request.c optional isci dev/isci/scil/scif_sas_task_request_state_handlers.c optional isci dev/isci/scil/scif_sas_task_request_states.c optional isci dev/isci/scil/scif_sas_timer.c optional isci dev/itwd/itwd.c optional itwd +dev/qat/qat.c optional qat +dev/qat/qat_ae.c optional qat +dev/qat/qat_c2xxx.c optional qat +dev/qat/qat_c3xxx.c optional qat +dev/qat/qat_c62x.c optional qat +dev/qat/qat_d15xx.c optional qat +dev/qat/qat_dh895xcc.c optional qat +dev/qat/qat_hw15.c optional qat +dev/qat/qat_hw17.c optional qat libkern/x86/crc32_sse42.c standard # # x86 
shared code between IA32 and AMD64 architectures # x86/acpica/OsdEnvironment.c optional acpi x86/acpica/acpi_apm.c optional acpi x86/acpica/acpi_wakeup.c optional acpi x86/acpica/srat.c optional acpi x86/bios/smbios.c optional smbios x86/bios/vpd.c optional vpd x86/cpufreq/est.c optional cpufreq x86/cpufreq/hwpstate_amd.c optional cpufreq x86/cpufreq/hwpstate_intel.c optional cpufreq x86/cpufreq/p4tcc.c optional cpufreq x86/cpufreq/powernow.c optional cpufreq x86/iommu/intel_ctx.c optional acpi iommu pci x86/iommu/intel_drv.c optional acpi iommu pci x86/iommu/intel_fault.c optional acpi iommu pci x86/iommu/intel_idpgtbl.c optional acpi iommu pci x86/iommu/intel_intrmap.c optional acpi iommu pci x86/iommu/intel_qi.c optional acpi iommu pci x86/iommu/intel_quirks.c optional acpi iommu pci x86/iommu/intel_utils.c optional acpi iommu pci x86/isa/atrtc.c standard x86/isa/clock.c standard x86/isa/isa.c optional isa x86/isa/isa_dma.c optional isa x86/isa/nmi.c standard x86/isa/orm.c optional isa x86/pci/pci_bus.c optional pci x86/pci/qpi.c optional pci x86/x86/autoconf.c standard x86/x86/bus_machdep.c standard x86/x86/busdma_bounce.c standard x86/x86/busdma_machdep.c standard x86/x86/cpu_machdep.c standard x86/x86/dump_machdep.c standard x86/x86/fdt_machdep.c optional fdt x86/x86/identcpu.c standard x86/x86/intr_machdep.c standard x86/x86/legacy.c standard x86/x86/mca.c standard x86/x86/x86_mem.c optional mem x86/x86/mp_x86.c optional smp x86/x86/mp_watchdog.c optional mp_watchdog smp x86/x86/nexus.c standard x86/x86/pvclock.c standard x86/x86/stack_machdep.c optional ddb | stack x86/x86/tsc.c standard x86/x86/ucode.c standard x86/x86/delay.c standard x86/xen/hvm.c optional xenhvm x86/xen/xen_intr.c optional xenhvm x86/xen/xen_apic.c optional xenhvm x86/xen/xenpv.c optional xenhvm x86/xen/xen_msi.c optional xenhvm x86/xen/xen_nexus.c optional xenhvm Index: head/sys/dev/qat/qat.c =================================================================== --- head/sys/dev/qat/qat.c (nonexistent) +++ head/sys/dev/qat/qat.c (revision 367386) @@ -0,0 +1,2140 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2019 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); +#if 0 +__KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $"); +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "cryptodev_if.h" + +#include +#include + +#include "qatreg.h" +#include "qatvar.h" +#include "qat_aevar.h" + +extern struct qat_hw qat_hw_c2xxx; +extern struct qat_hw qat_hw_c3xxx; +extern struct qat_hw qat_hw_c62x; +extern struct qat_hw qat_hw_d15xx; +extern struct qat_hw qat_hw_dh895xcc; + +#define PCI_VENDOR_INTEL 0x8086 +#define PCI_PRODUCT_INTEL_C2000_IQIA_PHYS 0x1f18 +#define PCI_PRODUCT_INTEL_C3K_QAT 0x19e2 +#define PCI_PRODUCT_INTEL_C3K_QAT_VF 0x19e3 +#define PCI_PRODUCT_INTEL_C620_QAT 0x37c8 +#define PCI_PRODUCT_INTEL_C620_QAT_VF 0x37c9 +#define PCI_PRODUCT_INTEL_XEOND_QAT 0x6f54 +#define PCI_PRODUCT_INTEL_XEOND_QAT_VF 0x6f55 +#define PCI_PRODUCT_INTEL_DH895XCC_QAT 0x0435 +#define PCI_PRODUCT_INTEL_DH895XCC_QAT_VF 0x0443 + +static const struct qat_product { + uint16_t qatp_vendor; + uint16_t qatp_product; + const char *qatp_name; + enum qat_chip_type qatp_chip; + const struct qat_hw *qatp_hw; +} qat_products[] = { + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS, + "Intel C2000 QuickAssist PF", + QAT_CHIP_C2XXX, &qat_hw_c2xxx }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT, + "Intel C3000 QuickAssist PF", + QAT_CHIP_C3XXX, &qat_hw_c3xxx }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT, + "Intel C620/Xeon D-2100 QuickAssist PF", + QAT_CHIP_C62X, &qat_hw_c62x }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT, + "Intel Xeon D-1500 QuickAssist PF", + QAT_CHIP_D15XX, &qat_hw_d15xx }, + { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH895XCC_QAT, + "Intel 8950 QuickAssist PCIe Adapter PF", + QAT_CHIP_DH895XCC, &qat_hw_dh895xcc }, + { 0, 0, NULL, 0, NULL }, +}; + +/* Hash Algorithm specific structure */ + +/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */ +static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = { + 0x67, 0x45, 0x23, 0x01, + 0xef, 0xcd, 0xab, 0x89, + 0x98, 0xba, 0xdc, 0xfe, + 0x10, 0x32, 0x54, 0x76, + 0xc3, 0xd2, 0xe1, 0xf0 +}; + +/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */ +static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = { + 0x6a, 0x09, 0xe6, 0x67, + 0xbb, 0x67, 0xae, 0x85, + 0x3c, 0x6e, 0xf3, 0x72, + 0xa5, 0x4f, 0xf5, 0x3a, + 0x51, 0x0e, 0x52, 0x7f, + 0x9b, 0x05, 0x68, 0x8c, + 0x1f, 0x83, 0xd9, 0xab, + 0x5b, 0xe0, 0xcd, 0x19 +}; + +/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */ +static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = { + 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, + 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07, + 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17, + 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, + 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, + 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, + 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, + 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4 +}; + +/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */ +static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = { + 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, + 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, + 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b, + 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, + 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 
0x82, 0xd1, + 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, + 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, + 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79 +}; + +static const struct qat_sym_hash_alg_info sha1_info = { + .qshai_digest_len = QAT_HASH_SHA1_DIGEST_SIZE, + .qshai_block_len = QAT_HASH_SHA1_BLOCK_SIZE, + .qshai_state_size = QAT_HASH_SHA1_STATE_SIZE, + .qshai_init_state = sha1_initial_state, + .qshai_sah = &auth_hash_hmac_sha1, + .qshai_state_offset = 0, + .qshai_state_word = 4, +}; + +static const struct qat_sym_hash_alg_info sha256_info = { + .qshai_digest_len = QAT_HASH_SHA256_DIGEST_SIZE, + .qshai_block_len = QAT_HASH_SHA256_BLOCK_SIZE, + .qshai_state_size = QAT_HASH_SHA256_STATE_SIZE, + .qshai_init_state = sha256_initial_state, + .qshai_sah = &auth_hash_hmac_sha2_256, + .qshai_state_offset = offsetof(SHA256_CTX, state), + .qshai_state_word = 4, +}; + +static const struct qat_sym_hash_alg_info sha384_info = { + .qshai_digest_len = QAT_HASH_SHA384_DIGEST_SIZE, + .qshai_block_len = QAT_HASH_SHA384_BLOCK_SIZE, + .qshai_state_size = QAT_HASH_SHA384_STATE_SIZE, + .qshai_init_state = sha384_initial_state, + .qshai_sah = &auth_hash_hmac_sha2_384, + .qshai_state_offset = offsetof(SHA384_CTX, state), + .qshai_state_word = 8, +}; + +static const struct qat_sym_hash_alg_info sha512_info = { + .qshai_digest_len = QAT_HASH_SHA512_DIGEST_SIZE, + .qshai_block_len = QAT_HASH_SHA512_BLOCK_SIZE, + .qshai_state_size = QAT_HASH_SHA512_STATE_SIZE, + .qshai_init_state = sha512_initial_state, + .qshai_sah = &auth_hash_hmac_sha2_512, + .qshai_state_offset = offsetof(SHA512_CTX, state), + .qshai_state_word = 8, +}; + +static const struct qat_sym_hash_alg_info aes_gcm_info = { + .qshai_digest_len = QAT_HASH_AES_GCM_DIGEST_SIZE, + .qshai_block_len = QAT_HASH_AES_GCM_BLOCK_SIZE, + .qshai_state_size = QAT_HASH_AES_GCM_STATE_SIZE, + .qshai_sah = &auth_hash_nist_gmac_aes_128, +}; + +/* Hash QAT specific structures */ + +static const struct qat_sym_hash_qat_info sha1_config = { + .qshqi_algo_enc = HW_AUTH_ALGO_SHA1, + .qshqi_auth_counter = QAT_HASH_SHA1_BLOCK_SIZE, + .qshqi_state1_len = HW_SHA1_STATE1_SZ, + .qshqi_state2_len = HW_SHA1_STATE2_SZ, +}; + +static const struct qat_sym_hash_qat_info sha256_config = { + .qshqi_algo_enc = HW_AUTH_ALGO_SHA256, + .qshqi_auth_counter = QAT_HASH_SHA256_BLOCK_SIZE, + .qshqi_state1_len = HW_SHA256_STATE1_SZ, + .qshqi_state2_len = HW_SHA256_STATE2_SZ +}; + +static const struct qat_sym_hash_qat_info sha384_config = { + .qshqi_algo_enc = HW_AUTH_ALGO_SHA384, + .qshqi_auth_counter = QAT_HASH_SHA384_BLOCK_SIZE, + .qshqi_state1_len = HW_SHA384_STATE1_SZ, + .qshqi_state2_len = HW_SHA384_STATE2_SZ +}; + +static const struct qat_sym_hash_qat_info sha512_config = { + .qshqi_algo_enc = HW_AUTH_ALGO_SHA512, + .qshqi_auth_counter = QAT_HASH_SHA512_BLOCK_SIZE, + .qshqi_state1_len = HW_SHA512_STATE1_SZ, + .qshqi_state2_len = HW_SHA512_STATE2_SZ +}; + +static const struct qat_sym_hash_qat_info aes_gcm_config = { + .qshqi_algo_enc = HW_AUTH_ALGO_GALOIS_128, + .qshqi_auth_counter = QAT_HASH_AES_GCM_BLOCK_SIZE, + .qshqi_state1_len = HW_GALOIS_128_STATE1_SZ, + .qshqi_state2_len = + HW_GALOIS_H_SZ + HW_GALOIS_LEN_A_SZ + HW_GALOIS_E_CTR0_SZ, +}; + +static const struct qat_sym_hash_def qat_sym_hash_defs[] = { + [QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config }, + [QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config }, + [QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config }, + [QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config }, + [QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, 
&aes_gcm_config },
+};
+
+static const struct qat_product *qat_lookup(device_t);
+static int qat_probe(device_t);
+static int qat_attach(device_t);
+static int qat_init(device_t);
+static int qat_start(device_t);
+static int qat_detach(device_t);
+
+static int qat_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp);
+static void qat_freesession(device_t dev, crypto_session_t cses);
+
+static int qat_setup_msix_intr(struct qat_softc *);
+
+static void qat_etr_init(struct qat_softc *);
+static void qat_etr_deinit(struct qat_softc *);
+static void qat_etr_bank_init(struct qat_softc *, int);
+static void qat_etr_bank_deinit(struct qat_softc *, int);
+
+static void qat_etr_ap_bank_init(struct qat_softc *);
+static void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
+static void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
+ uint32_t, int);
+static void qat_etr_ap_bank_setup_ring(struct qat_softc *,
+ struct qat_ring *);
+static int qat_etr_verify_ring_size(uint32_t, uint32_t);
+
+static int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
+ struct qat_ring *);
+static void qat_etr_bank_intr(void *);
+
+static void qat_arb_update(struct qat_softc *, struct qat_bank *);
+
+static struct qat_sym_cookie *qat_crypto_alloc_sym_cookie(
+ struct qat_crypto_bank *);
+static void qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
+ struct qat_sym_cookie *);
+static int qat_crypto_setup_ring(struct qat_softc *,
+ struct qat_crypto_bank *);
+static int qat_crypto_bank_init(struct qat_softc *,
+ struct qat_crypto_bank *);
+static int qat_crypto_init(struct qat_softc *);
+static void qat_crypto_deinit(struct qat_softc *);
+static int qat_crypto_start(struct qat_softc *);
+static void qat_crypto_stop(struct qat_softc *);
+static int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);
+
+static MALLOC_DEFINE(M_QAT, "qat", "Intel QAT driver");
+
+static const struct qat_product *
+qat_lookup(device_t dev)
+{
+ const struct qat_product *qatp;
+
+ for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
+ if (pci_get_vendor(dev) == qatp->qatp_vendor &&
+ pci_get_device(dev) == qatp->qatp_product)
+ return qatp;
+ }
+ return NULL;
+}
+
+static int
+qat_probe(device_t dev)
+{
+ const struct qat_product *prod;
+
+ prod = qat_lookup(dev);
+ if (prod != NULL) {
+ device_set_desc(dev, prod->qatp_name);
+ return BUS_PROBE_DEFAULT;
+ }
+ return ENXIO;
+}
+
+static int
+qat_attach(device_t dev)
+{
+ struct qat_softc *sc = device_get_softc(dev);
+ const struct qat_product *qatp;
+ bus_size_t msixtbl_offset;
+ int bar, count, error, i, msixoff, msixtbl_bar;
+
+ sc->sc_dev = dev;
+ sc->sc_rev = pci_get_revid(dev);
+
+ qatp = qat_lookup(dev);
+ memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));
+
+ /* Determine active accelerators and engines */
+ sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
+ sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);
+
+ sc->sc_accel_num = 0;
+ for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
+ if (sc->sc_accel_mask & (1 << i))
+ sc->sc_accel_num++;
+ }
+ sc->sc_ae_num = 0;
+ for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
+ if (sc->sc_ae_mask & (1 << i))
+ sc->sc_ae_num++;
+ }
+
+ if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
+ device_printf(sc->sc_dev, "couldn't find acceleration\n");
+ goto fail;
+ }
+
+ MPASS(sc->sc_accel_num <= MAX_NUM_ACCEL);
+ MPASS(sc->sc_ae_num <= MAX_NUM_AE);
+
+ /* Determine SKU and capabilities */
+ sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
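+
+ /*
+ * The qhw_get_* hooks used here are function pointers copied from the
+ * qat_hw template matched in qat_lookup() (e.g. qat_hw_dh895xcc for
+ * the 8950 adapter), so each supported device generation supplies its
+ * own mask, SKU, and capability probing.
+ */
+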
sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc); + sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc); + + /* Map BARs */ + msixtbl_bar = 0; + msixtbl_offset = 0; + if (pci_find_cap(dev, PCIY_MSIX, &msixoff) == 0) { + uint32_t msixtbl; + msixtbl = pci_read_config(dev, msixoff + PCIR_MSIX_TABLE, 4); + msixtbl_offset = msixtbl & ~PCIM_MSIX_BIR_MASK; + msixtbl_bar = PCIR_BAR(msixtbl & PCIM_MSIX_BIR_MASK); + } + + i = 0; + if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) { + MPASS(sc->sc_hw.qhw_sram_bar_id == 0); + uint32_t fusectl = pci_read_config(dev, FUSECTL_REG, 4); + /* Skip SRAM BAR */ + i = (fusectl & FUSECTL_MASK) ? 1 : 0; + } + for (bar = 0; bar < PCIR_MAX_BAR_0; bar++) { + uint32_t val = pci_read_config(dev, PCIR_BAR(bar), 4); + if (val == 0 || !PCI_BAR_MEM(val)) + continue; + + sc->sc_rid[i] = PCIR_BAR(bar); + sc->sc_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &sc->sc_rid[i], RF_ACTIVE); + if (sc->sc_res[i] == NULL) { + device_printf(dev, "couldn't map BAR %d\n", bar); + goto fail; + } + + sc->sc_csrt[i] = rman_get_bustag(sc->sc_res[i]); + sc->sc_csrh[i] = rman_get_bushandle(sc->sc_res[i]); + + i++; + if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) + bar++; + } + + pci_enable_busmaster(dev); + + count = sc->sc_hw.qhw_num_banks + 1; + if (pci_msix_count(dev) < count) { + device_printf(dev, "insufficient MSI-X vectors (%d vs. %d)\n", + pci_msix_count(dev), count); + goto fail; + } + error = pci_alloc_msix(dev, &count); + if (error != 0) { + device_printf(dev, "failed to allocate MSI-X vectors\n"); + goto fail; + } + + error = qat_init(dev); + if (error == 0) + return 0; + +fail: + qat_detach(dev); + return ENXIO; +} + +static int +qat_init(device_t dev) +{ + struct qat_softc *sc = device_get_softc(dev); + int error; + + qat_etr_init(sc); + + if (sc->sc_hw.qhw_init_admin_comms != NULL && + (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) { + device_printf(sc->sc_dev, + "Could not initialize admin comms: %d\n", error); + return error; + } + + if (sc->sc_hw.qhw_init_arb != NULL && + (error = sc->sc_hw.qhw_init_arb(sc)) != 0) { + device_printf(sc->sc_dev, + "Could not initialize hw arbiter: %d\n", error); + return error; + } + + error = qat_ae_init(sc); + if (error) { + device_printf(sc->sc_dev, + "Could not initialize Acceleration Engine: %d\n", error); + return error; + } + + error = qat_aefw_load(sc); + if (error) { + device_printf(sc->sc_dev, + "Could not load firmware: %d\n", error); + return error; + } + + error = qat_setup_msix_intr(sc); + if (error) { + device_printf(sc->sc_dev, + "Could not setup interrupts: %d\n", error); + return error; + } + + sc->sc_hw.qhw_enable_intr(sc); + + error = qat_crypto_init(sc); + if (error) { + device_printf(sc->sc_dev, + "Could not initialize service: %d\n", error); + return error; + } + + if (sc->sc_hw.qhw_enable_error_correction != NULL) + sc->sc_hw.qhw_enable_error_correction(sc); + + if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL && + (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) { + device_printf(sc->sc_dev, + "Could not initialize watchdog timer: %d\n", error); + return error; + } + + error = qat_start(dev); + if (error) { + device_printf(sc->sc_dev, + "Could not start: %d\n", error); + return error; + } + + return 0; +} + +static int +qat_start(device_t dev) +{ + struct qat_softc *sc = device_get_softc(dev); + int error; + + error = qat_ae_start(sc); + if (error) + return error; + + if (sc->sc_hw.qhw_send_admin_init != NULL && + (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) { + return error; + } + + error = 
qat_crypto_start(sc); + if (error) + return error; + + return 0; +} + +static int +qat_detach(device_t dev) +{ + struct qat_softc *sc; + int bar, i; + + sc = device_get_softc(dev); + + qat_crypto_stop(sc); + qat_crypto_deinit(sc); + qat_aefw_unload(sc); + + if (sc->sc_etr_banks != NULL) { + for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) { + struct qat_bank *qb = &sc->sc_etr_banks[i]; + + if (qb->qb_ih_cookie != NULL) + (void)bus_teardown_intr(dev, qb->qb_ih, + qb->qb_ih_cookie); + if (qb->qb_ih != NULL) + (void)bus_release_resource(dev, SYS_RES_IRQ, + i + 1, qb->qb_ih); + } + } + if (sc->sc_ih_cookie != NULL) { + (void)bus_teardown_intr(dev, sc->sc_ih, sc->sc_ih_cookie); + sc->sc_ih_cookie = NULL; + } + if (sc->sc_ih != NULL) { + (void)bus_release_resource(dev, SYS_RES_IRQ, i + 1, sc->sc_ih); + sc->sc_ih = NULL; + } + pci_release_msi(dev); + + qat_etr_deinit(sc); + + for (bar = 0; bar < MAX_BARS; bar++) { + if (sc->sc_res[bar] != NULL) { + (void)bus_release_resource(dev, SYS_RES_MEMORY, + sc->sc_rid[bar], sc->sc_res[bar]); + sc->sc_res[bar] = NULL; + } + } + + return 0; +} + +void * +qat_alloc_mem(size_t size) +{ + return (malloc(size, M_QAT, M_WAITOK | M_ZERO)); +} + +void +qat_free_mem(void *ptr) +{ + free(ptr, M_QAT); +} + +static void +qat_alloc_dmamem_cb(void *arg, bus_dma_segment_t *segs, int nseg, + int error) +{ + struct qat_dmamem *qdm; + + if (error != 0) + return; + + KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg)); + qdm = arg; + qdm->qdm_dma_seg = segs[0]; +} + +int +qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm, + int nseg, bus_size_t size, bus_size_t alignment) +{ + int error; + + KASSERT(qdm->qdm_dma_vaddr == NULL, + ("%s: DMA memory descriptor in use", __func__)); + + error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), + alignment, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + size, /* maxsize */ + nseg, /* nsegments */ + size, /* maxsegsize */ + BUS_DMA_COHERENT, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &qdm->qdm_dma_tag); + if (error != 0) + return error; + + error = bus_dmamem_alloc(qdm->qdm_dma_tag, &qdm->qdm_dma_vaddr, + BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, + &qdm->qdm_dma_map); + if (error != 0) { + device_printf(sc->sc_dev, + "couldn't allocate dmamem, error = %d\n", error); + goto fail_0; + } + + error = bus_dmamap_load(qdm->qdm_dma_tag, qdm->qdm_dma_map, + qdm->qdm_dma_vaddr, size, qat_alloc_dmamem_cb, qdm, + BUS_DMA_NOWAIT); + if (error) { + device_printf(sc->sc_dev, + "couldn't load dmamem map, error = %d\n", error); + goto fail_1; + } + + return 0; +fail_1: + bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, qdm->qdm_dma_map); +fail_0: + bus_dma_tag_destroy(qdm->qdm_dma_tag); + return error; +} + +void +qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm) +{ + if (qdm->qdm_dma_tag != NULL) { + bus_dmamap_unload(qdm->qdm_dma_tag, qdm->qdm_dma_map); + bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, + qdm->qdm_dma_map); + bus_dma_tag_destroy(qdm->qdm_dma_tag); + explicit_bzero(qdm, sizeof(*qdm)); + } +} + +static int +qat_setup_msix_intr(struct qat_softc *sc) +{ + device_t dev; + int error, i, rid; + + dev = sc->sc_dev; + + for (i = 1; i <= sc->sc_hw.qhw_num_banks; i++) { + struct qat_bank *qb = &sc->sc_etr_banks[i - 1]; + + rid = i; + qb->qb_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_ACTIVE); + if (qb->qb_ih == NULL) { + device_printf(dev, + "failed to allocate bank intr resource\n"); + 
return ENXIO; + } + error = bus_setup_intr(dev, qb->qb_ih, + INTR_TYPE_NET | INTR_MPSAFE, NULL, qat_etr_bank_intr, qb, + &qb->qb_ih_cookie); + if (error != 0) { + device_printf(dev, "failed to set up bank intr\n"); + return error; + } + error = bus_bind_intr(dev, qb->qb_ih, (i - 1) % mp_ncpus); + if (error != 0) + device_printf(dev, "failed to bind intr %d\n", i); + } + + rid = i; + sc->sc_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_ACTIVE); + if (sc->sc_ih == NULL) + return ENXIO; + error = bus_setup_intr(dev, sc->sc_ih, INTR_TYPE_NET | INTR_MPSAFE, + NULL, qat_ae_cluster_intr, sc, &sc->sc_ih_cookie); + + return error; +} + +static void +qat_etr_init(struct qat_softc *sc) +{ + int i; + + sc->sc_etr_banks = qat_alloc_mem( + sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks); + + for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) + qat_etr_bank_init(sc, i); + + if (sc->sc_hw.qhw_num_ap_banks) { + sc->sc_etr_ap_banks = qat_alloc_mem( + sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks); + qat_etr_ap_bank_init(sc); + } +} + +static void +qat_etr_deinit(struct qat_softc *sc) +{ + int i; + + if (sc->sc_etr_banks != NULL) { + for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) + qat_etr_bank_deinit(sc, i); + qat_free_mem(sc->sc_etr_banks); + sc->sc_etr_banks = NULL; + } + if (sc->sc_etr_ap_banks != NULL) { + qat_free_mem(sc->sc_etr_ap_banks); + sc->sc_etr_ap_banks = NULL; + } +} + +static void +qat_etr_bank_init(struct qat_softc *sc, int bank) +{ + struct qat_bank *qb = &sc->sc_etr_banks[bank]; + int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap; + + MPASS(bank < sc->sc_hw.qhw_num_banks); + + mtx_init(&qb->qb_bank_mtx, "qb bank", NULL, MTX_DEF); + + qb->qb_sc = sc; + qb->qb_bank = bank; + qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT; + + /* Clean CSRs for all rings within the bank */ + for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) { + struct qat_ring *qr = &qb->qb_et_rings[i]; + + qat_etr_bank_ring_write_4(sc, bank, i, + ETR_RING_CONFIG, 0); + qat_etr_bank_ring_base_write_8(sc, bank, i, 0); + + if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) { + qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t)); + } else if (sc->sc_hw.qhw_tx_rings_mask & + (1 << (i - tx_rx_gap))) { + /* Share inflight counter with rx and tx */ + qr->qr_inflight = + qb->qb_et_rings[i - tx_rx_gap].qr_inflight; + } + } + + if (sc->sc_hw.qhw_init_etr_intr != NULL) { + sc->sc_hw.qhw_init_etr_intr(sc, bank); + } else { + /* common code in qat 1.7 */ + qat_etr_bank_write_4(sc, bank, ETR_INT_REG, + ETR_INT_REG_CLEAR_MASK); + for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank / + ETR_RINGS_PER_INT_SRCSEL; i++) { + qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL + + (i * ETR_INT_SRCSEL_NEXT_OFFSET), + ETR_INT_SRCSEL_MASK); + } + } +} + +static void +qat_etr_bank_deinit(struct qat_softc *sc, int bank) +{ + struct qat_bank *qb; + struct qat_ring *qr; + int i; + + qb = &sc->sc_etr_banks[bank]; + for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) { + if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) { + qr = &qb->qb_et_rings[i]; + qat_free_mem(qr->qr_inflight); + } + } +} + +static void +qat_etr_ap_bank_init(struct qat_softc *sc) +{ + int ap_bank; + + for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) { + struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank]; + + qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK, + ETR_AP_NF_MASK_INIT); + qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0); + qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK, + ETR_AP_NE_MASK_INIT); + qat_etr_ap_bank_write_4(sc, ap_bank, 
ETR_AP_NE_DEST, 0);
+
+ memset(qab, 0, sizeof(*qab));
+ }
+}
+
+static void
+qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
+{
+ if (set_mask)
+ *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
+ else
+ *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
+}
+
+static void
+qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
+ uint32_t ring, int set_dest)
+{
+ uint32_t ae_mask;
+ uint8_t mailbox, ae, nae;
+ uint8_t *dest = (uint8_t *)ap_dest;
+
+ mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);
+
+ nae = 0;
+ ae_mask = sc->sc_ae_mask;
+ for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
+ if ((ae_mask & (1 << ae)) == 0)
+ continue;
+
+ if (set_dest) {
+ dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
+ __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
+ ETR_AP_DEST_ENABLE;
+ } else {
+ dest[nae] = 0;
+ }
+ nae++;
+ if (nae == ETR_MAX_AE_PER_MAILBOX)
+ break;
+ }
+}
+
+static void
+qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
+{
+ struct qat_ap_bank *qab;
+ int ap_bank;
+
+ if (sc->sc_hw.qhw_num_ap_banks == 0)
+ return;
+
+ ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
+ MPASS(ap_bank < sc->sc_hw.qhw_num_ap_banks);
+ qab = &sc->sc_etr_ap_banks[ap_bank];
+
+ if (qr->qr_cb == NULL) {
+ qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
+ if (!qab->qab_ne_dest) {
+ qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
+ qr->qr_ring, 1);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
+ qab->qab_ne_dest);
+ }
+ } else {
+ qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
+ if (!qab->qab_nf_dest) {
+ qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
+ qr->qr_ring, 1);
+ qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
+ qab->qab_nf_dest);
+ }
+ }
+}
+
+static int
+qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
+{
+ int i = QAT_MIN_RING_SIZE;
+
+ for (; i <= QAT_MAX_RING_SIZE; i++)
+ if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
+ return i;
+
+ return QAT_DEFAULT_RING_SIZE;
+}
+
+int
+qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
+ uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
+ const char *name, struct qat_ring **rqr)
+{
+ struct qat_bank *qb;
+ struct qat_ring *qr = NULL;
+ int error;
+ uint32_t ring_size_bytes, ring_config;
+ uint64_t ring_base;
+ uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
+ uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;
+
+ MPASS(bank < sc->sc_hw.qhw_num_banks);
+
+ /* Allocate a ring from the specified bank */
+ qb = &sc->sc_etr_banks[bank];
+
+ if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
+ return EINVAL;
+ if (qb->qb_allocated_rings & (1 << ring))
+ return ENOENT;
+ qr = &qb->qb_et_rings[ring];
+ qb->qb_allocated_rings |= 1 << ring;
+
+ /* Initialize the allocated ring */
+ qr->qr_ring = ring;
+ qr->qr_bank = bank;
+ qr->qr_name = name;
+ qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
+ qr->qr_ring_mask = (1 << ring);
+ qr->qr_cb = cb;
+ qr->qr_cb_arg = cb_arg;
+
+ /* Set up the shadow variables */
+ qr->qr_head = 0;
+ qr->qr_tail = 0;
+ qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
+ qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);
+
+ /*
+ * To make sure that the ring is aligned to the ring size, allocate
+ * at least 4KB and then tell the user it is smaller.
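+ * (QAT_RING_SIZE_BYTES_MIN() below appears to be what enforces the 4KB
+ * floor.) The alignment matters because ETR_RING_BASE_BUILD() computes
+ * the ring base CSR from both the physical address and the ring size,
+ * so qat_alloc_dmamem() is passed ring_size_bytes as the allocation's
+ * alignment as well as its size: e.g. 256 messages of 64 bytes give a
+ * 16KB ring allocated on a 16KB boundary.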
+ */ + ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size); + ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes); + error = qat_alloc_dmamem(sc, &qr->qr_dma, 1, ring_size_bytes, + ring_size_bytes); + if (error) + return error; + + qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr; + qr->qr_ring_paddr = qr->qr_dma.qdm_dma_seg.ds_addr; + + memset(qr->qr_ring_vaddr, QAT_RING_PATTERN, + qr->qr_dma.qdm_dma_seg.ds_len); + + bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); + + if (cb == NULL) { + ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size); + } else { + ring_config = + ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne); + } + qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config); + + ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size); + qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base); + + if (sc->sc_hw.qhw_init_arb != NULL) + qat_arb_update(sc, qb); + + mtx_init(&qr->qr_ring_mtx, "qr ring", NULL, MTX_DEF); + + qat_etr_ap_bank_setup_ring(sc, qr); + + if (cb != NULL) { + uint32_t intr_mask; + + qb->qb_intr_mask |= qr->qr_ring_mask; + intr_mask = qb->qb_intr_mask; + + qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN, intr_mask); + qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL, + ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time); + } + + *rqr = qr; + + return 0; +} + +static inline u_int +qat_modulo(u_int data, u_int shift) +{ + u_int div = data >> shift; + u_int mult = div << shift; + return data - mult; +} + +int +qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg) +{ + uint32_t inflight; + uint32_t *addr; + + mtx_lock(&qr->qr_ring_mtx); + + inflight = atomic_fetchadd_32(qr->qr_inflight, 1) + 1; + if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) { + atomic_subtract_32(qr->qr_inflight, 1); + qr->qr_need_wakeup = true; + mtx_unlock(&qr->qr_ring_mtx); + counter_u64_add(sc->sc_ring_full_restarts, 1); + return ERESTART; + } + + addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail); + + memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size)); + + bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, + BUS_DMASYNC_PREWRITE); + + qr->qr_tail = qat_modulo(qr->qr_tail + + QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), + QAT_RING_SIZE_MODULO(qr->qr_ring_size)); + + qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring, + ETR_RING_TAIL_OFFSET, qr->qr_tail); + + mtx_unlock(&qr->qr_ring_mtx); + + return 0; +} + +static int +qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb, + struct qat_ring *qr) +{ + uint32_t *msg, nmsg = 0; + int handled = 0; + bool blocked = false; + + mtx_lock(&qr->qr_ring_mtx); + + msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head); + + bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + while (atomic_load_32(msg) != ETR_RING_EMPTY_ENTRY_SIG) { + atomic_subtract_32(qr->qr_inflight, 1); + + if (qr->qr_cb != NULL) { + mtx_unlock(&qr->qr_ring_mtx); + handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg); + mtx_lock(&qr->qr_ring_mtx); + } + + atomic_store_32(msg, ETR_RING_EMPTY_ENTRY_SIG); + + qr->qr_head = qat_modulo(qr->qr_head + + QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), + QAT_RING_SIZE_MODULO(qr->qr_ring_size)); + nmsg++; + + msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head); + } + + bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); + + if (nmsg > 
0) { + qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring, + ETR_RING_HEAD_OFFSET, qr->qr_head); + if (qr->qr_need_wakeup) { + blocked = true; + qr->qr_need_wakeup = false; + } + } + + mtx_unlock(&qr->qr_ring_mtx); + + if (blocked) + crypto_unblock(sc->sc_crypto.qcy_cid, CRYPTO_SYMQ); + + return handled; +} + +static void +qat_etr_bank_intr(void *arg) +{ + struct qat_bank *qb = arg; + struct qat_softc *sc = qb->qb_sc; + uint32_t estat; + int i, handled = 0; + + mtx_lock(&qb->qb_bank_mtx); + + qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0); + + /* Now handle all the responses */ + estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT); + estat &= qb->qb_intr_mask; + + qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, + ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time); + + mtx_unlock(&qb->qb_bank_mtx); + + while ((i = ffs(estat)) != 0) { + struct qat_ring *qr = &qb->qb_et_rings[--i]; + estat &= ~(1 << i); + handled |= qat_etr_ring_intr(sc, qb, qr); + } +} + +void +qat_arb_update(struct qat_softc *sc, struct qat_bank *qb) +{ + + qat_arb_ringsrvarben_write_4(sc, qb->qb_bank, + qb->qb_allocated_rings & 0xff); +} + +static struct qat_sym_cookie * +qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb) +{ + struct qat_sym_cookie *qsc; + + mtx_lock(&qcb->qcb_bank_mtx); + + if (qcb->qcb_symck_free_count == 0) { + mtx_unlock(&qcb->qcb_bank_mtx); + return NULL; + } + + qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count]; + + mtx_unlock(&qcb->qcb_bank_mtx); + + return qsc; +} + +static void +qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb, + struct qat_sym_cookie *qsc) +{ + + explicit_bzero(qsc->qsc_iv_buf, sizeof(qsc->qsc_iv_buf)); + explicit_bzero(qsc->qsc_auth_res, sizeof(qsc->qsc_auth_res)); + + mtx_lock(&qcb->qcb_bank_mtx); + qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc; + mtx_unlock(&qcb->qcb_bank_mtx); +} + +void +qat_memcpy_htobe64(void *dst, const void *src, size_t len) +{ + uint64_t *dst0 = dst; + const uint64_t *src0 = src; + size_t i; + + MPASS(len % sizeof(*dst0) == 0); + + for (i = 0; i < len / sizeof(*dst0); i++) + *(dst0 + i) = htobe64(*(src0 + i)); +} + +void +qat_memcpy_htobe32(void *dst, const void *src, size_t len) +{ + uint32_t *dst0 = dst; + const uint32_t *src0 = src; + size_t i; + + MPASS(len % sizeof(*dst0) == 0); + + for (i = 0; i < len / sizeof(*dst0); i++) + *(dst0 + i) = htobe32(*(src0 + i)); +} + +void +qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte) +{ + switch (wordbyte) { + case 4: + qat_memcpy_htobe32(dst, src, len); + break; + case 8: + qat_memcpy_htobe64(dst, src, len); + break; + default: + panic("invalid word size %u", wordbyte); + } +} + +void +qat_crypto_gmac_precompute(const struct qat_crypto_desc *desc, + const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def, + uint8_t *state) +{ + uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)]; + char zeros[AES_BLOCK_LEN]; + int rounds; + + memset(zeros, 0, sizeof(zeros)); + rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY); + rijndaelEncrypt(ks, rounds, zeros, state); + explicit_bzero(ks, sizeof(ks)); +} + +void +qat_crypto_hmac_precompute(const struct qat_crypto_desc *desc, + const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def, + uint8_t *state1, uint8_t *state2) +{ + union authctx ctx; + const struct auth_hash *sah = hash_def->qshd_alg->qshai_sah; + uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset; + uint32_t state_size = hash_def->qshd_alg->qshai_state_size; + uint32_t state_word = 
hash_def->qshd_alg->qshai_state_word; + + hmac_init_ipad(sah, key, klen, &ctx); + qat_memcpy_htobe(state1, (uint8_t *)&ctx + state_offset, state_size, + state_word); + hmac_init_opad(sah, key, klen, &ctx); + qat_memcpy_htobe(state2, (uint8_t *)&ctx + state_offset, state_size, + state_word); + explicit_bzero(&ctx, sizeof(ctx)); +} + +static enum hw_cipher_algo +qat_aes_cipher_algo(int klen) +{ + switch (klen) { + case HW_AES_128_KEY_SZ: + return HW_CIPHER_ALGO_AES128; + case HW_AES_192_KEY_SZ: + return HW_CIPHER_ALGO_AES192; + case HW_AES_256_KEY_SZ: + return HW_CIPHER_ALGO_AES256; + default: + panic("invalid key length %d", klen); + } +} + +uint16_t +qat_crypto_load_cipher_session(const struct qat_crypto_desc *desc, + const struct qat_session *qs) +{ + enum hw_cipher_algo algo; + enum hw_cipher_dir dir; + enum hw_cipher_convert key_convert; + enum hw_cipher_mode mode; + + dir = desc->qcd_cipher_dir; + key_convert = HW_CIPHER_NO_CONVERT; + mode = qs->qs_cipher_mode; + switch (mode) { + case HW_CIPHER_CBC_MODE: + case HW_CIPHER_XTS_MODE: + algo = qs->qs_cipher_algo; + + /* + * AES decrypt key needs to be reversed. + * Instead of reversing the key at session registration, + * it is instead reversed on-the-fly by setting the KEY_CONVERT + * bit here. + */ + if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT) + key_convert = HW_CIPHER_KEY_CONVERT; + break; + case HW_CIPHER_CTR_MODE: + algo = qs->qs_cipher_algo; + dir = HW_CIPHER_ENCRYPT; + break; + default: + panic("unhandled cipher mode %d", mode); + break; + } + + return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert, dir); +} + +uint16_t +qat_crypto_load_auth_session(const struct qat_crypto_desc *desc, + const struct qat_session *qs, const struct qat_sym_hash_def **hash_def) +{ + enum qat_sym_hash_algorithm algo; + + switch (qs->qs_auth_algo) { + case HW_AUTH_ALGO_SHA1: + algo = QAT_SYM_HASH_SHA1; + break; + case HW_AUTH_ALGO_SHA256: + algo = QAT_SYM_HASH_SHA256; + break; + case HW_AUTH_ALGO_SHA384: + algo = QAT_SYM_HASH_SHA384; + break; + case HW_AUTH_ALGO_SHA512: + algo = QAT_SYM_HASH_SHA512; + break; + case HW_AUTH_ALGO_GALOIS_128: + algo = QAT_SYM_HASH_AES_GCM; + break; + default: + panic("unhandled auth algorithm %d", qs->qs_auth_algo); + break; + } + *hash_def = &qat_sym_hash_defs[algo]; + + return HW_AUTH_CONFIG_BUILD(qs->qs_auth_mode, + (*hash_def)->qshd_qat->qshqi_algo_enc, + (*hash_def)->qshd_alg->qshai_digest_len); +} + +struct qat_crypto_load_cb_arg { + struct qat_session *qs; + struct qat_sym_cookie *qsc; + struct cryptop *crp; + int error; +}; + +static void +qat_crypto_load_cb(void *_arg, bus_dma_segment_t *segs, int nseg, + int error) +{ + struct cryptop *crp; + struct flat_buffer_desc *flatbuf; + struct qat_crypto_load_cb_arg *arg; + struct qat_session *qs; + struct qat_sym_cookie *qsc; + bus_addr_t addr; + bus_size_t len; + int iseg, oseg, skip; + + arg = _arg; + if (error != 0) { + arg->error = error; + return; + } + + crp = arg->crp; + qs = arg->qs; + qsc = arg->qsc; + + if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { + /* + * The firmware expects AAD to be in a contiguous buffer and + * padded to a multiple of 16 bytes. To satisfy these + * constraints we bounce the AAD into a per-request buffer. 
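+ * For example, with 20 bytes of AAD, roundup2(20, QAT_AES_GCM_AAD_ALIGN)
+ * yields 32 (taking the alignment to be the 16 bytes mentioned above),
+ * so crypto_copydata() fills qsc_gcm_aad[0..19] and the memset() zeroes
+ * qsc_gcm_aad[20..31].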
+ */ + crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length, + qsc->qsc_gcm_aad); + memset(qsc->qsc_gcm_aad + crp->crp_aad_length, 0, + roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) - + crp->crp_aad_length); + skip = crp->crp_payload_start; + } else if (crp->crp_aad_length > 0) { + skip = crp->crp_aad_start; + } else { + skip = crp->crp_payload_start; + } + + for (iseg = oseg = 0; iseg < nseg; iseg++) { + addr = segs[iseg].ds_addr; + len = segs[iseg].ds_len; + + if (skip > 0) { + if (skip < len) { + addr += skip; + len -= skip; + skip = 0; + } else { + skip -= len; + continue; + } + } + + flatbuf = &qsc->qsc_flat_bufs[oseg++]; + flatbuf->data_len_in_bytes = (uint32_t)len; + flatbuf->phy_buffer = (uint64_t)addr; + } + qsc->qsc_buf_list.num_buffers = oseg; +} + +static int +qat_crypto_load(struct qat_session *qs, struct qat_sym_cookie *qsc, + struct qat_crypto_desc const *desc, struct cryptop *crp) +{ + struct qat_crypto_load_cb_arg arg; + int error; + + crypto_read_iv(crp, qsc->qsc_iv_buf); + + arg.crp = crp; + arg.qs = qs; + arg.qsc = qsc; + arg.error = 0; + error = bus_dmamap_load_crp(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap, + crp, qat_crypto_load_cb, &arg, BUS_DMA_NOWAIT); + if (error == 0) + error = arg.error; + return error; +} + +static inline struct qat_crypto_bank * +qat_crypto_select_bank(struct qat_crypto *qcy) +{ + u_int cpuid = PCPU_GET(cpuid); + + return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks]; +} + +static int +qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb) +{ + int error, i, bank; + int curname = 0; + char *name; + + bank = qcb->qcb_bank; + + name = qcb->qcb_ring_names[curname++]; + snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank); + error = qat_etr_setup_ring(sc, qcb->qcb_bank, + sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size, + NULL, NULL, name, &qcb->qcb_sym_tx); + if (error) + return error; + + name = qcb->qcb_ring_names[curname++]; + snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank); + error = qat_etr_setup_ring(sc, qcb->qcb_bank, + sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size, + qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx); + if (error) + return error; + + for (i = 0; i < QAT_NSYMCOOKIE; i++) { + struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i]; + struct qat_sym_cookie *qsc; + + error = qat_alloc_dmamem(sc, qdm, 1, + sizeof(struct qat_sym_cookie), QAT_OPTIMAL_ALIGN); + if (error) + return error; + + qsc = qdm->qdm_dma_vaddr; + qsc->qsc_self_dmamap = qdm->qdm_dma_map; + qsc->qsc_self_dma_tag = qdm->qdm_dma_tag; + qsc->qsc_bulk_req_params_buf_paddr = + qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, + u.qsc_bulk_cookie.qsbc_req_params_buf); + qsc->qsc_buffer_list_desc_paddr = + qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, + qsc_buf_list); + qsc->qsc_iv_buf_paddr = + qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, + qsc_iv_buf); + qsc->qsc_auth_res_paddr = + qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, + qsc_auth_res); + qsc->qsc_gcm_aad_paddr = + qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, + qsc_gcm_aad); + qsc->qsc_content_desc_paddr = + qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, + qsc_content_desc); + qcb->qcb_symck_free[i] = qsc; + qcb->qcb_symck_free_count++; + + error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), + 1, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + QAT_MAXLEN, /* 
maxsize */ + QAT_MAXSEG, /* nsegments */ + QAT_MAXLEN, /* maxsegsize */ + BUS_DMA_COHERENT, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &qsc->qsc_buf_dma_tag); + if (error != 0) + return error; + + error = bus_dmamap_create(qsc->qsc_buf_dma_tag, + BUS_DMA_COHERENT, &qsc->qsc_buf_dmamap); + if (error) + return error; + } + + return 0; +} + +static int +qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb) +{ + mtx_init(&qcb->qcb_bank_mtx, "qcb bank", NULL, MTX_DEF); + + return qat_crypto_setup_ring(sc, qcb); +} + +static void +qat_crypto_bank_deinit(struct qat_softc *sc, struct qat_crypto_bank *qcb) +{ + struct qat_dmamem *qdm; + int i; + + for (i = 0; i < QAT_NSYMCOOKIE; i++) { + qdm = &qcb->qcb_symck_dmamems[i]; + qat_free_dmamem(sc, qdm); + } + qat_free_dmamem(sc, &qcb->qcb_sym_tx->qr_dma); + qat_free_dmamem(sc, &qcb->qcb_sym_rx->qr_dma); + + mtx_destroy(&qcb->qcb_bank_mtx); +} + +static int +qat_crypto_init(struct qat_softc *sc) +{ + struct qat_crypto *qcy = &sc->sc_crypto; + struct sysctl_ctx_list *ctx; + struct sysctl_oid *oid; + struct sysctl_oid_list *children; + int bank, error, num_banks; + + qcy->qcy_sc = sc; + + if (sc->sc_hw.qhw_init_arb != NULL) + num_banks = imin(mp_ncpus, sc->sc_hw.qhw_num_banks); + else + num_banks = sc->sc_ae_num; + + qcy->qcy_num_banks = num_banks; + + qcy->qcy_banks = + qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks); + + for (bank = 0; bank < num_banks; bank++) { + struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank]; + qcb->qcb_bank = bank; + error = qat_crypto_bank_init(sc, qcb); + if (error) + return error; + } + + mtx_init(&qcy->qcy_crypto_mtx, "qcy crypto", NULL, MTX_DEF); + + ctx = device_get_sysctl_ctx(sc->sc_dev); + oid = device_get_sysctl_tree(sc->sc_dev); + children = SYSCTL_CHILDREN(oid); + oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", + CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics"); + children = SYSCTL_CHILDREN(oid); + + sc->sc_gcm_aad_restarts = counter_u64_alloc(M_WAITOK); + SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_restarts", + CTLFLAG_RD, &sc->sc_gcm_aad_restarts, + "GCM requests deferred due to AAD size change"); + sc->sc_gcm_aad_updates = counter_u64_alloc(M_WAITOK); + SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_updates", + CTLFLAG_RD, &sc->sc_gcm_aad_updates, + "GCM requests that required session state update"); + sc->sc_ring_full_restarts = counter_u64_alloc(M_WAITOK); + SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ring_full", + CTLFLAG_RD, &sc->sc_ring_full_restarts, + "Requests deferred due to in-flight max reached"); + + return 0; +} + +static void +qat_crypto_deinit(struct qat_softc *sc) +{ + struct qat_crypto *qcy = &sc->sc_crypto; + struct qat_crypto_bank *qcb; + int bank; + + if (qcy->qcy_banks != NULL) { + for (bank = 0; bank < qcy->qcy_num_banks; bank++) { + qcb = &qcy->qcy_banks[bank]; + qat_crypto_bank_deinit(sc, qcb); + } + qat_free_mem(qcy->qcy_banks); + mtx_destroy(&qcy->qcy_crypto_mtx); + } +} + +static int +qat_crypto_start(struct qat_softc *sc) +{ + struct qat_crypto *qcy; + + qcy = &sc->sc_crypto; + qcy->qcy_cid = crypto_get_driverid(sc->sc_dev, + sizeof(struct qat_session), CRYPTOCAP_F_HARDWARE); + if (qcy->qcy_cid < 0) { + device_printf(sc->sc_dev, + "could not get opencrypto driver id\n"); + return ENOENT; + } + + return 0; +} + +static void +qat_crypto_stop(struct qat_softc *sc) +{ + struct qat_crypto *qcy; + + qcy = &sc->sc_crypto; + if (qcy->qcy_cid >= 0) + (void)crypto_unregister_all(qcy->qcy_cid); +} + +static int 
+qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg) +{ + char icv[QAT_SYM_HASH_BUFFER_LEN]; + struct qat_crypto_bank *qcb = arg; + struct qat_crypto *qcy; + struct qat_session *qs; + struct qat_sym_cookie *qsc; + struct qat_sym_bulk_cookie *qsbc; + struct cryptop *crp; + int error; + uint16_t auth_sz; + bool blocked; + + qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset); + + qsbc = &qsc->u.qsc_bulk_cookie; + qcy = qsbc->qsbc_crypto; + qs = qsbc->qsbc_session; + crp = qsbc->qsbc_cb_tag; + + bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + bus_dmamap_sync(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap); + + error = 0; + if ((auth_sz = qs->qs_auth_mlen) != 0) { + if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) { + crypto_copydata(crp, crp->crp_digest_start, + auth_sz, icv); + if (timingsafe_bcmp(icv, qsc->qsc_auth_res, + auth_sz) != 0) { + error = EBADMSG; + } + } else { + crypto_copyback(crp, crp->crp_digest_start, + auth_sz, qsc->qsc_auth_res); + } + } + + qat_crypto_free_sym_cookie(qcb, qsc); + + blocked = false; + mtx_lock(&qs->qs_session_mtx); + MPASS(qs->qs_status & QAT_SESSION_STATUS_ACTIVE); + qs->qs_inflight--; + if (__predict_false(qs->qs_need_wakeup && qs->qs_inflight == 0)) { + blocked = true; + qs->qs_need_wakeup = false; + } + mtx_unlock(&qs->qs_session_mtx); + + crp->crp_etype = error; + crypto_done(crp); + + if (blocked) + crypto_unblock(qcy->qcy_cid, CRYPTO_SYMQ); + + return 1; +} + +static int +qat_probesession(device_t dev, const struct crypto_session_params *csp) +{ + if (csp->csp_cipher_alg == CRYPTO_AES_XTS && + qat_lookup(dev)->qatp_chip == QAT_CHIP_C2XXX) { + /* + * AES-XTS is not supported by the NanoQAT. 
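+ * ("NanoQAT" here is the C2000-class part matched as QAT_CHIP_C2XXX in
+ * the product table above.) Rejecting the session at this point keeps
+ * such requests away from the generic algorithm checks below.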
+ */ + return EINVAL; + } + + switch (csp->csp_mode) { + case CSP_MODE_CIPHER: + switch (csp->csp_cipher_alg) { + case CRYPTO_AES_CBC: + case CRYPTO_AES_ICM: + if (csp->csp_ivlen != AES_BLOCK_LEN) + return EINVAL; + break; + case CRYPTO_AES_XTS: + if (csp->csp_ivlen != AES_XTS_IV_LEN) + return EINVAL; + break; + default: + return EINVAL; + } + break; + case CSP_MODE_DIGEST: + switch (csp->csp_auth_alg) { + case CRYPTO_SHA1: + case CRYPTO_SHA1_HMAC: + case CRYPTO_SHA2_256: + case CRYPTO_SHA2_256_HMAC: + case CRYPTO_SHA2_384: + case CRYPTO_SHA2_384_HMAC: + case CRYPTO_SHA2_512: + case CRYPTO_SHA2_512_HMAC: + break; + case CRYPTO_AES_NIST_GMAC: + if (csp->csp_ivlen != AES_GCM_IV_LEN) + return EINVAL; + break; + default: + return EINVAL; + } + break; + case CSP_MODE_AEAD: + switch (csp->csp_cipher_alg) { + case CRYPTO_AES_NIST_GCM_16: + if (csp->csp_ivlen != AES_GCM_IV_LEN) + return EINVAL; + break; + default: + return EINVAL; + } + break; + case CSP_MODE_ETA: + switch (csp->csp_auth_alg) { + case CRYPTO_SHA1_HMAC: + case CRYPTO_SHA2_256_HMAC: + case CRYPTO_SHA2_384_HMAC: + case CRYPTO_SHA2_512_HMAC: + switch (csp->csp_cipher_alg) { + case CRYPTO_AES_CBC: + case CRYPTO_AES_ICM: + if (csp->csp_ivlen != AES_BLOCK_LEN) + return EINVAL; + break; + case CRYPTO_AES_XTS: + if (csp->csp_ivlen != AES_XTS_IV_LEN) + return EINVAL; + break; + default: + return EINVAL; + } + break; + default: + return EINVAL; + } + break; + default: + return EINVAL; + } + + return CRYPTODEV_PROBE_HARDWARE; +} + +static int +qat_newsession(device_t dev, crypto_session_t cses, + const struct crypto_session_params *csp) +{ + struct qat_crypto *qcy; + struct qat_dmamem *qdm; + struct qat_session *qs; + struct qat_softc *sc; + struct qat_crypto_desc *ddesc, *edesc; + int error, slices; + + sc = device_get_softc(dev); + qs = crypto_get_driver_session(cses); + qcy = &sc->sc_crypto; + + qdm = &qs->qs_desc_mem; + error = qat_alloc_dmamem(sc, qdm, QAT_MAXSEG, + sizeof(struct qat_crypto_desc) * 2, QAT_OPTIMAL_ALIGN); + if (error != 0) + return error; + + mtx_init(&qs->qs_session_mtx, "qs session", NULL, MTX_DEF); + qs->qs_aad_length = -1; + + qs->qs_dec_desc = ddesc = qdm->qdm_dma_vaddr; + qs->qs_enc_desc = edesc = ddesc + 1; + + ddesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr; + ddesc->qcd_hash_state_paddr = ddesc->qcd_desc_paddr + + offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf); + edesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr + + sizeof(struct qat_crypto_desc); + edesc->qcd_hash_state_paddr = edesc->qcd_desc_paddr + + offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf); + + qs->qs_status = QAT_SESSION_STATUS_ACTIVE; + qs->qs_inflight = 0; + + qs->qs_cipher_key = csp->csp_cipher_key; + qs->qs_cipher_klen = csp->csp_cipher_klen; + qs->qs_auth_key = csp->csp_auth_key; + qs->qs_auth_klen = csp->csp_auth_klen; + + switch (csp->csp_cipher_alg) { + case CRYPTO_AES_CBC: + qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen); + qs->qs_cipher_mode = HW_CIPHER_CBC_MODE; + break; + case CRYPTO_AES_ICM: + qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen); + qs->qs_cipher_mode = HW_CIPHER_CTR_MODE; + break; + case CRYPTO_AES_XTS: + qs->qs_cipher_algo = + qat_aes_cipher_algo(csp->csp_cipher_klen / 2); + qs->qs_cipher_mode = HW_CIPHER_XTS_MODE; + break; + case CRYPTO_AES_NIST_GCM_16: + qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen); + qs->qs_cipher_mode = HW_CIPHER_CTR_MODE; + qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128; + qs->qs_auth_mode = HW_AUTH_MODE1; + break; + case 0: + break; 
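+ /*
+ * A csp_cipher_alg of zero (the case above) means no cipher transform
+ * was requested, e.g. a digest-only session; the cipher fields are
+ * simply left unset.
+ */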
+ default: + panic("%s: unhandled cipher algorithm %d", __func__, + csp->csp_cipher_alg); + } + + switch (csp->csp_auth_alg) { + case CRYPTO_SHA1_HMAC: + qs->qs_auth_algo = HW_AUTH_ALGO_SHA1; + qs->qs_auth_mode = HW_AUTH_MODE1; + break; + case CRYPTO_SHA1: + qs->qs_auth_algo = HW_AUTH_ALGO_SHA1; + qs->qs_auth_mode = HW_AUTH_MODE0; + break; + case CRYPTO_SHA2_256_HMAC: + qs->qs_auth_algo = HW_AUTH_ALGO_SHA256; + qs->qs_auth_mode = HW_AUTH_MODE1; + break; + case CRYPTO_SHA2_256: + qs->qs_auth_algo = HW_AUTH_ALGO_SHA256; + qs->qs_auth_mode = HW_AUTH_MODE0; + break; + case CRYPTO_SHA2_384_HMAC: + qs->qs_auth_algo = HW_AUTH_ALGO_SHA384; + qs->qs_auth_mode = HW_AUTH_MODE1; + break; + case CRYPTO_SHA2_384: + qs->qs_auth_algo = HW_AUTH_ALGO_SHA384; + qs->qs_auth_mode = HW_AUTH_MODE0; + break; + case CRYPTO_SHA2_512_HMAC: + qs->qs_auth_algo = HW_AUTH_ALGO_SHA512; + qs->qs_auth_mode = HW_AUTH_MODE1; + break; + case CRYPTO_SHA2_512: + qs->qs_auth_algo = HW_AUTH_ALGO_SHA512; + qs->qs_auth_mode = HW_AUTH_MODE0; + break; + case CRYPTO_AES_NIST_GMAC: + qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_auth_klen); + qs->qs_cipher_mode = HW_CIPHER_CTR_MODE; + qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128; + qs->qs_auth_mode = HW_AUTH_MODE1; + + qs->qs_cipher_key = qs->qs_auth_key; + qs->qs_cipher_klen = qs->qs_auth_klen; + break; + case 0: + break; + default: + panic("%s: unhandled auth algorithm %d", __func__, + csp->csp_auth_alg); + } + + slices = 0; + switch (csp->csp_mode) { + case CSP_MODE_AEAD: + case CSP_MODE_ETA: + /* auth then decrypt */ + ddesc->qcd_slices[0] = FW_SLICE_AUTH; + ddesc->qcd_slices[1] = FW_SLICE_CIPHER; + ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT; + ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER; + /* encrypt then auth */ + edesc->qcd_slices[0] = FW_SLICE_CIPHER; + edesc->qcd_slices[1] = FW_SLICE_AUTH; + edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT; + edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH; + slices = 2; + break; + case CSP_MODE_CIPHER: + /* decrypt */ + ddesc->qcd_slices[0] = FW_SLICE_CIPHER; + ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT; + ddesc->qcd_cmd_id = FW_LA_CMD_CIPHER; + /* encrypt */ + edesc->qcd_slices[0] = FW_SLICE_CIPHER; + edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT; + edesc->qcd_cmd_id = FW_LA_CMD_CIPHER; + slices = 1; + break; + case CSP_MODE_DIGEST: + if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { + /* auth then decrypt */ + ddesc->qcd_slices[0] = FW_SLICE_AUTH; + ddesc->qcd_slices[1] = FW_SLICE_CIPHER; + ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT; + ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER; + /* encrypt then auth */ + edesc->qcd_slices[0] = FW_SLICE_CIPHER; + edesc->qcd_slices[1] = FW_SLICE_AUTH; + edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT; + edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH; + slices = 2; + } else { + ddesc->qcd_slices[0] = FW_SLICE_AUTH; + ddesc->qcd_cmd_id = FW_LA_CMD_AUTH; + edesc->qcd_slices[0] = FW_SLICE_AUTH; + edesc->qcd_cmd_id = FW_LA_CMD_AUTH; + slices = 1; + } + break; + default: + panic("%s: unhandled crypto algorithm %d, %d", __func__, + csp->csp_cipher_alg, csp->csp_auth_alg); + } + ddesc->qcd_slices[slices] = FW_SLICE_DRAM_WR; + edesc->qcd_slices[slices] = FW_SLICE_DRAM_WR; + + qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, ddesc); + qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, edesc); + + if (csp->csp_auth_mlen != 0) + qs->qs_auth_mlen = csp->csp_auth_mlen; + else + qs->qs_auth_mlen = edesc->qcd_auth_sz; + + /* Compute the GMAC by specifying a null cipher payload. 
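AES-GMAC is defined (NIST SP 800-38D) as AES-GCM over an empty plaintext with the message authenticated as AAD, which is what forcing FW_LA_CMD_AUTH below achieves.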
*/ + if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) + ddesc->qcd_cmd_id = edesc->qcd_cmd_id = FW_LA_CMD_AUTH; + + return 0; +} + +static void +qat_crypto_clear_desc(struct qat_crypto_desc *desc) +{ + explicit_bzero(desc->qcd_content_desc, sizeof(desc->qcd_content_desc)); + explicit_bzero(desc->qcd_hash_state_prefix_buf, + sizeof(desc->qcd_hash_state_prefix_buf)); + explicit_bzero(desc->qcd_req_cache, sizeof(desc->qcd_req_cache)); +} + +static void +qat_freesession(device_t dev, crypto_session_t cses) +{ + struct qat_session *qs; + + qs = crypto_get_driver_session(cses); + KASSERT(qs->qs_inflight == 0, + ("%s: session %p has requests in flight", __func__, qs)); + + qat_crypto_clear_desc(qs->qs_enc_desc); + qat_crypto_clear_desc(qs->qs_dec_desc); + qat_free_dmamem(device_get_softc(dev), &qs->qs_desc_mem); + mtx_destroy(&qs->qs_session_mtx); +} + +static int +qat_process(device_t dev, struct cryptop *crp, int hint) +{ + struct qat_crypto *qcy; + struct qat_crypto_bank *qcb; + struct qat_crypto_desc const *desc; + struct qat_session *qs; + struct qat_softc *sc; + struct qat_sym_cookie *qsc; + struct qat_sym_bulk_cookie *qsbc; + int error; + + sc = device_get_softc(dev); + qcy = &sc->sc_crypto; + qs = crypto_get_driver_session(crp->crp_session); + qsc = NULL; + + if (__predict_false(crypto_buffer_len(&crp->crp_buf) > QAT_MAXLEN)) { + error = E2BIG; + goto fail1; + } + + mtx_lock(&qs->qs_session_mtx); + if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { + if (crp->crp_aad_length > QAT_GCM_AAD_SIZE_MAX) { + error = E2BIG; + mtx_unlock(&qs->qs_session_mtx); + goto fail1; + } + + /* + * The firmware interface for GCM annoyingly requires the AAD + * size to be stored in the session's content descriptor, which + * is not really meant to be updated after session + * initialization. For IPSec the AAD size is fixed so this is + * not much of a problem in practice, but we have to catch AAD + * size updates here so that the device code can safely update + * the session's recorded AAD size. 
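+ * Concretely: if the AAD length changes while earlier requests are
+ * still in flight, the request is deferred with ERESTART below; once
+ * qs_inflight drops to zero, qs_aad_length is updated and processing
+ * resumes.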
+ */ + if (__predict_false(crp->crp_aad_length != qs->qs_aad_length)) { + if (qs->qs_inflight == 0) { + if (qs->qs_aad_length != -1) { + counter_u64_add(sc->sc_gcm_aad_updates, + 1); + } + qs->qs_aad_length = crp->crp_aad_length; + } else { + qs->qs_need_wakeup = true; + mtx_unlock(&qs->qs_session_mtx); + counter_u64_add(sc->sc_gcm_aad_restarts, 1); + error = ERESTART; + goto fail1; + } + } + } + qs->qs_inflight++; + mtx_unlock(&qs->qs_session_mtx); + + qcb = qat_crypto_select_bank(qcy); + + qsc = qat_crypto_alloc_sym_cookie(qcb); + if (qsc == NULL) { + error = ENOBUFS; + goto fail2; + } + + if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) + desc = qs->qs_enc_desc; + else + desc = qs->qs_dec_desc; + + error = qat_crypto_load(qs, qsc, desc, crp); + if (error != 0) + goto fail2; + + qsbc = &qsc->u.qsc_bulk_cookie; + qsbc->qsbc_crypto = qcy; + qsbc->qsbc_session = qs; + qsbc->qsbc_cb_tag = crp; + + sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, crp); + + bus_dmamap_sync(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); + bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); + + error = qat_etr_put_msg(sc, qcb->qcb_sym_tx, + (uint32_t *)qsbc->qsbc_msg); + if (error) + goto fail2; + + return 0; + +fail2: + if (qsc) + qat_crypto_free_sym_cookie(qcb, qsc); + mtx_lock(&qs->qs_session_mtx); + qs->qs_inflight--; + mtx_unlock(&qs->qs_session_mtx); +fail1: + crp->crp_etype = error; + crypto_done(crp); + return 0; +} + +static device_method_t qat_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, qat_probe), + DEVMETHOD(device_attach, qat_attach), + DEVMETHOD(device_detach, qat_detach), + + /* Cryptodev interface */ + DEVMETHOD(cryptodev_probesession, qat_probesession), + DEVMETHOD(cryptodev_newsession, qat_newsession), + DEVMETHOD(cryptodev_freesession, qat_freesession), + DEVMETHOD(cryptodev_process, qat_process), + + DEVMETHOD_END +}; + +static devclass_t qat_devclass; + +static driver_t qat_driver = { + .name = "qat", + .methods = qat_methods, + .size = sizeof(struct qat_softc), +}; + +DRIVER_MODULE(qat, pci, qat_driver, qat_devclass, 0, 0); +MODULE_VERSION(qat, 1); +MODULE_DEPEND(qat, crypto, 1, 1, 1); +MODULE_DEPEND(qat, pci, 1, 1, 1); Property changes on: head/sys/dev/qat/qat.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_ae.c =================================================================== --- head/sys/dev/qat/qat_ae.c (nonexistent) +++ head/sys/dev/qat/qat_ae.c (revision 367386) @@ -0,0 +1,3456 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_ae.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2019 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); +#if 0 +__KERNEL_RCSID(0, "$NetBSD: qat_ae.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $"); +#endif + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "qatreg.h" +#include "qatvar.h" +#include "qat_aevar.h" + +static int qat_ae_write_4(struct qat_softc *, u_char, bus_size_t, + uint32_t); +static int qat_ae_read_4(struct qat_softc *, u_char, bus_size_t, + uint32_t *); +static int qat_ae_write_4(struct qat_softc *, u_char, bus_size_t, + uint32_t); +static void qat_ae_ctx_indr_write(struct qat_softc *, u_char, uint32_t, + bus_size_t, uint32_t); +static int qat_ae_ctx_indr_read(struct qat_softc *, u_char, uint32_t, + bus_size_t, uint32_t *); + +static u_short qat_aereg_get_10bit_addr(enum aereg_type, u_short); +static int qat_aereg_rel_data_write(struct qat_softc *, u_char, u_char, + enum aereg_type, u_short, uint32_t); +static int qat_aereg_rel_data_read(struct qat_softc *, u_char, u_char, + enum aereg_type, u_short, uint32_t *); +static int qat_aereg_rel_rdxfer_write(struct qat_softc *, u_char, u_char, + enum aereg_type, u_short, uint32_t); +static int qat_aereg_rel_wrxfer_write(struct qat_softc *, u_char, u_char, + enum aereg_type, u_short, uint32_t); +static int qat_aereg_rel_nn_write(struct qat_softc *, u_char, u_char, + enum aereg_type, u_short, uint32_t); +static int qat_aereg_abs_to_rel(struct qat_softc *, u_char, u_short, + u_short *, u_char *); +static int qat_aereg_abs_data_write(struct qat_softc *, u_char, + enum aereg_type, u_short, uint32_t); + +static void qat_ae_enable_ctx(struct qat_softc *, u_char, u_int); +static void qat_ae_disable_ctx(struct qat_softc *, u_char, u_int); +static void qat_ae_write_ctx_mode(struct qat_softc *, u_char, u_char); +static void qat_ae_write_nn_mode(struct qat_softc *, u_char, u_char); +static void qat_ae_write_lm_mode(struct qat_softc *, u_char, + enum aereg_type, u_char); +static void qat_ae_write_shared_cs_mode0(struct qat_softc *, u_char, + u_char); +static void qat_ae_write_shared_cs_mode(struct qat_softc *, u_char, u_char); +static int qat_ae_set_reload_ustore(struct qat_softc *, u_char, u_int, int, + u_int); + +static enum qat_ae_status qat_ae_get_status(struct qat_softc *, u_char); +static int qat_ae_is_active(struct qat_softc *, u_char); +static int qat_ae_wait_num_cycles(struct qat_softc *, u_char, int, int); + +static int qat_ae_clear_reset(struct qat_softc *); +static int qat_ae_check(struct qat_softc *); +static int qat_ae_reset_timestamp(struct qat_softc *); +static void qat_ae_clear_xfer(struct qat_softc *); +static int qat_ae_clear_gprs(struct qat_softc *); + +static void qat_ae_get_shared_ustore_ae(u_char, u_char *); +static u_int qat_ae_ucode_parity64(uint64_t); +static uint64_t qat_ae_ucode_set_ecc(uint64_t); +static int qat_ae_ucode_write(struct qat_softc *, u_char, u_int, u_int, + const uint64_t *); +static int qat_ae_ucode_read(struct qat_softc *, u_char, u_int, u_int, + uint64_t *); +static u_int qat_ae_concat_ucode(uint64_t *, u_int, u_int, u_int, u_int *); +static int qat_ae_exec_ucode(struct qat_softc *, u_char, u_char, + uint64_t *, u_int, int, u_int, u_int *); +static int qat_ae_exec_ucode_init_lm(struct qat_softc *, u_char, u_char, + int *, uint64_t *, u_int, + u_int *, u_int *, u_int *, u_int *, u_int *); +static int qat_ae_restore_init_lm_gprs(struct qat_softc *, u_char, u_char, + u_int, u_int, u_int, u_int, u_int); +static int qat_ae_get_inst_num(int); +static int qat_ae_batch_put_lm(struct qat_softc *, u_char, + struct 
qat_ae_batch_init_list *, size_t); +static int qat_ae_write_pc(struct qat_softc *, u_char, u_int, u_int); + +static u_int qat_aefw_csum(char *, int); +static const char *qat_aefw_uof_string(struct qat_softc *, size_t); +static struct uof_chunk_hdr *qat_aefw_uof_find_chunk(struct qat_softc *, + const char *, struct uof_chunk_hdr *); + +static int qat_aefw_load_mof(struct qat_softc *); +static void qat_aefw_unload_mof(struct qat_softc *); +static int qat_aefw_load_mmp(struct qat_softc *); +static void qat_aefw_unload_mmp(struct qat_softc *); + +static int qat_aefw_mof_find_uof0(struct qat_softc *, + struct mof_uof_hdr *, struct mof_uof_chunk_hdr *, + u_int, size_t, const char *, + size_t *, void **); +static int qat_aefw_mof_find_uof(struct qat_softc *); +static int qat_aefw_mof_parse(struct qat_softc *); + +static int qat_aefw_uof_parse_image(struct qat_softc *, + struct qat_uof_image *, struct uof_chunk_hdr *uch); +static int qat_aefw_uof_parse_images(struct qat_softc *); +static int qat_aefw_uof_parse(struct qat_softc *); + +static int qat_aefw_alloc_auth_dmamem(struct qat_softc *, char *, size_t, + struct qat_dmamem *); +static int qat_aefw_auth(struct qat_softc *, struct qat_dmamem *); +static int qat_aefw_suof_load(struct qat_softc *sc, + struct qat_dmamem *dma); +static int qat_aefw_suof_parse_image(struct qat_softc *, + struct qat_suof_image *, struct suof_chunk_hdr *); +static int qat_aefw_suof_parse(struct qat_softc *); +static int qat_aefw_suof_write(struct qat_softc *); + +static int qat_aefw_uof_assign_image(struct qat_softc *, struct qat_ae *, + struct qat_uof_image *); +static int qat_aefw_uof_init_ae(struct qat_softc *, u_char); +static int qat_aefw_uof_init(struct qat_softc *); + +static int qat_aefw_init_memory_one(struct qat_softc *, + struct uof_init_mem *); +static void qat_aefw_free_lm_init(struct qat_softc *, u_char); +static int qat_aefw_init_ustore(struct qat_softc *); +static int qat_aefw_init_reg(struct qat_softc *, u_char, u_char, + enum aereg_type, u_short, u_int); +static int qat_aefw_init_reg_sym_expr(struct qat_softc *, u_char, + struct qat_uof_image *); +static int qat_aefw_init_memory(struct qat_softc *); +static int qat_aefw_init_globals(struct qat_softc *); +static uint64_t qat_aefw_get_uof_inst(struct qat_softc *, + struct qat_uof_page *, u_int); +static int qat_aefw_do_pagein(struct qat_softc *, u_char, + struct qat_uof_page *); +static int qat_aefw_uof_write_one(struct qat_softc *, + struct qat_uof_image *); +static int qat_aefw_uof_write(struct qat_softc *); + +static int +qat_ae_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset, + uint32_t value) +{ + int times = TIMEOUT_AE_CSR; + + do { + qat_ae_local_write_4(sc, ae, offset, value); + if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) & + LOCAL_CSR_STATUS_STATUS) == 0) + return 0; + + } while (times--); + + device_printf(sc->sc_dev, + "couldn't write AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset); + return EFAULT; +} + +static int +qat_ae_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset, + uint32_t *value) +{ + int times = TIMEOUT_AE_CSR; + uint32_t v; + + do { + v = qat_ae_local_read_4(sc, ae, offset); + if ((qat_ae_local_read_4(sc, ae, LOCAL_CSR_STATUS) & + LOCAL_CSR_STATUS_STATUS) == 0) { + *value = v; + return 0; + } + } while (times--); + + device_printf(sc->sc_dev, + "couldn't read AE CSR: ae 0x%hhx offset 0x%lx\n", ae, (long)offset); + return EFAULT; +} + +static void +qat_ae_ctx_indr_write(struct qat_softc *sc, u_char ae, uint32_t ctx_mask, + bus_size_t offset, 
uint32_t value) +{ + int ctx; + uint32_t ctxptr; + + MPASS(offset == CTX_FUTURE_COUNT_INDIRECT || + offset == FUTURE_COUNT_SIGNAL_INDIRECT || + offset == CTX_STS_INDIRECT || + offset == CTX_WAKEUP_EVENTS_INDIRECT || + offset == CTX_SIG_EVENTS_INDIRECT || + offset == LM_ADDR_0_INDIRECT || + offset == LM_ADDR_1_INDIRECT || + offset == INDIRECT_LM_ADDR_0_BYTE_INDEX || + offset == INDIRECT_LM_ADDR_1_BYTE_INDEX); + + qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr); + for (ctx = 0; ctx < MAX_AE_CTX; ctx++) { + if ((ctx_mask & (1 << ctx)) == 0) + continue; + qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx); + qat_ae_write_4(sc, ae, offset, value); + } + qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr); +} + +static int +qat_ae_ctx_indr_read(struct qat_softc *sc, u_char ae, uint32_t ctx, + bus_size_t offset, uint32_t *value) +{ + int error; + uint32_t ctxptr; + + MPASS(offset == CTX_FUTURE_COUNT_INDIRECT || + offset == FUTURE_COUNT_SIGNAL_INDIRECT || + offset == CTX_STS_INDIRECT || + offset == CTX_WAKEUP_EVENTS_INDIRECT || + offset == CTX_SIG_EVENTS_INDIRECT || + offset == LM_ADDR_0_INDIRECT || + offset == LM_ADDR_1_INDIRECT || + offset == INDIRECT_LM_ADDR_0_BYTE_INDEX || + offset == INDIRECT_LM_ADDR_1_BYTE_INDEX); + + /* save the ctx ptr */ + qat_ae_read_4(sc, ae, CSR_CTX_POINTER, &ctxptr); + if ((ctxptr & CSR_CTX_POINTER_CONTEXT) != + (ctx & CSR_CTX_POINTER_CONTEXT)) + qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctx); + + error = qat_ae_read_4(sc, ae, offset, value); + + /* restore ctx ptr */ + if ((ctxptr & CSR_CTX_POINTER_CONTEXT) != + (ctx & CSR_CTX_POINTER_CONTEXT)) + qat_ae_write_4(sc, ae, CSR_CTX_POINTER, ctxptr); + + return error; +} + +static u_short +qat_aereg_get_10bit_addr(enum aereg_type regtype, u_short reg) +{ + u_short addr; + + switch (regtype) { + case AEREG_GPA_ABS: + case AEREG_GPB_ABS: + addr = (reg & 0x7f) | 0x80; + break; + case AEREG_GPA_REL: + case AEREG_GPB_REL: + addr = reg & 0x1f; + break; + case AEREG_SR_RD_REL: + case AEREG_SR_WR_REL: + case AEREG_SR_REL: + addr = 0x180 | (reg & 0x1f); + break; + case AEREG_SR_INDX: + addr = 0x140 | ((reg & 0x3) << 1); + break; + case AEREG_DR_RD_REL: + case AEREG_DR_WR_REL: + case AEREG_DR_REL: + addr = 0x1c0 | (reg & 0x1f); + break; + case AEREG_DR_INDX: + addr = 0x100 | ((reg & 0x3) << 1); + break; + case AEREG_NEIGH_INDX: + addr = 0x241 | ((reg & 0x3) << 1); + break; + case AEREG_NEIGH_REL: + addr = 0x280 | (reg & 0x1f); + break; + case AEREG_LMEM0: + addr = 0x200; + break; + case AEREG_LMEM1: + addr = 0x220; + break; + case AEREG_NO_DEST: + addr = 0x300 | (reg & 0xff); + break; + default: + addr = AEREG_BAD_REGADDR; + break; + } + return (addr); +} + +static int +qat_aereg_rel_data_write(struct qat_softc *sc, u_char ae, u_char ctx, + enum aereg_type regtype, u_short relreg, uint32_t value) +{ + uint16_t srchi, srclo, destaddr, data16hi, data16lo; + uint64_t inst[] = { + 0x0F440000000ull, /* immed_w1[reg, val_hi16] */ + 0x0F040000000ull, /* immed_w0[reg, val_lo16] */ + 0x0F0000C0300ull, /* nop */ + 0x0E000010000ull /* ctx_arb[kill] */ + }; + const int ninst = nitems(inst); + const int imm_w1 = 0, imm_w0 = 1; + unsigned int ctxen; + uint16_t mask; + + /* This logic only works for GPRs and LM index registers, + not NN or XFER registers! 
*/
+	MPASS(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
+	    regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);
+
+	if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL)) {
+		/* determine the context mode */
+		qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+		if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
+			/* 4-ctx mode */
+			if (ctx & 0x1)
+				return EINVAL;
+			mask = 0x1f;
+		} else {
+			/* 8-ctx mode */
+			mask = 0x0f;
+		}
+		if (relreg & ~mask)
+			return EINVAL;
+	}
+	if ((destaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
+	    AEREG_BAD_REGADDR) {
+		return EINVAL;
+	}
+
+	data16lo = 0xffff & value;
+	data16hi = 0xffff & (value >> 16);
+	srchi = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
+	    (uint16_t)(0xff & data16hi));
+	srclo = qat_aereg_get_10bit_addr(AEREG_NO_DEST,
+	    (uint16_t)(0xff & data16lo));
+
+	switch (regtype) {
+	case AEREG_GPA_REL:	/* A rel source */
+		inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
+		    ((srchi & 0x3ff) << 10) | (destaddr & 0x3ff);
+		inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
+		    ((srclo & 0x3ff) << 10) | (destaddr & 0x3ff);
+		break;
+	default:
+		inst[imm_w1] = inst[imm_w1] | ((data16hi >> 8) << 20) |
+		    ((destaddr & 0x3ff) << 10) | (srchi & 0x3ff);
+		inst[imm_w0] = inst[imm_w0] | ((data16lo >> 8) << 20) |
+		    ((destaddr & 0x3ff) << 10) | (srclo & 0x3ff);
+		break;
+	}
+
+	return qat_ae_exec_ucode(sc, ae, ctx, inst, ninst, 1, ninst * 5, NULL);
+}
+
+static int
+qat_aereg_rel_data_read(struct qat_softc *sc, u_char ae, u_char ctx,
+    enum aereg_type regtype, u_short relreg, uint32_t *value)
+{
+	uint64_t inst, savucode;
+	uint32_t ctxen, misc, nmisc, savctx, ctxarbctl, ulo, uhi;
+	u_int uaddr, ustore_addr;
+	int error;
+	u_short mask, regaddr;
+	u_char nae;
+
+	MPASS(regtype == AEREG_GPA_REL || regtype == AEREG_GPB_REL ||
+	    regtype == AEREG_SR_REL || regtype == AEREG_SR_RD_REL ||
+	    regtype == AEREG_DR_REL || regtype == AEREG_DR_RD_REL ||
+	    regtype == AEREG_LMEM0 || regtype == AEREG_LMEM1);
+
+	if ((regtype == AEREG_GPA_REL) || (regtype == AEREG_GPB_REL) ||
+	    (regtype == AEREG_SR_REL) || (regtype == AEREG_SR_RD_REL) ||
+	    (regtype == AEREG_DR_REL) || (regtype == AEREG_DR_RD_REL))
+	{
+		/* determine the context mode */
+		qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+		if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) {
+			/* 4-ctx mode */
+			if (ctx & 0x1)
+				return EINVAL;
+			mask = 0x1f;
+		} else {
+			/* 8-ctx mode */
+			mask = 0x0f;
+		}
+		if (relreg & ~mask)
+			return EINVAL;
+	}
+	if ((regaddr = qat_aereg_get_10bit_addr(regtype, relreg)) ==
+	    AEREG_BAD_REGADDR) {
+		return EINVAL;
+	}
+
+	/* instruction -- alu[--, --, B, reg] */
+	switch (regtype) {
+	case AEREG_GPA_REL:
+		/* A rel source */
+		inst = 0xA070000000ull | (regaddr & 0x3ff);
+		break;
+	default:
+		inst = (0xA030000000ull | ((regaddr & 0x3ff) << 10));
+		break;
+	}
+
+	/* back up the shared control store bit, and force the AE into
+	 * non-shared mode before executing the ucode snippet */
+	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
+	if (misc & AE_MISC_CONTROL_SHARE_CS) {
+		qat_ae_get_shared_ustore_ae(ae, &nae);
+		if ((1 << nae) & sc->sc_ae_mask && qat_ae_is_active(sc, nae))
+			return EBUSY;
+	}
+
+	nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
+	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
+
+	/* read current context */
+	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
+	qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);
+
+	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+	/* prevent clearing the W1C bits: the breakpoint bit,
+	   ECC error bit, and Parity error bit */
+	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
+
+	/* change
the context */ + if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) + qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, + ctx & ACTIVE_CTX_STATUS_ACNO); + /* save a ustore location */ + if ((error = qat_ae_ucode_read(sc, ae, 0, 1, &savucode)) != 0) { + /* restore AE_MISC_CONTROL csr */ + qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc); + + /* restore the context */ + if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) { + qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, + savctx & ACTIVE_CTX_STATUS_ACNO); + } + qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl); + + return (error); + } + + /* turn off ustore parity */ + qat_ae_write_4(sc, ae, CTX_ENABLES, + ctxen & (~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE)); + + /* save ustore-addr csr */ + qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr); + + /* write the ALU instruction to ustore, enable ecs bit */ + uaddr = 0 | USTORE_ADDRESS_ECS; + + /* set the uaddress */ + qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr); + inst = qat_ae_ucode_set_ecc(inst); + + ulo = (uint32_t)(inst & 0xffffffff); + uhi = (uint32_t)(inst >> 32); + + qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo); + + /* this will auto increment the address */ + qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi); + + /* set the uaddress */ + qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr); + + /* delay for at least 8 cycles */ + qat_ae_wait_num_cycles(sc, ae, 0x8, 0); + + /* read ALU output -- the instruction should have been executed + prior to clearing the ECS in putUwords */ + qat_ae_read_4(sc, ae, ALU_OUT, value); + + /* restore ustore-addr csr */ + qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr); + + /* restore the ustore */ + error = qat_ae_ucode_write(sc, ae, 0, 1, &savucode); + + /* restore the context */ + if (ctx != (savctx & ACTIVE_CTX_STATUS_ACNO)) { + qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, + savctx & ACTIVE_CTX_STATUS_ACNO); + } + + qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl); + + /* restore AE_MISC_CONTROL csr */ + qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc); + + qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen); + + return error; +} + +static int +qat_aereg_rel_rdxfer_write(struct qat_softc *sc, u_char ae, u_char ctx, + enum aereg_type regtype, u_short relreg, uint32_t value) +{ + bus_size_t addr; + int error; + uint32_t ctxen; + u_short mask; + u_short dr_offset; + + MPASS(regtype == AEREG_SR_REL || regtype == AEREG_DR_REL || + regtype == AEREG_SR_RD_REL || regtype == AEREG_DR_RD_REL); + + error = qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); + if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) { + if (ctx & 0x1) { + device_printf(sc->sc_dev, + "bad ctx argument in 4-ctx mode,ctx=0x%x\n", ctx); + return EINVAL; + } + mask = 0x1f; + dr_offset = 0x20; + + } else { + mask = 0x0f; + dr_offset = 0x10; + } + + if (relreg & ~mask) + return EINVAL; + + addr = relreg + (ctx << 0x5); + + switch (regtype) { + case AEREG_SR_REL: + case AEREG_SR_RD_REL: + qat_ae_xfer_write_4(sc, ae, addr, value); + break; + case AEREG_DR_REL: + case AEREG_DR_RD_REL: + qat_ae_xfer_write_4(sc, ae, addr + dr_offset, value); + break; + default: + error = EINVAL; + } + + return error; +} + +static int +qat_aereg_rel_wrxfer_write(struct qat_softc *sc, u_char ae, u_char ctx, + enum aereg_type regtype, u_short relreg, uint32_t value) +{ + + panic("notyet"); + + return 0; +} + +static int +qat_aereg_rel_nn_write(struct qat_softc *sc, u_char ae, u_char ctx, + enum aereg_type regtype, u_short relreg, uint32_t value) +{ + + panic("notyet"); + + return 0; +} + +static int +qat_aereg_abs_to_rel(struct qat_softc *sc, u_char ae, + u_short absreg, u_short 
*relreg, u_char *ctx) +{ + uint32_t ctxen; + + qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); + if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) { + /* 4-ctx mode */ + *relreg = absreg & 0x1f; + *ctx = (absreg >> 0x4) & 0x6; + } else { + /* 8-ctx mode */ + *relreg = absreg & 0x0f; + *ctx = (absreg >> 0x4) & 0x7; + } + + return 0; +} + +static int +qat_aereg_abs_data_write(struct qat_softc *sc, u_char ae, + enum aereg_type regtype, u_short absreg, uint32_t value) +{ + int error; + u_short relreg; + u_char ctx; + + qat_aereg_abs_to_rel(sc, ae, absreg, &relreg, &ctx); + + switch (regtype) { + case AEREG_GPA_ABS: + MPASS(absreg < MAX_GPR_REG); + error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, + relreg, value); + break; + case AEREG_GPB_ABS: + MPASS(absreg < MAX_GPR_REG); + error = qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, + relreg, value); + break; + case AEREG_DR_RD_ABS: + MPASS(absreg < MAX_XFER_REG); + error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_DR_RD_REL, + relreg, value); + break; + case AEREG_SR_RD_ABS: + MPASS(absreg < MAX_XFER_REG); + error = qat_aereg_rel_rdxfer_write(sc, ae, ctx, AEREG_SR_RD_REL, + relreg, value); + break; + case AEREG_DR_WR_ABS: + MPASS(absreg < MAX_XFER_REG); + error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_DR_WR_REL, + relreg, value); + break; + case AEREG_SR_WR_ABS: + MPASS(absreg < MAX_XFER_REG); + error = qat_aereg_rel_wrxfer_write(sc, ae, ctx, AEREG_SR_WR_REL, + relreg, value); + break; + case AEREG_NEIGH_ABS: + MPASS(absreg < MAX_NN_REG); + if (absreg >= MAX_NN_REG) + return EINVAL; + error = qat_aereg_rel_nn_write(sc, ae, ctx, AEREG_NEIGH_REL, + relreg, value); + break; + default: + panic("Invalid Register Type"); + } + + return error; +} + +static void +qat_ae_enable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask) +{ + uint32_t ctxen; + + qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); + ctxen &= CTX_ENABLES_IGNORE_W1C_MASK; + + if (ctxen & CTX_ENABLES_INUSE_CONTEXTS) { + ctx_mask &= 0x55; + } else { + ctx_mask &= 0xff; + } + + ctxen |= __SHIFTIN(ctx_mask, CTX_ENABLES_ENABLE); + qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen); +} + +static void +qat_ae_disable_ctx(struct qat_softc *sc, u_char ae, u_int ctx_mask) +{ + uint32_t ctxen; + + qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen); + ctxen &= CTX_ENABLES_IGNORE_W1C_MASK; + ctxen &= ~(__SHIFTIN(ctx_mask & AE_ALL_CTX, CTX_ENABLES_ENABLE)); + qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen); +} + +static void +qat_ae_write_ctx_mode(struct qat_softc *sc, u_char ae, u_char mode) +{ + uint32_t val, nval; + + qat_ae_read_4(sc, ae, CTX_ENABLES, &val); + val &= CTX_ENABLES_IGNORE_W1C_MASK; + + if (mode == 4) + nval = val | CTX_ENABLES_INUSE_CONTEXTS; + else + nval = val & ~CTX_ENABLES_INUSE_CONTEXTS; + + if (val != nval) + qat_ae_write_4(sc, ae, CTX_ENABLES, nval); +} + +static void +qat_ae_write_nn_mode(struct qat_softc *sc, u_char ae, u_char mode) +{ + uint32_t val, nval; + + qat_ae_read_4(sc, ae, CTX_ENABLES, &val); + val &= CTX_ENABLES_IGNORE_W1C_MASK; + + if (mode) + nval = val | CTX_ENABLES_NN_MODE; + else + nval = val & ~CTX_ENABLES_NN_MODE; + + if (val != nval) + qat_ae_write_4(sc, ae, CTX_ENABLES, nval); +} + +static void +qat_ae_write_lm_mode(struct qat_softc *sc, u_char ae, + enum aereg_type lm, u_char mode) +{ + uint32_t val, nval; + uint32_t bit; + + qat_ae_read_4(sc, ae, CTX_ENABLES, &val); + val &= CTX_ENABLES_IGNORE_W1C_MASK; + + switch (lm) { + case AEREG_LMEM0: + bit = CTX_ENABLES_LMADDR_0_GLOBAL; + break; + case AEREG_LMEM1: + bit = CTX_ENABLES_LMADDR_1_GLOBAL; + break; + default: 
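+		/* only AEREG_LMEM0/1 carry a global-mode bit; any other type is a caller bug */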
+ panic("invalid lmem reg type"); + break; + } + + if (mode) + nval = val | bit; + else + nval = val & ~bit; + + if (val != nval) + qat_ae_write_4(sc, ae, CTX_ENABLES, nval); +} + +static void +qat_ae_write_shared_cs_mode0(struct qat_softc *sc, u_char ae, u_char mode) +{ + uint32_t val, nval; + + qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val); + + if (mode == 1) + nval = val | AE_MISC_CONTROL_SHARE_CS; + else + nval = val & ~AE_MISC_CONTROL_SHARE_CS; + + if (val != nval) + qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nval); +} + +static void +qat_ae_write_shared_cs_mode(struct qat_softc *sc, u_char ae, u_char mode) +{ + u_char nae; + + qat_ae_get_shared_ustore_ae(ae, &nae); + + qat_ae_write_shared_cs_mode0(sc, ae, mode); + + if ((sc->sc_ae_mask & (1 << nae))) { + qat_ae_write_shared_cs_mode0(sc, nae, mode); + } +} + +static int +qat_ae_set_reload_ustore(struct qat_softc *sc, u_char ae, + u_int reload_size, int shared_mode, u_int ustore_dram_addr) +{ + uint32_t val, cs_reload; + + switch (reload_size) { + case 0: + cs_reload = 0x0; + break; + case QAT_2K: + cs_reload = 0x1; + break; + case QAT_4K: + cs_reload = 0x2; + break; + case QAT_8K: + cs_reload = 0x3; + break; + default: + return EINVAL; + } + + if (cs_reload) + QAT_AE(sc, ae).qae_ustore_dram_addr = ustore_dram_addr; + + QAT_AE(sc, ae).qae_reload_size = reload_size; + + qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val); + val &= ~(AE_MISC_CONTROL_ONE_CTX_RELOAD | + AE_MISC_CONTROL_CS_RELOAD | AE_MISC_CONTROL_SHARE_CS); + val |= __SHIFTIN(cs_reload, AE_MISC_CONTROL_CS_RELOAD) | + __SHIFTIN(shared_mode, AE_MISC_CONTROL_ONE_CTX_RELOAD); + qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val); + + return 0; +} + +static enum qat_ae_status +qat_ae_get_status(struct qat_softc *sc, u_char ae) +{ + int error; + uint32_t val = 0; + + error = qat_ae_read_4(sc, ae, CTX_ENABLES, &val); + if (error || val & CTX_ENABLES_ENABLE) + return QAT_AE_ENABLED; + + qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val); + if (val & ACTIVE_CTX_STATUS_ABO) + return QAT_AE_ACTIVE; + + return QAT_AE_DISABLED; +} + + +static int +qat_ae_is_active(struct qat_softc *sc, u_char ae) +{ + uint32_t val; + + if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED) + return 1; + + qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &val); + if (val & ACTIVE_CTX_STATUS_ABO) + return 1; + else + return 0; +} + +/* returns 1 if actually waited for specified number of cycles */ +static int +qat_ae_wait_num_cycles(struct qat_softc *sc, u_char ae, int cycles, int check) +{ + uint32_t cnt, actx; + int pcnt, ccnt, elapsed, times; + + qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt); + pcnt = cnt & 0xffff; + + times = TIMEOUT_AE_CHECK; + do { + qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt); + ccnt = cnt & 0xffff; + + elapsed = ccnt - pcnt; + if (elapsed == 0) { + times--; + } + if (times <= 0) { + device_printf(sc->sc_dev, + "qat_ae_wait_num_cycles timeout\n"); + return -1; + } + + if (elapsed < 0) + elapsed += 0x10000; + + if (elapsed >= CYCLES_FROM_READY2EXE && check) { + if (qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, + &actx) == 0) { + if ((actx & ACTIVE_CTX_STATUS_ABO) == 0) + return 0; + } + } + } while (cycles > elapsed); + + if (check && qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &actx) == 0) { + if ((actx & ACTIVE_CTX_STATUS_ABO) == 0) + return 0; + } + + return 1; +} + +int +qat_ae_init(struct qat_softc *sc) +{ + int error; + uint32_t mask, val = 0; + u_char ae; + + /* XXX adf_initSysMemInfo */ + + /* XXX Disable clock gating for some chip if debug mode */ + + for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { + 
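/* sc_ae_mask has one bit per engine; clear bits are AEs not present on this device */ +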
struct qat_ae *qae = &sc->sc_ae[ae]; + if (!(mask & 1)) + continue; + + qae->qae_ustore_size = USTORE_SIZE; + + qae->qae_free_addr = 0; + qae->qae_free_size = USTORE_SIZE; + qae->qae_live_ctx_mask = AE_ALL_CTX; + qae->qae_ustore_dram_addr = 0; + qae->qae_reload_size = 0; + } + + /* XXX Enable attention interrupt */ + + error = qat_ae_clear_reset(sc); + if (error) + return error; + + qat_ae_clear_xfer(sc); + + if (!sc->sc_hw.qhw_fw_auth) { + error = qat_ae_clear_gprs(sc); + if (error) + return error; + } + + /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ + for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { + if (!(mask & 1)) + continue; + qat_ae_read_4(sc, ae, SIGNATURE_ENABLE, &val); + val |= 0x1; + qat_ae_write_4(sc, ae, SIGNATURE_ENABLE, val); + } + + error = qat_ae_clear_reset(sc); + if (error) + return error; + + /* XXX XXX XXX Clean MMP memory if mem scrub is supported */ + /* halMem_ScrubMMPMemory */ + + return 0; +} + +int +qat_ae_start(struct qat_softc *sc) +{ + int error; + u_char ae; + + for (ae = 0; ae < sc->sc_ae_num; ae++) { + if ((sc->sc_ae_mask & (1 << ae)) == 0) + continue; + + error = qat_aefw_start(sc, ae, 0xff); + if (error) + return error; + } + + return 0; +} + +void +qat_ae_cluster_intr(void *arg) +{ + /* Nothing to implement until we support SRIOV. */ + printf("qat_ae_cluster_intr\n"); +} + +static int +qat_ae_clear_reset(struct qat_softc *sc) +{ + int error; + uint32_t times, reset, clock, reg, mask; + u_char ae; + + reset = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET); + reset &= ~(__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK)); + reset &= ~(__SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK)); + times = TIMEOUT_AE_RESET; + do { + qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_RESET, reset); + if ((times--) == 0) { + device_printf(sc->sc_dev, "couldn't reset AEs\n"); + return EBUSY; + } + reg = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_RESET); + } while ((__SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_RESET_AE_MASK) | + __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_RESET_ACCEL_MASK)) + & reg); + + /* Enable clock for AE and QAT */ + clock = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_CLK_EN); + clock |= __SHIFTIN(sc->sc_ae_mask, CAP_GLOBAL_CTL_CLK_EN_AE_MASK); + clock |= __SHIFTIN(sc->sc_accel_mask, CAP_GLOBAL_CTL_CLK_EN_ACCEL_MASK); + qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_CLK_EN, clock); + + error = qat_ae_check(sc); + if (error) + return error; + + /* + * Set undefined power-up/reset states to reasonable default values... 
+ * just to make sure we're starting from a known point
+ */
+	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+		if (!(mask & 1))
+			continue;
+
+		/* init the ctx_enable */
+		qat_ae_write_4(sc, ae, CTX_ENABLES,
+		    CTX_ENABLES_INIT);
+
+		/* initialize the PCs */
+		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+		    CTX_STS_INDIRECT,
+		    UPC_MASK & CTX_STS_INDIRECT_UPC_INIT);
+
+		/* init the ctx_arb */
+		qat_ae_write_4(sc, ae, CTX_ARB_CNTL,
+		    CTX_ARB_CNTL_INIT);
+
+		/* enable cc */
+		qat_ae_write_4(sc, ae, CC_ENABLE,
+		    CC_ENABLE_INIT);
+		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+		    CTX_WAKEUP_EVENTS_INDIRECT,
+		    CTX_WAKEUP_EVENTS_INDIRECT_INIT);
+		qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX,
+		    CTX_SIG_EVENTS_INDIRECT,
+		    CTX_SIG_EVENTS_INDIRECT_INIT);
+	}
+
+	if ((sc->sc_ae_mask != 0) &&
+	    sc->sc_flags & QAT_FLAG_ESRAM_ENABLE_AUTO_INIT) {
+		/* XXX XXX XXX init eSram only when this is boot time */
+	}
+
+	if ((sc->sc_ae_mask != 0) &&
+	    sc->sc_flags & QAT_FLAG_SHRAM_WAIT_READY) {
+		/* XXX XXX XXX wait for shram to complete initialization */
+	}
+
+	qat_ae_reset_timestamp(sc);
+
+	return 0;
+}
+
+static int
+qat_ae_check(struct qat_softc *sc)
+{
+	int error, times, ae;
+	uint32_t cnt, pcnt, mask;
+
+	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+		if (!(mask & 1))
+			continue;
+
+		times = TIMEOUT_AE_CHECK;
+		error = qat_ae_read_4(sc, ae, PROFILE_COUNT, &cnt);
+		if (error) {
+			device_printf(sc->sc_dev,
+			    "couldn't access AE %d CSR\n", ae);
+			return error;
+		}
+		pcnt = cnt & 0xffff;
+
+		while (1) {
+			error = qat_ae_read_4(sc, ae,
+			    PROFILE_COUNT, &cnt);
+			if (error) {
+				device_printf(sc->sc_dev,
+				    "couldn't access AE %d CSR\n", ae);
+				return error;
+			}
+			cnt &= 0xffff;
+			if (cnt == pcnt)
+				times--;
+			else
+				break;
+			if (times <= 0) {
+				device_printf(sc->sc_dev,
+				    "AE %d CSR is useless\n", ae);
+				return EFAULT;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int
+qat_ae_reset_timestamp(struct qat_softc *sc)
+{
+	uint32_t misc, mask;
+	u_char ae;
+
+	/* stop the timestamp timers */
+	misc = qat_cap_global_read_4(sc, CAP_GLOBAL_CTL_MISC);
+	if (misc & CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN) {
+		qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
+		    misc & (~CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN));
+	}
+
+	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+		if (!(mask & 1))
+			continue;
+		qat_ae_write_4(sc, ae, TIMESTAMP_LOW, 0);
+		qat_ae_write_4(sc, ae, TIMESTAMP_HIGH, 0);
+	}
+
+	/* start timestamp timers */
+	qat_cap_global_write_4(sc, CAP_GLOBAL_CTL_MISC,
+	    misc | CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN);
+
+	return 0;
+}
+
+static void
+qat_ae_clear_xfer(struct qat_softc *sc)
+{
+	u_int mask, reg;
+	u_char ae;
+
+	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+		if (!(mask & 1))
+			continue;
+
+		for (reg = 0; reg < MAX_GPR_REG; reg++) {
+			qat_aereg_abs_data_write(sc, ae, AEREG_SR_RD_ABS,
+			    reg, 0);
+			qat_aereg_abs_data_write(sc, ae, AEREG_DR_RD_ABS,
+			    reg, 0);
+		}
+	}
+}
+
+static int
+qat_ae_clear_gprs(struct qat_softc *sc)
+{
+	uint32_t val;
+	uint32_t saved_ctx = 0;
+	int times = TIMEOUT_AE_CHECK, rv;
+	u_char ae;
+	u_int mask;
+
+	for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
+		if (!(mask & 1))
+			continue;
+
+		/* turn off shared control store bit */
+		qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &val);
+		val &= ~AE_MISC_CONTROL_SHARE_CS;
+		qat_ae_write_4(sc, ae, AE_MISC_CONTROL, val);
+
+		/* turn off ucode parity */
+		/* make sure nn_mode is set to self */
+		qat_ae_read_4(sc, ae, CTX_ENABLES, &val);
+		val &= CTX_ENABLES_IGNORE_W1C_MASK;
+		val |= CTX_ENABLES_NN_MODE;
+		val &=
~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE; + qat_ae_write_4(sc, ae, CTX_ENABLES, val); + + /* copy instructions to ustore */ + qat_ae_ucode_write(sc, ae, 0, nitems(ae_clear_gprs_inst), + ae_clear_gprs_inst); + + /* set PC */ + qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_STS_INDIRECT, + UPC_MASK & CTX_STS_INDIRECT_UPC_INIT); + + /* save current context */ + qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &saved_ctx); + /* change the active context */ + /* start the context from ctx 0 */ + qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, 0); + + /* wakeup-event voluntary */ + qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, + CTX_WAKEUP_EVENTS_INDIRECT, + CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY); + /* clean signals */ + qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, + CTX_SIG_EVENTS_INDIRECT, 0); + qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0); + + qat_ae_enable_ctx(sc, ae, AE_ALL_CTX); + } + + for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { + if (!(mask & 1)) + continue; + /* wait for AE to finish */ + do { + rv = qat_ae_wait_num_cycles(sc, ae, AE_EXEC_CYCLE, 1); + } while (rv && times--); + if (times <= 0) { + device_printf(sc->sc_dev, + "qat_ae_clear_gprs timeout"); + return ETIMEDOUT; + } + qat_ae_disable_ctx(sc, ae, AE_ALL_CTX); + /* change the active context */ + qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS, + saved_ctx & ACTIVE_CTX_STATUS_ACNO); + /* init the ctx_enable */ + qat_ae_write_4(sc, ae, CTX_ENABLES, CTX_ENABLES_INIT); + /* initialize the PCs */ + qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, + CTX_STS_INDIRECT, UPC_MASK & CTX_STS_INDIRECT_UPC_INIT); + /* init the ctx_arb */ + qat_ae_write_4(sc, ae, CTX_ARB_CNTL, CTX_ARB_CNTL_INIT); + /* enable cc */ + qat_ae_write_4(sc, ae, CC_ENABLE, CC_ENABLE_INIT); + qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, + CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_INIT); + qat_ae_ctx_indr_write(sc, ae, AE_ALL_CTX, CTX_SIG_EVENTS_INDIRECT, + CTX_SIG_EVENTS_INDIRECT_INIT); + } + + return 0; +} + +static void +qat_ae_get_shared_ustore_ae(u_char ae, u_char *nae) +{ + if (ae & 0x1) + *nae = ae - 1; + else + *nae = ae + 1; +} + +static u_int +qat_ae_ucode_parity64(uint64_t ucode) +{ + + ucode ^= ucode >> 1; + ucode ^= ucode >> 2; + ucode ^= ucode >> 4; + ucode ^= ucode >> 8; + ucode ^= ucode >> 16; + ucode ^= ucode >> 32; + + return ((u_int)(ucode & 1)); +} + +static uint64_t +qat_ae_ucode_set_ecc(uint64_t ucode) +{ + static const uint64_t + bit0mask=0xff800007fffULL, bit1mask=0x1f801ff801fULL, + bit2mask=0xe387e0781e1ULL, bit3mask=0x7cb8e388e22ULL, + bit4mask=0xaf5b2c93244ULL, bit5mask=0xf56d5525488ULL, + bit6mask=0xdaf69a46910ULL; + + /* clear the ecc bits */ + ucode &= ~(0x7fULL << USTORE_ECC_BIT_0); + + ucode |= (uint64_t)qat_ae_ucode_parity64(bit0mask & ucode) << + USTORE_ECC_BIT_0; + ucode |= (uint64_t)qat_ae_ucode_parity64(bit1mask & ucode) << + USTORE_ECC_BIT_1; + ucode |= (uint64_t)qat_ae_ucode_parity64(bit2mask & ucode) << + USTORE_ECC_BIT_2; + ucode |= (uint64_t)qat_ae_ucode_parity64(bit3mask & ucode) << + USTORE_ECC_BIT_3; + ucode |= (uint64_t)qat_ae_ucode_parity64(bit4mask & ucode) << + USTORE_ECC_BIT_4; + ucode |= (uint64_t)qat_ae_ucode_parity64(bit5mask & ucode) << + USTORE_ECC_BIT_5; + ucode |= (uint64_t)qat_ae_ucode_parity64(bit6mask & ucode) << + USTORE_ECC_BIT_6; + + return (ucode); +} + +static int +qat_ae_ucode_write(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst, + const uint64_t *ucode) +{ + uint64_t tmp; + uint32_t ustore_addr, ulo, uhi; + int i; + + qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr); + uaddr |= 
USTORE_ADDRESS_ECS;
+
+	qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
+	for (i = 0; i < ninst; i++) {
+		tmp = qat_ae_ucode_set_ecc(ucode[i]);
+		ulo = (uint32_t)(tmp & 0xffffffff);
+		uhi = (uint32_t)(tmp >> 32);
+
+		qat_ae_write_4(sc, ae, USTORE_DATA_LOWER, ulo);
+		/* this will auto increment the address */
+		qat_ae_write_4(sc, ae, USTORE_DATA_UPPER, uhi);
+	}
+	qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
+
+	return 0;
+}
+
+static int
+qat_ae_ucode_read(struct qat_softc *sc, u_char ae, u_int uaddr, u_int ninst,
+    uint64_t *ucode)
+{
+	uint32_t misc, ustore_addr, ulo, uhi;
+	u_int ii;
+	u_char nae;
+
+	if (qat_ae_get_status(sc, ae) != QAT_AE_DISABLED)
+		return EBUSY;
+
+	/* determine whether the neighbour AE is running in shared
+	 * control store mode */
+	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
+	if (misc & AE_MISC_CONTROL_SHARE_CS) {
+		qat_ae_get_shared_ustore_ae(ae, &nae);
+		if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
+			return EBUSY;
+	}
+
+	/* if reloadable, then get it all from dram-ustore */
+	if (__SHIFTOUT(misc, AE_MISC_CONTROL_CS_RELOAD))
+		panic("notyet"); /* XXX getReloadUwords */
+
+	/* disable SHARE_CS bit to work around a silicon bug */
+	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc & 0xfffffffb);
+
+	MPASS(uaddr + ninst <= USTORE_SIZE);
+
+	/* save ustore-addr csr */
+	qat_ae_read_4(sc, ae, USTORE_ADDRESS, &ustore_addr);
+
+	uaddr |= USTORE_ADDRESS_ECS;	/* enable ecs bit */
+	for (ii = 0; ii < ninst; ii++) {
+		qat_ae_write_4(sc, ae, USTORE_ADDRESS, uaddr);
+
+		uaddr++;
+		qat_ae_read_4(sc, ae, USTORE_DATA_LOWER, &ulo);
+		qat_ae_read_4(sc, ae, USTORE_DATA_UPPER, &uhi);
+		ucode[ii] = uhi;
+		ucode[ii] = (ucode[ii] << 32) | ulo;
+	}
+
+	/* restore SHARE_CS bit to work around a silicon bug */
+	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
+	qat_ae_write_4(sc, ae, USTORE_ADDRESS, ustore_addr);
+
+	return 0;
+}
+
+static u_int
+qat_ae_concat_ucode(uint64_t *ucode, u_int ninst, u_int size, u_int addr,
+    u_int *value)
+{
+	const uint64_t *inst_arr;
+	u_int ninst0, curvalue;
+	int ii, vali, fixup, usize = 0;
+
+	if (size == 0)
+		return 0;
+
+	ninst0 = ninst;
+	vali = 0;
+	curvalue = value[vali++];
+
+	switch (size) {
+	case 0x1:
+		inst_arr = ae_inst_1b;
+		usize = nitems(ae_inst_1b);
+		break;
+	case 0x2:
+		inst_arr = ae_inst_2b;
+		usize = nitems(ae_inst_2b);
+		break;
+	case 0x3:
+		inst_arr = ae_inst_3b;
+		usize = nitems(ae_inst_3b);
+		break;
+	default:
+		inst_arr = ae_inst_4b;
+		usize = nitems(ae_inst_4b);
+		break;
+	}
+
+	fixup = ninst;
+	for (ii = 0; ii < usize; ii++)
+		ucode[ninst++] = inst_arr[ii];
+
+	INSERT_IMMED_GPRA_CONST(ucode[fixup], (addr));
+	fixup++;
+	INSERT_IMMED_GPRA_CONST(ucode[fixup], 0);
+	fixup++;
+	INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
+	fixup++;
+	INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
+	/* XXX fixup++ ? */
+
+	if (size <= 0x4)
+		return (ninst - ninst0);
+
+	size -= sizeof(u_int);
+	while (size >= sizeof(u_int)) {
+		curvalue = value[vali++];
+		fixup = ninst;
+		ucode[ninst++] = ae_inst_4b[0x2];
+		ucode[ninst++] = ae_inst_4b[0x3];
+		ucode[ninst++] = ae_inst_4b[0x8];
+		INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 16));
+		fixup++;
+		INSERT_IMMED_GPRB_CONST(ucode[fixup], (curvalue >> 0));
+		/* XXX fixup++ ?
*/
+
+		addr += sizeof(u_int);
+		size -= sizeof(u_int);
+	}
+	/* call this function recursively when fewer than 4 bytes remain */
+	ninst +=
+	    qat_ae_concat_ucode(ucode, ninst, size, addr, value + vali);
+
+	return (ninst - ninst0);
+}
+
+static int
+qat_ae_exec_ucode(struct qat_softc *sc, u_char ae, u_char ctx,
+    uint64_t *ucode, u_int ninst, int cond_code_off, u_int max_cycles,
+    u_int *endpc)
+{
+	int error = 0, share_cs = 0;
+	uint64_t savucode[MAX_EXEC_INST];
+	uint32_t indr_lm_addr_0, indr_lm_addr_1;
+	uint32_t indr_lm_addr_byte_0, indr_lm_addr_byte_1;
+	uint32_t indr_future_cnt_sig;
+	uint32_t indr_sig, active_sig;
+	uint32_t wakeup_ev, savpc, savcc, savctx, ctxarbctl;
+	uint32_t misc, nmisc, ctxen;
+	u_char nae;
+
+	MPASS(ninst <= USTORE_SIZE);
+
+	if (qat_ae_is_active(sc, ae))
+		return EBUSY;
+
+	/* save current LM addr */
+	qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_0_INDIRECT, &indr_lm_addr_0);
+	qat_ae_ctx_indr_read(sc, ae, ctx, LM_ADDR_1_INDIRECT, &indr_lm_addr_1);
+	qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
+	    &indr_lm_addr_byte_0);
+	qat_ae_ctx_indr_read(sc, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
+	    &indr_lm_addr_byte_1);
+
+	/* back up the shared control store bit, and force the AE into
+	   non-shared mode before executing the ucode snippet */
+	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
+	if (misc & AE_MISC_CONTROL_SHARE_CS) {
+		share_cs = 1;
+		qat_ae_get_shared_ustore_ae(ae, &nae);
+		if ((sc->sc_ae_mask & (1 << nae)) && qat_ae_is_active(sc, nae))
+			return EBUSY;
+	}
+	nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
+	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
+
+	/* save current states: */
+	if (ninst <= MAX_EXEC_INST) {
+		error = qat_ae_ucode_read(sc, ae, 0, ninst, savucode);
+		if (error) {
+			qat_ae_write_4(sc, ae, AE_MISC_CONTROL, misc);
+			return error;
+		}
+	}
+
+	/* save wakeup-events */
+	qat_ae_ctx_indr_read(sc, ae, ctx, CTX_WAKEUP_EVENTS_INDIRECT,
+	    &wakeup_ev);
+	/* save PC */
+	qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT, &savpc);
+	savpc &= UPC_MASK;
+
+	/* save ctx enables */
+	qat_ae_read_4(sc, ae, CTX_ENABLES, &ctxen);
+	ctxen &= CTX_ENABLES_IGNORE_W1C_MASK;
+	/* save conditional-code */
+	qat_ae_read_4(sc, ae, CC_ENABLE, &savcc);
+	/* save current context */
+	qat_ae_read_4(sc, ae, ACTIVE_CTX_STATUS, &savctx);
+	qat_ae_read_4(sc, ae, CTX_ARB_CNTL, &ctxarbctl);
+
+	/* save indirect csrs */
+	qat_ae_ctx_indr_read(sc, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
+	    &indr_future_cnt_sig);
+	qat_ae_ctx_indr_read(sc, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &indr_sig);
+	qat_ae_read_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, &active_sig);
+
+	/* turn off ucode parity */
+	qat_ae_write_4(sc, ae, CTX_ENABLES,
+	    ctxen & ~CTX_ENABLES_CNTL_STORE_PARITY_ENABLE);
+
+	/* copy instructions to ustore */
+	qat_ae_ucode_write(sc, ae, 0, ninst, ucode);
+	/* set PC */
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, 0);
+	/* change the active context */
+	qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
+	    ctx & ACTIVE_CTX_STATUS_ACNO);
+
+	if (cond_code_off) {
+		/* disable conditional-code */
+		qat_ae_write_4(sc, ae, CC_ENABLE, savcc & 0xffffdfff);
+	}
+
+	/* wakeup-event voluntary */
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx,
+	    CTX_WAKEUP_EVENTS_INDIRECT, CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY);
+
+	/* clean signals */
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT, 0);
+	qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+
+	/* enable context */
+	qat_ae_enable_ctx(sc, ae, 1 << ctx);
+
+	/* wait for it to finish */
+	if (qat_ae_wait_num_cycles(sc, ae, max_cycles, 1) !=
0)
+		error = ETIMEDOUT;
+
+	/* see if we need to get the current PC */
+	if (endpc != NULL) {
+		uint32_t ctx_status;
+
+		qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
+		    &ctx_status);
+		*endpc = ctx_status & UPC_MASK;
+	}
+#if 0
+	{
+		uint32_t ctx_status;
+
+		qat_ae_ctx_indr_read(sc, ae, ctx, CTX_STS_INDIRECT,
+		    &ctx_status);
+		printf("%s: endpc 0x%08x\n", __func__,
+		    ctx_status & UPC_MASK);
+	}
+#endif
+
+	/* restore previous states: */
+	/* disable context */
+	qat_ae_disable_ctx(sc, ae, 1 << ctx);
+	if (ninst <= MAX_EXEC_INST) {
+		/* instructions */
+		qat_ae_ucode_write(sc, ae, 0, ninst, savucode);
+	}
+	/* wakeup-events */
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_WAKEUP_EVENTS_INDIRECT,
+	    wakeup_ev);
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_STS_INDIRECT, savpc);
+
+	/* only restore the shared control store bit; other bits might
+	   have been changed by the AE code snippet */
+	qat_ae_read_4(sc, ae, AE_MISC_CONTROL, &misc);
+	if (share_cs)
+		nmisc = misc | AE_MISC_CONTROL_SHARE_CS;
+	else
+		nmisc = misc & ~AE_MISC_CONTROL_SHARE_CS;
+	qat_ae_write_4(sc, ae, AE_MISC_CONTROL, nmisc);
+	/* conditional-code */
+	qat_ae_write_4(sc, ae, CC_ENABLE, savcc);
+	/* change the active context */
+	qat_ae_write_4(sc, ae, ACTIVE_CTX_STATUS,
+	    savctx & ACTIVE_CTX_STATUS_ACNO);
+	/* restore the next ctx to run */
+	qat_ae_write_4(sc, ae, CTX_ARB_CNTL, ctxarbctl);
+	/* restore current LM addr */
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_0_INDIRECT,
+	    indr_lm_addr_0);
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, LM_ADDR_1_INDIRECT,
+	    indr_lm_addr_1);
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
+	    indr_lm_addr_byte_0);
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
+	    indr_lm_addr_byte_1);
+
+	/* restore indirect csrs */
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
+	    indr_future_cnt_sig);
+	qat_ae_ctx_indr_write(sc, ae, 1 << ctx, CTX_SIG_EVENTS_INDIRECT,
+	    indr_sig);
+	qat_ae_write_4(sc, ae, CTX_SIG_EVENTS_ACTIVE, active_sig);
+
+	/* ctx-enables */
+	qat_ae_write_4(sc, ae, CTX_ENABLES, ctxen);
+
+	return error;
+}
+
+static int
+qat_ae_exec_ucode_init_lm(struct qat_softc *sc, u_char ae, u_char ctx,
+    int *first_exec, uint64_t *ucode, u_int ninst,
+    u_int *gpr_a0, u_int *gpr_a1, u_int *gpr_a2, u_int *gpr_b0, u_int *gpr_b1)
+{
+
+	if (*first_exec) {
+		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
+		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
+		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
+		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
+		qat_aereg_rel_data_read(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);
+		*first_exec = 0;
+	}
+
+	return qat_ae_exec_ucode(sc, ae, ctx, ucode, ninst, 1, ninst * 5, NULL);
+}
+
+static int
+qat_ae_restore_init_lm_gprs(struct qat_softc *sc, u_char ae, u_char ctx,
+    u_int gpr_a0, u_int gpr_a1, u_int gpr_a2, u_int gpr_b0, u_int gpr_b1)
+{
+	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 0, gpr_a0);
+	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 1, gpr_a1);
+	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPA_REL, 2, gpr_a2);
+	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 0, gpr_b0);
+	qat_aereg_rel_data_write(sc, ae, ctx, AEREG_GPB_REL, 1, gpr_b1);
+
+	return 0;
+}
+
+static int
+qat_ae_get_inst_num(int lmsize)
+{
+	int ninst, left;
+
+	if (lmsize == 0)
+		return 0;
+
+	left = lmsize % sizeof(u_int);
+
+	if (left) {
+		ninst = nitems(ae_inst_1b) +
+		    qat_ae_get_inst_num(lmsize - left);
+	} else {
+		/* for each remaining word, a block of 3 instructions
is needed for further code */ + ninst = (lmsize - sizeof(u_int)) * 3 / 4 + nitems(ae_inst_4b); + } + + return (ninst); +} + +static int +qat_ae_batch_put_lm(struct qat_softc *sc, u_char ae, + struct qat_ae_batch_init_list *qabi_list, size_t nqabi) +{ + struct qat_ae_batch_init *qabi; + size_t alloc_ninst, ninst; + uint64_t *ucode; + u_int gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1; + int insnsz, error = 0, execed = 0, first_exec = 1; + + if (STAILQ_FIRST(qabi_list) == NULL) + return 0; + + alloc_ninst = min(USTORE_SIZE, nqabi); + ucode = qat_alloc_mem(sizeof(uint64_t) * alloc_ninst); + + ninst = 0; + STAILQ_FOREACH(qabi, qabi_list, qabi_next) { + insnsz = qat_ae_get_inst_num(qabi->qabi_size); + if (insnsz + ninst > alloc_ninst) { + /* add ctx_arb[kill] */ + ucode[ninst++] = 0x0E000010000ull; + execed = 1; + + error = qat_ae_exec_ucode_init_lm(sc, ae, 0, + &first_exec, ucode, ninst, + &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1); + if (error) { + qat_ae_restore_init_lm_gprs(sc, ae, 0, + gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1); + qat_free_mem(ucode); + return error; + } + /* run microExec to execute the microcode */ + ninst = 0; + } + ninst += qat_ae_concat_ucode(ucode, ninst, + qabi->qabi_size, qabi->qabi_addr, qabi->qabi_value); + } + + if (ninst > 0) { + ucode[ninst++] = 0x0E000010000ull; + execed = 1; + + error = qat_ae_exec_ucode_init_lm(sc, ae, 0, + &first_exec, ucode, ninst, + &gpr_a0, &gpr_a1, &gpr_a2, &gpr_b0, &gpr_b1); + } + if (execed) { + qat_ae_restore_init_lm_gprs(sc, ae, 0, + gpr_a0, gpr_a1, gpr_a2, gpr_b0, gpr_b1); + } + + qat_free_mem(ucode); + + return error; +} + +static int +qat_ae_write_pc(struct qat_softc *sc, u_char ae, u_int ctx_mask, u_int upc) +{ + + if (qat_ae_is_active(sc, ae)) + return EBUSY; + + qat_ae_ctx_indr_write(sc, ae, ctx_mask, CTX_STS_INDIRECT, + UPC_MASK & upc); + return 0; +} + +static inline u_int +qat_aefw_csum_calc(u_int reg, int ch) +{ + int i; + u_int topbit = CRC_BITMASK(CRC_WIDTH - 1); + u_int inbyte = (u_int)((reg >> 0x18) ^ ch); + + reg ^= inbyte << (CRC_WIDTH - 0x8); + for (i = 0; i < 0x8; i++) { + if (reg & topbit) + reg = (reg << 1) ^ CRC_POLY; + else + reg <<= 1; + } + + return (reg & CRC_WIDTHMASK(CRC_WIDTH)); +} + +static u_int +qat_aefw_csum(char *buf, int size) +{ + u_int csum = 0; + + while (size--) { + csum = qat_aefw_csum_calc(csum, *buf++); + } + + return csum; +} + +static const char * +qat_aefw_uof_string(struct qat_softc *sc, size_t offset) +{ + if (offset >= sc->sc_aefw_uof.qafu_str_tab_size) + return NULL; + if (sc->sc_aefw_uof.qafu_str_tab == NULL) + return NULL; + + return (const char *)((uintptr_t)sc->sc_aefw_uof.qafu_str_tab + offset); +} + +static struct uof_chunk_hdr * +qat_aefw_uof_find_chunk(struct qat_softc *sc, + const char *id, struct uof_chunk_hdr *cur) +{ + struct uof_obj_hdr *uoh = sc->sc_aefw_uof.qafu_obj_hdr; + struct uof_chunk_hdr *uch; + int i; + + uch = (struct uof_chunk_hdr *)(uoh + 1); + for (i = 0; i < uoh->uoh_num_chunks; i++, uch++) { + if (uch->uch_offset + uch->uch_size > sc->sc_aefw_uof.qafu_size) + return NULL; + + if (cur < uch && !strncmp(uch->uch_id, id, UOF_OBJ_ID_LEN)) + return uch; + } + + return NULL; +} + +static int +qat_aefw_load_mof(struct qat_softc *sc) +{ + const struct firmware *fw; + + fw = firmware_get(sc->sc_hw.qhw_mof_fwname); + if (fw == NULL) { + device_printf(sc->sc_dev, "couldn't load MOF firmware %s\n", + sc->sc_hw.qhw_mof_fwname); + return ENXIO; + } + + sc->sc_fw_mof = qat_alloc_mem(fw->datasize); + sc->sc_fw_mof_size = fw->datasize; + memcpy(sc->sc_fw_mof, fw->data, 
fw->datasize);
+	firmware_put(fw, FIRMWARE_UNLOAD);
+	return 0;
+}
+
+static void
+qat_aefw_unload_mof(struct qat_softc *sc)
+{
+	if (sc->sc_fw_mof != NULL) {
+		qat_free_mem(sc->sc_fw_mof);
+		sc->sc_fw_mof = NULL;
+	}
+}
+
+static int
+qat_aefw_load_mmp(struct qat_softc *sc)
+{
+	const struct firmware *fw;
+
+	fw = firmware_get(sc->sc_hw.qhw_mmp_fwname);
+	if (fw == NULL) {
+		device_printf(sc->sc_dev, "couldn't load MMP firmware %s\n",
+		    sc->sc_hw.qhw_mmp_fwname);
+		return ENXIO;
+	}
+
+	sc->sc_fw_mmp = qat_alloc_mem(fw->datasize);
+	sc->sc_fw_mmp_size = fw->datasize;
+	memcpy(sc->sc_fw_mmp, fw->data, fw->datasize);
+	firmware_put(fw, FIRMWARE_UNLOAD);
+	return 0;
+}
+
+static void
+qat_aefw_unload_mmp(struct qat_softc *sc)
+{
+	if (sc->sc_fw_mmp != NULL) {
+		qat_free_mem(sc->sc_fw_mmp);
+		sc->sc_fw_mmp = NULL;
+	}
+}
+
+static int
+qat_aefw_mof_find_uof0(struct qat_softc *sc,
+    struct mof_uof_hdr *muh, struct mof_uof_chunk_hdr *head,
+    u_int nchunk, size_t size, const char *id,
+    size_t *fwsize, void **fwptr)
+{
+	int i;
+	char *uof_name;
+
+	for (i = 0; i < nchunk; i++) {
+		struct mof_uof_chunk_hdr *much = &head[i];
+
+		if (strncmp(much->much_id, id, MOF_OBJ_ID_LEN))
+			return EINVAL;
+
+		if (much->much_offset + much->much_size > size)
+			return EINVAL;
+
+		if (sc->sc_mof.qmf_sym_size <= much->much_name)
+			return EINVAL;
+
+		uof_name = (char *)((uintptr_t)sc->sc_mof.qmf_sym +
+		    much->much_name);
+
+		if (!strcmp(uof_name, sc->sc_fw_uof_name)) {
+			*fwptr = (void *)((uintptr_t)muh +
+			    (uintptr_t)much->much_offset);
+			*fwsize = (size_t)much->much_size;
+			return 0;
+		}
+	}
+
+	return ENOENT;
+}
+
+static int
+qat_aefw_mof_find_uof(struct qat_softc *sc)
+{
+	struct mof_uof_hdr *uof_hdr, *suof_hdr;
+	u_int nuof_chunks = 0, nsuof_chunks = 0;
+	int error;
+
+	uof_hdr = sc->sc_mof.qmf_uof_objs;
+	suof_hdr = sc->sc_mof.qmf_suof_objs;
+
+	if (uof_hdr != NULL) {
+		if (uof_hdr->muh_max_chunks < uof_hdr->muh_num_chunks) {
+			return EINVAL;
+		}
+		nuof_chunks = uof_hdr->muh_num_chunks;
+	}
+	if (suof_hdr != NULL) {
+		if (suof_hdr->muh_max_chunks < suof_hdr->muh_num_chunks)
+			return EINVAL;
+		nsuof_chunks = suof_hdr->muh_num_chunks;
+	}
+
+	if (nuof_chunks + nsuof_chunks == 0)
+		return EINVAL;
+
+	if (uof_hdr != NULL) {
+		error = qat_aefw_mof_find_uof0(sc, uof_hdr,
+		    (struct mof_uof_chunk_hdr *)(uof_hdr + 1), nuof_chunks,
+		    sc->sc_mof.qmf_uof_objs_size, UOF_IMAG,
+		    &sc->sc_fw_uof_size, &sc->sc_fw_uof);
+		if (error && error != ENOENT)
+			return error;
+	}
+
+	if (suof_hdr != NULL) {
+		error = qat_aefw_mof_find_uof0(sc, suof_hdr,
+		    (struct mof_uof_chunk_hdr *)(suof_hdr + 1), nsuof_chunks,
+		    sc->sc_mof.qmf_suof_objs_size, SUOF_IMAG,
+		    &sc->sc_fw_suof_size, &sc->sc_fw_suof);
+		if (error && error != ENOENT)
+			return error;
+	}
+
+	if (sc->sc_fw_uof == NULL && sc->sc_fw_suof == NULL)
+		return ENOENT;
+
+	return 0;
+}
+
+static int
+qat_aefw_mof_parse(struct qat_softc *sc)
+{
+	const struct mof_file_hdr *mfh;
+	const struct mof_file_chunk_hdr *mfch;
+	size_t size;
+	u_int csum;
+	int error, i;
+
+	size = sc->sc_fw_mof_size;
+
+	if (size < sizeof(struct mof_file_hdr))
+		return EINVAL;
+	size -= sizeof(struct mof_file_hdr);
+
+	mfh = sc->sc_fw_mof;
+
+	if (mfh->mfh_fid != MOF_FID)
+		return EINVAL;
+
+	csum = qat_aefw_csum((char *)((uintptr_t)sc->sc_fw_mof +
+	    offsetof(struct mof_file_hdr, mfh_min_ver)),
+	    sc->sc_fw_mof_size -
+	    offsetof(struct mof_file_hdr, mfh_min_ver));
+	if (mfh->mfh_csum != csum)
+		return EINVAL;
+
+	if (mfh->mfh_min_ver != MOF_MIN_VER ||
+	    mfh->mfh_maj_ver != MOF_MAJ_VER)
+		return EINVAL;
+
+	if
(mfh->mfh_max_chunks < mfh->mfh_num_chunks) + return EINVAL; + + if (size < sizeof(struct mof_file_chunk_hdr) * mfh->mfh_num_chunks) + return EINVAL; + mfch = (const struct mof_file_chunk_hdr *)(mfh + 1); + + for (i = 0; i < mfh->mfh_num_chunks; i++, mfch++) { + if (mfch->mfch_offset + mfch->mfch_size > sc->sc_fw_mof_size) + return EINVAL; + + if (!strncmp(mfch->mfch_id, SYM_OBJS, MOF_OBJ_ID_LEN)) { + if (sc->sc_mof.qmf_sym != NULL) + return EINVAL; + + sc->sc_mof.qmf_sym = + (void *)((uintptr_t)sc->sc_fw_mof + + (uintptr_t)mfch->mfch_offset + sizeof(u_int)); + sc->sc_mof.qmf_sym_size = + *(u_int *)((uintptr_t)sc->sc_fw_mof + + (uintptr_t)mfch->mfch_offset); + + if (sc->sc_mof.qmf_sym_size % sizeof(u_int) != 0) + return EINVAL; + if (mfch->mfch_size != sc->sc_mof.qmf_sym_size + + sizeof(u_int) || mfch->mfch_size == 0) + return EINVAL; + if (*(char *)((uintptr_t)sc->sc_mof.qmf_sym + + sc->sc_mof.qmf_sym_size - 1) != '\0') + return EINVAL; + + } else if (!strncmp(mfch->mfch_id, UOF_OBJS, MOF_OBJ_ID_LEN)) { + if (sc->sc_mof.qmf_uof_objs != NULL) + return EINVAL; + + sc->sc_mof.qmf_uof_objs = + (void *)((uintptr_t)sc->sc_fw_mof + + (uintptr_t)mfch->mfch_offset); + sc->sc_mof.qmf_uof_objs_size = mfch->mfch_size; + + } else if (!strncmp(mfch->mfch_id, SUOF_OBJS, MOF_OBJ_ID_LEN)) { + if (sc->sc_mof.qmf_suof_objs != NULL) + return EINVAL; + + sc->sc_mof.qmf_suof_objs = + (void *)((uintptr_t)sc->sc_fw_mof + + (uintptr_t)mfch->mfch_offset); + sc->sc_mof.qmf_suof_objs_size = mfch->mfch_size; + } + } + + if (sc->sc_mof.qmf_sym == NULL || + (sc->sc_mof.qmf_uof_objs == NULL && + sc->sc_mof.qmf_suof_objs == NULL)) + return EINVAL; + + error = qat_aefw_mof_find_uof(sc); + if (error) + return error; + return 0; +} + +static int +qat_aefw_uof_parse_image(struct qat_softc *sc, + struct qat_uof_image *qui, struct uof_chunk_hdr *uch) +{ + struct uof_image *image; + struct uof_code_page *page; + uintptr_t base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr; + size_t lim = uch->uch_offset + uch->uch_size, size; + int i, p; + + size = uch->uch_size; + if (size < sizeof(struct uof_image)) + return EINVAL; + size -= sizeof(struct uof_image); + + qui->qui_image = image = + (struct uof_image *)(base + uch->uch_offset); + +#define ASSIGN_OBJ_TAB(np, typep, type, base, off, lim) \ +do { \ + u_int nent; \ + nent = ((struct uof_obj_table *)((base) + (off)))->uot_nentries;\ + if ((lim) < off + sizeof(struct uof_obj_table) + \ + sizeof(type) * nent) \ + return EINVAL; \ + *(np) = nent; \ + if (nent > 0) \ + *(typep) = (type)((struct uof_obj_table *) \ + ((base) + (off)) + 1); \ + else \ + *(typep) = NULL; \ +} while (0) + + ASSIGN_OBJ_TAB(&qui->qui_num_ae_reg, &qui->qui_ae_reg, + struct uof_ae_reg *, base, image->ui_reg_tab, lim); + ASSIGN_OBJ_TAB(&qui->qui_num_init_reg_sym, &qui->qui_init_reg_sym, + struct uof_init_reg_sym *, base, image->ui_init_reg_sym_tab, lim); + ASSIGN_OBJ_TAB(&qui->qui_num_sbreak, &qui->qui_sbreak, + struct qui_sbreak *, base, image->ui_sbreak_tab, lim); + + if (size < sizeof(struct uof_code_page) * image->ui_num_pages) + return EINVAL; + if (nitems(qui->qui_pages) < image->ui_num_pages) + return EINVAL; + + page = (struct uof_code_page *)(image + 1); + + for (p = 0; p < image->ui_num_pages; p++, page++) { + struct qat_uof_page *qup = &qui->qui_pages[p]; + struct uof_code_area *uca; + + qup->qup_page_num = page->ucp_page_num; + qup->qup_def_page = page->ucp_def_page; + qup->qup_page_region = page->ucp_page_region; + qup->qup_beg_vaddr = page->ucp_beg_vaddr; + qup->qup_beg_paddr = page->ucp_beg_paddr; + + 
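/* bind the page's fixup tables; ASSIGN_OBJ_TAB validates each table against the chunk bounds before use */ +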
ASSIGN_OBJ_TAB(&qup->qup_num_uc_var, &qup->qup_uc_var, + struct uof_uword_fixup *, base, + page->ucp_uc_var_tab, lim); + ASSIGN_OBJ_TAB(&qup->qup_num_imp_var, &qup->qup_imp_var, + struct uof_import_var *, base, + page->ucp_imp_var_tab, lim); + ASSIGN_OBJ_TAB(&qup->qup_num_imp_expr, &qup->qup_imp_expr, + struct uof_uword_fixup *, base, + page->ucp_imp_expr_tab, lim); + ASSIGN_OBJ_TAB(&qup->qup_num_neigh_reg, &qup->qup_neigh_reg, + struct uof_uword_fixup *, base, + page->ucp_neigh_reg_tab, lim); + + if (lim < page->ucp_code_area + sizeof(struct uof_code_area)) + return EINVAL; + + uca = (struct uof_code_area *)(base + page->ucp_code_area); + qup->qup_num_micro_words = uca->uca_num_micro_words; + + ASSIGN_OBJ_TAB(&qup->qup_num_uw_blocks, &qup->qup_uw_blocks, + struct qat_uof_uword_block *, base, + uca->uca_uword_block_tab, lim); + + for (i = 0; i < qup->qup_num_uw_blocks; i++) { + u_int uwordoff = ((struct uof_uword_block *)( + &qup->qup_uw_blocks[i]))->uub_uword_offset; + + if (lim < uwordoff) + return EINVAL; + + qup->qup_uw_blocks[i].quub_micro_words = + (base + uwordoff); + } + } + +#undef ASSIGN_OBJ_TAB + + return 0; +} + +static int +qat_aefw_uof_parse_images(struct qat_softc *sc) +{ + struct uof_chunk_hdr *uch = NULL; + u_int assigned_ae; + int i, error; + + for (i = 0; i < MAX_NUM_AE * MAX_AE_CTX; i++) { + uch = qat_aefw_uof_find_chunk(sc, UOF_IMAG, uch); + if (uch == NULL) + break; + + if (i >= nitems(sc->sc_aefw_uof.qafu_imgs)) + return ENOENT; + + error = qat_aefw_uof_parse_image(sc, &sc->sc_aefw_uof.qafu_imgs[i], uch); + if (error) + return error; + + sc->sc_aefw_uof.qafu_num_imgs++; + } + + assigned_ae = 0; + for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { + assigned_ae |= sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned; + } + + return 0; +} + +static int +qat_aefw_uof_parse(struct qat_softc *sc) +{ + struct uof_file_hdr *ufh; + struct uof_file_chunk_hdr *ufch; + struct uof_obj_hdr *uoh; + struct uof_chunk_hdr *uch; + void *uof = NULL; + size_t size, uof_size, hdr_size; + uintptr_t base; + u_int csum; + int i; + + size = sc->sc_fw_uof_size; + if (size < MIN_UOF_SIZE) + return EINVAL; + size -= sizeof(struct uof_file_hdr); + + ufh = sc->sc_fw_uof; + + if (ufh->ufh_id != UOF_FID) + return EINVAL; + if (ufh->ufh_min_ver != UOF_MIN_VER || ufh->ufh_maj_ver != UOF_MAJ_VER) + return EINVAL; + + if (ufh->ufh_max_chunks < ufh->ufh_num_chunks) + return EINVAL; + if (size < sizeof(struct uof_file_chunk_hdr) * ufh->ufh_num_chunks) + return EINVAL; + ufch = (struct uof_file_chunk_hdr *)(ufh + 1); + + uof_size = 0; + for (i = 0; i < ufh->ufh_num_chunks; i++, ufch++) { + if (ufch->ufch_offset + ufch->ufch_size > sc->sc_fw_uof_size) + return EINVAL; + + if (!strncmp(ufch->ufch_id, UOF_OBJS, UOF_OBJ_ID_LEN)) { + if (uof != NULL) + return EINVAL; + + uof = + (void *)((uintptr_t)sc->sc_fw_uof + + ufch->ufch_offset); + uof_size = ufch->ufch_size; + + csum = qat_aefw_csum(uof, uof_size); + if (csum != ufch->ufch_csum) + return EINVAL; + } + } + + if (uof == NULL) + return ENOENT; + + size = uof_size; + if (size < sizeof(struct uof_obj_hdr)) + return EINVAL; + size -= sizeof(struct uof_obj_hdr); + + uoh = uof; + + if (size < sizeof(struct uof_chunk_hdr) * uoh->uoh_num_chunks) + return EINVAL; + + /* Check if the UOF objects are compatible with the chip */ + if ((uoh->uoh_cpu_type & sc->sc_hw.qhw_prod_type) == 0) + return ENOTSUP; + + if (uoh->uoh_min_cpu_ver > sc->sc_rev || + uoh->uoh_max_cpu_ver < sc->sc_rev) + return ENOTSUP; + + sc->sc_aefw_uof.qafu_size = uof_size; + 
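/* all chunk offsets parsed below are relative to this object header */ +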
sc->sc_aefw_uof.qafu_obj_hdr = uoh; + + base = (uintptr_t)sc->sc_aefw_uof.qafu_obj_hdr; + + /* map uof string-table */ + uch = qat_aefw_uof_find_chunk(sc, UOF_STRT, NULL); + if (uch != NULL) { + hdr_size = offsetof(struct uof_str_tab, ust_strings); + sc->sc_aefw_uof.qafu_str_tab = + (void *)(base + uch->uch_offset + hdr_size); + sc->sc_aefw_uof.qafu_str_tab_size = uch->uch_size - hdr_size; + } + + /* get ustore mem inits table -- should be only one */ + uch = qat_aefw_uof_find_chunk(sc, UOF_IMEM, NULL); + if (uch != NULL) { + if (uch->uch_size < sizeof(struct uof_obj_table)) + return EINVAL; + sc->sc_aefw_uof.qafu_num_init_mem = ((struct uof_obj_table *)(base + + uch->uch_offset))->uot_nentries; + if (sc->sc_aefw_uof.qafu_num_init_mem) { + sc->sc_aefw_uof.qafu_init_mem = + (struct uof_init_mem *)(base + uch->uch_offset + + sizeof(struct uof_obj_table)); + sc->sc_aefw_uof.qafu_init_mem_size = + uch->uch_size - sizeof(struct uof_obj_table); + } + } + + uch = qat_aefw_uof_find_chunk(sc, UOF_MSEG, NULL); + if (uch != NULL) { + if (uch->uch_size < sizeof(struct uof_obj_table) + + sizeof(struct uof_var_mem_seg)) + return EINVAL; + sc->sc_aefw_uof.qafu_var_mem_seg = + (struct uof_var_mem_seg *)(base + uch->uch_offset + + sizeof(struct uof_obj_table)); + } + + return qat_aefw_uof_parse_images(sc); +} + +static int +qat_aefw_suof_parse_image(struct qat_softc *sc, struct qat_suof_image *qsi, + struct suof_chunk_hdr *sch) +{ + struct qat_aefw_suof *qafs = &sc->sc_aefw_suof; + struct simg_ae_mode *ae_mode; + u_int maj_ver; + + qsi->qsi_simg_buf = qafs->qafs_suof_buf + sch->sch_offset + + sizeof(struct suof_obj_hdr); + qsi->qsi_simg_len = + ((struct suof_obj_hdr *) + (qafs->qafs_suof_buf + sch->sch_offset))->soh_img_length; + + qsi->qsi_css_header = qsi->qsi_simg_buf; + qsi->qsi_css_key = qsi->qsi_css_header + sizeof(struct css_hdr); + qsi->qsi_css_signature = qsi->qsi_css_key + + CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN; + qsi->qsi_css_simg = qsi->qsi_css_signature + CSS_SIGNATURE_LEN; + + ae_mode = (struct simg_ae_mode *)qsi->qsi_css_simg; + qsi->qsi_ae_mask = ae_mode->sam_ae_mask; + qsi->qsi_simg_name = (u_long)&ae_mode->sam_simg_name; + qsi->qsi_appmeta_data = (u_long)&ae_mode->sam_appmeta_data; + qsi->qsi_fw_type = ae_mode->sam_fw_type; + + if (ae_mode->sam_dev_type != sc->sc_hw.qhw_prod_type) + return EINVAL; + + maj_ver = (QAT_PID_MAJOR_REV | (sc->sc_rev & QAT_PID_MINOR_REV)) & 0xff; + if ((maj_ver > ae_mode->sam_devmax_ver) || + (maj_ver < ae_mode->sam_devmin_ver)) { + return EINVAL; + } + + return 0; +} + +static int +qat_aefw_suof_parse(struct qat_softc *sc) +{ + struct suof_file_hdr *sfh; + struct suof_chunk_hdr *sch; + struct qat_aefw_suof *qafs = &sc->sc_aefw_suof; + struct qat_suof_image *qsi; + size_t size; + u_int csum; + int ae0_img = MAX_AE; + int i, error; + + size = sc->sc_fw_suof_size; + if (size < sizeof(struct suof_file_hdr)) + return EINVAL; + + sfh = sc->sc_fw_suof; + + if (sfh->sfh_file_id != SUOF_FID) + return EINVAL; + if (sfh->sfh_fw_type != 0) + return EINVAL; + if (sfh->sfh_num_chunks <= 1) + return EINVAL; + if (sfh->sfh_min_ver != SUOF_MIN_VER || + sfh->sfh_maj_ver != SUOF_MAJ_VER) + return EINVAL; + + csum = qat_aefw_csum((char *)&sfh->sfh_min_ver, + size - offsetof(struct suof_file_hdr, sfh_min_ver)); + if (csum != sfh->sfh_check_sum) + return EINVAL; + + size -= sizeof(struct suof_file_hdr); + + qafs->qafs_file_id = SUOF_FID; + qafs->qafs_suof_buf = sc->sc_fw_suof; + qafs->qafs_suof_size = sc->sc_fw_suof_size; + qafs->qafs_check_sum = sfh->sfh_check_sum; + 
qafs->qafs_min_ver = sfh->sfh_min_ver; + qafs->qafs_maj_ver = sfh->sfh_maj_ver; + qafs->qafs_fw_type = sfh->sfh_fw_type; + + if (size < sizeof(struct suof_chunk_hdr)) + return EINVAL; + sch = (struct suof_chunk_hdr *)(sfh + 1); + size -= sizeof(struct suof_chunk_hdr); + + if (size < sizeof(struct suof_str_tab)) + return EINVAL; + size -= offsetof(struct suof_str_tab, sst_strings); + + qafs->qafs_sym_size = ((struct suof_str_tab *) + (qafs->qafs_suof_buf + sch->sch_offset))->sst_tab_length; + if (size < qafs->qafs_sym_size) + return EINVAL; + qafs->qafs_sym_str = qafs->qafs_suof_buf + sch->sch_offset + + offsetof(struct suof_str_tab, sst_strings); + + qafs->qafs_num_simgs = sfh->sfh_num_chunks - 1; + if (qafs->qafs_num_simgs == 0) + return EINVAL; + + qsi = qat_alloc_mem( + sizeof(struct qat_suof_image) * qafs->qafs_num_simgs); + qafs->qafs_simg = qsi; + + for (i = 0; i < qafs->qafs_num_simgs; i++) { + error = qat_aefw_suof_parse_image(sc, &qsi[i], &sch[i + 1]); + if (error) + return error; + if ((qsi[i].qsi_ae_mask & 0x1) != 0) + ae0_img = i; + } + + if (ae0_img != qafs->qafs_num_simgs - 1) { + struct qat_suof_image last_qsi; + + memcpy(&last_qsi, &qsi[qafs->qafs_num_simgs - 1], + sizeof(struct qat_suof_image)); + memcpy(&qsi[qafs->qafs_num_simgs - 1], &qsi[ae0_img], + sizeof(struct qat_suof_image)); + memcpy(&qsi[ae0_img], &last_qsi, + sizeof(struct qat_suof_image)); + } + + return 0; +} + +static int +qat_aefw_alloc_auth_dmamem(struct qat_softc *sc, char *image, size_t size, + struct qat_dmamem *dma) +{ + struct css_hdr *css = (struct css_hdr *)image; + struct auth_chunk *auth_chunk; + struct fw_auth_desc *auth_desc; + size_t mapsize, simg_offset = sizeof(struct auth_chunk); + bus_size_t bus_addr; + uintptr_t virt_addr; + int error; + + if (size > AE_IMG_OFFSET + CSS_MAX_IMAGE_LEN) + return EINVAL; + + mapsize = (css->css_fw_type == CSS_AE_FIRMWARE) ? 
+ CSS_AE_SIMG_LEN + simg_offset : + size + CSS_FWSK_PAD_LEN + simg_offset; + error = qat_alloc_dmamem(sc, dma, 1, mapsize, PAGE_SIZE); + if (error) + return error; + + memset(dma->qdm_dma_vaddr, 0, mapsize); + + auth_chunk = dma->qdm_dma_vaddr; + auth_chunk->ac_chunk_size = mapsize; + auth_chunk->ac_chunk_bus_addr = dma->qdm_dma_seg.ds_addr; + + virt_addr = (uintptr_t)dma->qdm_dma_vaddr; + virt_addr += simg_offset; + bus_addr = auth_chunk->ac_chunk_bus_addr; + bus_addr += simg_offset; + + auth_desc = &auth_chunk->ac_fw_auth_desc; + auth_desc->fad_css_hdr_high = (uint64_t)bus_addr >> 32; + auth_desc->fad_css_hdr_low = bus_addr; + + memcpy((void *)virt_addr, image, sizeof(struct css_hdr)); + /* pub key */ + virt_addr += sizeof(struct css_hdr); + bus_addr += sizeof(struct css_hdr); + image += sizeof(struct css_hdr); + + auth_desc->fad_fwsk_pub_high = (uint64_t)bus_addr >> 32; + auth_desc->fad_fwsk_pub_low = bus_addr; + + memcpy((void *)virt_addr, image, CSS_FWSK_MODULUS_LEN); + memset((void *)(virt_addr + CSS_FWSK_MODULUS_LEN), 0, CSS_FWSK_PAD_LEN); + memcpy((void *)(virt_addr + CSS_FWSK_MODULUS_LEN + CSS_FWSK_PAD_LEN), + image + CSS_FWSK_MODULUS_LEN, sizeof(uint32_t)); + + virt_addr += CSS_FWSK_PUB_LEN; + bus_addr += CSS_FWSK_PUB_LEN; + image += CSS_FWSK_MODULUS_LEN + CSS_FWSK_EXPONENT_LEN; + + auth_desc->fad_signature_high = (uint64_t)bus_addr >> 32; + auth_desc->fad_signature_low = bus_addr; + + memcpy((void *)virt_addr, image, CSS_SIGNATURE_LEN); + + virt_addr += CSS_SIGNATURE_LEN; + bus_addr += CSS_SIGNATURE_LEN; + image += CSS_SIGNATURE_LEN; + + auth_desc->fad_img_high = (uint64_t)bus_addr >> 32; + auth_desc->fad_img_low = bus_addr; + auth_desc->fad_img_len = size - AE_IMG_OFFSET; + + memcpy((void *)virt_addr, image, auth_desc->fad_img_len); + + if (css->css_fw_type == CSS_AE_FIRMWARE) { + auth_desc->fad_img_ae_mode_data_high = auth_desc->fad_img_high; + auth_desc->fad_img_ae_mode_data_low = auth_desc->fad_img_low; + + bus_addr += sizeof(struct simg_ae_mode); + + auth_desc->fad_img_ae_init_data_high = (uint64_t)bus_addr >> 32; + auth_desc->fad_img_ae_init_data_low = bus_addr; + + bus_addr += SIMG_AE_INIT_SEQ_LEN; + + auth_desc->fad_img_ae_insts_high = (uint64_t)bus_addr >> 32; + auth_desc->fad_img_ae_insts_low = bus_addr; + } else { + auth_desc->fad_img_ae_insts_high = auth_desc->fad_img_high; + auth_desc->fad_img_ae_insts_low = auth_desc->fad_img_low; + } + + bus_dmamap_sync(dma->qdm_dma_tag, dma->qdm_dma_map, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); + + return 0; +} + +static int +qat_aefw_auth(struct qat_softc *sc, struct qat_dmamem *dma) +{ + bus_addr_t addr; + uint32_t fcu, sts; + int retry = 0; + + addr = dma->qdm_dma_seg.ds_addr; + qat_cap_global_write_4(sc, FCU_DRAM_ADDR_HI, (uint64_t)addr >> 32); + qat_cap_global_write_4(sc, FCU_DRAM_ADDR_LO, addr); + qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_AUTH); + + do { + DELAY(FW_AUTH_WAIT_PERIOD * 1000); + fcu = qat_cap_global_read_4(sc, FCU_STATUS); + sts = __SHIFTOUT(fcu, FCU_STATUS_STS); + if (sts == FCU_STATUS_STS_VERI_FAIL) + goto fail; + if (fcu & FCU_STATUS_AUTHFWLD && + sts == FCU_STATUS_STS_VERI_DONE) { + return 0; + } + } while (retry++ < FW_AUTH_MAX_RETRY); + +fail: + device_printf(sc->sc_dev, + "firmware authentication error: status 0x%08x retry %d\n", + fcu, retry); + return EINVAL; +} + +static int +qat_aefw_suof_load(struct qat_softc *sc, struct qat_dmamem *dma) +{ + struct simg_ae_mode *ae_mode; + uint32_t fcu, sts, loaded; + u_int mask; + u_char ae; + int retry = 0; + + ae_mode = (struct simg_ae_mode 
*)((uintptr_t)dma->qdm_dma_vaddr + + sizeof(struct auth_chunk) + sizeof(struct css_hdr) + + CSS_FWSK_PUB_LEN + CSS_SIGNATURE_LEN); + + for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { + if (!(mask & 1)) + continue; + if (!((ae_mode->sam_ae_mask >> ae) & 0x1)) + continue; + if (qat_ae_is_active(sc, ae)) { + device_printf(sc->sc_dev, "AE %d is active\n", ae); + return EINVAL; + } + qat_cap_global_write_4(sc, FCU_CTRL, + FCU_CTRL_CMD_LOAD | __SHIFTIN(ae, FCU_CTRL_AE)); + do { + DELAY(FW_AUTH_WAIT_PERIOD * 1000); + fcu = qat_cap_global_read_4(sc, FCU_STATUS); + sts = __SHIFTOUT(fcu, FCU_STATUS_STS); + loaded = __SHIFTOUT(fcu, FCU_STATUS_LOADED_AE); + if (sts == FCU_STATUS_STS_LOAD_DONE && + (loaded & (1 << ae))) { + break; + } + } while (retry++ < FW_AUTH_MAX_RETRY); + + if (retry > FW_AUTH_MAX_RETRY) { + device_printf(sc->sc_dev, + "firmware load timeout: status %08x\n", fcu); + return EINVAL; + } + } + + return 0; +} + +static int +qat_aefw_suof_write(struct qat_softc *sc) +{ + struct qat_suof_image *qsi = NULL; + int i, error = 0; + + for (i = 0; i < sc->sc_aefw_suof.qafs_num_simgs; i++) { + qsi = &sc->sc_aefw_suof.qafs_simg[i]; + error = qat_aefw_alloc_auth_dmamem(sc, qsi->qsi_simg_buf, + qsi->qsi_simg_len, &qsi->qsi_dma); + if (error) + return error; + error = qat_aefw_auth(sc, &qsi->qsi_dma); + if (error) + goto fail; + error = qat_aefw_suof_load(sc, &qsi->qsi_dma); + if (error) + goto fail; + + qat_free_dmamem(sc, &qsi->qsi_dma); + } + qat_free_mem(sc->sc_aefw_suof.qafs_simg); + + return 0; +fail: + if (qsi != NULL) + qat_free_dmamem(sc, &qsi->qsi_dma); + return error; +} + +static int +qat_aefw_uof_assign_image(struct qat_softc *sc, struct qat_ae *qae, + struct qat_uof_image *qui) +{ + struct qat_ae_slice *slice; + int i, npages, nregions; + + if (qae->qae_num_slices >= nitems(qae->qae_slices)) + return ENOENT; + + if (qui->qui_image->ui_ae_mode & + (AE_MODE_RELOAD_CTX_SHARED | AE_MODE_SHARED_USTORE)) { + /* XXX */ + device_printf(sc->sc_dev, + "shared ae mode is not supported yet\n"); + return ENOTSUP; + } + + qae->qae_shareable_ustore = 0; /* XXX */ + qae->qae_effect_ustore_size = USTORE_SIZE; + + slice = &qae->qae_slices[qae->qae_num_slices]; + + slice->qas_image = qui; + slice->qas_assigned_ctx_mask = qui->qui_image->ui_ctx_assigned; + + nregions = qui->qui_image->ui_num_page_regions; + npages = qui->qui_image->ui_num_pages; + + if (nregions > nitems(slice->qas_regions)) + return ENOENT; + if (npages > nitems(slice->qas_pages)) + return ENOENT; + + for (i = 0; i < nregions; i++) { + STAILQ_INIT(&slice->qas_regions[i].qar_waiting_pages); + } + for (i = 0; i < npages; i++) { + struct qat_ae_page *page = &slice->qas_pages[i]; + int region; + + page->qap_page = &qui->qui_pages[i]; + region = page->qap_page->qup_page_region; + if (region >= nregions) + return EINVAL; + + page->qap_region = &slice->qas_regions[region]; + } + + qae->qae_num_slices++; + + return 0; +} + +static int +qat_aefw_uof_init_ae(struct qat_softc *sc, u_char ae) +{ + struct uof_image *image; + struct qat_ae *qae = &(QAT_AE(sc, ae)); + int s; + u_char nn_mode; + + for (s = 0; s < qae->qae_num_slices; s++) { + if (qae->qae_slices[s].qas_image == NULL) + continue; + + image = qae->qae_slices[s].qas_image->qui_image; + qat_ae_write_ctx_mode(sc, ae, + __SHIFTOUT(image->ui_ae_mode, AE_MODE_CTX_MODE)); + + nn_mode = __SHIFTOUT(image->ui_ae_mode, AE_MODE_NN_MODE); + if (nn_mode != AE_MODE_NN_MODE_DONTCARE) + qat_ae_write_nn_mode(sc, ae, nn_mode); + + qat_ae_write_lm_mode(sc, ae, AEREG_LMEM0, + 
__SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM0)); + qat_ae_write_lm_mode(sc, ae, AEREG_LMEM1, + __SHIFTOUT(image->ui_ae_mode, AE_MODE_LMEM1)); + + qat_ae_write_shared_cs_mode(sc, ae, + __SHIFTOUT(image->ui_ae_mode, AE_MODE_SHARED_USTORE)); + qat_ae_set_reload_ustore(sc, ae, image->ui_reloadable_size, + __SHIFTOUT(image->ui_ae_mode, AE_MODE_RELOAD_CTX_SHARED), + qae->qae_reloc_ustore_dram); + } + + return 0; +} + +static int +qat_aefw_uof_init(struct qat_softc *sc) +{ + int ae, i, error; + uint32_t mask; + + for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { + struct qat_ae *qae; + + if (!(mask & 1)) + continue; + + qae = &(QAT_AE(sc, ae)); + + for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { + if ((sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_ae_assigned & + (1 << ae)) == 0) + continue; + + error = qat_aefw_uof_assign_image(sc, qae, + &sc->sc_aefw_uof.qafu_imgs[i]); + if (error) + return error; + } + + /* XXX UcLo_initNumUwordUsed */ + + qae->qae_reloc_ustore_dram = UINT_MAX; /* XXX */ + + error = qat_aefw_uof_init_ae(sc, ae); + if (error) + return error; + } + + return 0; +} + +int +qat_aefw_load(struct qat_softc *sc) +{ + int error; + + error = qat_aefw_load_mof(sc); + if (error) + return error; + + error = qat_aefw_load_mmp(sc); + if (error) + return error; + + error = qat_aefw_mof_parse(sc); + if (error) { + device_printf(sc->sc_dev, "couldn't parse mof: %d\n", error); + return error; + } + + if (sc->sc_hw.qhw_fw_auth) { + error = qat_aefw_suof_parse(sc); + if (error) { + device_printf(sc->sc_dev, "couldn't parse suof: %d\n", + error); + return error; + } + + error = qat_aefw_suof_write(sc); + if (error) { + device_printf(sc->sc_dev, + "could not write firmware: %d\n", error); + return error; + } + + } else { + error = qat_aefw_uof_parse(sc); + if (error) { + device_printf(sc->sc_dev, "couldn't parse uof: %d\n", + error); + return error; + } + + error = qat_aefw_uof_init(sc); + if (error) { + device_printf(sc->sc_dev, + "couldn't init for aefw: %d\n", error); + return error; + } + + error = qat_aefw_uof_write(sc); + if (error) { + device_printf(sc->sc_dev, + "Could not write firmware: %d\n", error); + return error; + } + } + + return 0; +} + +void +qat_aefw_unload(struct qat_softc *sc) +{ + qat_aefw_unload_mmp(sc); + qat_aefw_unload_mof(sc); +} + +int +qat_aefw_start(struct qat_softc *sc, u_char ae, u_int ctx_mask) +{ + uint32_t fcu; + int retry = 0; + + if (sc->sc_hw.qhw_fw_auth) { + qat_cap_global_write_4(sc, FCU_CTRL, FCU_CTRL_CMD_START); + do { + DELAY(FW_AUTH_WAIT_PERIOD * 1000); + fcu = qat_cap_global_read_4(sc, FCU_STATUS); + if (fcu & FCU_STATUS_DONE) + return 0; + } while (retry++ < FW_AUTH_MAX_RETRY); + + device_printf(sc->sc_dev, + "firmware start timeout: status %08x\n", fcu); + return EINVAL; + } else { + qat_ae_ctx_indr_write(sc, ae, (~ctx_mask) & AE_ALL_CTX, + CTX_WAKEUP_EVENTS_INDIRECT, + CTX_WAKEUP_EVENTS_INDIRECT_SLEEP); + qat_ae_enable_ctx(sc, ae, ctx_mask); + } + + return 0; +} + +static int +qat_aefw_init_memory_one(struct qat_softc *sc, struct uof_init_mem *uim) +{ + struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; + struct qat_ae_batch_init_list *qabi_list; + struct uof_mem_val_attr *memattr; + size_t *curinit; + u_long ael; + int i; + const char *sym; + char *ep; + + memattr = (struct uof_mem_val_attr *)(uim + 1); + + switch (uim->uim_region) { + case LMEM_REGION: + if ((uim->uim_addr + uim->uim_num_bytes) > MAX_LMEM_REG * 4) { + device_printf(sc->sc_dev, + "Invalid lmem addr or bytes\n"); + return ENOBUFS; + } + if (uim->uim_scope != UOF_SCOPE_LOCAL) + 
return EINVAL; + sym = qat_aefw_uof_string(sc, uim->uim_sym_name); + ael = strtoul(sym, &ep, 10); + if (ep == sym || ael >= MAX_AE) + return EINVAL; + if ((sc->sc_ae_mask & (1 << ael)) == 0) + return 0; /* ae is fused out */ + + curinit = &qafu->qafu_num_lm_init[ael]; + qabi_list = &qafu->qafu_lm_init[ael]; + + for (i = 0; i < uim->uim_num_val_attr; i++, memattr++) { + struct qat_ae_batch_init *qabi; + + qabi = qat_alloc_mem(sizeof(struct qat_ae_batch_init)); + if (*curinit == 0) + STAILQ_INIT(qabi_list); + STAILQ_INSERT_TAIL(qabi_list, qabi, qabi_next); + + qabi->qabi_ae = (u_int)ael; + qabi->qabi_addr = + uim->uim_addr + memattr->umva_byte_offset; + qabi->qabi_value = &memattr->umva_value; + qabi->qabi_size = 4; + qafu->qafu_num_lm_init_inst[ael] += + qat_ae_get_inst_num(qabi->qabi_size); + (*curinit)++; + if (*curinit >= MAX_LMEM_REG) { + device_printf(sc->sc_dev, + "Invalid lmem val attr\n"); + return ENOBUFS; + } + } + break; + case SRAM_REGION: + case DRAM_REGION: + case DRAM1_REGION: + case SCRATCH_REGION: + case UMEM_REGION: + /* XXX */ + /* fallthrough */ + default: + device_printf(sc->sc_dev, + "unsupported memory region to init: %d\n", + uim->uim_region); + return ENOTSUP; + } + + return 0; +} + +static void +qat_aefw_free_lm_init(struct qat_softc *sc, u_char ae) +{ + struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; + struct qat_ae_batch_init *qabi; + + while ((qabi = STAILQ_FIRST(&qafu->qafu_lm_init[ae])) != NULL) { + STAILQ_REMOVE_HEAD(&qafu->qafu_lm_init[ae], qabi_next); + qat_free_mem(qabi); + } + + qafu->qafu_num_lm_init[ae] = 0; + qafu->qafu_num_lm_init_inst[ae] = 0; +} + +static int +qat_aefw_init_ustore(struct qat_softc *sc) +{ + uint64_t *fill; + uint32_t dont_init; + int a, i, p; + int error = 0; + int usz, end, start; + u_char ae, nae; + + fill = qat_alloc_mem(MAX_USTORE * sizeof(uint64_t)); + + for (a = 0; a < sc->sc_aefw_uof.qafu_num_imgs; a++) { + struct qat_uof_image *qui = &sc->sc_aefw_uof.qafu_imgs[a]; + struct uof_image *ui = qui->qui_image; + + for (i = 0; i < MAX_USTORE; i++) + memcpy(&fill[i], ui->ui_fill_pattern, sizeof(uint64_t)); + /* + * Compute do_not_init value as a value that will not be equal + * to fill data when cast to an int + */ + dont_init = 0; + if (dont_init == (uint32_t)fill[0]) + dont_init = 0xffffffff; + + for (p = 0; p < ui->ui_num_pages; p++) { + struct qat_uof_page *qup = &qui->qui_pages[p]; + if (!qup->qup_def_page) + continue; + + for (i = qup->qup_beg_paddr; + i < qup->qup_beg_paddr + qup->qup_num_micro_words; + i++) { + fill[i] = (uint64_t)dont_init; + } + } + + for (ae = 0; ae < sc->sc_ae_num; ae++) { + MPASS(ae < UOF_MAX_NUM_OF_AE); + if ((ui->ui_ae_assigned & (1 << ae)) == 0) + continue; + + if (QAT_AE(sc, ae).qae_shareable_ustore && (ae & 1)) { + /* the odd AE shares its neighbour's ustore; + * skip it if the neighbour is also assigned + * to this image */ + qat_ae_get_shared_ustore_ae(ae, &nae); + if (ui->ui_ae_assigned & (1 << nae)) + continue; + } + usz = QAT_AE(sc, ae).qae_effect_ustore_size; + + /* initialize the areas not going to be overwritten */ + end = -1; + do { + /* find next uword that needs to be initialized */ + for (start = end + 1; start < usz; start++) { + if ((uint32_t)fill[start] != dont_init) + break; + } + /* see if there are no more such uwords */ + if (start >= usz) + break; + for (end = start + 1; end < usz; end++) { + if ((uint32_t)fill[end] == dont_init) + break; + } + if (QAT_AE(sc, ae).qae_shareable_ustore) { + error = ENOTSUP; /* XXX */ + goto out; + } else { + error = qat_ae_ucode_write(sc, ae, + start, end - start, &fill[start]); + if (error) { + goto out; + } + } + + } while (end < usz); + } + } + +out: +
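/* common exit: the fill buffer is freed on both the success and + * error paths */ +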
qat_free_mem(fill); + return error; +} + +static int +qat_aefw_init_reg(struct qat_softc *sc, u_char ae, u_char ctx_mask, + enum aereg_type regtype, u_short regaddr, u_int value) +{ + int error = 0; + u_char ctx; + + switch (regtype) { + case AEREG_GPA_REL: + case AEREG_GPB_REL: + case AEREG_SR_REL: + case AEREG_SR_RD_REL: + case AEREG_SR_WR_REL: + case AEREG_DR_REL: + case AEREG_DR_RD_REL: + case AEREG_DR_WR_REL: + case AEREG_NEIGH_REL: + /* init for all valid ctx */ + for (ctx = 0; ctx < MAX_AE_CTX; ctx++) { + if ((ctx_mask & (1 << ctx)) == 0) + continue; + error = qat_aereg_rel_data_write(sc, ae, ctx, regtype, + regaddr, value); + } + break; + case AEREG_GPA_ABS: + case AEREG_GPB_ABS: + case AEREG_SR_ABS: + case AEREG_SR_RD_ABS: + case AEREG_SR_WR_ABS: + case AEREG_DR_ABS: + case AEREG_DR_RD_ABS: + case AEREG_DR_WR_ABS: + error = qat_aereg_abs_data_write(sc, ae, regtype, + regaddr, value); + break; + default: + error = EINVAL; + break; + } + + return error; +} + +static int +qat_aefw_init_reg_sym_expr(struct qat_softc *sc, u_char ae, + struct qat_uof_image *qui) +{ + u_int i, expres; + u_char ctx_mask; + + for (i = 0; i < qui->qui_num_init_reg_sym; i++) { + struct uof_init_reg_sym *uirs = &qui->qui_init_reg_sym[i]; + + if (uirs->uirs_value_type == EXPR_VAL) { + /* XXX */ + device_printf(sc->sc_dev, + "does not support initializing EXPR_VAL\n"); + return ENOTSUP; + } else { + expres = uirs->uirs_value; + } + + switch (uirs->uirs_init_type) { + case INIT_REG: + if (__SHIFTOUT(qui->qui_image->ui_ae_mode, + AE_MODE_CTX_MODE) == MAX_AE_CTX) { + ctx_mask = 0xff; /* 8-ctx mode */ + } else { + ctx_mask = 0x55; /* 4-ctx mode */ + } + qat_aefw_init_reg(sc, ae, ctx_mask, + (enum aereg_type)uirs->uirs_reg_type, + (u_short)uirs->uirs_addr_offset, expres); + break; + case INIT_REG_CTX: + if (__SHIFTOUT(qui->qui_image->ui_ae_mode, + AE_MODE_CTX_MODE) == MAX_AE_CTX) { + ctx_mask = 0xff; /* 8-ctx mode */ + } else { + ctx_mask = 0x55; /* 4-ctx mode */ + } + if (((1 << uirs->uirs_ctx) & ctx_mask) == 0) + return EINVAL; + qat_aefw_init_reg(sc, ae, 1 << uirs->uirs_ctx, + (enum aereg_type)uirs->uirs_reg_type, + (u_short)uirs->uirs_addr_offset, expres); + break; + case INIT_EXPR: + case INIT_EXPR_ENDIAN_SWAP: + default: + device_printf(sc->sc_dev, + "does not support initializing init_type %d\n", + uirs->uirs_init_type); + return ENOTSUP; + } + } + + return 0; +} + +static int +qat_aefw_init_memory(struct qat_softc *sc) +{ + struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; + size_t uimsz, initmemsz = qafu->qafu_init_mem_size; + struct uof_init_mem *uim; + int error, i; + u_char ae; + + uim = qafu->qafu_init_mem; + for (i = 0; i < qafu->qafu_num_init_mem; i++) { + uimsz = sizeof(struct uof_init_mem) + + sizeof(struct uof_mem_val_attr) * uim->uim_num_val_attr; + if (uimsz > initmemsz) { + device_printf(sc->sc_dev, + "invalid uof_init_mem or uof_mem_val_attr size\n"); + return EINVAL; + } + + if (uim->uim_num_bytes > 0) { + error = qat_aefw_init_memory_one(sc, uim); + if (error) { + device_printf(sc->sc_dev, + "Could not init ae memory: %d\n", error); + return error; + } + } + uim = (struct uof_init_mem *)((uintptr_t)uim + uimsz); + initmemsz -= uimsz; + } + + /* run Batch put LM API */ + for (ae = 0; ae < MAX_AE; ae++) { + error = qat_ae_batch_put_lm(sc, ae, &qafu->qafu_lm_init[ae], + qafu->qafu_num_lm_init_inst[ae]); + if (error) + device_printf(sc->sc_dev, "Could not put lm\n"); + + qat_aefw_free_lm_init(sc, ae); + } + + error = qat_aefw_init_ustore(sc); + + /* XXX run Batch put LM API */ + + return error; +} + 
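+/* + * Apply the UOF's global initializers: run the memory segment records + * when present (otherwise just the ustore fill), then the per-slice + * register and symbol initializers for each assigned AE. + */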
+static int +qat_aefw_init_globals(struct qat_softc *sc) +{ + struct qat_aefw_uof *qafu = &sc->sc_aefw_uof; + int error, i, p, s; + u_char ae; + + /* initialize the memory segments */ + if (qafu->qafu_num_init_mem > 0) { + error = qat_aefw_init_memory(sc); + if (error) + return error; + } else { + error = qat_aefw_init_ustore(sc); + if (error) + return error; + } + + /* XXX bind import variables with ivd values */ + + /* XXX bind the uC global variables; + * local variables will be done on-the-fly */ + for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { + for (p = 0; p < sc->sc_aefw_uof.qafu_imgs[i].qui_image->ui_num_pages; p++) { + struct qat_uof_page *qup = + &sc->sc_aefw_uof.qafu_imgs[i].qui_pages[p]; + if (qup->qup_num_uw_blocks && + (qup->qup_num_uc_var || qup->qup_num_imp_var)) { + device_printf(sc->sc_dev, + "uC global variables are not supported\n"); + return ENOTSUP; + } + } + } + + for (ae = 0; ae < sc->sc_ae_num; ae++) { + struct qat_ae *qae = &(QAT_AE(sc, ae)); + + for (s = 0; s < qae->qae_num_slices; s++) { + struct qat_ae_slice *qas = &qae->qae_slices[s]; + + if (qas->qas_image == NULL) + continue; + + error = + qat_aefw_init_reg_sym_expr(sc, ae, qas->qas_image); + if (error) + return error; + } + } + + return 0; +} + +static uint64_t +qat_aefw_get_uof_inst(struct qat_softc *sc, struct qat_uof_page *qup, + u_int addr) +{ + uint64_t uinst = 0; + u_int i; + + /* find the block */ + for (i = 0; i < qup->qup_num_uw_blocks; i++) { + struct qat_uof_uword_block *quub = &qup->qup_uw_blocks[i]; + + if ((addr >= quub->quub_start_addr) && + (addr <= (quub->quub_start_addr + + (quub->quub_num_words - 1)))) { + /* Unpack the bytes and assign them to the 64-bit + * uword value. Note: the microwords are stored as + * packed bytes. + */ + addr -= quub->quub_start_addr; + addr *= AEV2_PACKED_UWORD_BYTES; + memcpy(&uinst, + (void *)((uintptr_t)quub->quub_micro_words + addr), + AEV2_PACKED_UWORD_BYTES); + uinst = uinst & UWORD_MASK; + + return uinst; + } + } + + return INVLD_UWORD; +} + +static int +qat_aefw_do_pagein(struct qat_softc *sc, u_char ae, struct qat_uof_page *qup) +{ + struct qat_ae *qae = &(QAT_AE(sc, ae)); + uint64_t fill, *ucode_cpybuf; + u_int error, i, upaddr, uraddr, ninst, cpylen; + + if (qup->qup_num_uc_var || qup->qup_num_neigh_reg || + qup->qup_num_imp_var || qup->qup_num_imp_expr) { + device_printf(sc->sc_dev, + "does not support fixup locals\n"); + return ENOTSUP; + } + + ucode_cpybuf = qat_alloc_mem(UWORD_CPYBUF_SIZE * sizeof(uint64_t)); + + /* XXX get fill-pattern from an image -- they are all the same */ + memcpy(&fill, sc->sc_aefw_uof.qafu_imgs[0].qui_image->ui_fill_pattern, + sizeof(uint64_t)); + + upaddr = qup->qup_beg_paddr; + uraddr = 0; + ninst = qup->qup_num_micro_words; + while (ninst > 0) { + cpylen = min(ninst, UWORD_CPYBUF_SIZE); + + /* load the buffer */ + for (i = 0; i < cpylen; i++) { + /* Keep the code structure below in case shared-ustore + * scenarios need different handling. */ + if (!qae->qae_shareable_ustore) { + /* qat_aefw_get_uof_inst() takes an address that + * is relative to the start of the page. + * So we don't need to add in the physical + * offset of the page.
*/ + if (qup->qup_page_region != 0) { + /* XXX */ + device_printf(sc->sc_dev, + "region != 0 is not supported\n"); + qat_free_mem(ucode_cpybuf); + return ENOTSUP; + } else { + /* for mixing case, it should take + * physical address */ + ucode_cpybuf[i] = qat_aefw_get_uof_inst( + sc, qup, upaddr + i); + if (ucode_cpybuf[i] == INVLD_UWORD) { + /* fill hole in the uof */ + ucode_cpybuf[i] = fill; + } + } + } else { + /* XXX */ + qat_free_mem(ucode_cpybuf); + return ENOTSUP; + } + } + + /* copy the buffer to ustore */ + if (!qae->qae_shareable_ustore) { + error = qat_ae_ucode_write(sc, ae, upaddr, cpylen, + ucode_cpybuf); + if (error) + return error; + } else { + /* XXX */ + qat_free_mem(ucode_cpybuf); + return ENOTSUP; + } + upaddr += cpylen; + uraddr += cpylen; + ninst -= cpylen; + } + + qat_free_mem(ucode_cpybuf); + + return 0; +} + +static int +qat_aefw_uof_write_one(struct qat_softc *sc, struct qat_uof_image *qui) +{ + struct uof_image *ui = qui->qui_image; + struct qat_ae_page *qap; + u_int s, p, c; + int error; + u_char ae, ctx_mask; + + if (__SHIFTOUT(ui->ui_ae_mode, AE_MODE_CTX_MODE) == MAX_AE_CTX) + ctx_mask = 0xff; /* 8-ctx mode */ + else + ctx_mask = 0x55; /* 4-ctx mode */ + + /* load the default page and set assigned CTX PC + * to the entrypoint address */ + for (ae = 0; ae < sc->sc_ae_num; ae++) { + struct qat_ae *qae = &(QAT_AE(sc, ae)); + struct qat_ae_slice *qas; + u_int metadata; + + MPASS(ae < UOF_MAX_NUM_OF_AE); + + if ((ui->ui_ae_assigned & (1 << ae)) == 0) + continue; + + /* find the slice to which this image is assigned */ + for (s = 0; s < qae->qae_num_slices; s++) { + qas = &qae->qae_slices[s]; + if (ui->ui_ctx_assigned & qas->qas_assigned_ctx_mask) + break; + } + if (s >= qae->qae_num_slices) + continue; + + qas = &qae->qae_slices[s]; + + for (p = 0; p < ui->ui_num_pages; p++) { + qap = &qas->qas_pages[p]; + + /* Only load pages loaded by default */ + if (!qap->qap_page->qup_def_page) + continue; + + error = qat_aefw_do_pagein(sc, ae, qap->qap_page); + if (error) + return error; + } + + metadata = qas->qas_image->qui_image->ui_app_metadata; + if (metadata != 0xffffffff && bootverbose) { + device_printf(sc->sc_dev, + "loaded firmware: %s\n", + qat_aefw_uof_string(sc, metadata)); + } + + /* Assume starting page is page 0 */ + qap = &qas->qas_pages[0]; + for (c = 0; c < MAX_AE_CTX; c++) { + if (ctx_mask & (1 << c)) + qas->qas_cur_pages[c] = qap; + else + qas->qas_cur_pages[c] = NULL; + } + + /* set the live context */ + qae->qae_live_ctx_mask = ui->ui_ctx_assigned; + + /* set context PC to the image entrypoint address */ + error = qat_ae_write_pc(sc, ae, ui->ui_ctx_assigned, + ui->ui_entry_address); + if (error) + return error; + } + + /* XXX store the checksum for convenience */ + + return 0; +} + +static int +qat_aefw_uof_write(struct qat_softc *sc) +{ + int error = 0; + int i; + + error = qat_aefw_init_globals(sc); + if (error) { + device_printf(sc->sc_dev, + "Could not initialize globals\n"); + return error; + } + + for (i = 0; i < sc->sc_aefw_uof.qafu_num_imgs; i++) { + error = qat_aefw_uof_write_one(sc, + &sc->sc_aefw_uof.qafu_imgs[i]); + if (error) + break; + } + + /* XXX UcLo_computeFreeUstore */ + + return error; +} Property changes on: head/sys/dev/qat/qat_ae.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_aevar.h =================================================================== --- head/sys/dev/qat/qat_aevar.h (nonexistent) +++ 
head/sys/dev/qat/qat_aevar.h (revision 367386) @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_aevar.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2019 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_AEVAR_H_ +#define _DEV_PCI_QAT_AEVAR_H_ + +int qat_ae_init(struct qat_softc *); +int qat_ae_start(struct qat_softc *); +void qat_ae_cluster_intr(void *); + +int qat_aefw_load(struct qat_softc *); +void qat_aefw_unload(struct qat_softc *); +int qat_aefw_start(struct qat_softc *, u_char, u_int); + +#endif Property changes on: head/sys/dev/qat/qat_aevar.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_c2xxx.c =================================================================== --- head/sys/dev/qat/qat_c2xxx.c (nonexistent) +++ head/sys/dev/qat/qat_c2xxx.c (revision 367386) @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_c2xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2013 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); +#if 0 +__KERNEL_RCSID(0, "$NetBSD: qat_c2xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $"); +#endif + +#include <sys/param.h> +#include <sys/bus.h> +#include <sys/systm.h> + +#include <machine/bus.h> + +#include <dev/pci/pcireg.h> +#include <dev/pci/pcivar.h> + +#include "qatreg.h" +#include "qat_hw15reg.h" +#include "qat_c2xxxreg.h" +#include "qatvar.h" +#include "qat_hw15var.h" + +static uint32_t +qat_c2xxx_get_accel_mask(struct qat_softc *sc) +{ + uint32_t fusectl; + + fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4); + + return ((~fusectl) & ACCEL_MASK_C2XXX); +} + +static uint32_t +qat_c2xxx_get_ae_mask(struct qat_softc *sc) +{ + uint32_t fusectl; + + fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4); + if (fusectl & ( + FUSECTL_C2XXX_PKE_DISABLE | + FUSECTL_C2XXX_ATH_DISABLE | + FUSECTL_C2XXX_CPH_DISABLE)) { + return 0; + } else { + if ((~fusectl & AE_MASK_C2XXX) == 0x3) { + /* + * With both AEs enabled we get spurious completions on + * ETR rings. Work around that for now by simply + * disabling the second AE. + */ + device_printf(sc->sc_dev, "disabling second AE\n"); + fusectl |= 0x2; + } + return ((~fusectl) & AE_MASK_C2XXX); + } +} + +static enum qat_sku +qat_c2xxx_get_sku(struct qat_softc *sc) +{ + uint32_t fusectl; + + fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4); + + switch (sc->sc_ae_num) { + case 1: + if (fusectl & FUSECTL_C2XXX_LOW_SKU) + return QAT_SKU_3; + else if (fusectl & FUSECTL_C2XXX_MID_SKU) + return QAT_SKU_2; + break; + case MAX_AE_C2XXX: + return QAT_SKU_1; + } + + return QAT_SKU_UNKNOWN; +} + +static uint32_t +qat_c2xxx_get_accel_cap(struct qat_softc *sc) +{ + return QAT_ACCEL_CAP_CRYPTO_SYMMETRIC | + QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC | + QAT_ACCEL_CAP_CIPHER | + QAT_ACCEL_CAP_AUTHENTICATION; +} + +static const char * +qat_c2xxx_get_fw_uof_name(struct qat_softc *sc) +{ + if (sc->sc_rev < QAT_REVID_C2XXX_B0) + return AE_FW_UOF_NAME_C2XXX_A0; + + /* QAT_REVID_C2XXX_B0 and QAT_REVID_C2XXX_C0 */ + return AE_FW_UOF_NAME_C2XXX_B0; +} + +static void +qat_c2xxx_enable_intr(struct qat_softc *sc) +{ + + qat_misc_write_4(sc, EP_SMIA_C2XXX, EP_SMIA_MASK_C2XXX); +} + +static void +qat_c2xxx_init_etr_intr(struct qat_softc *sc, int bank) +{ + /* + * For now, all rings within the bank are set up so that flag + * interrupts are generated when a ring leaves the empty state. + * Note that in order for the ring interrupt to generate an IRQ, + * the interrupt must also be enabled for the ring.
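+ * Each ring has a four-bit source-select field; ETR_INT_SRCSEL + * covers the bank's low rings and ETR_INT_SRCSEL_2 the high rings, + * with only ring 0's field differing (compare + * ETR_INT_SRCSEL_MASK_0_C2XXX with ETR_INT_SRCSEL_MASK_X_C2XXX).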
+ */ + qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL, + ETR_INT_SRCSEL_MASK_0_C2XXX); + qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL_2, + ETR_INT_SRCSEL_MASK_X_C2XXX); +} + +const struct qat_hw qat_hw_c2xxx = { + .qhw_sram_bar_id = BAR_SRAM_ID_C2XXX, + .qhw_misc_bar_id = BAR_PMISC_ID_C2XXX, + .qhw_etr_bar_id = BAR_ETR_ID_C2XXX, + .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C2XXX, + .qhw_ae_offset = AE_OFFSET_C2XXX, + .qhw_ae_local_offset = AE_LOCAL_OFFSET_C2XXX, + .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C2XXX, + .qhw_num_banks = ETR_MAX_BANKS_C2XXX, + .qhw_num_ap_banks = ETR_MAX_AP_BANKS_C2XXX, + .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK, + .qhw_num_accel = MAX_ACCEL_C2XXX, + .qhw_num_engines = MAX_AE_C2XXX, + .qhw_tx_rx_gap = ETR_TX_RX_GAP_C2XXX, + .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C2XXX, + .qhw_msix_ae_vec_gap = MSIX_AE_VEC_GAP_C2XXX, + .qhw_fw_auth = false, + .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW15, + .qhw_fw_resp_size = FW_REQ_DEFAULT_SZ_HW15, + .qhw_ring_asym_tx = 2, + .qhw_ring_asym_rx = 3, + .qhw_ring_sym_tx = 4, + .qhw_ring_sym_rx = 5, + .qhw_mof_fwname = AE_FW_MOF_NAME_C2XXX, + .qhw_mmp_fwname = AE_FW_MMP_NAME_C2XXX, + .qhw_prod_type = AE_FW_PROD_TYPE_C2XXX, + .qhw_get_accel_mask = qat_c2xxx_get_accel_mask, + .qhw_get_ae_mask = qat_c2xxx_get_ae_mask, + .qhw_get_sku = qat_c2xxx_get_sku, + .qhw_get_accel_cap = qat_c2xxx_get_accel_cap, + .qhw_get_fw_uof_name = qat_c2xxx_get_fw_uof_name, + .qhw_enable_intr = qat_c2xxx_enable_intr, + .qhw_init_etr_intr = qat_c2xxx_init_etr_intr, + .qhw_init_admin_comms = qat_adm_ring_init, + .qhw_send_admin_init = qat_adm_ring_send_init, + .qhw_crypto_setup_desc = qat_hw15_crypto_setup_desc, + .qhw_crypto_setup_req_params = qat_hw15_crypto_setup_req_params, + .qhw_crypto_opaque_offset = + offsetof(struct fw_la_resp, comn_resp.opaque_data), +}; Property changes on: head/sys/dev/qat/qat_c2xxx.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_c2xxxreg.h =================================================================== --- head/sys/dev/qat/qat_c2xxxreg.h (nonexistent) +++ head/sys/dev/qat/qat_c2xxxreg.h (revision 367386) @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_c2xxxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2013 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_C2XXXREG_H_ +#define _DEV_PCI_QAT_C2XXXREG_H_ + +/* PCI revision IDs */ +#define QAT_REVID_C2XXX_A0 0x00 +#define QAT_REVID_C2XXX_B0 0x02 +#define QAT_REVID_C2XXX_C0 0x03 + +/* Max number of accelerators and engines */ +#define MAX_ACCEL_C2XXX 1 +#define MAX_AE_C2XXX 2 + +/* PCIe BAR index */ +#define BAR_SRAM_ID_C2XXX NO_PCI_REG +#define BAR_PMISC_ID_C2XXX 0 +#define BAR_ETR_ID_C2XXX 1 + +#define ACCEL_MASK_C2XXX 0x1 +#define AE_MASK_C2XXX 0x3 + +#define MSIX_AE_VEC_GAP_C2XXX 8 + +/* PCIe configuration space registers */ +/* PESRAM: 512K eSRAM */ +#define BAR_PESRAM_C2XXX NO_PCI_REG +#define BAR_PESRAM_SIZE_C2XXX 0 + +/* + * PMISC: 16K CAP, 16K Scratch, 32K SSU(QATs), + * 32K AE CSRs and transfer registers, 8K CHAP/PMU, + * 4K EP CSRs, 4K MSI-X Tables + */ +#define BAR_PMISC_C2XXX 0x18 +#define BAR_PMISC_SIZE_C2XXX 0x20000 /* 128K */ + +/* PETRINGCSR: 8K 16 bundles of ET Ring CSRs */ +#define BAR_PETRINGCSR_C2XXX 0x20 +#define BAR_PETRINGCSR_SIZE_C2XXX 0x4000 /* 16K */ + +/* Fuse Control */ +#define FUSECTL_C2XXX_PKE_DISABLE (1 << 6) +#define FUSECTL_C2XXX_ATH_DISABLE (1 << 5) +#define FUSECTL_C2XXX_CPH_DISABLE (1 << 4) +#define FUSECTL_C2XXX_LOW_SKU (1 << 3) +#define FUSECTL_C2XXX_MID_SKU (1 << 2) +#define FUSECTL_C2XXX_AE1_DISABLE (1 << 1) + +/* SINT: Signal Target Raw Interrupt Register */ +#define EP_SINTPF_C2XXX 0x1A024 + +/* SMIA: Signal Target IA Mask Register */ +#define EP_SMIA_C2XXX 0x1A028 +#define EP_SMIA_BUNDLES_IRQ_MASK_C2XXX 0xFF +#define EP_SMIA_AE_IRQ_MASK_C2XXX 0x10000 +#define EP_SMIA_MASK_C2XXX \ + (EP_SMIA_BUNDLES_IRQ_MASK_C2XXX | EP_SMIA_AE_IRQ_MASK_C2XXX) + +#define EP_RIMISCCTL_C2XXX 0x1A0C4 +#define EP_RIMISCCTL_MASK_C2XXX 0x40000000 + +#define PFCGCIOSFPRIR_REG_C2XXX 0x2C0 +#define PFCGCIOSFPRIR_MASK_C2XXX 0XFFFF7FFF + +/* BAR sub-regions */ +#define PESRAM_BAR_C2XXX NO_PCI_REG +#define PESRAM_OFFSET_C2XXX 0x0 +#define PESRAM_SIZE_C2XXX 0x0 +#define CAP_GLOBAL_BAR_C2XXX BAR_PMISC_C2XXX +#define CAP_GLOBAL_OFFSET_C2XXX 0x00000 +#define CAP_GLOBAL_SIZE_C2XXX 0x04000 +#define CAP_HASH_OFFSET 0x900 +#define SCRATCH_BAR_C2XXX NO_PCI_REG +#define SCRATCH_OFFSET_C2XXX NO_REG_OFFSET +#define SCRATCH_SIZE_C2XXX 0x0 +#define SSU_BAR_C2XXX BAR_PMISC_C2XXX +#define SSU_OFFSET_C2XXX 0x08000 +#define SSU_SIZE_C2XXX 0x08000 +#define AE_BAR_C2XXX BAR_PMISC_C2XXX +#define AE_OFFSET_C2XXX 0x10000 +#define AE_LOCAL_OFFSET_C2XXX 0x10800 +#define PMU_BAR_C2XXX NO_PCI_REG +#define PMU_OFFSET_C2XXX NO_REG_OFFSET +#define PMU_SIZE_C2XXX 0x0 +#define EP_BAR_C2XXX BAR_PMISC_C2XXX +#define EP_OFFSET_C2XXX 0x1A000 +#define EP_SIZE_C2XXX 0x01000 +#define MSIX_TAB_BAR_C2XXX NO_PCI_REG /* mapped by pci(9) */ +#define MSIX_TAB_OFFSET_C2XXX 0x1B000 +#define MSIX_TAB_SIZE_C2XXX 0x01000 +#define PETRINGCSR_BAR_C2XXX BAR_PETRINGCSR_C2XXX +#define PETRINGCSR_OFFSET_C2XXX 0x0 +#define PETRINGCSR_SIZE_C2XXX 0x0 /* use size of BAR */ + +/* ETR */ +#define ETR_MAX_BANKS_C2XXX 8 +#define ETR_MAX_ET_RINGS_C2XXX \ + (ETR_MAX_BANKS_C2XXX * ETR_MAX_RINGS_PER_BANK_C2XXX) +#define ETR_MAX_AP_BANKS_C2XXX 4 + +#define ETR_TX_RX_GAP_C2XXX 1 +#define ETR_TX_RINGS_MASK_C2XXX 0x51 + +#define ETR_BUNDLE_SIZE_C2XXX 0x0200 + +/* Initial bank Interrupt Source mask */ +#define ETR_INT_SRCSEL_MASK_0_C2XXX 0x4444444CUL +#define ETR_INT_SRCSEL_MASK_X_C2XXX 0x44444444UL + +/* AE firmware */ +#define AE_FW_PROD_TYPE_C2XXX 0x00800000 +#define AE_FW_MOF_NAME_C2XXX "mof_firmware_c2xxx" +#define AE_FW_MMP_NAME_C2XXX "mmp_firmware_c2xxx" +#define AE_FW_UOF_NAME_C2XXX_A0 
"icp_qat_nae.uof" +#define AE_FW_UOF_NAME_C2XXX_B0 "icp_qat_nae_b0.uof" + +#endif Property changes on: head/sys/dev/qat/qat_c2xxxreg.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_c3xxx.c =================================================================== --- head/sys/dev/qat/qat_c3xxx.c (nonexistent) +++ head/sys/dev/qat/qat_c3xxx.c (revision 367386) @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); +#if 0 +__KERNEL_RCSID(0, "$NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $"); +#endif + +#include <sys/param.h> +#include <sys/bus.h> +#include <sys/systm.h> + +#include <machine/bus.h> + +#include <dev/pci/pcireg.h> +#include <dev/pci/pcivar.h> + +#include "qatreg.h" +#include "qat_hw17reg.h" +#include "qat_c3xxxreg.h" +#include "qatvar.h" +#include "qat_hw17var.h" + +static uint32_t +qat_c3xxx_get_accel_mask(struct qat_softc *sc) +{ + uint32_t fusectl, strap; + + fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4); + strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4); + + return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C3XXX) & + ACCEL_MASK_C3XXX); +} + +static uint32_t +qat_c3xxx_get_ae_mask(struct qat_softc *sc) +{ + uint32_t fusectl, me_strap, me_disable, ssms_disabled; + + fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4); + me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4); + + /* If SSMs are disabled, then disable the corresponding MEs */ + ssms_disabled = (~qat_c3xxx_get_accel_mask(sc)) & ACCEL_MASK_C3XXX; + me_disable = 0x3; + while (ssms_disabled) { + if (ssms_disabled & 1) + me_strap |= me_disable; + ssms_disabled >>= 1; + me_disable <<= 2; + } + + return (~(fusectl | me_strap)) & AE_MASK_C3XXX; +} + +static enum qat_sku +qat_c3xxx_get_sku(struct qat_softc *sc) +{ + switch (sc->sc_ae_num) { + case MAX_AE_C3XXX: + return QAT_SKU_4; + } + + return QAT_SKU_UNKNOWN; +} + +static uint32_t +qat_c3xxx_get_accel_cap(struct qat_softc *sc) +{ + uint32_t cap, legfuse, strap; + + legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4); + strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C3XXX, 4); + + cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC + + QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC + + QAT_ACCEL_CAP_CIPHER + + QAT_ACCEL_CAP_AUTHENTICATION + + QAT_ACCEL_CAP_COMPRESSION + + QAT_ACCEL_CAP_ZUC + + QAT_ACCEL_CAP_SHA3; + + if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) { + cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC; + cap &= ~QAT_ACCEL_CAP_CIPHER; + } + if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE) + cap &= ~QAT_ACCEL_CAP_AUTHENTICATION; + if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE) + cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC; + if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE) + cap &= ~QAT_ACCEL_CAP_COMPRESSION; + if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE) + cap &= ~QAT_ACCEL_CAP_ZUC; + + if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C3XXX) + cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC; + if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C3XXX) + cap &= ~QAT_ACCEL_CAP_COMPRESSION; + + return cap; +} + +static const char * +qat_c3xxx_get_fw_uof_name(struct qat_softc *sc) +{ + + return AE_FW_UOF_NAME_C3XXX; +} + +static void +qat_c3xxx_enable_intr(struct qat_softc *sc) +{ + + /* Enable bundle and misc interrupts */ + qat_misc_write_4(sc, SMIAPF0_C3XXX, SMIA0_MASK_C3XXX); + qat_misc_write_4(sc, SMIAPF1_C3XXX, SMIA1_MASK_C3XXX); +} + +/* Worker thread to arbiter mappings, one word per AE */ +static uint32_t thrd_to_arb_map[] = { + 0x12222AAA, 0x11222AAA, 0x12222AAA, + 0x11222AAA, 0x12222AAA,
0x11222AAA +}; + +static void +qat_c3xxx_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config) +{ + int i; + + for (i = 1; i < MAX_AE_C3XXX; i++) { + if ((~sc->sc_ae_mask) & (1 << i)) + thrd_to_arb_map[i] = 0; + } + *arb_map_config = thrd_to_arb_map; +} + +static void +qat_c3xxx_enable_error_interrupts(struct qat_softc *sc) +{ + qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C3XXX); /* ME0-ME3 */ + qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C3XXX); /* ME4-ME5 */ + qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C3XXX); /* SSM2 */ + + /* Reset everything except VFtoPF1_16. */ + qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C3XXX); + + /* RI CPP bus interface error detection and reporting. */ + qat_misc_write_4(sc, RICPPINTCTL_C3XXX, RICPP_EN_C3XXX); + + /* TI CPP bus interface error detection and reporting. */ + qat_misc_write_4(sc, TICPPINTCTL_C3XXX, TICPP_EN_C3XXX); + + /* Enable CFC Error interrupts and logging. */ + qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C3XXX, CPP_CFC_UE_C3XXX); +} + +static void +qat_c3xxx_disable_error_interrupts(struct qat_softc *sc) +{ + /* ME0-ME3 */ + qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C3XXX | ERRMSK0_CERR_C3XXX); + /* ME4-ME5 */ + qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C3XXX | ERRMSK1_CERR_C3XXX); + /* CPP Push Pull, RI, TI, SSM0-SSM1, CFC */ + qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C3XXX); + /* SSM2 */ + qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C3XXX); +} + +static void +qat_c3xxx_enable_error_correction(struct qat_softc *sc) +{ + u_int i, mask; + + /* Enable Accel Engine error detection & correction */ + for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) { + if (!(mask & 1)) + continue; + qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C3XXX(i), + ENABLE_AE_ECC_ERR_C3XXX); + qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C3XXX(i), + ENABLE_AE_ECC_PARITY_CORR_C3XXX); + } + + /* Enable shared memory error detection & correction */ + for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) { + if (!(mask & 1)) + continue; + + qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C3XXX); + qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C3XXX); + qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C3XXX); + } + + qat_c3xxx_enable_error_interrupts(sc); +} + +const struct qat_hw qat_hw_c3xxx = { + .qhw_sram_bar_id = BAR_SRAM_ID_C3XXX, + .qhw_misc_bar_id = BAR_PMISC_ID_C3XXX, + .qhw_etr_bar_id = BAR_ETR_ID_C3XXX, + .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C3XXX, + .qhw_ae_offset = AE_OFFSET_C3XXX, + .qhw_ae_local_offset = AE_LOCAL_OFFSET_C3XXX, + .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C3XXX, + .qhw_num_banks = ETR_MAX_BANKS_C3XXX, + .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK, + .qhw_num_accel = MAX_ACCEL_C3XXX, + .qhw_num_engines = MAX_AE_C3XXX, + .qhw_tx_rx_gap = ETR_TX_RX_GAP_C3XXX, + .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C3XXX, + .qhw_clock_per_sec = CLOCK_PER_SEC_C3XXX, + .qhw_fw_auth = true, + .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17, + .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17, + .qhw_ring_asym_tx = 0, + .qhw_ring_asym_rx = 8, + .qhw_ring_sym_tx = 2, + .qhw_ring_sym_rx = 10, + .qhw_mof_fwname = AE_FW_MOF_NAME_C3XXX, + .qhw_mmp_fwname = AE_FW_MMP_NAME_C3XXX, + .qhw_prod_type = AE_FW_PROD_TYPE_C3XXX, + .qhw_get_accel_mask = qat_c3xxx_get_accel_mask, + .qhw_get_ae_mask = qat_c3xxx_get_ae_mask, + .qhw_get_sku = qat_c3xxx_get_sku, + .qhw_get_accel_cap = qat_c3xxx_get_accel_cap, + .qhw_get_fw_uof_name = qat_c3xxx_get_fw_uof_name, + .qhw_enable_intr = qat_c3xxx_enable_intr, + .qhw_init_admin_comms = 
qat_adm_mailbox_init, + .qhw_send_admin_init = qat_adm_mailbox_send_init, + .qhw_init_arb = qat_arb_init, + .qhw_get_arb_mapping = qat_c3xxx_get_arb_mapping, + .qhw_enable_error_correction = qat_c3xxx_enable_error_correction, + .qhw_disable_error_interrupts = qat_c3xxx_disable_error_interrupts, + .qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer, + .qhw_check_slice_hang = qat_check_slice_hang, + .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc, + .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params, + .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data), +}; Property changes on: head/sys/dev/qat/qat_c3xxx.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_c3xxxreg.h =================================================================== --- head/sys/dev/qat/qat_c3xxxreg.h (nonexistent) +++ head/sys/dev/qat/qat_c3xxxreg.h (revision 367386) @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_c3xxxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_C3XXXREG_H_ +#define _DEV_PCI_QAT_C3XXXREG_H_ + +/* Max number of accelerators and engines */ +#define MAX_ACCEL_C3XXX 3 +#define MAX_AE_C3XXX 6 + +/* PCIe BAR index */ +#define BAR_SRAM_ID_C3XXX NO_PCI_REG +#define BAR_PMISC_ID_C3XXX 0 +#define BAR_ETR_ID_C3XXX 1 + +/* BAR PMISC sub-regions */ +#define AE_OFFSET_C3XXX 0x20000 +#define AE_LOCAL_OFFSET_C3XXX 0x20800 +#define CAP_GLOBAL_OFFSET_C3XXX 0x30000 + +#define SOFTSTRAP_REG_C3XXX 0x2EC +#define SOFTSTRAP_SS_POWERGATE_CY_C3XXX __BIT(23) +#define SOFTSTRAP_SS_POWERGATE_PKE_C3XXX __BIT(24) + +#define ACCEL_REG_OFFSET_C3XXX 16 +#define ACCEL_MASK_C3XXX 0x7 +#define AE_MASK_C3XXX 0x3F + +#define SMIAPF0_C3XXX 0x3A028 +#define SMIAPF1_C3XXX 0x3A030 +#define SMIA0_MASK_C3XXX 0xFFFF +#define SMIA1_MASK_C3XXX 0x1 + +/* Error detection and correction */ +#define AE_CTX_ENABLES_C3XXX(i) ((i) * 0x1000 + 0x20818) +#define AE_MISC_CONTROL_C3XXX(i) ((i) * 0x1000 + 0x20960) +#define ENABLE_AE_ECC_ERR_C3XXX __BIT(28) +#define ENABLE_AE_ECC_PARITY_CORR_C3XXX (__BIT(24) | __BIT(12)) +#define ERRSSMSH_EN_C3XXX __BIT(3) +/* BIT(2) enables the logging of push/pull data errors. */ +#define PPERR_EN_C3XXX (__BIT(2)) + +/* Mask for VF2PF interrupts */ +#define VF2PF1_16_C3XXX (0xFFFF << 9) +#define ERRSOU3_VF2PF_C3XXX(errsou3) (((errsou3) & 0x01FFFE00) >> 9) +#define ERRMSK3_VF2PF_C3XXX(vf_mask) (((vf_mask) & 0xFFFF) << 9) + +/* Masks for correctable error interrupts. */ +#define ERRMSK0_CERR_C3XXX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0)) +#define ERRMSK1_CERR_C3XXX (__BIT(8) | __BIT(0)) +#define ERRMSK5_CERR_C3XXX (0) + +/* Masks for uncorrectable error interrupts. */ +#define ERRMSK0_UERR_C3XXX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1)) +#define ERRMSK1_UERR_C3XXX (__BIT(9) | __BIT(1)) +#define ERRMSK3_UERR_C3XXX (__BIT(6) | __BIT(5) | __BIT(4) | __BIT(3) | \ + __BIT(2) | __BIT(0)) +#define ERRMSK5_UERR_C3XXX (__BIT(16)) + +/* RI CPP control */ +#define RICPPINTCTL_C3XXX (0x3A000 + 0x110) +/* + * BIT(2) enables error detection and reporting on the RI Parity Error. + * BIT(1) enables error detection and reporting on the RI CPP Pull interface. + * BIT(0) enables error detection and reporting on the RI CPP Push interface. + */ +#define RICPP_EN_C3XXX (__BIT(2) | __BIT(1) | __BIT(0)) + +/* TI CPP control */ +#define TICPPINTCTL_C3XXX (0x3A400 + 0x138) +/* + * BIT(3) enables error detection and reporting on the ETR Parity Error. + * BIT(2) enables error detection and reporting on the TI Parity Error. + * BIT(1) enables error detection and reporting on the TI CPP Pull interface. + * BIT(0) enables error detection and reporting on the TI CPP Push interface. 
+ */ +#define TICPP_EN_C3XXX \ + (__BIT(3) | __BIT(2) | __BIT(1) | __BIT(0)) + +/* CFC Uncorrectable Errors */ +#define CPP_CFC_ERR_CTRL_C3XXX (0x30000 + 0xC00) +/* + * BIT(1) enables interrupt. + * BIT(0) enables detecting and logging of push/pull data errors. + */ +#define CPP_CFC_UE_C3XXX (__BIT(1) | __BIT(0)) + +#define SLICEPWRDOWN_C3XXX(i) ((i) * 0x4000 + 0x2C) +/* Enabling PKE4-PKE0. */ +#define MMP_PWR_UP_MSK_C3XXX \ + (__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16)) + +/* CPM Uncorrectable Errors */ +#define INTMASKSSM_C3XXX(i) ((i) * 0x4000 + 0x0) +/* Disabling interrupts for correctable errors. */ +#define INTMASKSSM_UERR_C3XXX \ + (__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1)) + +/* MMP */ +/* BIT(3) enables correction. */ +#define CERRSSMMMP_EN_C3XXX (__BIT(3)) + +/* BIT(3) enables logging. */ +#define UERRSSMMMP_EN_C3XXX (__BIT(3)) + +/* ETR */ +#define ETR_MAX_BANKS_C3XXX 16 +#define ETR_TX_RX_GAP_C3XXX 8 +#define ETR_TX_RINGS_MASK_C3XXX 0xFF +#define ETR_BUNDLE_SIZE_C3XXX 0x1000 + +/* AE firmware */ +#define AE_FW_PROD_TYPE_C3XXX 0x02000000 +#define AE_FW_MOF_NAME_C3XXX "qat_c3xxx" +#define AE_FW_MMP_NAME_C3XXX "qat_c3xxx_mmp" +#define AE_FW_UOF_NAME_C3XXX "icp_qat_ae.suof" + +/* Clock frequency */ +#define CLOCK_PER_SEC_C3XXX (685 * 1000000 / 16) + +#endif Property changes on: head/sys/dev/qat/qat_c3xxxreg.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_c62x.c =================================================================== --- head/sys/dev/qat/qat_c62x.c (nonexistent) +++ head/sys/dev/qat/qat_c62x.c (revision 367386) @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 Intel Corporation. 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qat_hw17reg.h"
+#include "qat_c62xreg.h"
+#include "qatvar.h"
+#include "qat_hw17var.h"
+
+static uint32_t
+qat_c62x_get_accel_mask(struct qat_softc *sc)
+{
+	uint32_t fusectl, strap;
+
+	fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+	strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);
+
+	return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C62X) &
+	    ACCEL_MASK_C62X);
+}
+
+static uint32_t
+qat_c62x_get_ae_mask(struct qat_softc *sc)
+{
+	uint32_t fusectl, me_strap, me_disable, ssms_disabled;
+
+	fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+	me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);
+
+	/* If SSMs are disabled, then disable the corresponding MEs */
+	ssms_disabled = (~qat_c62x_get_accel_mask(sc)) & ACCEL_MASK_C62X;
+	me_disable = 0x3;
+	while (ssms_disabled) {
+		if (ssms_disabled & 1)
+			me_strap |= me_disable;
+		ssms_disabled >>= 1;
+		me_disable <<= 2;
+	}
+
+	return (~(fusectl | me_strap)) & AE_MASK_C62X;
+}
+
+static enum qat_sku
+qat_c62x_get_sku(struct qat_softc *sc)
+{
+	switch (sc->sc_ae_num) {
+	case 8:
+		return QAT_SKU_2;
+	case MAX_AE_C62X:
+		return QAT_SKU_4;
+	}
+
+	return QAT_SKU_UNKNOWN;
+}
+
+static uint32_t
+qat_c62x_get_accel_cap(struct qat_softc *sc)
+{
+	uint32_t cap, legfuse, strap;
+
+	legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
+	strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_C62X, 4);
+
+	cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
+	    QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
+	    QAT_ACCEL_CAP_CIPHER +
+	    QAT_ACCEL_CAP_AUTHENTICATION +
+	    QAT_ACCEL_CAP_COMPRESSION +
+	    QAT_ACCEL_CAP_ZUC +
+	    QAT_ACCEL_CAP_SHA3;
+
+	if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
+		cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
+		cap &= ~QAT_ACCEL_CAP_CIPHER;
+	}
+	if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
+		cap &=
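+	    /* a set LEGFUSE bit means the slice is fused off,
+	       so the matching capability is stripped */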
~QAT_ACCEL_CAP_AUTHENTICATION; + if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE) + cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC; + if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE) + cap &= ~QAT_ACCEL_CAP_COMPRESSION; + if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE) + cap &= ~QAT_ACCEL_CAP_ZUC; + + if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C62X) + cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC; + if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C62X) + cap &= ~QAT_ACCEL_CAP_COMPRESSION; + + return cap; +} + +static const char * +qat_c62x_get_fw_uof_name(struct qat_softc *sc) +{ + + return AE_FW_UOF_NAME_C62X; +} + +static void +qat_c62x_enable_intr(struct qat_softc *sc) +{ + + /* Enable bundle and misc interrupts */ + qat_misc_write_4(sc, SMIAPF0_C62X, SMIA0_MASK_C62X); + qat_misc_write_4(sc, SMIAPF1_C62X, SMIA1_MASK_C62X); +} + +/* Worker thread to service arbiter mappings */ +static uint32_t thrd_to_arb_map[] = { + 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, + 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA +}; + +static void +qat_c62x_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config) +{ + int i; + + for (i = 1; i < MAX_AE_C62X; i++) { + if ((~sc->sc_ae_mask) & (1 << i)) + thrd_to_arb_map[i] = 0; + } + *arb_map_config = thrd_to_arb_map; +} + +static void +qat_c62x_enable_error_interrupts(struct qat_softc *sc) +{ + qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C62X); /* ME0-ME3 */ + qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C62X); /* ME4-ME7 */ + qat_misc_write_4(sc, ERRMSK4, ERRMSK4_CERR_C62X); /* ME8-ME9 */ + qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C62X); /* SSM2-SSM4 */ + + /* Reset everything except VFtoPF1_16. */ + qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C62X); + /* Disable Secure RAM correctable error interrupt */ + qat_misc_read_write_or_4(sc, ERRMSK3, ERRMSK3_CERR_C62X); + + /* RI CPP bus interface error detection and reporting. */ + qat_misc_write_4(sc, RICPPINTCTL_C62X, RICPP_EN_C62X); + + /* TI CPP bus interface error detection and reporting. */ + qat_misc_write_4(sc, TICPPINTCTL_C62X, TICPP_EN_C62X); + + /* Enable CFC Error interrupts and logging. 
*/ + qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C62X, CPP_CFC_UE_C62X); + + /* Enable SecureRAM to fix and log Correctable errors */ + qat_misc_write_4(sc, SECRAMCERR_C62X, SECRAM_CERR_C62X); + + /* Enable SecureRAM Uncorrectable error interrupts and logging */ + qat_misc_write_4(sc, SECRAMUERR, SECRAM_UERR_C62X); + + /* Enable Push/Pull Misc Uncorrectable error interrupts and logging */ + qat_misc_write_4(sc, CPPMEMTGTERR, TGT_UERR_C62X); +} + +static void +qat_c62x_disable_error_interrupts(struct qat_softc *sc) +{ + /* ME0-ME3 */ + qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C62X | ERRMSK0_CERR_C62X); + /* ME4-ME7 */ + qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C62X | ERRMSK1_CERR_C62X); + /* Secure RAM, CPP Push Pull, RI, TI, SSM0-SSM1, CFC */ + qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C62X | ERRMSK3_CERR_C62X); + /* ME8-ME9 */ + qat_misc_write_4(sc, ERRMSK4, ERRMSK4_UERR_C62X | ERRMSK4_CERR_C62X); + /* SSM2-SSM4 */ + qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C62X | ERRMSK5_CERR_C62X); +} + +static void +qat_c62x_enable_error_correction(struct qat_softc *sc) +{ + u_int i, mask; + + /* Enable Accel Engine error detection & correction */ + for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) { + if (!(mask & 1)) + continue; + qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C62X(i), + ENABLE_AE_ECC_ERR_C62X); + qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C62X(i), + ENABLE_AE_ECC_PARITY_CORR_C62X); + } + + /* Enable shared memory error detection & correction */ + for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) { + if (!(mask & 1)) + continue; + + qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C62X); + qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C62X); + qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C62X); + } + + qat_c62x_enable_error_interrupts(sc); +} + +const struct qat_hw qat_hw_c62x = { + .qhw_sram_bar_id = BAR_SRAM_ID_C62X, + .qhw_misc_bar_id = BAR_PMISC_ID_C62X, + .qhw_etr_bar_id = BAR_ETR_ID_C62X, + .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C62X, + .qhw_ae_offset = AE_OFFSET_C62X, + .qhw_ae_local_offset = AE_LOCAL_OFFSET_C62X, + .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C62X, + .qhw_num_banks = ETR_MAX_BANKS_C62X, + .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK, + .qhw_num_accel = MAX_ACCEL_C62X, + .qhw_num_engines = MAX_AE_C62X, + .qhw_tx_rx_gap = ETR_TX_RX_GAP_C62X, + .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C62X, + .qhw_clock_per_sec = CLOCK_PER_SEC_C62X, + .qhw_fw_auth = true, + .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17, + .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17, + .qhw_ring_asym_tx = 0, + .qhw_ring_asym_rx = 8, + .qhw_ring_sym_tx = 2, + .qhw_ring_sym_rx = 10, + .qhw_mof_fwname = AE_FW_MOF_NAME_C62X, + .qhw_mmp_fwname = AE_FW_MMP_NAME_C62X, + .qhw_prod_type = AE_FW_PROD_TYPE_C62X, + .qhw_get_accel_mask = qat_c62x_get_accel_mask, + .qhw_get_ae_mask = qat_c62x_get_ae_mask, + .qhw_get_sku = qat_c62x_get_sku, + .qhw_get_accel_cap = qat_c62x_get_accel_cap, + .qhw_get_fw_uof_name = qat_c62x_get_fw_uof_name, + .qhw_enable_intr = qat_c62x_enable_intr, + .qhw_init_admin_comms = qat_adm_mailbox_init, + .qhw_send_admin_init = qat_adm_mailbox_send_init, + .qhw_init_arb = qat_arb_init, + .qhw_get_arb_mapping = qat_c62x_get_arb_mapping, + .qhw_enable_error_correction = qat_c62x_enable_error_correction, + .qhw_disable_error_interrupts = qat_c62x_disable_error_interrupts, + .qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer, + .qhw_check_slice_hang = qat_check_slice_hang, + .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc, + .qhw_crypto_setup_req_params = 
qat_hw17_crypto_setup_req_params, + .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data), +}; Property changes on: head/sys/dev/qat/qat_c62x.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_c62xreg.h =================================================================== --- head/sys/dev/qat/qat_c62xreg.h (nonexistent) +++ head/sys/dev/qat/qat_c62xreg.h (revision 367386) @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_c62xreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_C62XREG_H_ +#define _DEV_PCI_QAT_C62XREG_H_ + +/* Max number of accelerators and engines */ +#define MAX_ACCEL_C62X 5 +#define MAX_AE_C62X 10 + +/* PCIe BAR index */ +#define BAR_SRAM_ID_C62X 0 +#define BAR_PMISC_ID_C62X 1 +#define BAR_ETR_ID_C62X 2 + +/* BAR PMISC sub-regions */ +#define AE_OFFSET_C62X 0x20000 +#define AE_LOCAL_OFFSET_C62X 0x20800 +#define CAP_GLOBAL_OFFSET_C62X 0x30000 + +#define SOFTSTRAP_REG_C62X 0x2EC +#define SOFTSTRAP_SS_POWERGATE_CY_C62X __BIT(23) +#define SOFTSTRAP_SS_POWERGATE_PKE_C62X __BIT(24) + +#define ACCEL_REG_OFFSET_C62X 16 +#define ACCEL_MASK_C62X 0x1F +#define AE_MASK_C62X 0x3FF + +#define SMIAPF0_C62X 0x3A028 +#define SMIAPF1_C62X 0x3A030 +#define SMIA0_MASK_C62X 0xFFFF +#define SMIA1_MASK_C62X 0x1 + +/* Error detection and correction */ +#define AE_CTX_ENABLES_C62X(i) ((i) * 0x1000 + 0x20818) +#define AE_MISC_CONTROL_C62X(i) ((i) * 0x1000 + 0x20960) +#define ENABLE_AE_ECC_ERR_C62X __BIT(28) +#define ENABLE_AE_ECC_PARITY_CORR_C62X (__BIT(24) | __BIT(12)) +#define ERRSSMSH_EN_C62X __BIT(3) +/* BIT(2) enables the logging of push/pull data errors. */ +#define PPERR_EN_C62X (__BIT(2)) + +/* Mask for VF2PF interrupts */ +#define VF2PF1_16_C62X (0xFFFF << 9) +#define ERRSOU3_VF2PF_C62X(errsou3) (((errsou3) & 0x01FFFE00) >> 9) +#define ERRMSK3_VF2PF_C62X(vf_mask) (((vf_mask) & 0xFFFF) << 9) + +/* Masks for correctable error interrupts. */ +#define ERRMSK0_CERR_C62X (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0)) +#define ERRMSK1_CERR_C62X (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0)) +#define ERRMSK3_CERR_C62X (__BIT(7)) +#define ERRMSK4_CERR_C62X (__BIT(8) | __BIT(0)) +#define ERRMSK5_CERR_C62X (0) + +/* Masks for uncorrectable error interrupts. */ +#define ERRMSK0_UERR_C62X (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1)) +#define ERRMSK1_UERR_C62X (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1)) +#define ERRMSK3_UERR_C62X (__BIT(8) | __BIT(6) | __BIT(5) | __BIT(4) | \ + __BIT(3) | __BIT(2) | __BIT(0)) +#define ERRMSK4_UERR_C62X (__BIT(9) | __BIT(1)) +#define ERRMSK5_UERR_C62X (__BIT(18) | __BIT(17) | __BIT(16)) + +/* RI CPP control */ +#define RICPPINTCTL_C62X (0x3A000 + 0x110) +/* + * BIT(2) enables error detection and reporting on the RI Parity Error. + * BIT(1) enables error detection and reporting on the RI CPP Pull interface. + * BIT(0) enables error detection and reporting on the RI CPP Push interface. + */ +#define RICPP_EN_C62X (__BIT(2) | __BIT(1) | __BIT(0)) + +/* TI CPP control */ +#define TICPPINTCTL_C62X (0x3A400 + 0x138) +/* + * BIT(3) enables error detection and reporting on the ETR Parity Error. + * BIT(2) enables error detection and reporting on the TI Parity Error. + * BIT(1) enables error detection and reporting on the TI CPP Pull interface. + * BIT(0) enables error detection and reporting on the TI CPP Push interface. 
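+ * (TICPP_EN_C62X below additionally sets BIT(4).)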
+ */ +#define TICPP_EN_C62X \ + (__BIT(4) | __BIT(3) | __BIT(2) | __BIT(1) | __BIT(0)) + +/* CFC Uncorrectable Errors */ +#define CPP_CFC_ERR_CTRL_C62X (0x30000 + 0xC00) +/* + * BIT(1) enables interrupt. + * BIT(0) enables detecting and logging of push/pull data errors. + */ +#define CPP_CFC_UE_C62X (__BIT(1) | __BIT(0)) + +/* Correctable SecureRAM Error Reg */ +#define SECRAMCERR_C62X (0x3AC00 + 0x00) +/* BIT(3) enables fixing and logging of correctable errors. */ +#define SECRAM_CERR_C62X (__BIT(3)) + +/* Uncorrectable SecureRAM Error Reg */ +/* + * BIT(17) enables interrupt. + * BIT(3) enables detecting and logging of uncorrectable errors. + */ +#define SECRAM_UERR_C62X (__BIT(17) | __BIT(3)) + +/* Miscellaneous Memory Target Errors Register */ +/* + * BIT(3) enables detecting and logging push/pull data errors. + * BIT(2) enables interrupt. + */ +#define TGT_UERR_C62X (__BIT(3) | __BIT(2)) + + +#define SLICEPWRDOWN_C62X(i) ((i) * 0x4000 + 0x2C) +/* Enabling PKE4-PKE0. */ +#define MMP_PWR_UP_MSK_C62X \ + (__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16)) + +/* CPM Uncorrectable Errors */ +#define INTMASKSSM_C62X(i) ((i) * 0x4000 + 0x0) +/* Disabling interrupts for correctable errors. */ +#define INTMASKSSM_UERR_C62X \ + (__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1)) + +/* MMP */ +/* BIT(3) enables correction. */ +#define CERRSSMMMP_EN_C62X (__BIT(3)) + +/* BIT(3) enables logging. */ +#define UERRSSMMMP_EN_C62X (__BIT(3)) + +/* ETR */ +#define ETR_MAX_BANKS_C62X 16 +#define ETR_TX_RX_GAP_C62X 8 +#define ETR_TX_RINGS_MASK_C62X 0xFF +#define ETR_BUNDLE_SIZE_C62X 0x1000 + +/* AE firmware */ +#define AE_FW_PROD_TYPE_C62X 0x01000000 +#define AE_FW_MOF_NAME_C62X "qat_c62x" +#define AE_FW_MMP_NAME_C62X "qat_c62x_mmp" +#define AE_FW_UOF_NAME_C62X "icp_qat_ae.suof" + +/* Clock frequency */ +#define CLOCK_PER_SEC_C62X (685 * 1000000 / 16) + +#endif Property changes on: head/sys/dev/qat/qat_c62xreg.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_d15xx.c =================================================================== --- head/sys/dev/qat/qat_d15xx.c (nonexistent) +++ head/sys/dev/qat/qat_d15xx.c (revision 367386) @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_d15xx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: qat_d15xx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
+#endif
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qat_hw17reg.h"
+#include "qat_d15xxreg.h"
+#include "qatvar.h"
+#include "qat_hw17var.h"
+
+static uint32_t
+qat_d15xx_get_accel_mask(struct qat_softc *sc)
+{
+	uint32_t fusectl, strap;
+
+	fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+	strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
+
+	return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_D15XX) &
+	    ACCEL_MASK_D15XX);
+}
+
+static uint32_t
+qat_d15xx_get_ae_mask(struct qat_softc *sc)
+{
+	uint32_t fusectl, me_strap, me_disable, ssms_disabled;
+
+	fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+	me_strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
+
+	/* If SSMs are disabled, then disable the corresponding MEs */
+	ssms_disabled = (~qat_d15xx_get_accel_mask(sc)) & ACCEL_MASK_D15XX;
+	me_disable = 0x3;
+	while (ssms_disabled) {
+		if (ssms_disabled & 1)
+			me_strap |= me_disable;
+		ssms_disabled >>= 1;
+		me_disable <<= 2;
+	}
+
+	return (~(fusectl | me_strap)) & AE_MASK_D15XX;
+}
+
+static enum qat_sku
+qat_d15xx_get_sku(struct qat_softc *sc)
+{
+	switch (sc->sc_ae_num) {
+	case 8:
+		return QAT_SKU_2;
+	case MAX_AE_D15XX:
+		return QAT_SKU_4;
+	}
+
+	return QAT_SKU_UNKNOWN;
+}
+
+static uint32_t
+qat_d15xx_get_accel_cap(struct qat_softc *sc)
+{
+	uint32_t cap, legfuse, strap;
+
+	legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
+	strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_D15XX, 4);
+
+	cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
+	    QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
+	    QAT_ACCEL_CAP_CIPHER +
+	    QAT_ACCEL_CAP_AUTHENTICATION +
+	    QAT_ACCEL_CAP_COMPRESSION +
+	    QAT_ACCEL_CAP_ZUC +
+	    QAT_ACCEL_CAP_SHA3;
+
+	if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
+		cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
+		cap &= ~QAT_ACCEL_CAP_CIPHER;
+	}
+	if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
+		cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
+	if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
+		cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+	if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
+		cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+	if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
+		cap &= ~QAT_ACCEL_CAP_ZUC;
+
+	if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_D15XX)
+		cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+	if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_D15XX)
+		cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+
+	return cap;
+}
+
+static const char *
+qat_d15xx_get_fw_uof_name(struct qat_softc *sc)
+{
+
+	return AE_FW_UOF_NAME_D15XX;
+}
+
+static void
+qat_d15xx_enable_intr(struct qat_softc *sc)
+{
+
+	/* Enable bundle and misc interrupts */
+	qat_misc_write_4(sc, SMIAPF0_D15XX, SMIA0_MASK_D15XX);
+	qat_misc_write_4(sc, SMIAPF1_D15XX, SMIA1_MASK_D15XX);
+}
+
+/* Worker thread to service arbiter mappings */
+static uint32_t thrd_to_arb_map[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static void
+qat_d15xx_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
+{
+	int i;
+
+	for (i = 1; i < MAX_AE_D15XX; i++) {
+		if ((~sc->sc_ae_mask) & (1 << i))
+			thrd_to_arb_map[i] = 0;
+	}
+	*arb_map_config = thrd_to_arb_map;
+}
+
+static void
+qat_d15xx_enable_error_interrupts(struct qat_softc *sc)
+{
+	qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_D15XX); /* ME0-ME3 */
+	qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_D15XX); /*
ME4-ME7 */ + qat_misc_write_4(sc, ERRMSK4, ERRMSK4_CERR_D15XX); /* ME8-ME9 */ + qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_D15XX); /* SSM2-SSM4 */ + + /* Reset everything except VFtoPF1_16. */ + qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_D15XX); + /* Disable Secure RAM correctable error interrupt */ + qat_misc_read_write_or_4(sc, ERRMSK3, ERRMSK3_CERR_D15XX); + + /* RI CPP bus interface error detection and reporting. */ + qat_misc_write_4(sc, RICPPINTCTL_D15XX, RICPP_EN_D15XX); + + /* TI CPP bus interface error detection and reporting. */ + qat_misc_write_4(sc, TICPPINTCTL_D15XX, TICPP_EN_D15XX); + + /* Enable CFC Error interrupts and logging. */ + qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_D15XX, CPP_CFC_UE_D15XX); + + /* Enable SecureRAM to fix and log Correctable errors */ + qat_misc_write_4(sc, SECRAMCERR_D15XX, SECRAM_CERR_D15XX); + + /* Enable SecureRAM Uncorrectable error interrupts and logging */ + qat_misc_write_4(sc, SECRAMUERR, SECRAM_UERR_D15XX); + + /* Enable Push/Pull Misc Uncorrectable error interrupts and logging */ + qat_misc_write_4(sc, CPPMEMTGTERR, TGT_UERR_D15XX); +} + +static void +qat_d15xx_disable_error_interrupts(struct qat_softc *sc) +{ + /* ME0-ME3 */ + qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_D15XX | ERRMSK0_CERR_D15XX); + /* ME4-ME7 */ + qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_D15XX | ERRMSK1_CERR_D15XX); + /* Secure RAM, CPP Push Pull, RI, TI, SSM0-SSM1, CFC */ + qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_D15XX | ERRMSK3_CERR_D15XX); + /* ME8-ME9 */ + qat_misc_write_4(sc, ERRMSK4, ERRMSK4_UERR_D15XX | ERRMSK4_CERR_D15XX); + /* SSM2-SSM4 */ + qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_D15XX | ERRMSK5_CERR_D15XX); +} + +static void +qat_d15xx_enable_error_correction(struct qat_softc *sc) +{ + u_int i, mask; + + /* Enable Accel Engine error detection & correction */ + for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) { + if (!(mask & 1)) + continue; + qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_D15XX(i), + ENABLE_AE_ECC_ERR_D15XX); + qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_D15XX(i), + ENABLE_AE_ECC_PARITY_CORR_D15XX); + } + + /* Enable shared memory error detection & correction */ + for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) { + if (!(mask & 1)) + continue; + + qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_D15XX); + qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_D15XX); + qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_D15XX); + } + + qat_d15xx_enable_error_interrupts(sc); +} + +const struct qat_hw qat_hw_d15xx = { + .qhw_sram_bar_id = BAR_SRAM_ID_D15XX, + .qhw_misc_bar_id = BAR_PMISC_ID_D15XX, + .qhw_etr_bar_id = BAR_ETR_ID_D15XX, + .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_D15XX, + .qhw_ae_offset = AE_OFFSET_D15XX, + .qhw_ae_local_offset = AE_LOCAL_OFFSET_D15XX, + .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_D15XX, + .qhw_num_banks = ETR_MAX_BANKS_D15XX, + .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK, + .qhw_num_accel = MAX_ACCEL_D15XX, + .qhw_num_engines = MAX_AE_D15XX, + .qhw_tx_rx_gap = ETR_TX_RX_GAP_D15XX, + .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_D15XX, + .qhw_clock_per_sec = CLOCK_PER_SEC_D15XX, + .qhw_fw_auth = true, + .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17, + .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17, + .qhw_ring_asym_tx = 0, + .qhw_ring_asym_rx = 8, + .qhw_ring_sym_tx = 2, + .qhw_ring_sym_rx = 10, + .qhw_mof_fwname = AE_FW_MOF_NAME_D15XX, + .qhw_mmp_fwname = AE_FW_MMP_NAME_D15XX, + .qhw_prod_type = AE_FW_PROD_TYPE_D15XX, + .qhw_get_accel_mask = qat_d15xx_get_accel_mask, + .qhw_get_ae_mask = 
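+	    /* engine mask derived from FUSECTL and the soft straps */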
qat_d15xx_get_ae_mask, + .qhw_get_sku = qat_d15xx_get_sku, + .qhw_get_accel_cap = qat_d15xx_get_accel_cap, + .qhw_get_fw_uof_name = qat_d15xx_get_fw_uof_name, + .qhw_enable_intr = qat_d15xx_enable_intr, + .qhw_init_admin_comms = qat_adm_mailbox_init, + .qhw_send_admin_init = qat_adm_mailbox_send_init, + .qhw_init_arb = qat_arb_init, + .qhw_get_arb_mapping = qat_d15xx_get_arb_mapping, + .qhw_enable_error_correction = qat_d15xx_enable_error_correction, + .qhw_disable_error_interrupts = qat_d15xx_disable_error_interrupts, + .qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer, + .qhw_check_slice_hang = qat_check_slice_hang, + .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc, + .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params, + .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data), +}; Property changes on: head/sys/dev/qat/qat_d15xx.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_d15xxreg.h =================================================================== --- head/sys/dev/qat/qat_d15xxreg.h (nonexistent) +++ head/sys/dev/qat/qat_d15xxreg.h (revision 367386) @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_d15xxreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_D15XXREG_H_ +#define _DEV_PCI_QAT_D15XXREG_H_ + +/* Max number of accelerators and engines */ +#define MAX_ACCEL_D15XX 5 +#define MAX_AE_D15XX 10 + +/* PCIe BAR index */ +#define BAR_SRAM_ID_D15XX 0 +#define BAR_PMISC_ID_D15XX 1 +#define BAR_ETR_ID_D15XX 2 + +/* BAR PMISC sub-regions */ +#define AE_OFFSET_D15XX 0x20000 +#define AE_LOCAL_OFFSET_D15XX 0x20800 +#define CAP_GLOBAL_OFFSET_D15XX 0x30000 + +#define SOFTSTRAP_REG_D15XX 0x2EC +#define SOFTSTRAP_SS_POWERGATE_CY_D15XX __BIT(23) +#define SOFTSTRAP_SS_POWERGATE_PKE_D15XX __BIT(24) + +#define ACCEL_REG_OFFSET_D15XX 16 +#define ACCEL_MASK_D15XX 0x1F +#define AE_MASK_D15XX 0x3FF + +#define SMIAPF0_D15XX 0x3A028 +#define SMIAPF1_D15XX 0x3A030 +#define SMIA0_MASK_D15XX 0xFFFF +#define SMIA1_MASK_D15XX 0x1 + +/* Error detection and correction */ +#define AE_CTX_ENABLES_D15XX(i) ((i) * 0x1000 + 0x20818) +#define AE_MISC_CONTROL_D15XX(i) ((i) * 0x1000 + 0x20960) +#define ENABLE_AE_ECC_ERR_D15XX __BIT(28) +#define ENABLE_AE_ECC_PARITY_CORR_D15XX (__BIT(24) | __BIT(12)) +#define ERRSSMSH_EN_D15XX __BIT(3) +/* BIT(2) enables the logging of push/pull data errors. */ +#define PPERR_EN_D15XX (__BIT(2)) + +/* Mask for VF2PF interrupts */ +#define VF2PF1_16_D15XX (0xFFFF << 9) +#define ERRSOU3_VF2PF_D15XX(errsou3) (((errsou3) & 0x01FFFE00) >> 9) +#define ERRMSK3_VF2PF_D15XX(vf_mask) (((vf_mask) & 0xFFFF) << 9) + +/* Masks for correctable error interrupts. */ +#define ERRMSK0_CERR_D15XX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0)) +#define ERRMSK1_CERR_D15XX (__BIT(24) | __BIT(16) | __BIT(8) | __BIT(0)) +#define ERRMSK3_CERR_D15XX (__BIT(7)) +#define ERRMSK4_CERR_D15XX (__BIT(8) | __BIT(0)) +#define ERRMSK5_CERR_D15XX (0) + +/* Masks for uncorrectable error interrupts. */ +#define ERRMSK0_UERR_D15XX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1)) +#define ERRMSK1_UERR_D15XX (__BIT(25) | __BIT(17) | __BIT(9) | __BIT(1)) +#define ERRMSK3_UERR_D15XX (__BIT(8) | __BIT(6) | __BIT(5) | __BIT(4) | \ + __BIT(3) | __BIT(2) | __BIT(0)) +#define ERRMSK4_UERR_D15XX (__BIT(9) | __BIT(1)) +#define ERRMSK5_UERR_D15XX (__BIT(18) | __BIT(17) | __BIT(16)) + +/* RI CPP control */ +#define RICPPINTCTL_D15XX (0x3A000 + 0x110) +/* + * BIT(2) enables error detection and reporting on the RI Parity Error. + * BIT(1) enables error detection and reporting on the RI CPP Pull interface. + * BIT(0) enables error detection and reporting on the RI CPP Push interface. + */ +#define RICPP_EN_D15XX (__BIT(2) | __BIT(1) | __BIT(0)) + +/* TI CPP control */ +#define TICPPINTCTL_D15XX (0x3A400 + 0x138) +/* + * BIT(3) enables error detection and reporting on the ETR Parity Error. + * BIT(2) enables error detection and reporting on the TI Parity Error. 
+ * BIT(1) enables error detection and reporting on the TI CPP Pull interface. + * BIT(0) enables error detection and reporting on the TI CPP Push interface. + */ +#define TICPP_EN_D15XX \ + (__BIT(4) | __BIT(3) | __BIT(2) | __BIT(1) | __BIT(0)) + +/* CFC Uncorrectable Errors */ +#define CPP_CFC_ERR_CTRL_D15XX (0x30000 + 0xC00) +/* + * BIT(1) enables interrupt. + * BIT(0) enables detecting and logging of push/pull data errors. + */ +#define CPP_CFC_UE_D15XX (__BIT(1) | __BIT(0)) + +/* Correctable SecureRAM Error Reg */ +#define SECRAMCERR_D15XX (0x3AC00 + 0x00) +/* BIT(3) enables fixing and logging of correctable errors. */ +#define SECRAM_CERR_D15XX (__BIT(3)) + +/* Uncorrectable SecureRAM Error Reg */ +/* + * BIT(17) enables interrupt. + * BIT(3) enables detecting and logging of uncorrectable errors. + */ +#define SECRAM_UERR_D15XX (__BIT(17) | __BIT(3)) + +/* Miscellaneous Memory Target Errors Register */ +/* + * BIT(3) enables detecting and logging push/pull data errors. + * BIT(2) enables interrupt. + */ +#define TGT_UERR_D15XX (__BIT(3) | __BIT(2)) + + +#define SLICEPWRDOWN_D15XX(i) ((i) * 0x4000 + 0x2C) +/* Enabling PKE4-PKE0. */ +#define MMP_PWR_UP_MSK_D15XX \ + (__BIT(20) | __BIT(19) | __BIT(18) | __BIT(17) | __BIT(16)) + +/* CPM Uncorrectable Errors */ +#define INTMASKSSM_D15XX(i) ((i) * 0x4000 + 0x0) +/* Disabling interrupts for correctable errors. */ +#define INTMASKSSM_UERR_D15XX \ + (__BIT(11) | __BIT(9) | __BIT(7) | __BIT(5) | __BIT(3) | __BIT(1)) + +/* MMP */ +/* BIT(3) enables correction. */ +#define CERRSSMMMP_EN_D15XX (__BIT(3)) + +/* BIT(3) enables logging. */ +#define UERRSSMMMP_EN_D15XX (__BIT(3)) + +/* ETR */ +#define ETR_MAX_BANKS_D15XX 16 +#define ETR_TX_RX_GAP_D15XX 8 +#define ETR_TX_RINGS_MASK_D15XX 0xFF +#define ETR_BUNDLE_SIZE_D15XX 0x1000 + +/* AE firmware */ +#define AE_FW_PROD_TYPE_D15XX 0x01000000 +#define AE_FW_MOF_NAME_D15XX "qat_d15xx" +#define AE_FW_MMP_NAME_D15XX "qat_d15xx_mmp" +#define AE_FW_UOF_NAME_D15XX "icp_qat_ae.suof" + +/* Clock frequency */ +#define CLOCK_PER_SEC_D15XX (685 * 1000000 / 16) + +#endif Property changes on: head/sys/dev/qat/qat_d15xxreg.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_dh895xcc.c =================================================================== --- head/sys/dev/qat/qat_dh895xcc.c (nonexistent) +++ head/sys/dev/qat/qat_dh895xcc.c (revision 367386) @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause */ +/* + * Copyright (c) 2020 Rubicon Communications, LLC (Netgate) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 - 2020 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "qatreg.h"
+#include "qatvar.h"
+#include "qat_hw17reg.h"
+#include "qat_hw17var.h"
+#include "qat_dh895xccreg.h"
+
+static uint32_t
+qat_dh895xcc_get_accel_mask(struct qat_softc *sc)
+{
+	uint32_t fusectl, strap;
+
+	fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+	strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_DH895XCC, 4);
+
+	return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_DH895XCC) &
+	    ACCEL_MASK_DH895XCC);
+}
+
+static uint32_t
+qat_dh895xcc_get_ae_mask(struct qat_softc *sc)
+{
+	uint32_t fusectl, strap;
+
+	fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+	strap = pci_read_config(sc->sc_dev, SOFTSTRAP_REG_DH895XCC, 4);
+
+	return (~(fusectl | strap)) & AE_MASK_DH895XCC;
+}
+
+static enum qat_sku
+qat_dh895xcc_get_sku(struct qat_softc *sc)
+{
+	uint32_t fusectl, sku;
+
+	fusectl = pci_read_config(sc->sc_dev, FUSECTL_REG, 4);
+	sku = (fusectl & FUSECTL_SKU_MASK_DH895XCC) >>
+	    FUSECTL_SKU_SHIFT_DH895XCC;
+	switch (sku) {
+	case FUSECTL_SKU_1_DH895XCC:
+		return QAT_SKU_1;
+	case FUSECTL_SKU_2_DH895XCC:
+		return QAT_SKU_2;
+	case FUSECTL_SKU_3_DH895XCC:
+		return QAT_SKU_3;
+	case FUSECTL_SKU_4_DH895XCC:
+		return QAT_SKU_4;
+	default:
+		return QAT_SKU_UNKNOWN;
+	}
+}
+
+static uint32_t
+qat_dh895xcc_get_accel_cap(struct qat_softc *sc)
+{
+	uint32_t cap, legfuse;
+
+	legfuse = pci_read_config(sc->sc_dev, LEGFUSE_REG, 4);
+
+	cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
+	    QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
+	    QAT_ACCEL_CAP_CIPHER +
+	    QAT_ACCEL_CAP_AUTHENTICATION +
+	    QAT_ACCEL_CAP_COMPRESSION +
+	    QAT_ACCEL_CAP_ZUC +
+	    QAT_ACCEL_CAP_SHA3;
+
+	if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
+		cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
+		cap &= ~QAT_ACCEL_CAP_CIPHER;
+	}
+	if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
+		cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
+	if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
+		cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
+	if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
+		cap &= ~QAT_ACCEL_CAP_COMPRESSION;
+	if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
+		cap &= ~QAT_ACCEL_CAP_ZUC;
+
+	return cap;
+}
+
+static const char *
+qat_dh895xcc_get_fw_uof_name(struct qat_softc *sc)
+{
+	return AE_FW_UOF_NAME_DH895XCC;
+}
+
+static void
+qat_dh895xcc_enable_intr(struct qat_softc *sc)
+{
+	/* Enable bundle and misc interrupts */
+	qat_misc_write_4(sc, SMIAPF0_DH895XCC, SMIA0_MASK_DH895XCC);
+	qat_misc_write_4(sc, SMIAPF1_DH895XCC, SMIA1_MASK_DH895XCC);
+}
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static uint32_t thrd_to_arb_map_sku4[] = {
+	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static uint32_t thrd_to_arb_map_sku6[] = {
+	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+};
+
+static void
+qat_dh895xcc_get_arb_mapping(struct qat_softc *sc,
+    const uint32_t **arb_map_config)
+{
+	uint32_t *map, sku;
+	int i;
+
+	sku = qat_dh895xcc_get_sku(sc);
+	switch (sku) {
+	case QAT_SKU_1:
+		map = thrd_to_arb_map_sku4;
+		break;
+	case QAT_SKU_2:
+	case QAT_SKU_4:
+		map = thrd_to_arb_map_sku6;
+		break;
+	default:
+		*arb_map_config = NULL;
+		return;
+	}
+
+	for (i = 1; i < MAX_AE_DH895XCC; i++) {
+		if ((~sc->sc_ae_mask) & (1 << i))
+			map[i] = 0;
+	}
+	*arb_map_config = map;
+}
+
+static void
+qat_dh895xcc_enable_error_correction(struct
qat_softc *sc) +{ + uint32_t mask; + u_int i; + + /* Enable Accel Engine error detection & correction */ + for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) { + if (!(mask & 1)) + continue; + qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_DH895XCC(i), + ENABLE_AE_ECC_ERR_DH895XCC); + qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_DH895XCC(i), + ENABLE_AE_ECC_PARITY_CORR_DH895XCC); + } + + /* Enable shared memory error detection & correction */ + for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) { + if (!(mask & 1)) + continue; + + qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_DH895XCC); + qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_DH895XCC); + qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_DH895XCC); + } +} + +const struct qat_hw qat_hw_dh895xcc = { + .qhw_sram_bar_id = BAR_SRAM_ID_DH895XCC, + .qhw_misc_bar_id = BAR_PMISC_ID_DH895XCC, + .qhw_etr_bar_id = BAR_ETR_ID_DH895XCC, + .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_DH895XCC, + .qhw_ae_offset = AE_OFFSET_DH895XCC, + .qhw_ae_local_offset = AE_LOCAL_OFFSET_DH895XCC, + .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_DH895XCC, + .qhw_num_banks = ETR_MAX_BANKS_DH895XCC, + .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK, + .qhw_num_accel = MAX_ACCEL_DH895XCC, + .qhw_num_engines = MAX_AE_DH895XCC, + .qhw_tx_rx_gap = ETR_TX_RX_GAP_DH895XCC, + .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_DH895XCC, + .qhw_clock_per_sec = CLOCK_PER_SEC_DH895XCC, + .qhw_fw_auth = false, + .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17, + .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17, + .qhw_ring_asym_tx = 0, + .qhw_ring_asym_rx = 8, + .qhw_ring_sym_tx = 2, + .qhw_ring_sym_rx = 10, + .qhw_mof_fwname = AE_FW_MOF_NAME_DH895XCC, + .qhw_mmp_fwname = AE_FW_MMP_NAME_DH895XCC, + .qhw_prod_type = AE_FW_PROD_TYPE_DH895XCC, + .qhw_get_accel_mask = qat_dh895xcc_get_accel_mask, + .qhw_get_ae_mask = qat_dh895xcc_get_ae_mask, + .qhw_get_sku = qat_dh895xcc_get_sku, + .qhw_get_accel_cap = qat_dh895xcc_get_accel_cap, + .qhw_get_fw_uof_name = qat_dh895xcc_get_fw_uof_name, + .qhw_enable_intr = qat_dh895xcc_enable_intr, + .qhw_init_admin_comms = qat_adm_mailbox_init, + .qhw_send_admin_init = qat_adm_mailbox_send_init, + .qhw_init_arb = qat_arb_init, + .qhw_get_arb_mapping = qat_dh895xcc_get_arb_mapping, + .qhw_enable_error_correction = qat_dh895xcc_enable_error_correction, + .qhw_check_slice_hang = qat_check_slice_hang, + .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc, + .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params, + .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data), +}; Property changes on: head/sys/dev/qat/qat_dh895xcc.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_dh895xccreg.h =================================================================== --- head/sys/dev/qat/qat_dh895xccreg.h (nonexistent) +++ head/sys/dev/qat/qat_dh895xccreg.h (revision 367386) @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014-2020 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_DH895XCCREG_H_ +#define _DEV_PCI_QAT_DH895XCCREG_H_ + +/* Max number of accelerators and engines */ +#define MAX_ACCEL_DH895XCC 6 +#define MAX_AE_DH895XCC 12 + +/* PCIe BAR index */ +#define BAR_SRAM_ID_DH895XCC 0 +#define BAR_PMISC_ID_DH895XCC 1 +#define BAR_ETR_ID_DH895XCC 2 + +/* BAR PMISC sub-regions */ +#define AE_OFFSET_DH895XCC 0x20000 +#define AE_LOCAL_OFFSET_DH895XCC 0x20800 +#define CAP_GLOBAL_OFFSET_DH895XCC 0x30000 + +#define SOFTSTRAP_REG_DH895XCC 0x2EC + +#define FUSECTL_SKU_MASK_DH895XCC 0x300000 +#define FUSECTL_SKU_SHIFT_DH895XCC 20 +#define FUSECTL_SKU_1_DH895XCC 0 +#define FUSECTL_SKU_2_DH895XCC 1 +#define FUSECTL_SKU_3_DH895XCC 2 +#define FUSECTL_SKU_4_DH895XCC 3 + +#define ACCEL_REG_OFFSET_DH895XCC 13 +#define ACCEL_MASK_DH895XCC 0x3F +#define AE_MASK_DH895XCC 0xFFF + +#define SMIAPF0_DH895XCC 0x3A028 +#define SMIAPF1_DH895XCC 0x3A030 +#define SMIA0_MASK_DH895XCC 0xFFFFFFFF +#define SMIA1_MASK_DH895XCC 0x1 + +/* Error detection and correction */ +#define AE_CTX_ENABLES_DH895XCC(i) ((i) * 0x1000 + 0x20818) +#define AE_MISC_CONTROL_DH895XCC(i) ((i) * 0x1000 + 0x20960) +#define ENABLE_AE_ECC_ERR_DH895XCC __BIT(28) +#define ENABLE_AE_ECC_PARITY_CORR_DH895XCC (__BIT(24) | __BIT(12)) +#define ERRSSMSH_EN_DH895XCC __BIT(3) +/* BIT(2) enables the logging of push/pull data errors. */ +#define PPERR_EN_DH895XCC (__BIT(2)) + +/* ETR */ +#define ETR_MAX_BANKS_DH895XCC 32 +#define ETR_TX_RX_GAP_DH895XCC 8 +#define ETR_TX_RINGS_MASK_DH895XCC 0xFF +#define ETR_BUNDLE_SIZE_DH895XCC 0x1000 + +/* AE firmware */ +#define AE_FW_PROD_TYPE_DH895XCC 0x00400000 +#define AE_FW_MOF_NAME_DH895XCC "qat_895xcc" +#define AE_FW_MMP_NAME_DH895XCC "qat_895xcc_mmp" +#define AE_FW_UOF_NAME_DH895XCC "icp_qat_ae.uof" + +/* Clock frequency */ +#define CLOCK_PER_SEC_DH895XCC (685 * 1000000 / 16) + +#endif Property changes on: head/sys/dev/qat/qat_dh895xccreg.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_hw15.c =================================================================== --- head/sys/dev/qat/qat_hw15.c (nonexistent) +++ head/sys/dev/qat/qat_hw15.c (revision 367386) @@ -0,0 +1,953 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2013 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); +#if 0 +__KERNEL_RCSID(0, "$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $"); +#endif + +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include + +#include "qatreg.h" +#include "qat_hw15reg.h" +#include "qatvar.h" +#include "qat_hw15var.h" + +static int qat_adm_ring_init_ring_table(struct qat_softc *); +static void qat_adm_ring_build_slice_mask(uint16_t *, uint32_t, uint32_t); +static void qat_adm_ring_build_shram_mask(uint64_t *, uint32_t, uint32_t); +static int qat_adm_ring_build_ring_table(struct qat_softc *, uint32_t); +static int qat_adm_ring_build_init_msg(struct qat_softc *, + struct fw_init_req *, enum fw_init_cmd_id, uint32_t, + struct qat_accel_init_cb *); +static int qat_adm_ring_send_init_msg_sync(struct qat_softc *, + enum fw_init_cmd_id, uint32_t); +static int qat_adm_ring_send_init_msg(struct qat_softc *, + enum fw_init_cmd_id); +static int qat_adm_ring_intr(struct qat_softc *, void *, void *); + +void +qat_msg_req_type_populate(struct arch_if_req_hdr *msg, enum arch_if_req type, + uint32_t rxring) +{ + + memset(msg, 0, sizeof(struct arch_if_req_hdr)); + msg->flags = ARCH_IF_FLAGS_VALID_FLAG | + ARCH_IF_FLAGS_RESP_RING_TYPE_ET | ARCH_IF_FLAGS_RESP_TYPE_S; + msg->req_type = type; + msg->resp_pipe_id = rxring; +} + +void +qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *msg, bus_addr_t desc_paddr, + uint8_t hdrsz, uint8_t hwblksz, uint16_t comn_req_flags, uint32_t flow_id) +{ + struct fw_comn_req_hdr *hdr = &msg->comn_hdr; + + hdr->comn_req_flags = comn_req_flags; + hdr->content_desc_params_sz = hwblksz; + hdr->content_desc_hdr_sz = hdrsz; + hdr->content_desc_addr = desc_paddr; + msg->flow_id = flow_id; +} + +void +qat_msg_service_cmd_populate(struct fw_la_bulk_req *msg, enum fw_la_cmd_id cmdid, + uint16_t cmd_flags) +{ + msg->comn_la_req.la_cmd_id = cmdid; + msg->comn_la_req.u.la_flags = cmd_flags; +} + +void +qat_msg_cmn_mid_populate(struct fw_comn_req_mid *msg, void *cookie, + uint64_t src, uint64_t dst) +{ + + msg->opaque_data = (uint64_t)(uintptr_t)cookie; + msg->src_data_addr = src; + if (dst == 0) + msg->dest_data_addr = src; + else + msg->dest_data_addr = dst; +} + +void +qat_msg_req_params_populate(struct fw_la_bulk_req *msg, + bus_addr_t req_params_paddr, uint8_t req_params_sz) +{ + msg->req_params_addr = req_params_paddr; + msg->comn_la_req.u1.req_params_blk_sz = req_params_sz / 8; +} + +void +qat_msg_cmn_footer_populate(union fw_comn_req_ftr *msg, uint64_t next_addr) +{ + msg->next_request_addr = next_addr; +} + +void +qat_msg_params_populate(struct fw_la_bulk_req *msg, + struct qat_crypto_desc *desc, uint8_t req_params_sz, + uint16_t service_cmd_flags, uint16_t comn_req_flags) +{ + qat_msg_cmn_hdr_populate(msg, desc->qcd_desc_paddr, + desc->qcd_hdr_sz, desc->qcd_hw_blk_sz, comn_req_flags, 0); + qat_msg_service_cmd_populate(msg, desc->qcd_cmd_id, service_cmd_flags); + qat_msg_cmn_mid_populate(&msg->comn_mid, NULL, 0, 0); + qat_msg_req_params_populate(msg, 0, req_params_sz); + qat_msg_cmn_footer_populate(&msg->comn_ftr, 0); +} + +static int +qat_adm_ring_init_ring_table(struct qat_softc *sc) +{ + struct qat_admin_rings *qadr = &sc->sc_admin_rings; + + if (sc->sc_ae_num == 1) { + qadr->qadr_cya_ring_tbl = + &qadr->qadr_master_ring_tbl[0]; + qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A; + } else if (sc->sc_ae_num == 2 || sc->sc_ae_num == 4) { + qadr->qadr_cya_ring_tbl = + &qadr->qadr_master_ring_tbl[0]; + qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A; + qadr->qadr_cyb_ring_tbl 
= + &qadr->qadr_master_ring_tbl[1]; + qadr->qadr_srv_mask[1] = QAT_SERVICE_CRYPTO_B; + } + + return 0; +} + +int +qat_adm_ring_init(struct qat_softc *sc) +{ + struct qat_admin_rings *qadr = &sc->sc_admin_rings; + int error, i, j; + + error = qat_alloc_dmamem(sc, &qadr->qadr_dma, 1, PAGE_SIZE, PAGE_SIZE); + if (error) + return error; + + qadr->qadr_master_ring_tbl = qadr->qadr_dma.qdm_dma_vaddr; + + MPASS(sc->sc_ae_num * + sizeof(struct fw_init_ring_table) <= PAGE_SIZE); + + /* Initialize the Master Ring Table */ + for (i = 0; i < sc->sc_ae_num; i++) { + struct fw_init_ring_table *firt = + &qadr->qadr_master_ring_tbl[i]; + + for (j = 0; j < INIT_RING_TABLE_SZ; j++) { + struct fw_init_ring_params *firp = + &firt->firt_bulk_rings[j]; + + firp->firp_reserved = 0; + firp->firp_curr_weight = QAT_DEFAULT_RING_WEIGHT; + firp->firp_init_weight = QAT_DEFAULT_RING_WEIGHT; + firp->firp_ring_pvl = QAT_DEFAULT_PVL; + } + memset(firt->firt_ring_mask, 0, sizeof(firt->firt_ring_mask)); + } + + error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_TX, + ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_req_size, + NULL, NULL, "admin_tx", &qadr->qadr_admin_tx); + if (error) + return error; + + error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_RX, + ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_resp_size, + qat_adm_ring_intr, qadr, "admin_rx", &qadr->qadr_admin_rx); + if (error) + return error; + + /* + * Finally set up the service indices into the Master Ring Table + * and convenient ring table pointers for each service enabled. + * Only the Admin rings are initialized. + */ + error = qat_adm_ring_init_ring_table(sc); + if (error) + return error; + + /* + * Calculate the number of active AEs per QAT + * needed for Shram partitioning. + */ + for (i = 0; i < sc->sc_ae_num; i++) { + if (qadr->qadr_srv_mask[i]) + qadr->qadr_active_aes_per_accel++; + } + + return 0; +} + +static void +qat_adm_ring_build_slice_mask(uint16_t *slice_mask, uint32_t srv_mask, + uint32_t init_shram) +{ + uint16_t shram = 0, comn_req = 0; + + if (init_shram) + shram = COMN_REQ_SHRAM_INIT_REQUIRED; + + if (srv_mask & QAT_SERVICE_CRYPTO_A) + comn_req |= COMN_REQ_CY0_ONLY(shram); + if (srv_mask & QAT_SERVICE_CRYPTO_B) + comn_req |= COMN_REQ_CY1_ONLY(shram); + + *slice_mask = comn_req; +} + +static void +qat_adm_ring_build_shram_mask(uint64_t *shram_mask, uint32_t active_aes, + uint32_t ae) +{ + *shram_mask = 0; + + if (active_aes == 1) { + *shram_mask = ~(*shram_mask); + } else if (active_aes == 2) { + if (ae == 1) + *shram_mask = ((~(*shram_mask)) & 0xffffffff); + else + *shram_mask = ((~(*shram_mask)) & 0xffffffff00000000ull); + } else if (active_aes == 3) { + if (ae == 0) + *shram_mask = ((~(*shram_mask)) & 0x7fffff); + else if (ae == 1) + *shram_mask = ((~(*shram_mask)) & 0x3fffff800000ull); + else + *shram_mask = ((~(*shram_mask)) & 0xffffc00000000000ull); + } else { + panic("Only three services are supported in current version"); + } +} + +static int +qat_adm_ring_build_ring_table(struct qat_softc *sc, uint32_t ae) +{ + struct qat_admin_rings *qadr = &sc->sc_admin_rings; + struct fw_init_ring_table *tbl; + struct fw_init_ring_params *param; + uint8_t srv_mask = sc->sc_admin_rings.qadr_srv_mask[ae]; + + if ((srv_mask & QAT_SERVICE_CRYPTO_A)) { + tbl = qadr->qadr_cya_ring_tbl; + } else if ((srv_mask & QAT_SERVICE_CRYPTO_B)) { + tbl = qadr->qadr_cyb_ring_tbl; + } else { + device_printf(sc->sc_dev, + "Invalid execution engine %d\n", ae); + return EINVAL; + } + + param = &tbl->firt_bulk_rings[sc->sc_hw.qhw_ring_sym_tx]; + param->firp_curr_weight = QAT_HI_PRIO_RING_WEIGHT; 
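+	/* Ring weights use negative logic (0xff means poll the ring once, 0 means poll it 255 times; see fw_init_ring_params), so QAT_HI_PRIO_RING_WEIGHT favors the symmetric crypto TX ring in arbitration. */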
+ param->firp_init_weight = QAT_HI_PRIO_RING_WEIGHT; + FW_INIT_RING_MASK_SET(tbl, sc->sc_hw.qhw_ring_sym_tx); + + return 0; +} + +static int +qat_adm_ring_build_init_msg(struct qat_softc *sc, + struct fw_init_req *initmsg, enum fw_init_cmd_id cmd, uint32_t ae, + struct qat_accel_init_cb *cb) +{ + struct fw_init_set_ae_info_hdr *aehdr; + struct fw_init_set_ae_info *aeinfo; + struct fw_init_set_ring_info_hdr *ringhdr; + struct fw_init_set_ring_info *ringinfo; + int init_shram = 0, tgt_id, cluster_id; + uint32_t srv_mask; + + srv_mask = sc->sc_admin_rings.qadr_srv_mask[ + ae % sc->sc_ae_num]; + + memset(initmsg, 0, sizeof(struct fw_init_req)); + + qat_msg_req_type_populate(&initmsg->comn_hdr.arch_if, + ARCH_IF_REQ_QAT_FW_INIT, + sc->sc_admin_rings.qadr_admin_rx->qr_ring_id); + + qat_msg_cmn_mid_populate(&initmsg->comn_mid, cb, 0, 0); + + switch (cmd) { + case FW_INIT_CMD_SET_AE_INFO: + if (ae % sc->sc_ae_num == 0) + init_shram = 1; + if (ae >= sc->sc_ae_num) { + tgt_id = 1; + cluster_id = 1; + } else { + cluster_id = 0; + if (sc->sc_ae_mask) + tgt_id = 0; + else + tgt_id = 1; + } + aehdr = &initmsg->u.set_ae_info; + aeinfo = &initmsg->u1.set_ae_info; + + aehdr->init_cmd_id = cmd; + /* XXX that does not support sparse ae_mask */ + aehdr->init_trgt_id = ae; + aehdr->init_ring_cluster_id = cluster_id; + aehdr->init_qat_id = tgt_id; + + qat_adm_ring_build_slice_mask(&aehdr->init_slice_mask, srv_mask, + init_shram); + + qat_adm_ring_build_shram_mask(&aeinfo->init_shram_mask, + sc->sc_admin_rings.qadr_active_aes_per_accel, + ae % sc->sc_ae_num); + + break; + case FW_INIT_CMD_SET_RING_INFO: + ringhdr = &initmsg->u.set_ring_info; + ringinfo = &initmsg->u1.set_ring_info; + + ringhdr->init_cmd_id = cmd; + /* XXX that does not support sparse ae_mask */ + ringhdr->init_trgt_id = ae; + + /* XXX */ + qat_adm_ring_build_ring_table(sc, + ae % sc->sc_ae_num); + + ringhdr->init_ring_tbl_sz = sizeof(struct fw_init_ring_table); + + ringinfo->init_ring_table_ptr = + sc->sc_admin_rings.qadr_dma.qdm_dma_seg.ds_addr + + ((ae % sc->sc_ae_num) * + sizeof(struct fw_init_ring_table)); + + break; + default: + return ENOTSUP; + } + + return 0; +} + +static int +qat_adm_ring_send_init_msg_sync(struct qat_softc *sc, + enum fw_init_cmd_id cmd, uint32_t ae) +{ + struct fw_init_req initmsg; + struct qat_accel_init_cb cb; + int error; + + error = qat_adm_ring_build_init_msg(sc, &initmsg, cmd, ae, &cb); + if (error) + return error; + + error = qat_etr_put_msg(sc, sc->sc_admin_rings.qadr_admin_tx, + (uint32_t *)&initmsg); + if (error) + return error; + + error = tsleep(&cb, PZERO, "qat_init", hz * 3 / 2); + if (error) { + device_printf(sc->sc_dev, + "Timed out initialization firmware: %d\n", error); + return error; + } + if (cb.qaic_status) { + device_printf(sc->sc_dev, "Failed to initialize firmware\n"); + return EIO; + } + + return error; +} + +static int +qat_adm_ring_send_init_msg(struct qat_softc *sc, + enum fw_init_cmd_id cmd) +{ + struct qat_admin_rings *qadr = &sc->sc_admin_rings; + uint32_t error, ae; + + for (ae = 0; ae < sc->sc_ae_num; ae++) { + uint8_t srv_mask = qadr->qadr_srv_mask[ae]; + switch (cmd) { + case FW_INIT_CMD_SET_AE_INFO: + case FW_INIT_CMD_SET_RING_INFO: + if (!srv_mask) + continue; + break; + case FW_INIT_CMD_TRNG_ENABLE: + case FW_INIT_CMD_TRNG_DISABLE: + if (!(srv_mask & QAT_SERVICE_CRYPTO_A)) + continue; + break; + default: + return ENOTSUP; + } + + error = qat_adm_ring_send_init_msg_sync(sc, cmd, ae); + if (error) + return error; + } + + return 0; +} + +int +qat_adm_ring_send_init(struct qat_softc 
*sc) +{ + int error; + + error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_AE_INFO); + if (error) + return error; + + error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_RING_INFO); + if (error) + return error; + + return 0; +} + +static int +qat_adm_ring_intr(struct qat_softc *sc, void *arg, void *msg) +{ + struct arch_if_resp_hdr *resp; + struct fw_init_resp *init_resp; + struct qat_accel_init_cb *init_cb; + int handled = 0; + + resp = (struct arch_if_resp_hdr *)msg; + + switch (resp->resp_type) { + case ARCH_IF_REQ_QAT_FW_INIT: + init_resp = (struct fw_init_resp *)msg; + init_cb = (struct qat_accel_init_cb *) + (uintptr_t)init_resp->comn_resp.opaque_data; + init_cb->qaic_status = + __SHIFTOUT(init_resp->comn_resp.comn_status, + COMN_RESP_INIT_ADMIN_STATUS); + wakeup(init_cb); + break; + default: + device_printf(sc->sc_dev, + "unknown resp type %d\n", resp->resp_type); + break; + } + + return handled; +} + +static inline uint16_t +qat_hw15_get_comn_req_flags(uint8_t ae) +{ + if (ae == 0) { + return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL | + COMN_REQ_AUTH0_SLICE_REQUIRED | + COMN_REQ_CIPHER0_SLICE_REQUIRED; + } else { + return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL | + COMN_REQ_AUTH1_SLICE_REQUIRED | + COMN_REQ_CIPHER1_SLICE_REQUIRED; + } +} + +static uint32_t +qat_hw15_crypto_setup_cipher_desc(struct qat_crypto_desc *desc, + struct qat_session *qs, struct fw_cipher_hdr *cipher_hdr, + uint32_t hw_blk_offset, enum fw_slice next_slice) +{ + desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ; + + cipher_hdr->state_padding_sz = 0; + cipher_hdr->key_sz = qs->qs_cipher_klen / 8; + + cipher_hdr->state_sz = desc->qcd_cipher_blk_sz / 8; + + cipher_hdr->next_id = next_slice; + cipher_hdr->curr_id = FW_SLICE_CIPHER; + cipher_hdr->offset = hw_blk_offset / 8; + cipher_hdr->resrvd = 0; + + return sizeof(struct hw_cipher_config) + qs->qs_cipher_klen; +} + +static void +qat_hw15_crypto_setup_cipher_config(const struct qat_crypto_desc *desc, + const struct qat_session *qs, const struct cryptop *crp, + struct hw_cipher_config *cipher_config) +{ + const uint8_t *key; + uint8_t *cipher_key; + + cipher_config->val = qat_crypto_load_cipher_session(desc, qs); + cipher_config->reserved = 0; + + cipher_key = (uint8_t *)(cipher_config + 1); + if (crp != NULL && crp->crp_cipher_key != NULL) + key = crp->crp_cipher_key; + else + key = qs->qs_cipher_key; + memcpy(cipher_key, key, qs->qs_cipher_klen); +} + +static uint32_t +qat_hw15_crypto_setup_auth_desc(struct qat_crypto_desc *desc, + struct qat_session *qs, struct fw_auth_hdr *auth_hdr, + uint32_t ctrl_blk_offset, uint32_t hw_blk_offset, + enum fw_slice next_slice) +{ + const struct qat_sym_hash_def *hash_def; + + (void)qat_crypto_load_auth_session(desc, qs, &hash_def); + + auth_hdr->next_id = next_slice; + auth_hdr->curr_id = FW_SLICE_AUTH; + auth_hdr->offset = hw_blk_offset / 8; + auth_hdr->resrvd = 0; + + auth_hdr->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED; + auth_hdr->u.inner_prefix_sz = 0; + auth_hdr->outer_prefix_sz = 0; + auth_hdr->final_sz = hash_def->qshd_alg->qshai_digest_len; + auth_hdr->inner_state1_sz = + roundup(hash_def->qshd_qat->qshqi_state1_len, 8); + auth_hdr->inner_res_sz = hash_def->qshd_alg->qshai_digest_len; + auth_hdr->inner_state2_sz = + roundup(hash_def->qshd_qat->qshqi_state2_len, 8); + auth_hdr->inner_state2_off = auth_hdr->offset + + ((sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz) / 8); + + auth_hdr->outer_config_off = 0; + auth_hdr->outer_state1_sz = 0; + auth_hdr->outer_res_sz = 0; + auth_hdr->outer_prefix_off = 0; + 
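+	/* Record the digest length and where the GCM AAD size fields sit in the content descriptor; qat_hw15_crypto_setup_req_params() patches them when the AAD length changes between requests. */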
+ desc->qcd_auth_sz = hash_def->qshd_alg->qshai_sah->hashsize; + desc->qcd_state_storage_sz = (sizeof(struct hw_auth_counter) + + roundup(hash_def->qshd_alg->qshai_state_size, 8)) / 8; + desc->qcd_gcm_aad_sz_offset1 = desc->qcd_auth_offset + + sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz + + AES_BLOCK_LEN; + desc->qcd_gcm_aad_sz_offset2 = ctrl_blk_offset + + offsetof(struct fw_auth_hdr, u.aad_sz); + + return sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz + + auth_hdr->inner_state2_sz; +} + +static void +qat_hw15_crypto_setup_auth_setup(const struct qat_crypto_desc *desc, + const struct qat_session *qs, const struct cryptop *crp, + struct hw_auth_setup *auth_setup) +{ + const struct qat_sym_hash_def *hash_def; + const uint8_t *key; + uint8_t *state1, *state2; + uint32_t state_sz, state1_sz, state2_sz, state1_pad_len, state2_pad_len; + + auth_setup->auth_config.config = qat_crypto_load_auth_session(desc, qs, + &hash_def); + auth_setup->auth_config.reserved = 0; + + auth_setup->auth_counter.counter = + htobe32(hash_def->qshd_qat->qshqi_auth_counter); + auth_setup->auth_counter.reserved = 0; + + state1 = (uint8_t *)(auth_setup + 1); + state2 = state1 + roundup(hash_def->qshd_qat->qshqi_state1_len, 8); + switch (qs->qs_auth_algo) { + case HW_AUTH_ALGO_GALOIS_128: + qat_crypto_gmac_precompute(desc, qs->qs_cipher_key, + qs->qs_cipher_klen, hash_def, state2); + break; + case HW_AUTH_ALGO_SHA1: + state_sz = hash_def->qshd_alg->qshai_state_size; + state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8); + state2_sz = roundup(hash_def->qshd_qat->qshqi_state2_len, 8); + if (qs->qs_auth_mode == HW_AUTH_MODE1) { + state1_pad_len = state1_sz - state_sz; + state2_pad_len = state2_sz - state_sz; + if (state1_pad_len > 0) + memset(state1 + state_sz, 0, state1_pad_len); + if (state2_pad_len > 0) + memset(state2 + state_sz, 0, state2_pad_len); + } + /* FALLTHROUGH */ + case HW_AUTH_ALGO_SHA256: + case HW_AUTH_ALGO_SHA384: + case HW_AUTH_ALGO_SHA512: + switch (qs->qs_auth_mode) { + case HW_AUTH_MODE0: + memcpy(state1, hash_def->qshd_alg->qshai_init_state, + state1_sz); + /* Override for mode 0 hashes. 
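+			 * Mode 0 is a plain (unkeyed) hash: state1 holds the algorithm's initial state and no bytes have been processed yet, so the counter starts at zero.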
*/ + auth_setup->auth_counter.counter = 0; + break; + case HW_AUTH_MODE1: + if (crp != NULL && crp->crp_auth_key != NULL) + key = crp->crp_auth_key; + else + key = qs->qs_auth_key; + if (key != NULL) { + qat_crypto_hmac_precompute(desc, key, + qs->qs_auth_klen, hash_def, state1, state2); + } + break; + default: + panic("%s: unhandled auth mode %d", __func__, + qs->qs_auth_mode); + } + break; + default: + panic("%s: unhandled auth algorithm %d", __func__, + qs->qs_auth_algo); + } +} + +void +qat_hw15_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs, + struct qat_crypto_desc *desc) +{ + struct fw_cipher_hdr *cipher_hdr; + struct fw_auth_hdr *auth_hdr; + struct fw_la_bulk_req *req_cache; + struct hw_auth_setup *auth_setup; + struct hw_cipher_config *cipher_config; + uint32_t ctrl_blk_sz, ctrl_blk_offset, hw_blk_offset; + int i; + uint16_t la_cmd_flags; + uint8_t req_params_sz; + uint8_t *ctrl_blk_ptr, *hw_blk_ptr; + + ctrl_blk_sz = 0; + if (qs->qs_cipher_algo != HW_CIPHER_ALGO_NULL) + ctrl_blk_sz += sizeof(struct fw_cipher_hdr); + if (qs->qs_auth_algo != HW_AUTH_ALGO_NULL) + ctrl_blk_sz += sizeof(struct fw_auth_hdr); + + ctrl_blk_ptr = desc->qcd_content_desc; + ctrl_blk_offset = 0; + hw_blk_ptr = ctrl_blk_ptr + ctrl_blk_sz; + hw_blk_offset = 0; + + la_cmd_flags = 0; + req_params_sz = 0; + for (i = 0; i < MAX_FW_SLICE; i++) { + switch (desc->qcd_slices[i]) { + case FW_SLICE_CIPHER: + cipher_hdr = (struct fw_cipher_hdr *)(ctrl_blk_ptr + + ctrl_blk_offset); + cipher_config = (struct hw_cipher_config *)(hw_blk_ptr + + hw_blk_offset); + desc->qcd_cipher_offset = ctrl_blk_sz + hw_blk_offset; + hw_blk_offset += qat_hw15_crypto_setup_cipher_desc(desc, + qs, cipher_hdr, hw_blk_offset, + desc->qcd_slices[i + 1]); + qat_hw15_crypto_setup_cipher_config(desc, qs, NULL, + cipher_config); + ctrl_blk_offset += sizeof(struct fw_cipher_hdr); + req_params_sz += sizeof(struct fw_la_cipher_req_params); + break; + case FW_SLICE_AUTH: + auth_hdr = (struct fw_auth_hdr *)(ctrl_blk_ptr + + ctrl_blk_offset); + auth_setup = (struct hw_auth_setup *)(hw_blk_ptr + + hw_blk_offset); + desc->qcd_auth_offset = ctrl_blk_sz + hw_blk_offset; + hw_blk_offset += qat_hw15_crypto_setup_auth_desc(desc, + qs, auth_hdr, ctrl_blk_offset, hw_blk_offset, + desc->qcd_slices[i + 1]); + qat_hw15_crypto_setup_auth_setup(desc, qs, NULL, + auth_setup); + ctrl_blk_offset += sizeof(struct fw_auth_hdr); + req_params_sz += sizeof(struct fw_la_auth_req_params); + la_cmd_flags |= LA_FLAGS_RET_AUTH_RES; + /* no digest verify */ + break; + case FW_SLICE_DRAM_WR: + i = MAX_FW_SLICE; /* end of chain */ + break; + default: + MPASS(0); + break; + } + } + + desc->qcd_hdr_sz = ctrl_blk_offset / 8; + desc->qcd_hw_blk_sz = hw_blk_offset / 8; + + req_cache = (struct fw_la_bulk_req *)desc->qcd_req_cache; + qat_msg_req_type_populate( + &req_cache->comn_hdr.arch_if, + ARCH_IF_REQ_QAT_FW_LA, 0); + + if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) + la_cmd_flags |= LA_FLAGS_PROTO_GCM | LA_FLAGS_GCM_IV_LEN_FLAG; + else + la_cmd_flags |= LA_FLAGS_PROTO_NO; + + qat_msg_params_populate(req_cache, desc, req_params_sz, + la_cmd_flags, 0); + + bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag, + qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE); +} + +static void +qat_hw15_crypto_req_setkey(const struct qat_crypto_desc *desc, + const struct qat_session *qs, struct qat_sym_cookie *qsc, + struct fw_la_bulk_req *bulk_req, struct cryptop *crp) +{ + struct hw_auth_setup *auth_setup; + struct hw_cipher_config *cipher_config; + uint8_t *cdesc; + int i; + + cdesc = 
qsc->qsc_content_desc; + memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE); + for (i = 0; i < MAX_FW_SLICE; i++) { + switch (desc->qcd_slices[i]) { + case FW_SLICE_CIPHER: + cipher_config = (struct hw_cipher_config *) + (cdesc + desc->qcd_cipher_offset); + qat_hw15_crypto_setup_cipher_config(desc, qs, crp, + cipher_config); + break; + case FW_SLICE_AUTH: + auth_setup = (struct hw_auth_setup *) + (cdesc + desc->qcd_auth_offset); + qat_hw15_crypto_setup_auth_setup(desc, qs, crp, + auth_setup); + break; + case FW_SLICE_DRAM_WR: + i = MAX_FW_SLICE; /* end of chain */ + break; + default: + MPASS(0); + } + } + + bulk_req->comn_hdr.content_desc_addr = qsc->qsc_content_desc_paddr; +} + +void +qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb, + struct qat_session *qs, struct qat_crypto_desc const *desc, + struct qat_sym_cookie *qsc, struct cryptop *crp) +{ + struct qat_sym_bulk_cookie *qsbc; + struct fw_la_bulk_req *bulk_req; + struct fw_la_cipher_req_params *cipher_req; + struct fw_la_auth_req_params *auth_req; + bus_addr_t digest_paddr; + uint8_t *aad_szp2, *req_params_ptr; + uint32_t aad_sz, *aad_szp1; + enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id; + enum fw_slice next_slice; + + qsbc = &qsc->u.qsc_bulk_cookie; + + bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg; + memcpy(bulk_req, &desc->qcd_req_cache, QAT_HW15_SESSION_REQ_CACHE_SIZE); + bulk_req->comn_hdr.arch_if.resp_pipe_id = qcb->qcb_sym_rx->qr_ring_id; + bulk_req->comn_hdr.comn_req_flags = + qat_hw15_get_comn_req_flags(qcb->qcb_bank % 2); + bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr; + bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr; + bulk_req->req_params_addr = qsc->qsc_bulk_req_params_buf_paddr; + bulk_req->comn_ftr.next_request_addr = 0; + bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc; + if (__predict_false(crp->crp_cipher_key != NULL || + crp->crp_auth_key != NULL)) { + qat_hw15_crypto_req_setkey(desc, qs, qsc, bulk_req, crp); + } + + digest_paddr = 0; + if (desc->qcd_auth_sz != 0) + digest_paddr = qsc->qsc_auth_res_paddr; + + req_params_ptr = qsbc->qsbc_req_params_buf; + memset(req_params_ptr, 0, sizeof(qsbc->qsbc_req_params_buf)); + + /* + * The SG list layout is a bit different for GCM and GMAC, it's simpler + * to handle those cases separately. + */ + if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { + cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr; + auth_req = (struct fw_la_auth_req_params *) + (req_params_ptr + sizeof(struct fw_la_cipher_req_params)); + + cipher_req->cipher_state_sz = desc->qcd_cipher_blk_sz / 8; + cipher_req->curr_id = FW_SLICE_CIPHER; + if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH) + cipher_req->next_id = FW_SLICE_DRAM_WR; + else + cipher_req->next_id = FW_SLICE_AUTH; + cipher_req->state_address = qsc->qsc_iv_buf_paddr; + + if (cmd_id != FW_LA_CMD_AUTH) { + /* + * Don't fill out the cipher block if we're doing GMAC + * only. 
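+			 * (A GMAC-only request is issued as FW_LA_CMD_AUTH, which the enclosing test excludes.)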
+ */ + cipher_req->cipher_off = 0; + cipher_req->cipher_len = crp->crp_payload_length; + } + auth_req->curr_id = FW_SLICE_AUTH; + if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH) + auth_req->next_id = FW_SLICE_CIPHER; + else + auth_req->next_id = FW_SLICE_DRAM_WR; + + auth_req->auth_res_address = digest_paddr; + auth_req->auth_res_sz = desc->qcd_auth_sz; + + auth_req->auth_off = 0; + auth_req->auth_len = crp->crp_payload_length; + + auth_req->hash_state_sz = + roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) >> 3; + auth_req->u1.aad_addr = crp->crp_aad_length > 0 ? + qsc->qsc_gcm_aad_paddr : 0; + + /* + * Update the hash state block if necessary. This only occurs + * when the AAD length changes between requests in a session and + * is synchronized by qat_process(). + */ + aad_sz = htobe32(crp->crp_aad_length); + aad_szp1 = (uint32_t *)( + __DECONST(uint8_t *, desc->qcd_content_desc) + + desc->qcd_gcm_aad_sz_offset1); + aad_szp2 = __DECONST(uint8_t *, desc->qcd_content_desc) + + desc->qcd_gcm_aad_sz_offset2; + if (__predict_false(*aad_szp1 != aad_sz)) { + *aad_szp1 = aad_sz; + *aad_szp2 = (uint8_t)roundup2(crp->crp_aad_length, + QAT_AES_GCM_AAD_ALIGN); + bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag, + qs->qs_desc_mem.qdm_dma_map, + BUS_DMASYNC_PREWRITE); + } + } else { + cipher_req = (struct fw_la_cipher_req_params *)req_params_ptr; + if (cmd_id != FW_LA_CMD_AUTH) { + if (cmd_id == FW_LA_CMD_CIPHER || + cmd_id == FW_LA_CMD_HASH_CIPHER) + next_slice = FW_SLICE_DRAM_WR; + else + next_slice = FW_SLICE_AUTH; + + cipher_req->cipher_state_sz = + desc->qcd_cipher_blk_sz / 8; + + cipher_req->curr_id = FW_SLICE_CIPHER; + cipher_req->next_id = next_slice; + + cipher_req->cipher_off = crp->crp_aad_length == 0 ? 0 : + crp->crp_payload_start - crp->crp_aad_start; + cipher_req->cipher_len = crp->crp_payload_length; + cipher_req->state_address = qsc->qsc_iv_buf_paddr; + } + if (cmd_id != FW_LA_CMD_CIPHER) { + if (cmd_id == FW_LA_CMD_AUTH) + auth_req = (struct fw_la_auth_req_params *) + req_params_ptr; + else + auth_req = (struct fw_la_auth_req_params *) + (cipher_req + 1); + if (cmd_id == FW_LA_CMD_HASH_CIPHER) + next_slice = FW_SLICE_CIPHER; + else + next_slice = FW_SLICE_DRAM_WR; + + auth_req->curr_id = FW_SLICE_AUTH; + auth_req->next_id = next_slice; + + auth_req->auth_res_address = digest_paddr; + auth_req->auth_res_sz = desc->qcd_auth_sz; + + auth_req->auth_len = + crp->crp_payload_length + crp->crp_aad_length; + auth_req->auth_off = 0; + + auth_req->hash_state_sz = 0; + auth_req->u1.prefix_addr = desc->qcd_hash_state_paddr + + desc->qcd_state_storage_sz; + } + } +} Property changes on: head/sys/dev/qat/qat_hw15.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_hw15reg.h =================================================================== --- head/sys/dev/qat/qat_hw15reg.h (nonexistent) +++ head/sys/dev/qat/qat_hw15reg.h (revision 367386) @@ -0,0 +1,635 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_hw15reg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2013 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_HW15REG_H_ +#define _DEV_PCI_QAT_HW15REG_H_ + +/* Default message size in bytes */ +#define FW_REQ_DEFAULT_SZ_HW15 64 +#define FW_RESP_DEFAULT_SZ_HW15 64 + +#define ADMIN_RING_SIZE 256 +#define RING_NUM_ADMIN_TX 0 +#define RING_NUM_ADMIN_RX 1 + +/* -------------------------------------------------------------------------- */ +/* accel */ + +#define ARCH_IF_FLAGS_VALID_FLAG __BIT(7) +#define ARCH_IF_FLAGS_RESP_RING_TYPE __BITS(4, 3) +#define ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT 3 +#define ARCH_IF_FLAGS_RESP_RING_TYPE_SCRATCH (0 << ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT) +#define ARCH_IF_FLAGS_RESP_RING_TYPE_NN (1 << ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT) +#define ARCH_IF_FLAGS_RESP_RING_TYPE_ET (2 << ARCH_IF_FLAGS_RESP_RING_TYPE_SHIFT) +#define ARCH_IF_FLAGS_RESP_TYPE __BITS(2, 0) +#define ARCH_IF_FLAGS_RESP_TYPE_SHIFT 0 +#define ARCH_IF_FLAGS_RESP_TYPE_A (0 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT) +#define ARCH_IF_FLAGS_RESP_TYPE_B (1 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT) +#define ARCH_IF_FLAGS_RESP_TYPE_C (2 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT) +#define ARCH_IF_FLAGS_RESP_TYPE_S (3 << ARCH_IF_FLAGS_RESP_TYPE_SHIFT) + +enum arch_if_req { + ARCH_IF_REQ_NULL, /* NULL request type */ + + /* QAT-AE Service Request Type IDs - 01 to 20 */ + ARCH_IF_REQ_QAT_FW_INIT, /* QAT-FW Initialization Request */ + ARCH_IF_REQ_QAT_FW_ADMIN, /* QAT-FW Administration Request */ + ARCH_IF_REQ_QAT_FW_PKE, /* QAT-FW PKE Request */ + ARCH_IF_REQ_QAT_FW_LA, /* QAT-FW Lookaside Request */ + ARCH_IF_REQ_QAT_FW_IPSEC, /* QAT-FW IPSec Request */ + ARCH_IF_REQ_QAT_FW_SSL, /* QAT-FW SSL Request */ + ARCH_IF_REQ_QAT_FW_DMA, /* QAT-FW DMA Request */ + ARCH_IF_REQ_QAT_FW_STORAGE, /* QAT-FW Storage Request */ + ARCH_IF_REQ_QAT_FW_COMPRESS, /* QAT-FW Compression Request */ + ARCH_IF_REQ_QAT_FW_PATMATCH, /* QAT-FW Pattern Matching Request */ + + /* IP Service (Range Match and Exception) Blocks Request Type IDs 21 - 30 */ + ARCH_IF_REQ_RM_FLOW_MISS = 21, /* RM flow miss request */ + ARCH_IF_REQ_RM_FLOW_TIMER_EXP, /* RM flow timer exp Request */ + ARCH_IF_REQ_IP_SERVICES_RFC_LOOKUP_UPDATE, /* RFC Lookup request */ + ARCH_IF_REQ_IP_SERVICES_CONFIG_UPDATE, /* Config Update request */ + ARCH_IF_REQ_IP_SERVICES_FCT_CONFIG, /* FCT Config request */ + ARCH_IF_REQ_IP_SERVICES_NEXT_HOP_TIMER_EXPIRY, /* NH Timer expiry request */ + ARCH_IF_REQ_IP_SERVICES_EXCEPTION, /* Exception processing request */ + ARCH_IF_REQ_IP_SERVICES_STACK_DRIVER, /* Send to SD request */ + ARCH_IF_REQ_IP_SERVICES_ACTION_HANDLER, /* Send to AH request */ + ARCH_IF_REQ_IP_SERVICES_EVENT_HANDLER, /* Send to EH request */ + ARCH_IF_REQ_DELIMITER /* End delimiter */ +}; + +struct arch_if_req_hdr { + uint8_t resp_dest_id; + /* Opaque identifier passed from the request to the response to allow the + * response handler to perform any further processing */ + uint8_t resp_pipe_id; + /* Response pipe to write the response associated with this request to */ + uint8_t req_type; + /* Definition of the service described by the request */ + uint8_t flags; + /* Request and response control flags */ +}; + +struct arch_if_resp_hdr { + uint8_t dest_id; + /* Opaque identifier passed from the request to the response to allow the + * response handler to perform any further processing */ + uint8_t serv_id; + /* Definition of the service id generating the response */ + uint8_t resp_type; + /* Definition of the service described by the request */ + uint8_t flags; + /* Request and response control flags */ +}; + +struct fw_comn_req_hdr { + struct arch_if_req_hdr arch_if; + /* Common
arch fields used by all ICP interface requests. Remaining + * fields are specific to the common QAT FW service. */ + uint16_t comn_req_flags; + /* Flags used to describe common processing required by the request and + * the meaning of parameters in it i.e. differentiating between a buffer + * descriptor and a flat buffer pointer in the source (src) and destination + * (dest) data address fields. Full definition of the fields is given + * below */ + uint8_t content_desc_params_sz; + /* Size of the content descriptor parameters in quad words. These + * parameters describe the session setup configuration info for the + * slices that this request relies upon i.e. the configuration word and + * cipher key needed by the cipher slice if there is a request for cipher + * processing. The format of the parameters is contained in icp_qat_hw.h + * and varies depending on the algorithm and mode being used. It is the + * client's responsibility to ensure this structure is correctly packed */ + uint8_t content_desc_hdr_sz; + /* Size of the content descriptor header in quad words. This information + * is read into the QAT AE xfr registers */ + uint64_t content_desc_addr; + /* Address of the content descriptor containing both the content header, + * whose size is defined by content_desc_hdr_sz, and the content + * parameters, whose size is described by content_desc_params_sz + */ +}; + +struct fw_comn_req_mid { + uint64_t opaque_data; + /* Opaque data passed unmodified from the request to response messages + * by firmware (fw) */ + uint64_t src_data_addr; + /* Generic definition of the source data supplied to the QAT AE. The + * common flags are used to further describe the attributes of this + * field */ + uint64_t dest_data_addr; + /* Generic definition of the destination data supplied to the QAT AE. + * The common flags are used to further describe the attributes of this + * field */ +}; + +union fw_comn_req_ftr { + uint64_t next_request_addr; + /* Overloaded field; for stateful requests, this field is the pointer to + the next request descriptor */ + struct { + uint32_t src_length; + /* Length of the source flat buffer in case the src buffer type is flat */ + uint32_t dst_length; + /* Length of the destination flat buffer in case the dst buffer type is flat */ + } s; +}; + +union fw_comn_error { + struct { + uint8_t resrvd; /* 8 bit reserved field */ + uint8_t comn_err_code; /* 8 bit common error code */ + } s; + /* Structure which is used for non-compression responses */ + + struct { + uint8_t xlat_err_code; /* 8 bit translator error field */ + uint8_t cmp_err_code; /* 8 bit compression error field */ + } s1; + /* Structure which is used for compression responses */ +}; + +struct fw_comn_resp_hdr { + struct arch_if_resp_hdr arch_if; + /* Common arch fields used by all ICP interface response messages. The + * remaining fields are specific to the QAT FW */ + union fw_comn_error comn_error; + /* This field is overloaded to allow for one 8 bit common error field + * or two 8 bit error fields from compression and translator */ + uint8_t comn_status; + /* Status field which specifies which slice(s) report an error */ + uint8_t serv_cmd_id; + /* For services that define multiple commands this field represents the + * command.
If only 1 command is supported then this field will be 0 */ + uint64_t opaque_data; + /* Opaque data passed from the request to the response message */ +}; + + +#define RING_MASK_TABLE_ENTRY_LOG_SZ (5) + +#define FW_INIT_RING_MASK_SET(table, id) \ + table->firt_ring_mask[id >> RING_MASK_TABLE_ENTRY_LOG_SZ] =\ + table->firt_ring_mask[id >> RING_MASK_TABLE_ENTRY_LOG_SZ] | \ + (1 << (id & 0x1f)) + +struct fw_init_ring_params { + uint8_t firp_curr_weight; /* Current ring weight (working copy), + * has to be equal to init_weight */ + uint8_t firp_init_weight; /* Initial ring weight: -1 ... 0 + * -1 is equal to FF, -2 is equal to FE, + * the weighting uses negative logic + * where FF means poll the ring once, + * -2 is poll the ring twice, + * 0 is poll the ring 255 times */ + uint8_t firp_ring_pvl; /* Ring Privilege Level. */ + uint8_t firp_reserved; /* Reserved field which must be set + * to 0 by the client */ +}; + +#define INIT_RING_TABLE_SZ 128 +#define INIT_RING_TABLE_LW_SZ 4 + +struct fw_init_ring_table { + struct fw_init_ring_params firt_bulk_rings[INIT_RING_TABLE_SZ]; + /* array of ring parameters */ + uint32_t firt_ring_mask[INIT_RING_TABLE_LW_SZ]; + /* Bit masks for the + * 128 rings. */ +}; + +struct fw_init_set_ae_info_hdr { + uint16_t init_slice_mask; /* Init time flags to set the ownership of the slices */ + uint16_t resrvd; /* Reserved field and must be set to 0 by the client */ + uint8_t init_qat_id; /* Init time qat id described in the request */ + uint8_t init_ring_cluster_id; /* Init time ring cluster Id */ + uint8_t init_trgt_id; /* Init time target AE id described in the request */ + uint8_t init_cmd_id; /* Init time command that is described in the request */ +}; + +struct fw_init_set_ae_info { + uint64_t init_shram_mask; /* Init time shram mask to set the page ownership in the page pool of the AE */ + uint64_t resrvd; /* Reserved field and must be set to 0 by the client */ +}; + +struct fw_init_set_ring_info_hdr { + uint32_t resrvd; /* Reserved field and must be set to 0 by the client */ + uint16_t init_ring_tbl_sz; /* Init time information stating the size of the ring table */ + uint8_t init_trgt_id; /* Init time target AE id described in the request */ + uint8_t init_cmd_id; /* Init time command that is described in the request */ +}; + +struct fw_init_set_ring_info { + uint64_t init_ring_table_ptr; /* Pointer to weighting information for 128 rings */ + uint64_t resrvd; /* Reserved field and must be set to 0 by the client */ +}; + +struct fw_init_trng_hdr { + uint32_t resrvd; /* Reserved field and must be set to 0 by the client */ + union { + uint8_t resrvd; /* Reserved field set to 0 if cmd type is trng disable */ + uint8_t init_trng_cfg_sz; /* Size of the trng config word in QW */ + } u; + uint8_t resrvd1; /* Reserved field and must be set to 0 by the client */ + uint8_t init_trgt_id; /* Init time target AE id described in the request */ + uint8_t init_cmd_id; /* Init time command that is described in the request */ +}; + +struct fw_init_trng { + union { + uint64_t resrvd; /* Reserved field set to 0 if cmd type is trng disable */ + uint64_t init_trng_cfg_ptr; /* Pointer to TRNG Slice config word */ + } u; + uint64_t resrvd; /* Reserved field and must be set to 0 by the client */ +}; + +struct fw_init_req { + struct fw_comn_req_hdr comn_hdr; /* Common request header */ + union { + struct fw_init_set_ae_info_hdr set_ae_info; + /* INIT SET_AE_INFO request header structure */ + struct fw_init_set_ring_info_hdr set_ring_info; + /* INIT SET_RING_INFO request header
structure */ + struct fw_init_trng_hdr init_trng; + /* INIT TRNG ENABLE/DISABLE request header structure */ + } u; + struct fw_comn_req_mid comn_mid; /* Common request middle section */ + union { + struct fw_init_set_ae_info set_ae_info; + /* INIT SET_AE_INFO request data structure */ + struct fw_init_set_ring_info set_ring_info; + /* INIT SET_RING_INFO request data structure */ + struct fw_init_trng init_trng; + /* INIT TRNG ENABLE/DISABLE request data structure */ + } u1; +}; + +enum fw_init_cmd_id { + FW_INIT_CMD_SET_AE_INFO, /* Setup AE Info command type */ + FW_INIT_CMD_SET_RING_INFO, /* Setup Ring Info command type */ + FW_INIT_CMD_TRNG_ENABLE, /* TRNG Enable command type */ + FW_INIT_CMD_TRNG_DISABLE, /* TRNG Disable command type */ + FW_INIT_CMD_DELIMITER /* Delimiter type */ +}; + +struct fw_init_resp { + struct fw_comn_resp_hdr comn_resp; /* Common interface response */ + uint8_t resrvd[64 - sizeof(struct fw_comn_resp_hdr)]; + /* XXX FW_RESP_DEFAULT_SZ_HW15 */ + /* Reserved padding out to the default response size */ +}; + +/* -------------------------------------------------------------------------- */ +/* look aside */ + +#define COMN_REQ_ORD UINT16_C(0x8000) +#define COMN_REQ_ORD_SHIFT 15 +#define COMN_REQ_ORD_NONE (0 << COMN_REQ_ORD_SHIFT) +#define COMN_REQ_ORD_STRICT (1 << COMN_REQ_ORD_SHIFT) +#define COMN_REQ_PTR_TYPE UINT16_C(0x4000) +#define COMN_REQ_PTR_TYPE_SHIFT 14 +#define COMN_REQ_PTR_TYPE_FLAT (0 << COMN_REQ_PTR_TYPE_SHIFT) +#define COMN_REQ_PTR_TYPE_SGL (1 << COMN_REQ_PTR_TYPE_SHIFT) +#define COMN_REQ_RESERVED UINT16_C(0x2000) +#define COMN_REQ_SHRAM_INIT UINT16_C(0x1000) +#define COMN_REQ_SHRAM_INIT_SHIFT 12 +#define COMN_REQ_SHRAM_INIT_REQUIRED (1 << COMN_REQ_SHRAM_INIT_SHIFT) +#define COMN_REQ_REGEX_SLICE UINT16_C(0x0800) +#define COMN_REQ_REGEX_SLICE_SHIFT 11 +#define COMN_REQ_REGEX_SLICE_REQUIRED (1 << COMN_REQ_REGEX_SLICE_SHIFT) +#define COMN_REQ_XLAT_SLICE UINT16_C(0x0400) +#define COMN_REQ_XLAT_SLICE_SHIFT 10 +#define COMN_REQ_XLAT_SLICE_REQUIRED (1 << COMN_REQ_XLAT_SLICE_SHIFT) +#define COMN_REQ_CPR_SLICE UINT16_C(0x0200) +#define COMN_REQ_CPR_SLICE_SHIFT 9 +#define COMN_REQ_CPR_SLICE_REQUIRED (1 << COMN_REQ_CPR_SLICE_SHIFT) +#define COMN_REQ_BULK_SLICE UINT16_C(0x0100) +#define COMN_REQ_BULK_SLICE_SHIFT 8 +#define COMN_REQ_BULK_SLICE_REQUIRED (1 << COMN_REQ_BULK_SLICE_SHIFT) +#define COMN_REQ_STORAGE_SLICE UINT16_C(0x0080) +#define COMN_REQ_STORAGE_SLICE_SHIFT 7 +#define COMN_REQ_STORAGE_SLICE_REQUIRED (1 << COMN_REQ_STORAGE_SLICE_SHIFT) +#define COMN_REQ_RND_SLICE UINT16_C(0x0040) +#define COMN_REQ_RND_SLICE_SHIFT 6 +#define COMN_REQ_RND_SLICE_REQUIRED (1 << COMN_REQ_RND_SLICE_SHIFT) +#define COMN_REQ_PKE1_SLICE UINT16_C(0x0020) +#define COMN_REQ_PKE1_SLICE_SHIFT 5 +#define COMN_REQ_PKE1_SLICE_REQUIRED (1 << COMN_REQ_PKE1_SLICE_SHIFT) +#define COMN_REQ_PKE0_SLICE UINT16_C(0x0010) +#define COMN_REQ_PKE0_SLICE_SHIFT 4 +#define COMN_REQ_PKE0_SLICE_REQUIRED (1 << COMN_REQ_PKE0_SLICE_SHIFT) +#define COMN_REQ_AUTH1_SLICE UINT16_C(0x0008) +#define COMN_REQ_AUTH1_SLICE_SHIFT 3 +#define COMN_REQ_AUTH1_SLICE_REQUIRED (1 << COMN_REQ_AUTH1_SLICE_SHIFT) +#define COMN_REQ_AUTH0_SLICE UINT16_C(0x0004) +#define COMN_REQ_AUTH0_SLICE_SHIFT 2 +#define COMN_REQ_AUTH0_SLICE_REQUIRED (1 << COMN_REQ_AUTH0_SLICE_SHIFT) +#define COMN_REQ_CIPHER1_SLICE UINT16_C(0x0002) +#define COMN_REQ_CIPHER1_SLICE_SHIFT 1 +#define COMN_REQ_CIPHER1_SLICE_REQUIRED (1 << COMN_REQ_CIPHER1_SLICE_SHIFT) +#define COMN_REQ_CIPHER0_SLICE UINT16_C(0x0001) +#define COMN_REQ_CIPHER0_SLICE_SHIFT 0 +#define 
COMN_REQ_CIPHER0_SLICE_REQUIRED (1 << COMN_REQ_CIPHER0_SLICE_SHIFT) + +#define COMN_REQ_CY0_ONLY(shram) \ + COMN_REQ_ORD_STRICT | \ + COMN_REQ_PTR_TYPE_FLAT | \ + (shram) | \ + COMN_REQ_RND_SLICE_REQUIRED | \ + COMN_REQ_PKE0_SLICE_REQUIRED | \ + COMN_REQ_AUTH0_SLICE_REQUIRED | \ + COMN_REQ_CIPHER0_SLICE_REQUIRED; +#define COMN_REQ_CY1_ONLY(shram) \ + COMN_REQ_ORD_STRICT | \ + COMN_REQ_PTR_TYPE_FLAT | \ + (shram) | \ + COMN_REQ_PKE1_SLICE_REQUIRED | \ + COMN_REQ_AUTH1_SLICE_REQUIRED | \ + COMN_REQ_CIPHER1_SLICE_REQUIRED; + +#define COMN_RESP_CRYPTO_STATUS __BIT(7) +#define COMN_RESP_PKE_STATUS __BIT(6) +#define COMN_RESP_CMP_STATUS __BIT(5) +#define COMN_RESP_XLAT_STATUS __BIT(4) +#define COMN_RESP_PM_STATUS __BIT(3) +#define COMN_RESP_INIT_ADMIN_STATUS __BIT(2) + +#define COMN_STATUS_FLAG_OK 0 +#define COMN_STATUS_FLAG_ERROR 1 + +struct fw_la_ssl_tls_common { + uint8_t out_len; /* Number of bytes of key material to output. */ + uint8_t label_len; /* Number of bytes of label for SSL and bytes + * for TLS key generation */ +}; + +struct fw_la_mgf_common { + uint8_t hash_len; + /* Number of bytes of hash output by the QAT per iteration */ + uint8_t seed_len; + /* Number of bytes of seed provided in src buffer for MGF1 */ +}; + +struct fw_cipher_hdr { + uint8_t state_sz; + /* State size in quad words of the cipher algorithm used in this session. + * Set to zero if the algorithm doesnt provide any state */ + uint8_t offset; + /* Quad word offset from the content descriptor parameters address i.e. + * (content_address + (cd_hdr_sz << 3)) to the parameters for the cipher + * processing */ + uint8_t curr_id; + /* Initialised with the cipher slice type */ + uint8_t next_id; + /* Set to the next slice to pass the ciphered data through. + * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * anymore slices after cipher */ + uint16_t resrvd; + /* Reserved padding byte to bring the struct to the word boundary. MUST be + * set to 0 */ + uint8_t state_padding_sz; + /* State padding size in quad words. Set to 0 if no padding is required. */ + uint8_t key_sz; + /* Key size in quad words of the cipher algorithm used in this session */ +}; + +struct fw_auth_hdr { + uint8_t hash_flags; + /* General flags defining the processing to perform. 0 is normal processing + * and 1 means there is a nested hash processing loop to go through */ + uint8_t offset; + /* Quad word offset from the content descriptor parameters address to the + * parameters for the auth processing */ + uint8_t curr_id; + /* Initialised with the auth slice type */ + uint8_t next_id; + /* Set to the next slice to pass data through. + * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * anymore slices after auth */ + union { + uint8_t inner_prefix_sz; + /* Size in bytes of the inner prefix data */ + uint8_t aad_sz; + /* Size in bytes of padded AAD data to prefix to the packet for CCM + * or GCM processing */ + } u; + + uint8_t outer_prefix_sz; + /* Size in bytes of outer prefix data */ + uint8_t final_sz; + /* Size in bytes of digest to be returned to the client if requested */ + uint8_t inner_res_sz; + /* Size in bytes of the digest from the inner hash algorithm */ + uint8_t resrvd; + /* This field is unused, assumed value is zero. */ + uint8_t inner_state1_sz; + /* Size in bytes of inner hash state1 data. 
Must be a qword multiple */ + uint8_t inner_state2_off; + /* Quad word offset from the content descriptor parameters pointer to the + * inner state2 value */ + uint8_t inner_state2_sz; + /* Size in bytes of inner hash state2 data. Must be a qword multiple */ + uint8_t outer_config_off; + /* Quad word offset from the content descriptor parameters pointer to the + * outer configuration information */ + uint8_t outer_state1_sz; + /* Size in bytes of the outer state1 value */ + uint8_t outer_res_sz; + /* Size in bytes of digest from the outer auth algorithm */ + uint8_t outer_prefix_off; + /* Quad word offset from the start of the inner prefix data to the outer + * prefix information. Should equal the rounded inner prefix size, converted + * to qwords */ +}; + +#define FW_AUTH_HDR_FLAG_DO_NESTED 1 +#define FW_AUTH_HDR_FLAG_NO_NESTED 0 + +struct fw_la_comn_req { + union { + uint16_t la_flags; + /* Definition of the common LA processing flags used for the + * bulk processing */ + union { + struct fw_la_ssl_tls_common ssl_tls_common; + /* For TLS or SSL Key Generation, this field is + * overloaded with ssl_tls common information */ + struct fw_la_mgf_common mgf_common; + /* For MGF Key Generation, this field is overloaded with + mgf information */ + } u; + } u; + + union { + uint8_t resrvd; + /* If not useRd by a request this field must be set to 0 */ + uint8_t tls_seed_len; + /* Byte Len of tls seed */ + uint8_t req_params_blk_sz; + /* For bulk processing this field represents the request + * parameters block size */ + uint8_t trng_cfg_sz; + /* This field is used for TRNG_ENABLE requests to indicate the + * size of the TRNG Slice configuration word. Size is in QW's */ + } u1; + uint8_t la_cmd_id; + /* Definition of the LA command defined by this request */ +}; + +#define LA_FLAGS_GCM_IV_LEN_FLAG __BIT(9) +#define LA_FLAGS_PROTO __BITS(8, 6) +#define LA_FLAGS_PROTO_SNOW_3G __SHIFTIN(4, LA_FLAGS_PROTO) +#define LA_FLAGS_PROTO_GCM __SHIFTIN(2, LA_FLAGS_PROTO) +#define LA_FLAGS_PROTO_CCM __SHIFTIN(1, LA_FLAGS_PROTO) +#define LA_FLAGS_PROTO_NO __SHIFTIN(0, LA_FLAGS_PROTO) +#define LA_FLAGS_DIGEST_IN_BUFFER __BIT(5) +#define LA_FLAGS_CMP_AUTH_RES __BIT(4) +#define LA_FLAGS_RET_AUTH_RES __BIT(3) +#define LA_FLAGS_UPDATE_STATE __BIT(2) +#define LA_FLAGS_PARTIAL __BITS(1, 0) + +struct fw_la_bulk_req { + struct fw_comn_req_hdr comn_hdr; + /* Common request header */ + uint32_t flow_id; + /* Field used by Firmware to limit the number of stateful requests + * for a session being processed at a given point of time */ + struct fw_la_comn_req comn_la_req; + /* Common LA request parameters */ + struct fw_comn_req_mid comn_mid; + /* Common request middle section */ + uint64_t req_params_addr; + /* Memory address of the request parameters */ + union fw_comn_req_ftr comn_ftr; + /* Common request footer */ +}; + +struct fw_la_resp { + struct fw_comn_resp_hdr comn_resp; + uint8_t resrvd[64 - sizeof(struct fw_comn_resp_hdr)]; + /* FW_RESP_DEFAULT_SZ_HW15 */ +}; + +struct fw_la_cipher_req_params { + uint8_t resrvd; + /* Reserved field and assumed set to 0 */ + uint8_t cipher_state_sz; + /* Number of quad words of state data for the cipher algorithm */ + uint8_t curr_id; + /* Initialised with the cipher slice type */ + uint8_t next_id; + /* Set to the next slice to pass the ciphered data through. 
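 * qat_hw15_crypto_setup_req_params() chains this to the auth slice when a hash follows the cipher, and to the DRAM-write slice otherwise.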
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * any more slices after cipher */ + uint16_t resrvd1; + /* Reserved field, should be set to zero */ + uint8_t resrvd2; + /* Reserved field, should be set to zero */ + uint8_t next_offset; + /* Offset in bytes to the next request parameter block */ + uint32_t cipher_off; + /* Byte offset from the start of packet to the cipher data region */ + uint32_t cipher_len; + /* Byte length of the cipher data region */ + uint64_t state_address; + /* Flat buffer address in memory of the cipher state information. Unused + * if the state size is 0 */ +}; + +struct fw_la_auth_req_params { + uint8_t auth_res_sz; + /* Size in quad words of digest information to validate */ + uint8_t hash_state_sz; + /* Number of quad words of inner and outer hash prefix data to process */ + uint8_t curr_id; + /* Initialised with the auth slice type */ + uint8_t next_id; + /* Set to the next slice to pass the auth data through. + * Set to ICP_QAT_FW_SLICE_NULL for in-place auth-only requests + * Set to ICP_QAT_FW_SLICE_DRAM_WR for all other request types + * if the data is not to go through any more slices after auth */ + union { + uint16_t resrvd; + /* Reserved field should be set to zero for bulk services */ + uint16_t tls_secret_len; + /* Length of Secret information for TLS. */ + } u; + uint8_t resrvd; + /* Reserved field, should be set to zero */ + uint8_t next_offset; + /* Offset in bytes to the next request parameter block */ + uint32_t auth_off; + /* Byte offset from the start of packet to the auth data region */ + uint32_t auth_len; + /* Byte length of the auth data region */ + union { + uint64_t prefix_addr; + /* Address of the prefix information */ + uint64_t aad_addr; + /* Address of the AAD info in DRAM. Used for the CCM and GCM + * protocols */ + } u1; + uint64_t auth_res_address; + /* Address of the auth result information to validate or the location to + * write back the digest information to */ +}; + +#endif Property changes on: head/sys/dev/qat/qat_hw15reg.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_hw15var.h =================================================================== --- head/sys/dev/qat/qat_hw15var.h (nonexistent) +++ head/sys/dev/qat/qat_hw15var.h (revision 367386) @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_hw15var.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2013 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_HW15VAR_H_ +#define _DEV_PCI_QAT_HW15VAR_H_ + +CTASSERT(HASH_CONTENT_DESC_SIZE >= + sizeof(struct fw_auth_hdr) + MAX_HASH_SETUP_BLK_SZ); +CTASSERT(CIPHER_CONTENT_DESC_SIZE >= + sizeof(struct fw_cipher_hdr) + MAX_CIPHER_SETUP_BLK_SZ); +CTASSERT(CONTENT_DESC_MAX_SIZE >= + roundup(HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE, + QAT_OPTIMAL_ALIGN)); +CTASSERT(QAT_SYM_REQ_PARAMS_SIZE_PADDED >= + roundup(sizeof(struct fw_la_cipher_req_params) + + sizeof(struct fw_la_auth_req_params), QAT_OPTIMAL_ALIGN)); + +/* Length of the 5 long words of the request that are stored in the session. + * This is rounded up to 32 in order to use the fast memcpy function */ +#define QAT_HW15_SESSION_REQ_CACHE_SIZE (32) + +void qat_msg_req_type_populate(struct arch_if_req_hdr *, + enum arch_if_req, uint32_t); +void qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *, bus_addr_t, + uint8_t, uint8_t, uint16_t, uint32_t); +void qat_msg_service_cmd_populate(struct fw_la_bulk_req *, + enum fw_la_cmd_id, uint16_t); +void qat_msg_cmn_mid_populate(struct fw_comn_req_mid *, void *, + uint64_t, uint64_t); +void qat_msg_req_params_populate(struct fw_la_bulk_req *, bus_addr_t, + uint8_t); +void qat_msg_cmn_footer_populate(union fw_comn_req_ftr *, uint64_t); +void qat_msg_params_populate(struct fw_la_bulk_req *, + struct qat_crypto_desc *, uint8_t, uint16_t, + uint16_t); + + +int qat_adm_ring_init(struct qat_softc *); +int qat_adm_ring_send_init(struct qat_softc *); + +void qat_hw15_crypto_setup_desc(struct qat_crypto *, + struct qat_session *, struct qat_crypto_desc *); +void qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *, + struct qat_session *, struct qat_crypto_desc const *, + struct qat_sym_cookie *, struct cryptop *); + +#endif Property changes on: head/sys/dev/qat/qat_hw15var.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_hw17.c =================================================================== --- head/sys/dev/qat/qat_hw17.c (nonexistent) +++ head/sys/dev/qat/qat_hw17.c (revision 367386) @@ -0,0 +1,662 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); +#if 0 +__KERNEL_RCSID(0, "$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $"); +#endif + +#include +#include +#include +#include + +#include + +#include + +#include +#include + +#include "qatreg.h" +#include "qat_hw17reg.h" +#include "qatvar.h" +#include "qat_hw17var.h" + +int qat_adm_mailbox_put_msg_sync(struct qat_softc *, uint32_t, + void *, void *); +int qat_adm_mailbox_send(struct qat_softc *, + struct fw_init_admin_req *, struct fw_init_admin_resp *); +int qat_adm_mailbox_send_init_me(struct qat_softc *); +int qat_adm_mailbox_send_hb_timer(struct qat_softc *); +int qat_adm_mailbox_send_fw_status(struct qat_softc *); +int qat_adm_mailbox_send_constants(struct qat_softc *); + +int +qat_adm_mailbox_init(struct qat_softc *sc) +{ + uint64_t addr; + int error; + struct qat_dmamem *qdm; + + error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_dma, 1, + PAGE_SIZE, PAGE_SIZE); + if (error) + return error; + + qdm = &sc->sc_admin_comms.qadc_const_tbl_dma; + error = qat_alloc_dmamem(sc, qdm, 1, PAGE_SIZE, PAGE_SIZE); + if (error) + return error; + + memcpy(qdm->qdm_dma_vaddr, + mailbox_const_tab, sizeof(mailbox_const_tab)); + + bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map, + BUS_DMASYNC_PREWRITE); + + error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_hb_dma, 1, + PAGE_SIZE, PAGE_SIZE); + if (error) + return error; + + addr = (uint64_t)sc->sc_admin_comms.qadc_dma.qdm_dma_seg.ds_addr; + qat_misc_write_4(sc, ADMINMSGUR, addr >> 32); + qat_misc_write_4(sc, ADMINMSGLR, addr); + + return 0; +} + +int +qat_adm_mailbox_put_msg_sync(struct qat_softc *sc, uint32_t ae, + void *in, void *out) +{ + struct qat_dmamem *qdm; + uint32_t mailbox; + bus_size_t mb_offset = MAILBOX_BASE + (ae * MAILBOX_STRIDE); + int offset = ae * ADMINMSG_LEN * 2; + int times, received; + uint8_t *buf = (uint8_t *)sc->sc_admin_comms.qadc_dma.qdm_dma_vaddr + offset; + + mailbox = qat_misc_read_4(sc, mb_offset); + if (mailbox == 1) + return EAGAIN; + + qdm = &sc->sc_admin_comms.qadc_dma; + memcpy(buf, in, ADMINMSG_LEN); + bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + qat_misc_write_4(sc, mb_offset, 1); + + received = 0; + for (times = 0; times < 50; times++) { + DELAY(20000); + if (qat_misc_read_4(sc, mb_offset) == 0) { + received = 1; + break; + } + } + if (received) { + bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN); + } else { + device_printf(sc->sc_dev, + "Failed to send admin msg to accelerator\n"); + } + + return received ? 
0 : EFAULT; +} + +int +qat_adm_mailbox_send(struct qat_softc *sc, + struct fw_init_admin_req *req, struct fw_init_admin_resp *resp) +{ + int error; + uint32_t mask; + uint8_t ae; + + for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) { + if (!(mask & 1)) + continue; + + error = qat_adm_mailbox_put_msg_sync(sc, ae, req, resp); + if (error) + return error; + if (resp->init_resp_hdr.status) { + device_printf(sc->sc_dev, + "Failed to send admin msg: cmd %d\n", + req->init_admin_cmd_id); + return EFAULT; + } + } + + return 0; +} + +int +qat_adm_mailbox_send_init_me(struct qat_softc *sc) +{ + struct fw_init_admin_req req; + struct fw_init_admin_resp resp; + + memset(&req, 0, sizeof(req)); + req.init_admin_cmd_id = FW_INIT_ME; + + return qat_adm_mailbox_send(sc, &req, &resp); +} + +int +qat_adm_mailbox_send_hb_timer(struct qat_softc *sc) +{ + struct fw_init_admin_req req; + struct fw_init_admin_resp resp; + + memset(&req, 0, sizeof(req)); + req.init_admin_cmd_id = FW_HEARTBEAT_TIMER_SET; + + req.init_cfg_ptr = sc->sc_admin_comms.qadc_hb_dma.qdm_dma_seg.ds_addr; + req.heartbeat_ticks = + sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL; + + return qat_adm_mailbox_send(sc, &req, &resp); +} + +int +qat_adm_mailbox_send_fw_status(struct qat_softc *sc) +{ + int error; + struct fw_init_admin_req req; + struct fw_init_admin_resp resp; + + memset(&req, 0, sizeof(req)); + req.init_admin_cmd_id = FW_STATUS_GET; + + error = qat_adm_mailbox_send(sc, &req, &resp); + if (error) + return error; + + return 0; +} + +int +qat_adm_mailbox_send_constants(struct qat_softc *sc) +{ + struct fw_init_admin_req req; + struct fw_init_admin_resp resp; + + memset(&req, 0, sizeof(req)); + req.init_admin_cmd_id = FW_CONSTANTS_CFG; + + req.init_cfg_sz = 1024; + req.init_cfg_ptr = + sc->sc_admin_comms.qadc_const_tbl_dma.qdm_dma_seg.ds_addr; + + return qat_adm_mailbox_send(sc, &req, &resp); +} + +int +qat_adm_mailbox_send_init(struct qat_softc *sc) +{ + int error; + + error = qat_adm_mailbox_send_init_me(sc); + if (error) + return error; + + error = qat_adm_mailbox_send_hb_timer(sc); + if (error) + return error; + + error = qat_adm_mailbox_send_fw_status(sc); + if (error) + return error; + + return qat_adm_mailbox_send_constants(sc); +} + +int +qat_arb_init(struct qat_softc *sc) +{ + uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1; + uint32_t arb, i; + const uint32_t *thd_2_arb_cfg; + + /* Service arb configured for 32-byte responses and + * ring flow control check enabled.
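The admin mailbox functions above implement a simple doorbell handshake: each accelerator engine owns a pair of ADMINMSG_LEN-byte slots in the shared admin page (request first, response second), and the per-AE mailbox register serves as the doorbell. A distilled sketch of qat_adm_mailbox_put_msg_sync(), omitting the real function's DMA syncs and 50-iteration timeout:

	memcpy(buf, in, ADMINMSG_LEN);		/* request into the first slot */
	qat_misc_write_4(sc, mb_offset, 1);	/* ring the per-AE doorbell */
	while (qat_misc_read_4(sc, mb_offset) != 0)
		DELAY(20000);			/* firmware clears it when done */
	memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN);	/* response slot */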
*/ + for (arb = 0; arb < MAX_ARB; arb++) + qat_arb_sarconfig_write_4(sc, arb, arb_cfg); + + /* Map worker threads to service arbiters */ + sc->sc_hw.qhw_get_arb_mapping(sc, &thd_2_arb_cfg); + + if (!thd_2_arb_cfg) + return EINVAL; + + for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) + qat_arb_wrk_2_ser_map_write_4(sc, i, *(thd_2_arb_cfg + i)); + + return 0; +} + +int +qat_set_ssm_wdtimer(struct qat_softc *sc) +{ + uint32_t timer; + u_int mask; + int i; + + timer = sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_SSM_WDT; + for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) { + if (!(mask & 1)) + continue; + qat_misc_write_4(sc, SSMWDT(i), timer); + qat_misc_write_4(sc, SSMWDTPKE(i), timer); + } + + return 0; +} + +int +qat_check_slice_hang(struct qat_softc *sc) +{ + int handled = 0; + + return handled; +} + +static uint32_t +qat_hw17_crypto_setup_cipher_ctrl(struct qat_crypto_desc *desc, + struct qat_session *qs, uint32_t cd_blk_offset, + struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice) +{ + struct fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = + (struct fw_cipher_cd_ctrl_hdr *)&req_tmpl->cd_ctrl; + + desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ; + desc->qcd_cipher_offset = cd_blk_offset; + + cipher_cd_ctrl->cipher_state_sz = desc->qcd_cipher_blk_sz >> 3; + cipher_cd_ctrl->cipher_key_sz = qs->qs_cipher_klen >> 3; + cipher_cd_ctrl->cipher_cfg_offset = cd_blk_offset >> 3; + FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER); + FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, next_slice); + + return roundup(sizeof(struct hw_cipher_config) + qs->qs_cipher_klen, 8); +} + +static void +qat_hw17_crypto_setup_cipher_cdesc(const struct qat_crypto_desc *desc, + const struct qat_session *qs, const struct cryptop *crp, + union hw_cipher_algo_blk *cipher) +{ + const uint8_t *key; + + cipher->max.cipher_config.val = + qat_crypto_load_cipher_session(desc, qs); + if (crp != NULL && crp->crp_cipher_key != NULL) + key = crp->crp_cipher_key; + else + key = qs->qs_cipher_key; + memcpy(cipher->max.key, key, qs->qs_cipher_klen); +} + +static uint32_t +qat_hw17_crypto_setup_auth_ctrl(struct qat_crypto_desc *desc, + struct qat_session *qs, uint32_t cd_blk_offset, + struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice) +{ + struct fw_auth_cd_ctrl_hdr *auth_cd_ctrl = + (struct fw_auth_cd_ctrl_hdr *)&req_tmpl->cd_ctrl; + struct qat_sym_hash_def const *hash_def; + + (void)qat_crypto_load_auth_session(desc, qs, &hash_def); + + auth_cd_ctrl->hash_cfg_offset = cd_blk_offset >> 3; + auth_cd_ctrl->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED; + auth_cd_ctrl->inner_res_sz = hash_def->qshd_alg->qshai_digest_len; + auth_cd_ctrl->final_sz = hash_def->qshd_alg->qshai_sah->hashsize; + + auth_cd_ctrl->inner_state1_sz = + roundup(hash_def->qshd_qat->qshqi_state1_len, 8); + auth_cd_ctrl->inner_state2_sz = + roundup(hash_def->qshd_qat->qshqi_state2_len, 8); + auth_cd_ctrl->inner_state2_offset = + auth_cd_ctrl->hash_cfg_offset + + ((sizeof(struct hw_auth_setup) + + auth_cd_ctrl->inner_state1_sz) >> 3); + + FW_COMN_CURR_ID_SET(auth_cd_ctrl, FW_SLICE_AUTH); + FW_COMN_NEXT_ID_SET(auth_cd_ctrl, next_slice); + + desc->qcd_auth_sz = auth_cd_ctrl->final_sz; + desc->qcd_auth_offset = cd_blk_offset; + desc->qcd_gcm_aad_sz_offset1 = + cd_blk_offset + offsetof(union hw_auth_algo_blk, max.state1) + + auth_cd_ctrl->inner_state1_sz + AES_BLOCK_LEN; + + return roundup(auth_cd_ctrl->inner_state1_sz + + auth_cd_ctrl->inner_state2_sz + + sizeof(struct hw_auth_setup), 8); +} + +static void +qat_hw17_crypto_setup_auth_cdesc(const struct qat_crypto_desc *desc, + 
const struct qat_session *qs, const struct cryptop *crp, + union hw_auth_algo_blk *auth) +{ + struct qat_sym_hash_def const *hash_def; + uint8_t inner_state1_sz, *state1, *state2; + const uint8_t *key; + + auth->max.inner_setup.auth_config.config = + qat_crypto_load_auth_session(desc, qs, &hash_def); + auth->max.inner_setup.auth_counter.counter = + htobe32(hash_def->qshd_qat->qshqi_auth_counter); + inner_state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8); + + state1 = auth->max.state1; + state2 = auth->max.state1 + inner_state1_sz; + switch (qs->qs_auth_algo) { + case HW_AUTH_ALGO_GALOIS_128: + key = NULL; + if (crp != NULL && crp->crp_cipher_key != NULL) + key = crp->crp_cipher_key; + else if (qs->qs_cipher_key != NULL) + key = qs->qs_cipher_key; + if (key != NULL) { + qat_crypto_gmac_precompute(desc, key, + qs->qs_cipher_klen, hash_def, state2); + } + break; + case HW_AUTH_ALGO_SHA1: + case HW_AUTH_ALGO_SHA256: + case HW_AUTH_ALGO_SHA384: + case HW_AUTH_ALGO_SHA512: + switch (qs->qs_auth_mode) { + case HW_AUTH_MODE0: + memcpy(state1, hash_def->qshd_alg->qshai_init_state, + inner_state1_sz); + /* Override for mode 0 hashes. */ + auth->max.inner_setup.auth_counter.counter = 0; + break; + case HW_AUTH_MODE1: + if (crp != NULL && crp->crp_auth_key != NULL) + key = crp->crp_auth_key; + else + key = qs->qs_auth_key; + if (key != NULL) { + qat_crypto_hmac_precompute(desc, key, + qs->qs_auth_klen, hash_def, state1, state2); + } + break; + default: + panic("%s: unhandled auth mode %d", __func__, + qs->qs_auth_mode); + } + break; + default: + panic("%s: unhandled auth algorithm %d", __func__, + qs->qs_auth_algo); + } +} + +static void +qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *desc, + struct fw_la_bulk_req *req) +{ + union fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars; + struct fw_comn_req_hdr *req_hdr = &req->comn_hdr; + + req_hdr->service_cmd_id = desc->qcd_cmd_id; + req_hdr->hdr_flags = FW_COMN_VALID; + req_hdr->service_type = FW_COMN_REQ_CPM_FW_LA; + req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD( + COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL); + req_hdr->serv_specif_flags = 0; + cd_pars->s.content_desc_addr = desc->qcd_desc_paddr; +} + +void +qat_hw17_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs, + struct qat_crypto_desc *desc) +{ + union hw_cipher_algo_blk *cipher; + union hw_auth_algo_blk *auth; + struct fw_la_bulk_req *req_tmpl; + struct fw_comn_req_hdr *req_hdr; + uint32_t cd_blk_offset = 0; + int i; + uint8_t *cd_blk_ptr; + + req_tmpl = (struct fw_la_bulk_req *)desc->qcd_req_cache; + req_hdr = &req_tmpl->comn_hdr; + cd_blk_ptr = desc->qcd_content_desc; + + memset(req_tmpl, 0, sizeof(struct fw_la_bulk_req)); + qat_hw17_init_comn_req_hdr(desc, req_tmpl); + + for (i = 0; i < MAX_FW_SLICE; i++) { + switch (desc->qcd_slices[i]) { + case FW_SLICE_CIPHER: + cipher = (union hw_cipher_algo_blk *)(cd_blk_ptr + + cd_blk_offset); + cd_blk_offset += qat_hw17_crypto_setup_cipher_ctrl(desc, + qs, cd_blk_offset, req_tmpl, + desc->qcd_slices[i + 1]); + qat_hw17_crypto_setup_cipher_cdesc(desc, qs, NULL, + cipher); + break; + case FW_SLICE_AUTH: + auth = (union hw_auth_algo_blk *)(cd_blk_ptr + + cd_blk_offset); + cd_blk_offset += qat_hw17_crypto_setup_auth_ctrl(desc, + qs, cd_blk_offset, req_tmpl, + desc->qcd_slices[i + 1]); + qat_hw17_crypto_setup_auth_cdesc(desc, qs, NULL, auth); + req_hdr->serv_specif_flags |= FW_LA_RET_AUTH_RES; + break; + case FW_SLICE_DRAM_WR: + i = MAX_FW_SLICE; /* end of chain */ + break; + default: + MPASS(0); + break; + } + } + + 
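	/*
	 * At this point cd_blk_offset totals the per-slice setup blocks
	 * written above (e.g. the cipher config plus key followed by the
	 * auth state for a cipher+auth chain); it is rounded up to
	 * QAT_OPTIMAL_ALIGN and converted to quad words for the content
	 * descriptor parameter size that follows.
	 */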
req_tmpl->cd_pars.s.content_desc_params_sz = + roundup(cd_blk_offset, QAT_OPTIMAL_ALIGN) >> 3; + if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) + req_hdr->serv_specif_flags |= + FW_LA_PROTO_GCM | FW_LA_GCM_IV_LEN_12_OCTETS; + + bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag, + qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE); +} + +static void +qat_hw17_crypto_req_setkey(const struct qat_crypto_desc *desc, + const struct qat_session *qs, struct qat_sym_cookie *qsc, + struct fw_la_bulk_req *bulk_req, const struct cryptop *crp) +{ + union hw_auth_algo_blk *auth; + union hw_cipher_algo_blk *cipher; + uint8_t *cdesc; + int i; + + cdesc = qsc->qsc_content_desc; + memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE); + for (i = 0; i < MAX_FW_SLICE; i++) { + switch (desc->qcd_slices[i]) { + case FW_SLICE_CIPHER: + cipher = (union hw_cipher_algo_blk *) + (cdesc + desc->qcd_cipher_offset); + qat_hw17_crypto_setup_cipher_cdesc(desc, qs, crp, + cipher); + break; + case FW_SLICE_AUTH: + auth = (union hw_auth_algo_blk *) + (cdesc + desc->qcd_auth_offset); + qat_hw17_crypto_setup_auth_cdesc(desc, qs, crp, auth); + break; + case FW_SLICE_DRAM_WR: + i = MAX_FW_SLICE; /* end of chain */ + break; + default: + MPASS(0); + } + } + + bulk_req->cd_pars.s.content_desc_addr = qsc->qsc_content_desc_paddr; +} + +void +qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb __unused, + struct qat_session *qs, const struct qat_crypto_desc *desc, + struct qat_sym_cookie *qsc, struct cryptop *crp) +{ + struct qat_sym_bulk_cookie *qsbc; + struct fw_la_bulk_req *bulk_req; + struct fw_la_cipher_req_params *cipher_param; + struct fw_la_auth_req_params *auth_param; + bus_addr_t digest_paddr; + uint32_t aad_sz, *aad_szp; + uint8_t *req_params_ptr; + enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id; + + qsbc = &qsc->u.qsc_bulk_cookie; + bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg; + + memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req)); + bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc; + bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr; + bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr; + if (__predict_false(crp->crp_cipher_key != NULL || + crp->crp_auth_key != NULL)) + qat_hw17_crypto_req_setkey(desc, qs, qsc, bulk_req, crp); + + digest_paddr = 0; + if (desc->qcd_auth_sz != 0) + digest_paddr = qsc->qsc_auth_res_paddr; + + req_params_ptr = (uint8_t *)&bulk_req->serv_specif_rqpars; + cipher_param = (struct fw_la_cipher_req_params *)req_params_ptr; + auth_param = (struct fw_la_auth_req_params *) + (req_params_ptr + sizeof(struct fw_la_cipher_req_params)); + + cipher_param->u.s.cipher_IV_ptr = qsc->qsc_iv_buf_paddr; + + /* + * The SG list layout is a bit different for GCM and GMAC, it's simpler + * to handle those cases separately. + */ + if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) { + if (cmd_id != FW_LA_CMD_AUTH) { + /* + * Don't fill out the cipher block if we're doing GMAC + * only. + */ + cipher_param->cipher_offset = 0; + cipher_param->cipher_length = crp->crp_payload_length; + } + auth_param->auth_off = 0; + auth_param->auth_len = crp->crp_payload_length; + auth_param->auth_res_addr = digest_paddr; + auth_param->auth_res_sz = desc->qcd_auth_sz; + auth_param->u1.aad_adr = + crp->crp_aad_length > 0 ? qsc->qsc_gcm_aad_paddr : 0; + auth_param->u2.aad_sz = + roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN); + auth_param->hash_state_sz = auth_param->u2.aad_sz >> 3; + + /* + * Update the hash state block if necessary. 
This only occurs + * when the AAD length changes between requests in a session and + * is synchronized by qat_process(). + */ + aad_sz = htobe32(crp->crp_aad_length); + aad_szp = (uint32_t *)( + __DECONST(uint8_t *, desc->qcd_content_desc) + + desc->qcd_gcm_aad_sz_offset1); + if (__predict_false(*aad_szp != aad_sz)) { + *aad_szp = aad_sz; + bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag, + qs->qs_desc_mem.qdm_dma_map, + BUS_DMASYNC_PREWRITE); + } + } else { + if (cmd_id != FW_LA_CMD_AUTH) { + cipher_param->cipher_offset = + crp->crp_aad_length == 0 ? 0 : + crp->crp_payload_start - crp->crp_aad_start; + cipher_param->cipher_length = crp->crp_payload_length; + } + if (cmd_id != FW_LA_CMD_CIPHER) { + auth_param->auth_off = 0; + auth_param->auth_len = + crp->crp_payload_length + crp->crp_aad_length; + auth_param->auth_res_addr = digest_paddr; + auth_param->auth_res_sz = desc->qcd_auth_sz; + auth_param->u1.aad_adr = 0; + auth_param->u2.aad_sz = 0; + auth_param->hash_state_sz = 0; + } + } +} Property changes on: head/sys/dev/qat/qat_hw17.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qat_hw17reg.h =================================================================== --- head/sys/dev/qat/qat_hw17reg.h (nonexistent) +++ head/sys/dev/qat/qat_hw17reg.h (revision 367386) @@ -0,0 +1,2460 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qat_hw17reg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_HW17REG_H_ +#define _DEV_PCI_QAT_HW17REG_H_ + +/* Default message size in bytes */ +#define FW_REQ_DEFAULT_SZ_HW17 128 +#define FW_RESP_DEFAULT_SZ_HW17 32 + +/* -------------------------------------------------------------------------- */ +/* accel */ + +enum fw_init_admin_cmd_id { + FW_INIT_ME = 0, + FW_TRNG_ENABLE = 1, + FW_TRNG_DISABLE = 2, + FW_CONSTANTS_CFG = 3, + FW_STATUS_GET = 4, + FW_COUNTERS_GET = 5, + FW_LOOPBACK = 6, + FW_HEARTBEAT_SYNC = 7, + FW_HEARTBEAT_GET = 8, + FW_COMP_CAPABILITY_GET = 9, + FW_CRYPTO_CAPABILITY_GET = 10, + FW_HEARTBEAT_TIMER_SET = 13, +}; + +enum fw_init_admin_resp_status { + FW_INIT_RESP_STATUS_SUCCESS = 0, + FW_INIT_RESP_STATUS_FAIL = 1, + FW_INIT_RESP_STATUS_UNSUPPORTED = 4 +}; + +struct fw_init_admin_req { + uint16_t init_cfg_sz; + uint8_t resrvd1; + uint8_t init_admin_cmd_id; + uint32_t resrvd2; + uint64_t opaque_data; + uint64_t init_cfg_ptr; + + union { + struct { + uint16_t ibuf_size_in_kb; + uint16_t resrvd3; + }; + uint32_t heartbeat_ticks; + }; + + uint32_t resrvd4; +}; + +struct fw_init_admin_resp_hdr { + uint8_t flags; + uint8_t resrvd1; + uint8_t status; + uint8_t init_admin_cmd_id; +}; + +enum fw_init_admin_init_flag { + FW_INIT_FLAG_PKE_DISABLED = 0 +}; + +struct fw_init_admin_fw_capability_resp_hdr { + uint16_t reserved; + uint8_t status; + uint8_t init_admin_cmd_id; +}; + +struct fw_init_admin_capability_resp { + struct fw_init_admin_fw_capability_resp_hdr init_resp_hdr; + uint32_t extended_features; + uint64_t opaque_data; + union { + struct { + uint16_t compression_algos; + uint16_t checksum_algos; + uint32_t deflate_capabilities; + uint32_t resrvd1; + uint32_t lzs_capabilities; + } compression; + struct { + uint32_t cipher_algos; + uint32_t hash_algos; + uint16_t keygen_algos; + uint16_t other; + uint16_t public_key_algos; + uint16_t prime_algos; + } crypto; + }; +}; + +struct fw_init_admin_resp_pars { + union { + uint32_t resrvd1[4]; + struct { + uint32_t version_patch_num; + uint8_t context_id; + uint8_t ae_id; + uint16_t resrvd1; + uint64_t resrvd2; + } s1; + struct { + uint64_t req_rec_count; + uint64_t resp_sent_count; + } s2; + } u; +}; + +struct fw_init_admin_hb_cnt { + uint16_t resp_heartbeat_cnt; + uint16_t req_heartbeat_cnt; +}; + +#define QAT_NUM_THREADS 8 + +struct fw_init_admin_hb_stats { + struct fw_init_admin_hb_cnt stats[QAT_NUM_THREADS]; +}; + +struct fw_init_admin_resp { + struct fw_init_admin_resp_hdr init_resp_hdr; + union { + uint32_t resrvd2; + struct { + uint16_t version_minor_num; 
+ uint16_t version_major_num; + } s; + } u; + uint64_t opaque_data; + struct fw_init_admin_resp_pars init_resp_pars; +}; + +#define FW_COMN_HEARTBEAT_OK 0 +#define FW_COMN_HEARTBEAT_BLOCKED 1 +#define FW_COMN_HEARTBEAT_FLAG_BITPOS 0 +#define FW_COMN_HEARTBEAT_FLAG_MASK 0x1 +#define FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE +#define FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \ + FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags) + +#define FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \ + FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val) + +#define FW_COMN_HEARTBEAT_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, \ + FW_COMN_HEARTBEAT_FLAG_BITPOS, \ + FW_COMN_HEARTBEAT_FLAG_MASK) + +/* -------------------------------------------------------------------------- */ + +/* Big assumptions that both bitpos and mask are constants */ +#define FIELD_SET(flags, val, bitpos, mask) \ + (flags) = \ + (((flags) & (~((mask) << (bitpos)))) | (((val) & (mask)) << (bitpos))) + +#define FIELD_GET(flags, bitpos, mask) (((flags) >> (bitpos)) & (mask)) + +#define FLAG_SET(flags, bitpos) (flags) = ((flags) | (1 << (bitpos))) + +#define FLAG_CLEAR(flags, bitpos) (flags) = ((flags) & (~(1 << (bitpos)))) + +#define FLAG_GET(flags, bitpos) (((flags) >> (bitpos)) & 1) + +/* Default request and response ring size in bytes */ +#define FW_REQ_DEFAULT_SZ 128 +#define FW_RESP_DEFAULT_SZ 32 + +#define FW_COMN_ONE_BYTE_SHIFT 8 +#define FW_COMN_SINGLE_BYTE_MASK 0xFF + +/* Common Request - Block sizes definitions in multiples of individual long + * words */ +#define FW_NUM_LONGWORDS_1 1 +#define FW_NUM_LONGWORDS_2 2 +#define FW_NUM_LONGWORDS_3 3 +#define FW_NUM_LONGWORDS_4 4 +#define FW_NUM_LONGWORDS_5 5 +#define FW_NUM_LONGWORDS_6 6 +#define FW_NUM_LONGWORDS_7 7 +#define FW_NUM_LONGWORDS_10 10 +#define FW_NUM_LONGWORDS_13 13 + +/* Definition of the associated service Id for NULL service type. + Note: the response is expected to use FW_COMN_RESP_SERV_CPM_FW */ +#define FW_NULL_REQ_SERV_ID 1 + +/* + * Definition of the firmware interface service users, for + * responses. + * Enumeration which is used to indicate the ids of the services + * for responses using the external firmware interfaces. + */ + +enum fw_comn_resp_serv_id { + FW_COMN_RESP_SERV_NULL, /* NULL service id type */ + FW_COMN_RESP_SERV_CPM_FW, /* CPM FW Service ID */ + FW_COMN_RESP_SERV_DELIMITER /* Delimiter service id type */ +}; + +/* + * Definition of the request types + * Enumeration which is used to indicate the ids of the request + * types used in each of the external firmware interfaces + */ + +enum fw_comn_request_id { + FW_COMN_REQ_NULL = 0, /* NULL request type */ + FW_COMN_REQ_CPM_FW_PKE = 3, /* CPM FW PKE Request */ + FW_COMN_REQ_CPM_FW_LA = 4, /* CPM FW Lookaside Request */ + FW_COMN_REQ_CPM_FW_DMA = 7, /* CPM FW DMA Request */ + FW_COMN_REQ_CPM_FW_COMP = 9, /* CPM FW Compression Request */ + FW_COMN_REQ_DELIMITER /* End delimiter */ + +}; + +/* + * Definition of the common QAT FW request content descriptor field - + * points to the content descriptor parameters or itself contains service- + * specific data. Also specifies content descriptor parameter size. + * Contains reserved fields. + * Common section of the request used across all of the services exposed + * by the QAT FW. 
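FIELD_SET() and FIELD_GET() are the generic helpers behind most of the flag accessors in this header. A small sketch (not in the commit) that round-trips the heartbeat flag defined above:

	uint8_t flags = 0;

	FIELD_SET(flags, FW_COMN_HEARTBEAT_BLOCKED,
	    FW_COMN_HEARTBEAT_FLAG_BITPOS, FW_COMN_HEARTBEAT_FLAG_MASK);
	/* FIELD_GET() with the same bitpos/mask now returns 1 (BLOCKED). */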
Each of the services inherit these common fields + */ +union fw_comn_req_hdr_cd_pars { + /* LWs 2-5 */ + struct + { + uint64_t content_desc_addr; + /* Address of the content descriptor */ + + uint16_t content_desc_resrvd1; + /* Content descriptor reserved field */ + + uint8_t content_desc_params_sz; + /* Size of the content descriptor parameters in quad words. These + * parameters describe the session setup configuration info for the + * slices that this request relies upon i.e. the configuration word and + * cipher key needed by the cipher slice if there is a request for + * cipher processing. */ + + uint8_t content_desc_hdr_resrvd2; + /* Content descriptor reserved field */ + + uint32_t content_desc_resrvd3; + /* Content descriptor reserved field */ + } s; + + struct + { + uint32_t serv_specif_fields[FW_NUM_LONGWORDS_4]; + + } s1; + +}; + +/* + * Definition of the common QAT FW request middle block. + * Common section of the request used across all of the services exposed + * by the QAT FW. Each of the services inherit these common fields + */ +struct fw_comn_req_mid +{ + /* LWs 6-13 */ + uint64_t opaque_data; + /* Opaque data passed unmodified from the request to response messages by + * firmware (fw) */ + + uint64_t src_data_addr; + /* Generic definition of the source data supplied to the QAT AE. The + * common flags are used to further describe the attributes of this + * field */ + + uint64_t dest_data_addr; + /* Generic definition of the destination data supplied to the QAT AE. The + * common flags are used to further describe the attributes of this + * field */ + + uint32_t src_length; + /* Length of source flat buffer in case the src buffer + * type is flat */ + + uint32_t dst_length; + /* Length of destination flat buffer in case the dst buffer + * type is flat */ + +}; + +/* + * Definition of the common QAT FW request content descriptor control + * block. + * + * Service specific section of the request used across all of the services + * exposed by the QAT FW. Each of the services populates this block + * uniquely. Refer to the service-specific header structures e.g. + * 'fw_cipher_hdr_s' (for Cipher) etc. + */ +struct fw_comn_req_cd_ctrl +{ + /* LWs 27-31 */ + uint32_t content_desc_ctrl_lw[FW_NUM_LONGWORDS_5]; + +}; + +/* + * Definition of the common QAT FW request header. + * Common section of the request used across all of the services exposed + * by the QAT FW. Each of the services inherit these common fields. The + * reserved field of 7 bits and the service command Id field are all + * service-specific fields, along with the service specific flags. + */ +struct fw_comn_req_hdr +{ + /* LW0 */ + uint8_t resrvd1; + /* reserved field */ + + uint8_t service_cmd_id; + /* Service Command Id - this field is service-specific + * Please use service-specific command Id here e.g. Crypto Command Id + * or Compression Command Id etc. */ + + uint8_t service_type; + /* Service type */ + + uint8_t hdr_flags; + /* This represents a flags field for the Service Request. + * The most significant bit is the 'valid' flag and the only + * one used. All remaining bit positions are unused and + * are therefore reserved and need to be set to 0. */ + + /* LW1 */ + uint16_t serv_specif_flags; + /* Common Request service-specific flags + * e.g.
Symmetric Crypto Command Flags */ + + uint16_t comn_req_flags; + /* Common Request Flags consisting of + * - 14 reserved bits, + * - 1 Content Descriptor field type bit and + * - 1 Source/destination pointer type bit */ + +}; + +/* + * Definition of the common QAT FW request parameter field. + * + * Service specific section of the request used across all of the services + * exposed by the QAT FW. Each of the services populates this block + * uniquely. Refer to service-specific header structures e.g. + * 'fw_comn_req_cipher_rqpars_s' (for Cipher) etc. + * + */ +struct fw_comn_req_rqpars +{ + /* LWs 14-26 */ + uint32_t serv_specif_rqpars_lw[FW_NUM_LONGWORDS_13]; + +}; + +/* + * Definition of the common request structure with service specific + * fields + * This is a definition of the full qat request structure used by all + * services. Each service is free to use the service fields in its own + * way. This struct is useful as a message passing argument before the + * service contained within the request is determined. + */ +struct fw_comn_req +{ + /* LWs 0-1 */ + struct fw_comn_req_hdr comn_hdr; + /* Common request header */ + + /* LWs 2-5 */ + union fw_comn_req_hdr_cd_pars cd_pars; + /* Common Request content descriptor field which points either to a + * content descriptor + * parameter block or contains the service-specific data itself. */ + + /* LWs 6-13 */ + struct fw_comn_req_mid comn_mid; + /* Common request middle section */ + + /* LWs 14-26 */ + struct fw_comn_req_rqpars serv_specif_rqpars; + /* Common request service-specific parameter field */ + + /* LWs 27-31 */ + struct fw_comn_req_cd_ctrl cd_ctrl; + /* Common request content descriptor control block - + * this field is service-specific */ + +}; + +/* + * Error code field + * + * Overloaded field with 8 bit common error field or two + * 8 bit compression error fields for compression and translator slices + */ +union fw_comn_error { + struct + { + uint8_t resrvd; + /* 8 bit reserved field */ + + uint8_t comn_err_code; + /* 8 bit common error code */ + + } s; + /* Structure which is used for non-compression responses */ + + struct + { + uint8_t xlat_err_code; + /* 8 bit translator error field */ + + uint8_t cmp_err_code; + /* 8 bit compression error field */ + + } s1; + /* Structure which is used for compression responses */ + +}; + +/* + * Definition of the common QAT FW response header. + * This section of the response is common across all of the services + * that generate a firmware interface response + */ +struct fw_comn_resp_hdr +{ + /* LW0 */ + uint8_t resrvd1; + /* Reserved field - this field is service-specific - + * Note: The Response Destination Id has been removed + * from first QWord */ + + uint8_t service_id; + /* Service Id returned by service block */ + + uint8_t response_type; + /* Response type - copied from the request to + * the response message */ + + uint8_t hdr_flags; + /* This represents a flags field for the Response. + * Bit<7> = 'valid' flag + * Bit<6> = 'CNV' flag indicating that CNV was executed + * on the current request + * Bit<5> = 'CNVNR' flag indicating that a recovery happened + * on the current request following a CNV error + * All remaining bits are unused and are therefore reserved. + * They must be set to 0.
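A response is only meaningful once the firmware has set the valid bit described above. A sketch (the helper name is hypothetical; the accessor macros are defined further down in this header):

	static inline int
	qat_resp_is_valid(const struct fw_comn_resp *resp)
	{
		/* Firmware sets bit 7 of hdr_flags when the entry is complete. */
		return (FW_COMN_HDR_VALID_FLAG_GET(resp->comn_hdr) != 0);
	}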
+ */ + + /* LW 1 */ + union fw_comn_error comn_error; + /* This field is overloaded to allow for one 8 bit common error field + * or two 8 bit error fields from compression and translator */ + + uint8_t comn_status; + /* Status field which specifies which slice(s) report an error */ + + uint8_t cmd_id; + /* Command Id - passed from the request to the response message */ + +}; + +/* + * Definition of the common response structure with service specific + * fields + * This is a definition of the full qat response structure used by all + * services. + */ +struct fw_comn_resp +{ + /* LWs 0-1 */ + struct fw_comn_resp_hdr comn_hdr; + /* Common header fields */ + + /* LWs 2-3 */ + uint64_t opaque_data; + /* Opaque data passed from the request to the response message */ + + /* LWs 4-7 */ + uint32_t resrvd[FW_NUM_LONGWORDS_4]; + /* Reserved */ + +}; + +/* Common QAT FW request header - structure of LW0 + * + ===== + ---- + ----------- + ----------- + ----------- + ----------- + + * | Bit | 31 | 30 - 24 | 21 - 16 | 15 - 8 | 7 - 0 | + * + ===== + ---- + ----------- + ----------- + ----------- + ----------- + + * | Flags | V | Reserved | Serv Type | Serv Cmd Id | Reserved | + * + ===== + ---- + ----------- + ----------- + ----------- + ----------- + + */ + +#define FW_COMN_VALID __BIT(7) + +/* Common QAT FW response header - structure of LW0 + * + ===== + --- + --- + ----- + ----- + --------- + ----------- + ----- + + * | Bit | 31 | 30 | 29 | 28-24 | 21 - 16 | 15 - 8 | 7-0 | + * + ===== + --- + ----+ ----- + ----- + --------- + ----------- + ----- + + * | Flags | V | CNV | CNVNR | Rsvd | Serv Type | Serv Cmd Id | Rsvd | + * + ===== + --- + --- + ----- + ----- + --------- + ----------- + ----- + */ +/* Macros defining the bit position and mask of 'CNV' flag + * within the hdr_flags field of LW0 (service response only) */ +#define FW_COMN_CNV_FLAG_BITPOS 6 +#define FW_COMN_CNV_FLAG_MASK 0x1 + +/* Macros defining the bit position and mask of CNVNR flag + * within the hdr_flags field of LW0 (service response only) */ +#define FW_COMN_CNVNR_FLAG_BITPOS 5 +#define FW_COMN_CNVNR_FLAG_MASK 0x1 + +/* + * Macro for extraction of Service Type Field + * + * struct fw_comn_req_hdr Structure 'fw_comn_req_hdr_t' + * to extract the Service Type Field + */ +#define FW_COMN_OV_SRV_TYPE_GET(fw_comn_req_hdr_t) \ + fw_comn_req_hdr_t.service_type + +/* + * Macro for setting of Service Type Field + * + * 'fw_comn_req_hdr_t' structure to set the Service + * Type Field + * val Value of the Service Type Field + */ +#define FW_COMN_OV_SRV_TYPE_SET(fw_comn_req_hdr_t, val) \ + fw_comn_req_hdr_t.service_type = val + +/* + * Macro for extraction of Service Command Id Field + * + * struct fw_comn_req_hdr Structure 'fw_comn_req_hdr_t' + * to extract the Service Command Id Field + */ +#define FW_COMN_OV_SRV_CMD_ID_GET(fw_comn_req_hdr_t) \ + fw_comn_req_hdr_t.service_cmd_id + +/* + * Macro for setting of Service Command Id Field + * + * 'fw_comn_req_hdr_t' structure to set the + * Service Command Id Field + * val Value of the Service Command Id Field + */ +#define FW_COMN_OV_SRV_CMD_ID_SET(fw_comn_req_hdr_t, val) \ + fw_comn_req_hdr_t.service_cmd_id = val + +/* + * Extract the valid flag from the request or response's header flags. + * + * hdr_t Request or Response 'hdr_t' structure to extract the valid bit + * from the 'hdr_flags' field. + */ +#define FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ + FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) + +/* + * Extract the CNVNR flag from the header flags in the response only. 
+ * + * hdr_t Response 'hdr_t' structure to extract the CNVNR bit + * from the 'hdr_flags' field. + */ +#define FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \ + FIELD_GET(hdr_flags, \ + FW_COMN_CNVNR_FLAG_BITPOS, \ + FW_COMN_CNVNR_FLAG_MASK) + +/* + * Extract the CNV flag from the header flags in the response only. + * + * hdr_t Response 'hdr_t' structure to extract the CNV bit + * from the 'hdr_flags' field. + */ +#define FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \ + FIELD_GET(hdr_flags, \ + FW_COMN_CNV_FLAG_BITPOS, \ + FW_COMN_CNV_FLAG_MASK) + +/* + * Set the valid bit in the request's header flags. + * + * hdr_t Request or Response 'hdr_t' structure to set the valid bit + * val Value of the valid bit flag. + */ +#define FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ + FW_COMN_VALID_FLAG_SET(hdr_t, val) + +/* + * Common macro to extract the valid flag from the header flags field + * within the header structure (request or response). + * + * hdr_t Structure (request or response) to extract the + * valid bit from the 'hdr_flags' field. + */ +#define FW_COMN_VALID_FLAG_GET(hdr_flags) \ + FIELD_GET(hdr_flags, \ + FW_COMN_VALID_FLAG_BITPOS, \ + FW_COMN_VALID_FLAG_MASK) + +/* + * Common macro to extract the remaining reserved flags from the header + * flags field within the header structure (request or response). + * + * hdr_t Structure (request or response) to extract the + * remaining bits from the 'hdr_flags' field (excluding the + * valid flag). + */ +#define FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ + (hdr_flags & FW_COMN_HDR_RESRVD_FLD_MASK) + +/* + * Common macro to set the valid bit in the header flags field within + * the header structure (request or response). + * + * hdr_t Structure (request or response) containing the header + * flags field, to allow the valid bit to be set. + * val Value of the valid bit flag. + */ +#define FW_COMN_VALID_FLAG_SET(hdr_t, val) \ + FIELD_SET((hdr_t.hdr_flags), \ + (val), \ + FW_COMN_VALID_FLAG_BITPOS, \ + FW_COMN_VALID_FLAG_MASK) + +/* + * Macro that must be used when building the common header flags. + * Note that all bits reserved field bits 0-6 (LW0) need to be forced to 0. + * + * valid Value of the valid flag + */ + +#define FW_COMN_HDR_FLAGS_BUILD(valid) \ + (((valid)&FW_COMN_VALID_FLAG_MASK) \ + << FW_COMN_VALID_FLAG_BITPOS) + +/* + * Common Request Flags Definition + * The bit offsets below are within the flags field. These are NOT relative to + * the memory word. Unused fields e.g. reserved bits, must be zeroed.
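FW_COMN_HDR_FLAGS_BUILD() only ever sets the valid bit. Assuming the bit-7 valid flag shown in the LW0 diagram above (the BITPOS/MASK constants it uses are defined elsewhere in this header), the two assignments below are equivalent; qat_hw17_init_comn_req_hdr() earlier in this commit uses the direct form:

	uint8_t hdr_flags;

	hdr_flags = FW_COMN_HDR_FLAGS_BUILD(1);	/* via the builder */
	hdr_flags = FW_COMN_VALID;		/* equivalent direct form */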
+ * + * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + + * | Bits [15:8] | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | + * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + + * | Flags[15:8] | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | Rsv | + * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + + * | Bits [7:0] | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + + * | Flags [7:0] | Rsv | Rsv | Rsv | Rsv | Rsv | BnP | Cdt | Ptr | + * + ===== + ------ + --- + --- + --- + --- + --- + --- + --- + --- + + */ + +#define COMN_PTR_TYPE_BITPOS 0 +/* Common Request Flags - Starting bit position indicating + * Src&Dst Buffer Pointer type */ + +#define COMN_PTR_TYPE_MASK 0x1 +/* Common Request Flags - One bit mask used to determine + * Src&Dst Buffer Pointer type */ + +#define COMN_CD_FLD_TYPE_BITPOS 1 +/* Common Request Flags - Starting bit position indicating + * CD Field type */ + +#define COMN_CD_FLD_TYPE_MASK 0x1 +/* Common Request Flags - One bit mask used to determine + * CD Field type */ + +#define COMN_BNP_ENABLED_BITPOS 2 +/* Common Request Flags - Starting bit position indicating + * the source buffer contains a batch of requests. If this + * bit is set, the source buffer is a Batch And Pack OpData List + * and the Ptr Type Bit only applies to the Destination buffer. */ + +#define COMN_BNP_ENABLED_MASK 0x1 +/* Batch And Pack Enabled Flag Mask - One bit mask used to determine + * the source buffer is in Batch and Pack OpData Link List Mode. */ + +/* ========================================================================= */ +/* Pointer Type Flag definitions */ +/* ========================================================================= */ +#define COMN_PTR_TYPE_FLAT 0x0 +/* Constant value indicating Src&Dst Buffer Pointer type is flat + * If Batch and Pack mode is enabled, only applies to Destination buffer. */ + +#define COMN_PTR_TYPE_SGL 0x1 +/* Constant value indicating Src&Dst Buffer Pointer type is SGL type + * If Batch and Pack mode is enabled, only applies to Destination buffer. */ + +#define COMN_PTR_TYPE_BATCH 0x2 +/* Constant value indicating Src is a batch request + * and Dst Buffer Pointer type is SGL type */ + +/* ========================================================================= */ +/* CD Field Flag definitions */ +/* ========================================================================= */ +#define COMN_CD_FLD_TYPE_64BIT_ADR 0x0 +/* Constant value indicating CD Field contains 64-bit address */ + +#define COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 +/* Constant value indicating CD Field contains 16 bytes of setup data */ + +/* ========================================================================= */ +/* Batch And Pack Enable/Disable Definitions */ +/* ========================================================================= */ +#define COMN_BNP_ENABLED 0x1 +/* Constant value indicating Source buffer will point to Batch And Pack OpData + * List */ + +#define COMN_BNP_DISABLED 0x0 +/* Constant value indicating Source buffer will not point to Batch And Pack + * OpData List */ + +/* + * Macro that must be used when building the common request flags (for all + * requests but comp BnP). + * Note that all bits reserved field bits 2-15 (LW1) need to be forced to 0.
+ * + * ptr Value of the pointer type flag + * cdt Value of the cd field type flag +*/ +#define FW_COMN_FLAGS_BUILD(cdt, ptr) \ + ((((cdt)&COMN_CD_FLD_TYPE_MASK) << COMN_CD_FLD_TYPE_BITPOS) | \ + (((ptr)&COMN_PTR_TYPE_MASK) << COMN_PTR_TYPE_BITPOS)) + +/* + * Macro that must be used when building the common request flags for comp + * BnP service. + * Note that all bits reserved field bits 3-15 (LW1) need to be forced to 0. + * + * ptr Value of the pointer type flag + * cdt Value of the cd field type flag + * bnp Value of the bnp enabled flag + */ +#define FW_COMN_FLAGS_BUILD_BNP(cdt, ptr, bnp) \ + ((((cdt)&COMN_CD_FLD_TYPE_MASK) << COMN_CD_FLD_TYPE_BITPOS) | \ + (((ptr)&COMN_PTR_TYPE_MASK) << COMN_PTR_TYPE_BITPOS) | \ + (((bnp)&COMN_BNP_ENABLED_MASK) << COMN_BNP_ENABLED_BITPOS)) + +/* + * Macro for extraction of the pointer type bit from the common flags + * + * flags Flags to extract the pointer type bit from + */ +#define FW_COMN_PTR_TYPE_GET(flags) \ + FIELD_GET(flags, COMN_PTR_TYPE_BITPOS, COMN_PTR_TYPE_MASK) + +/* + * Macro for extraction of the cd field type bit from the common flags + * + * flags Flags to extract the cd field type bit from + */ +#define FW_COMN_CD_FLD_TYPE_GET(flags) \ + FIELD_GET(flags, COMN_CD_FLD_TYPE_BITPOS, COMN_CD_FLD_TYPE_MASK) + +/* + * Macro for extraction of the bnp field type bit from the common flags + * + * flags Flags to extract the bnp field type bit from + * + */ +#define FW_COMN_BNP_ENABLED_GET(flags) \ + FIELD_GET(flags, COMN_BNP_ENABLED_BITPOS, COMN_BNP_ENABLED_MASK) + +/* + * Macro for setting the pointer type bit in the common flags + * + * flags Flags in which Pointer Type bit will be set + * val Value of the bit to be set in flags + * + */ +#define FW_COMN_PTR_TYPE_SET(flags, val) \ + FIELD_SET(flags, val, COMN_PTR_TYPE_BITPOS, COMN_PTR_TYPE_MASK) + +/* + * Macro for setting the cd field type bit in the common flags + * + * flags Flags in which Cd Field Type bit will be set + * val Value of the bit to be set in flags + * + */ +#define FW_COMN_CD_FLD_TYPE_SET(flags, val) \ + FIELD_SET( \ + flags, val, COMN_CD_FLD_TYPE_BITPOS, COMN_CD_FLD_TYPE_MASK) + +/* + * Macro for setting the bnp field type bit in the common flags + * + * flags Flags in which Bnp Field Type bit will be set + * val Value of the bit to be set in flags + * + */ +#define FW_COMN_BNP_ENABLE_SET(flags, val) \ + FIELD_SET( \ + flags, val, COMN_BNP_ENABLED_BITPOS, COMN_BNP_ENABLED_MASK) + +/* + * Macros using the bit position and mask to set/extract the next + * and current id nibbles within the next_curr_id field of the + * content descriptor header block. Note that these are defined + * in the common header file, as they are used by compression, cipher + * and authentication. + * + * cd_ctrl_hdr_t Content descriptor control block header pointer. + * val Value of the field being set.
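These builders and accessors are what the hw17 code earlier in this commit uses. For example, qat_hw17_init_comn_req_hdr() marks the content descriptor field as a 64-bit address and the source/destination buffers as scatter-gather lists:

	req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD(
	    COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL);
	/* FW_COMN_PTR_TYPE_GET() on this value yields COMN_PTR_TYPE_SGL. */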
+ */ +#define FW_COMN_NEXT_ID_BITPOS 4 +#define FW_COMN_NEXT_ID_MASK 0xF0 +#define FW_COMN_CURR_ID_BITPOS 0 +#define FW_COMN_CURR_ID_MASK 0x0F + +#define FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id) & FW_COMN_NEXT_ID_MASK) >> \ + (FW_COMN_NEXT_ID_BITPOS)) + +#define FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ + ((cd_ctrl_hdr_t)->next_curr_id) = \ + ((((cd_ctrl_hdr_t)->next_curr_id) & FW_COMN_CURR_ID_MASK) | \ + ((val << FW_COMN_NEXT_ID_BITPOS) & \ + FW_COMN_NEXT_ID_MASK)) + +#define FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id) & FW_COMN_CURR_ID_MASK) + +#define FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ + ((cd_ctrl_hdr_t)->next_curr_id) = \ + ((((cd_ctrl_hdr_t)->next_curr_id) & FW_COMN_NEXT_ID_MASK) | \ + ((val)&FW_COMN_CURR_ID_MASK)) + +/* + * Common Status Field Definition. The bit offsets below are within the COMMON + * RESPONSE status field, assumed to be 8 bits wide. In the case of the PKE + * response (which follows the CPM 1.5 message format), the status field is 16 + * bits wide. + * The status flags are contained within the most significant byte and align + * with the diagram below. Please therefore refer to the service-specific PKE + * header file for the appropriate macro definition to extract the PKE status + * flag from the PKE response, which assumes that a word is passed to the + * macro. + * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- + + * | Bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- + + * | Flags | Crypto | Pke | Cmp | Xlat | EOLB | UnSupReq | Rsvd | XltWaApply | + * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- + + * Note: + * For the service specific status bit definitions refer to service header files + * E.g. Crypto Status bit refers to Symmetric Crypto, Key Generation, and NRBG + * Requests' Status. Unused bits, e.g. reserved bits, need to have been forced to + * 0.
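The next_curr_id helpers above implement the slice chaining used by qat_hw17_crypto_setup_cipher_ctrl() and qat_hw17_crypto_setup_auth_ctrl() earlier in this commit. A sketch for an encrypt-then-authenticate content descriptor (the FW_SLICE_* values come from the fw_slice enumeration defined elsewhere in this header):

	/* cipher_cd_ctrl points into the request template's cd_ctrl block. */
	FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER);
	FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, FW_SLICE_AUTH);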
+
+/*
+ * Common Status Field Definition. The bit offsets below are within the
+ * COMMON RESPONSE status field, assumed to be 8 bits wide. In the case of
+ * the PKE response (which follows the CPM 1.5 message format), the status
+ * field is 16 bits wide.
+ * The status flags are contained within the most significant byte and align
+ * with the diagram below. Please therefore refer to the service-specific PKE
+ * header file for the appropriate macro definition to extract the PKE status
+ * flag from the PKE response, which assumes that a word is passed to the
+ * macro.
+ *
+ * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- +
+ * |  Bit  |    7   |  6  |  5  |   4  |   3  |     2    |   1  |      0     |
+ * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- +
+ * | Flags | Crypto | Pke | Cmp | Xlat | EOLB | UnSupReq | Rsvd | XltWaApply |
+ * + ===== + ------ + --- + --- + ---- + ---- + -------- + ---- + ---------- +
+ *
+ * Note:
+ * For the service-specific status bit definitions refer to the service
+ * header files, e.g. the Crypto status bit refers to the status of
+ * Symmetric Crypto, Key Generation, and NRBG requests. Unused bits,
+ * e.g. reserved bits, need to have been forced to 0.
+ */
+
+#define COMN_RESP_CRYPTO_STATUS_BITPOS 7
+/* Starting bit position indicating the Response for Crypto service flag */
+
+#define COMN_RESP_CRYPTO_STATUS_MASK 0x1
+/* One bit mask used to determine the Crypto status */
+
+#define COMN_RESP_PKE_STATUS_BITPOS 6
+/* Starting bit position indicating the Response for PKE service flag */
+
+#define COMN_RESP_PKE_STATUS_MASK 0x1
+/* One bit mask used to determine the PKE status */
+
+#define COMN_RESP_CMP_STATUS_BITPOS 5
+/* Starting bit position indicating the Response for Compression service
+ * flag */
+
+#define COMN_RESP_CMP_STATUS_MASK 0x1
+/* One bit mask used to determine the Compression status */
+
+#define COMN_RESP_XLAT_STATUS_BITPOS 4
+/* Starting bit position indicating the Response for Xlat service flag */
+
+#define COMN_RESP_XLAT_STATUS_MASK 0x1
+/* One bit mask used to determine the Translator status */
+
+#define COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+/* Starting bit position indicating the last block in a deflate stream for
+   the compression service flag */
+
+#define COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+/* One bit mask used to determine the last block in a deflate stream
+   status */
+
+#define COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
+/* Starting bit position indicating an unsupported service request flag */
+
+#define COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+/* One bit mask used to determine the unsupported service request status */
+
+#define COMN_RESP_XLT_WA_APPLIED_BITPOS 0
+/* Bit position indicating a firmware workaround was applied to translation */
+
+#define COMN_RESP_XLT_WA_APPLIED_MASK 0x1
+/* One bit mask */
+
+/*
+ * Macro that must be used when building the status
+ * for the common response
+ *
+ * crypto Value of the Crypto Service status flag
+ * pke Value of the PKE Service status flag
+ * comp Value of the Compression Service status flag
+ * xlat Value of the Xlator status flag
+ * eolb Value of the Compression End of Last Block status flag
+ * unsupp Value of the Unsupported Request flag
+ * xlt_wa Value of the Translation WA marker
+ */
+#define FW_COMN_RESP_STATUS_BUILD( \
+    crypto, pke, comp, xlat, eolb, unsupp, xlt_wa) \
+	((((crypto)&COMN_RESP_CRYPTO_STATUS_MASK) \
+	     << COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+	 (((pke)&COMN_RESP_PKE_STATUS_MASK) \
+	     << COMN_RESP_PKE_STATUS_BITPOS) | \
+	 (((xlt_wa)&COMN_RESP_XLT_WA_APPLIED_MASK) \
+	     << COMN_RESP_XLT_WA_APPLIED_BITPOS) | \
+	 (((comp)&COMN_RESP_CMP_STATUS_MASK) \
+	     << COMN_RESP_CMP_STATUS_BITPOS) | \
+	 (((xlat)&COMN_RESP_XLAT_STATUS_MASK) \
+	     << COMN_RESP_XLAT_STATUS_BITPOS) | \
+	 (((eolb)&COMN_RESP_CMP_END_OF_LAST_BLK_MASK) \
+	     << COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS) | \
+	 (((unsupp)&COMN_RESP_UNSUPPORTED_REQUEST_MASK) \
+	     << COMN_RESP_UNSUPPORTED_REQUEST_BITPOS))
+
+/*
+ * Macro for extraction of the Crypto bit from the status
+ *
+ * status Status to extract the status bit from
+ */
+#define FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+	FIELD_GET(status, \
+	    COMN_RESP_CRYPTO_STATUS_BITPOS, \
+	    COMN_RESP_CRYPTO_STATUS_MASK)
+
+/*
+ * Macro for extraction of the PKE bit from the status
+ *
+ * status Status to extract the status bit from
+ */
+#define FW_COMN_RESP_PKE_STAT_GET(status) \
+	FIELD_GET(status, \
+	    COMN_RESP_PKE_STATUS_BITPOS, \
+	    COMN_RESP_PKE_STATUS_MASK)
+
+/*
+ * Macro for extraction of the Compression bit from the status
+ *
+ * status Status to extract the status bit from
+ */
+#define FW_COMN_RESP_CMP_STAT_GET(status) \
+	FIELD_GET(status, \
+	    COMN_RESP_CMP_STATUS_BITPOS, \
+	    COMN_RESP_CMP_STATUS_MASK)
+
+/*
+ * Macro for extraction of the Translator bit from the status
+ *
+ *
status Status to extract the status bit from + */ +#define FW_COMN_RESP_XLAT_STAT_GET(status) \ + FIELD_GET(status, \ + COMN_RESP_XLAT_STATUS_BITPOS, \ + COMN_RESP_XLAT_STATUS_MASK) + +/* + * Macro for extraction of the Translation Workaround Applied bit from the + * status + * + * status Status to extract the status bit from + */ +#define FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \ + FIELD_GET(status, \ + COMN_RESP_XLT_WA_APPLIED_BITPOS, \ + COMN_RESP_XLT_WA_APPLIED_MASK) + +/* + * Macro for extraction of the end of compression block bit from the + * status + * + * status + * Status to extract the status bit from + */ +#define FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ + FIELD_GET(status, \ + COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ + COMN_RESP_CMP_END_OF_LAST_BLK_MASK) + +/* + * Macro for extraction of the Unsupported request from the status + * + * status + * Status to extract the status bit from + */ +#define FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \ + FIELD_GET(status, \ + COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \ + COMN_RESP_UNSUPPORTED_REQUEST_MASK) + +#define FW_COMN_STATUS_FLAG_OK 0 +/* Definition of successful processing of a request */ + +#define FW_COMN_STATUS_FLAG_ERROR 1 +/* Definition of erroneous processing of a request */ + +#define FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 +/* Final Deflate block of a compression request not completed */ + +#define FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 +/* Final Deflate block of a compression request completed */ + +#define ERR_CODE_NO_ERROR 0 +/* Error Code constant value for no error */ + +#define ERR_CODE_INVALID_BLOCK_TYPE -1 +/* Invalid block type (type == 3)*/ + +#define ERR_CODE_NO_MATCH_ONES_COMP -2 +/* Stored block length does not match one's complement */ + +#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 +/* Too many length or distance codes */ + +#define ERR_CODE_INCOMPLETE_LEN -4 +/* Code lengths codes incomplete */ + +#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 +/* Repeat lengths with no first length */ + +#define ERR_CODE_RPT_GT_SPEC_LEN -6 +/* Repeat more than specified lengths */ + +#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 +/* Invalid lit/len code lengths */ + +#define ERR_CODE_INV_DIS_CODE_LEN -8 +/* Invalid distance code lengths */ + +#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 +/* Invalid lit/len or distance code in fixed/dynamic block */ + +#define ERR_CODE_DIS_TOO_FAR_BACK -10 +/* Distance too far back in fixed or dynamic block */ + +/* Common Error code definitions */ +#define ERR_CODE_OVERFLOW_ERROR -11 +/* Error Code constant value for overflow error */ + +#define ERR_CODE_SOFT_ERROR -12 +/* Error Code constant value for soft error */ + +#define ERR_CODE_FATAL_ERROR -13 +/* Error Code constant value for hard/fatal error */ + +#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14 +/* Error Code constant for compression output corruption */ + +#define ERR_CODE_HW_INCOMPLETE_FILE -15 +/* Error Code constant value for incomplete file hardware error */ + +#define ERR_CODE_SSM_ERROR -16 +/* Error Code constant value for error detected by SSM e.g. slice hang */ + +#define ERR_CODE_ENDPOINT_ERROR -17 +/* Error Code constant value for error detected by PCIe Endpoint, e.g. 
push
+ * data error */
+
+#define ERR_CODE_CNV_ERROR -18
+/* Error Code constant value for a CnV failure */
+
+#define ERR_CODE_EMPTY_DYM_BLOCK -19
+/* Error Code constant value for submission of an empty dynamic stored block
+ * to the slice */
+
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20
+/* Error Code constant for an invalid handle in the KPT crypto service */
+
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21
+/* Error Code constant for a failed HMAC in the KPT crypto service */
+
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22
+/* Error Code constant for an invalid wrapping algo in the KPT crypto
+ * service */
+
+#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23
+/* Error Code constant for a DRNG seed that is not loaded in the KPT ECDSA
+ * sign r/s service */
+
+#define FW_LA_ICV_VER_STATUS_PASS FW_COMN_STATUS_FLAG_OK
+/* Status flag indicating that the ICV verification passed */
+
+#define FW_LA_ICV_VER_STATUS_FAIL FW_COMN_STATUS_FLAG_ERROR
+/* Status flag indicating that the ICV verification failed */
+
+#define FW_LA_TRNG_STATUS_PASS FW_COMN_STATUS_FLAG_OK
+/* Status flag indicating that the TRNG returned valid entropy data */
+
+#define FW_LA_TRNG_STATUS_FAIL FW_COMN_STATUS_FLAG_ERROR
+/* Status flag indicating that the TRNG command failed. */
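+
+/*
+ * Illustrative sketch, not taken from the original sources: decoding the
+ * 8-bit common response status with the extraction macros above.
+ * 'resp_status' is assumed to be the status byte taken from the common
+ * response header.
+ *
+ *	static inline int
+ *	fw_la_resp_ok(uint8_t resp_status)
+ *	{
+ *		// crypto slice error or unsupported request => failure
+ *		if (FW_COMN_RESP_CRYPTO_STAT_GET(resp_status) !=
+ *		    FW_COMN_STATUS_FLAG_OK)
+ *			return (0);
+ *		if (FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(resp_status))
+ *			return (0);
+ *		return (1);
+ *	}
+ */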
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Definition of the full bulk processing request structure.
+ * Used for hash, cipher, hash-cipher and authentication-encryption
+ * requests etc.
+ */
+struct fw_la_bulk_req
+{
+	/* LWs 0-1 */
+	struct fw_comn_req_hdr comn_hdr;
+	/* Common request header - for the Service Command Id,
+	 * use the service-specific Crypto Command Id.
+	 * Service Specific Flags - use the Symmetric Crypto Command Flags
+	 * (all of cipher, auth, SSL3, TLS and MGF,
+	 * excluding TRNG - field unused) */
+
+	/* LWs 2-5 */
+	union fw_comn_req_hdr_cd_pars cd_pars;
+	/* Common request content descriptor field which points either to a
+	 * content descriptor parameter block or contains the
+	 * service-specific data itself. */
+
+	/* LWs 6-13 */
+	struct fw_comn_req_mid comn_mid;
+	/* Common request middle section */
+
+	/* LWs 14-26 */
+	struct fw_comn_req_rqpars serv_specif_rqpars;
+	/* Common request service-specific parameter field */
+
+	/* LWs 27-31 */
+	struct fw_comn_req_cd_ctrl cd_ctrl;
+	/* Common request content descriptor control block -
+	 * this field is service-specific */
+
+};
+
+/* clang-format off */
+
+/*
+ * LA BULK (SYMMETRIC CRYPTO) COMMAND FLAGS
+ *
+ * + ===== + ---------- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- +
+ * | Bit   |  [15:13]   |  12   |  11   |  10   |  7-9  |   6   |   5   |   4   |   3   |   2   |  1-0  |
+ * + ===== + ---------- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- +
+ * | Flags | Resvd Bits | ZUC   | GcmIV |Digest | Prot  | Cmp   | Rtn   | Upd   | Ciph/ | CiphIV| Part- |
+ * |       |     =0     | Prot  | Len   | In Buf| flgs  | Auth  | Auth  | State | Auth  | Field | ial   |
+ * + ===== + ---------- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- + ----- +
+ */
+
+/* clang-format on */
+
+/* Private defines */
+
+#define FW_LA_ZUC_3G_PROTO __BIT(12)
+/* Indicates ZUC processing for an encrypt command.
+ * Must be set for Cipher-only, Cipher + Auth and Auth-only */
+
+#define FW_LA_GCM_IV_LEN_12_OCTETS __BIT(11)
+/* Indicates that the IV Length for the GCM protocol is 96 Bits (12 Octets).
+ * If set, the FW does the padding to compute CTR0 */
+
+#define FW_LA_DIGEST_IN_BUFFER __BIT(10)
+/* Flag indicating that the authentication digest is stored in, or extracted
+ * from, the source buffer. The Auth Result Pointer will be ignored in this
+ * case. */
+
+#define FW_LA_PROTO __BITS(7, 9)
+#define FW_LA_PROTO_SNOW_3G __BIT(9)
+/* Indicates SNOW_3G processing for an encrypt command */
+#define FW_LA_PROTO_GCM __BIT(8)
+/* Indicates GCM processing for an auth_encrypt command */
+#define FW_LA_PROTO_CCM __BIT(7)
+/* Indicates CCM processing for an auth_encrypt command */
+#define FW_LA_PROTO_NONE 0
+/* Indicates no specific protocol processing for the command */
+
+#define FW_LA_CMP_AUTH_RES __BIT(6)
+/* Flag representing the need to compare the auth result data to the expected
+ * value in DRAM at the auth_address. */
+
+#define FW_LA_RET_AUTH_RES __BIT(5)
+/* Flag representing the need to return the auth result data to DRAM after
+ * the request processing is complete */
+
+#define FW_LA_UPDATE_STATE __BIT(4)
+/* Flag representing the need to update the state data in DRAM after the
+ * request processing is complete */
+
+#define FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP __BIT(3)
+/* Flag representing the Cipher/Auth Config Offset Type, where the offset
+ * is contained in the SHRAM constants page. When the SHRAM constants page
+ * is not used for cipher/auth configuration, the Content Descriptor
+ * pointer field must be a pointer (as opposed to a 16-byte key), since
+ * the block pointed to must contain both the slice config and the key */
+
+#define FW_CIPH_IV_16BYTE_DATA __BIT(2)
+/* Flag representing the Cipher IV field contents as a 16-byte data array.
+ * Otherwise the Cipher IV field contents are a 64-bit pointer */
+
+#define FW_LA_PARTIAL __BITS(0, 1)
+#define FW_LA_PARTIAL_NONE 0
+/* Flag representing no need for partial processing, i.e.
+ * the entire packet is processed in the current command */
+#define FW_LA_PARTIAL_START 1
+/* Flag representing the first chunk of the partial packet */
+#define FW_LA_PARTIAL_MID 3
+/* Flag representing a middle chunk of the partial packet */
+#define FW_LA_PARTIAL_END 2
+/* Flag representing the final/end chunk of the partial packet */
+
+/* The table below defines the meaning of prefix_addr & hash_state_sz in
+ * the case of partial processing. See the HLD for further details.
+ *
+ * + ======= + ------------------------- + ----------------------- +
+ * | Partial | Prefix Addr               | Hash State Sz           |
+ * | State   |                           |                         |
+ * + ======= + ------------------------- + ----------------------- +
+ * | FULL    | Points to the prefix data | Prefix size as below.   |
+ * |         |                           | No update of state      |
+ * + ======= + ------------------------- + ----------------------- +
+ * | SOP     | Points to the prefix      | = inner prefix rounded  |
+ * |         | data. State is updated    | to qwrds + outer prefix |
+ * |         | at prefix_addr - state_sz | rounded to qwrds. The   |
+ * |         | - 8 (counter size)        | writeback state sz      |
+ * |         |                           | comes from the CD       |
+ * + ======= + ------------------------- + ----------------------- +
+ * | MOP     | Points to the state data. | State size rounded to   |
+ * |         | Updated state written to  | num qwrds + 8 (for the  |
+ * |         | same location             | counter) + inner prefix |
+ * |         |                           | rounded to qwrds +      |
+ * |         |                           | outer prefix rounded to |
+ * |         |                           | qwrds.                  |
+ * + ======= + ------------------------- + ----------------------- +
+ * | EOP     | Points to the state data  | State size rounded to   |
+ * |         |                           | num qwrds + 8 (for the  |
+ * |         |                           | counter) + inner prefix |
+ * |         |                           | rounded to qwrds +      |
+ * |         |                           | outer prefix rounded to |
+ * |         |                           | qwrds.                  |
+ * + ======= + ------------------------- + ----------------------- +
+ *
+ * Notes:
+ *
+ * - If EOP is set, it is assumed that no state update is to be performed.
+ *   However, it is the client's responsibility to set the update_state flag
+ *   correctly, i.e. not set for the EOP or full packet cases, and only set
+ *   for SOP and MOP with no EOP flag.
+ * - SOP takes precedence over MOP and EOP, i.e. in the calculation of
+ *   the address to which the state is written back.
+ * - The prefix address must be aligned to at least an 8-byte boundary.
+ */
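+
+/*
+ * Illustrative sketch, not taken from the original sources: LA flag settings
+ * for a multi-part partial hash, following the SOP/MOP/EOP rules above.
+ * 'flags' is an assumed caller-owned 16-bit flags word; the SET macros used
+ * are defined further below in this header.
+ *
+ *	uint16_t flags = 0;
+ *	// first chunk (SOP): state is written back after processing
+ *	FW_LA_PARTIAL_SET(flags, FW_LA_PARTIAL_START);
+ *	FW_LA_UPDATE_STATE_SET(flags, 1);
+ *	// ... middle chunks use FW_LA_PARTIAL_MID with update_state set ...
+ *	// final chunk (EOP): no state update is performed
+ *	FW_LA_PARTIAL_SET(flags, FW_LA_PARTIAL_END);
+ *	FW_LA_UPDATE_STATE_SET(flags, 0);
+ */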
+
+/* Macros for extracting field bits */
+/*
+ * Macro for extraction of the Cipher IV field contents (bit 2)
+ *
+ * flags Flags to extract the Cipher IV field contents from
+ */
+#define FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+	FIELD_GET(flags, LA_CIPH_IV_FLD_BITPOS, LA_CIPH_IV_FLD_MASK)
+
+/*
+ * Macro for extraction of the Cipher/Auth Config
+ * offset type (bit 3)
+ *
+ * flags Flags to extract the Cipher/Auth Config offset type from
+ */
+#define FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+	FIELD_GET(flags, \
+	    LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+	    LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+/*
+ * Macro for extraction of the ZUC protocol bit
+ * information (bit 12)
+ *
+ * flags Flags to extract the ZUC protocol bit from
+ */
+#define FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+	FIELD_GET(flags, \
+	    FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+	    FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+/*
+ * Macro for extraction of the "GCM IV Len is 12 Octets / 96 Bits"
+ * information (bit 11)
+ *
+ * flags Flags to extract the GCM IV length from
+ */
+#define FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+	FIELD_GET( \
+	    flags, LA_GCM_IV_LEN_FLAG_BITPOS, LA_GCM_IV_LEN_FLAG_MASK)
+
+/*
+ * Macro for extraction of the LA protocol state (bits 9-7)
+ *
+ * flags Flags to extract the protocol state from
+ */
+#define FW_LA_PROTO_GET(flags) \
+	FIELD_GET(flags, LA_PROTO_BITPOS, LA_PROTO_MASK)
+
+/*
+ * Macro for extraction of the "compare auth" state (bit 6)
+ *
+ * flags Flags to extract the compare auth result state from
+ */
+#define FW_LA_CMP_AUTH_GET(flags) \
+	FIELD_GET(flags, LA_CMP_AUTH_RES_BITPOS, LA_CMP_AUTH_RES_MASK)
+
+/*
+ * Macro for extraction of the "return auth" state (bit 5)
+ *
+ * flags Flags to extract the return auth result state from
+ */
+#define FW_LA_RET_AUTH_GET(flags) \
+	FIELD_GET(flags, LA_RET_AUTH_RES_BITPOS, LA_RET_AUTH_RES_MASK)
+
+/*
+ * Macro for extraction of the "digest in buffer" state (bit 10)
+ *
+ * flags Flags to extract the digest in buffer state from
+ */
+#define FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+	FIELD_GET( \
+	    flags, LA_DIGEST_IN_BUFFER_BITPOS, LA_DIGEST_IN_BUFFER_MASK)
+
+/*
+ * Macro for extraction of the update content state value
 (bit 4)
+ *
+ * flags Flags to extract the update content state bit from
+ */
+#define FW_LA_UPDATE_STATE_GET(flags) \
+	FIELD_GET(flags, LA_UPDATE_STATE_BITPOS, LA_UPDATE_STATE_MASK)
+
+/*
+ * Macro for extraction of the "partial" packet state (bits 1-0)
+ *
+ * flags Flags to extract the partial state from
+ */
+#define FW_LA_PARTIAL_GET(flags) \
+	FIELD_GET(flags, LA_PARTIAL_BITPOS, LA_PARTIAL_MASK)
+
+/* Macros for setting field bits */
+/*
+ * Macro for setting the Cipher IV field contents
+ *
+ * flags Flags to set with the Cipher IV field contents
+ * val Field contents indicator value
+ */
+#define FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+	FIELD_SET( \
+	    flags, val, LA_CIPH_IV_FLD_BITPOS, LA_CIPH_IV_FLD_MASK)
+
+/*
+ * Macro for setting the Cipher/Auth Config
+ * offset type
+ *
+ * flags Flags to set the Cipher/Auth Config offset type in
+ * val Offset type value
+ */
+#define FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+	FIELD_SET(flags, \
+	    val, \
+	    LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+	    LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+/*
+ * Macro for setting the ZUC protocol flag
+ *
+ * flags Flags to set the ZUC protocol flag in
+ * val Protocol value
+ */
+#define FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+	FIELD_SET(flags, \
+	    val, \
+	    FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+	    FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+/*
+ * Macro for setting the GCM IV length flag state
+ *
+ * flags Flags to set the GCM IV length flag state in
+ * val Protocol value
+ */
+#define FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+	FIELD_SET(flags, \
+	    val, \
+	    LA_GCM_IV_LEN_FLAG_BITPOS, \
+	    LA_GCM_IV_LEN_FLAG_MASK)
+
+/*
+ * Macro for setting the LA protocol flag state
+ *
+ * flags Flags to set the protocol state in
+ * val Protocol value
+ */
+#define FW_LA_PROTO_SET(flags, val) \
+	FIELD_SET(flags, val, LA_PROTO_BITPOS, LA_PROTO_MASK)
+
+/*
+ * Macro for setting the "compare auth" flag state
+ *
+ * flags Flags to set the compare auth result state in
+ * val Compare Auth value
+ */
+#define FW_LA_CMP_AUTH_SET(flags, val) \
+	FIELD_SET( \
+	    flags, val, LA_CMP_AUTH_RES_BITPOS, LA_CMP_AUTH_RES_MASK)
+
+/*
+ * Macro for setting the "return auth" flag state
+ *
+ * flags Flags to set the return auth result state in
+ * val Return Auth value
+ */
+#define FW_LA_RET_AUTH_SET(flags, val) \
+	FIELD_SET( \
+	    flags, val, LA_RET_AUTH_RES_BITPOS, LA_RET_AUTH_RES_MASK)
+
+/*
+ * Macro for setting the "digest in buffer" flag state
+ *
+ * flags Flags to set the digest in buffer state in
+ * val Digest in buffer value
+ */
+#define FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+	FIELD_SET(flags, \
+	    val, \
+	    LA_DIGEST_IN_BUFFER_BITPOS, \
+	    LA_DIGEST_IN_BUFFER_MASK)
+
+/*
+ * Macro for setting the "update state" flag value
+ *
+ * flags Flags to set the update content state in
+ * val Update Content State flag value
+ */
+#define FW_LA_UPDATE_STATE_SET(flags, val) \
+	FIELD_SET( \
+	    flags, val, LA_UPDATE_STATE_BITPOS, LA_UPDATE_STATE_MASK)
+
+/*
+ * Macro for setting the "partial" packet flag state
+ *
+ * flags Flags to set the partial state in
+ * val Partial state value
+ */
+#define FW_LA_PARTIAL_SET(flags, val) \
+	FIELD_SET(flags, val, LA_PARTIAL_BITPOS, LA_PARTIAL_MASK)
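+
+/*
+ * Illustrative sketch, not taken from the original sources: composing the LA
+ * command flags for a single-shot AES-GCM request with the digest carried in
+ * the source buffer and a 12-octet IV. This uses only the absolute-bit
+ * defines introduced above, OR-ed directly into the flags word.
+ *
+ *	uint16_t la_flags = FW_LA_PROTO_GCM | FW_LA_GCM_IV_LEN_12_OCTETS |
+ *	    FW_LA_DIGEST_IN_BUFFER | FW_LA_PARTIAL_NONE;
+ */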
+
+/*
+ * Definition of the Cipher header Content Descriptor pars block,
+ * i.e. the cipher processing header cd pars block.
+ * The structure is a service-specific implementation of the common
+ * 'fw_comn_req_hdr_cd_pars_s' structure.
+ */
+union fw_cipher_req_hdr_cd_pars {
+	/* LWs 2-5 */
+	struct
+	{
+		uint64_t content_desc_addr;
+		/* Address of the content descriptor */
+
+		uint16_t content_desc_resrvd1;
+		/* Content descriptor reserved field */
+
+		uint8_t content_desc_params_sz;
+		/* Size of the content descriptor parameters in quad words.
+		 * These parameters describe the session setup configuration
+		 * info for the slices that this request relies upon, i.e. the
+		 * configuration word and cipher key needed by the cipher
+		 * slice if there is a request for cipher processing. */
+
+		uint8_t content_desc_hdr_resrvd2;
+		/* Content descriptor reserved field */
+
+		uint32_t content_desc_resrvd3;
+		/* Content descriptor reserved field */
+	} s;
+
+	struct
+	{
+		uint32_t cipher_key_array[FW_NUM_LONGWORDS_4];
+		/* Cipher Key Array */
+
+	} s1;
+
+};
+
+/*
+ * Definition of the Authentication header Content Descriptor pars block,
+ * i.e. the authentication processing header cd pars block.
+ */
+/* Note: Authentication uses the common 'fw_comn_req_hdr_cd_pars_s'
+ * structure - similarly, it is also used by SSL3, TLS and MGF. Only cipher
+ * and cipher + authentication require service-specific implementations of
+ * the structure */
+
+/*
+ * Definition of the Cipher + Auth header Content Descriptor pars block,
+ * i.e. the cipher + auth processing header cd pars block.
+ * The structure is a service-specific implementation of the common
+ * 'fw_comn_req_hdr_cd_pars_s' structure.
+ */
+union fw_cipher_auth_req_hdr_cd_pars {
+	/* LWs 2-5 */
+	struct
+	{
+		uint64_t content_desc_addr;
+		/* Address of the content descriptor */
+
+		uint16_t content_desc_resrvd1;
+		/* Content descriptor reserved field */
+
+		uint8_t content_desc_params_sz;
+		/* Size of the content descriptor parameters in quad words.
+		 * These parameters describe the session setup configuration
+		 * info for the slices that this request relies upon, i.e. the
+		 * configuration word and cipher key needed by the cipher
+		 * slice if there is a request for cipher processing. */
+
+		uint8_t content_desc_hdr_resrvd2;
+		/* Content descriptor reserved field */
+
+		uint32_t content_desc_resrvd3;
+		/* Content descriptor reserved field */
+	} s;
+
+	struct
+	{
+		uint32_t cipher_key_array[FW_NUM_LONGWORDS_4];
+		/* Cipher Key Array */
+
+	} sl;
+
+};
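+
+/*
+ * Illustrative sketch, not taken from the original sources: populating the
+ * cd pars block. With a SHRAM-based cipher/auth config (see
+ * FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP above), the 16-byte key may be placed
+ * inline; otherwise the block must carry a pointer to a content descriptor
+ * holding both the slice config and the key. 'use_shram_cfg', 'key' and
+ * 'cd_dma_addr' are assumed caller state for illustration only.
+ *
+ *	union fw_cipher_req_hdr_cd_pars cd_pars = { 0 };
+ *	if (use_shram_cfg)
+ *		memcpy(cd_pars.s1.cipher_key_array, key, 16);
+ *	else
+ *		cd_pars.s.content_desc_addr = cd_dma_addr;
+ */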
+
+/*
+ * Cipher content descriptor control block (header)
+ * Definition of the service-specific cipher control block header
+ * structure. This header forms part of the content descriptor
+ * block incorporating LWs 27-31, as defined by the common base
+ * parameters structure.
+ */
+struct fw_cipher_cd_ctrl_hdr
+{
+	/* LW 27 */
+	uint8_t cipher_state_sz;
+	/* State size in quad words of the cipher algorithm used in this
+	 * session. Set to zero if the algorithm doesn't provide any state */
+
+	uint8_t cipher_key_sz;
+	/* Key size in quad words of the cipher algorithm used in this
+	 * session */
+
+	uint8_t cipher_cfg_offset;
+	/* Quad word offset from the content descriptor parameters address,
+	 * i.e. (content_address + (cd_hdr_sz << 3)), to the parameters for
+	 * the cipher processing */
+
+	uint8_t next_curr_id;
+	/* This field combines the next and current id (each four bits) -
+	 * the next id is the most significant nibble.
+	 * Next Id: Set to the next slice to pass the ciphered data through.
+	 * Set to FW_SLICE_DRAM_WR if the data is not to go through
+	 * any more slices after cipher.
+	 * Current Id: Initialised with the cipher slice type */
+
+	/* LW 28 */
+	uint8_t cipher_padding_sz;
+	/* State padding size in quad words. Set to 0 if no padding is
+	 * required. */
+
+	uint8_t resrvd1;
+	uint16_t resrvd2;
+	/* Reserved bytes to bring the struct to the word boundary, used by
+	 * authentication. MUST be set to 0 */
+
+	/* LWs 29-31 */
+	uint32_t resrvd3[FW_NUM_LONGWORDS_3];
+	/* Reserved bytes used by authentication. MUST be set to 0 */
+
+};
+
+/*
+ * Authentication content descriptor control block (header)
+ * Definition of the service-specific authentication control block
+ * header structure. This header forms part of the content descriptor
+ * block incorporating LWs 27-31, as defined by the common base
+ * parameters structure, the first portion of which is reserved for
+ * cipher.
+ */
+struct fw_auth_cd_ctrl_hdr
+{
+	/* LW 27 */
+	uint32_t resrvd1;
+	/* Reserved bytes, used by cipher only. MUST be set to 0 */
+
+	/* LW 28 */
+	uint8_t resrvd2;
+	/* Reserved byte, used by cipher only. MUST be set to 0 */
+
+	uint8_t hash_flags;
+	/* General flags defining the processing to perform. 0 is normal
+	 * processing and 1 means there is a nested hash processing loop to
+	 * go through */
+
+	uint8_t hash_cfg_offset;
+	/* Quad word offset from the content descriptor parameters address to
+	 * the parameters for the auth processing */
+
+	uint8_t next_curr_id;
+	/* This field combines the next and current id (each four bits) -
+	 * the next id is the most significant nibble.
+	 * Next Id: Set to the next slice to pass the authentication data
+	 * through. Set to FW_SLICE_DRAM_WR if the data is not to go through
+	 * any more slices after authentication.
+	 * Current Id: Initialised with the authentication slice type */
+
+	/* LW 29 */
+	uint8_t resrvd3;
+	/* Now a reserved field. MUST be set to 0 */
+
+	uint8_t outer_prefix_sz;
+	/* Size in bytes of the outer prefix data */
+
+	uint8_t final_sz;
+	/* Size in bytes of the digest to be returned to the client if
+	 * requested */
+
+	uint8_t inner_res_sz;
+	/* Size in bytes of the digest from the inner hash algorithm */
+
+	/* LW 30 */
+	uint8_t resrvd4;
+	/* Now a reserved field. MUST be set to zero. */
+
+	uint8_t inner_state1_sz;
+	/* Size in bytes of the inner hash state1 data. Must be a qword
+	 * multiple */
+
+	uint8_t inner_state2_offset;
+	/* Quad word offset from the content descriptor parameters pointer to
+	 * the inner state2 value */
+
+	uint8_t inner_state2_sz;
+	/* Size in bytes of the inner hash state2 data. Must be a qword
+	 * multiple */
+
+	/* LW 31 */
+	uint8_t outer_config_offset;
+	/* Quad word offset from the content descriptor parameters pointer to
+	 * the outer configuration information */
+
+	uint8_t outer_state1_sz;
+	/* Size in bytes of the outer state1 value */
+
+	uint8_t outer_res_sz;
+	/* Size in bytes of the digest from the outer auth algorithm */
+
+	uint8_t outer_prefix_offset;
+	/* Quad word offset from the start of the inner prefix data to the
+	 * outer prefix information. Should equal the rounded inner prefix
+	 * size, converted to qwords */
+
+};
+
+/*
+ * Cipher + Authentication content descriptor control block header
+ * Definition of both service-specific cipher + authentication control
+ * block header structures. This header forms part of the content
+ * descriptor block incorporating LWs 27-31, as defined by the common
+ * base parameters structure.
+ */
+struct fw_cipher_auth_cd_ctrl_hdr
+{
+	/* LW 27 */
+	uint8_t cipher_state_sz;
+	/* State size in quad words of the cipher algorithm used in this
+	 * session.
+	 * Set to zero if the algorithm doesn't provide any state */
+
+	uint8_t cipher_key_sz;
+	/* Key size in quad words of the cipher algorithm used in this
+	 * session */
+
+	uint8_t cipher_cfg_offset;
+	/* Quad word offset from the content descriptor parameters address,
+	 * i.e. (content_address + (cd_hdr_sz << 3)), to the parameters for
+	 * the cipher processing */
+
+	uint8_t next_curr_id_cipher;
+	/* This field combines the next and current id (each four bits) -
+	 * the next id is the most significant nibble.
+	 * Next Id: Set to the next slice to pass the ciphered data through.
+	 * Set to FW_SLICE_DRAM_WR if the data is not to go through
+	 * any more slices after cipher.
+	 * Current Id: Initialised with the cipher slice type */
+
+	/* LW 28 */
+	uint8_t cipher_padding_sz;
+	/* State padding size in quad words. Set to 0 if no padding is
+	 * required. */
+
+	uint8_t hash_flags;
+	/* General flags defining the processing to perform. 0 is normal
+	 * processing and 1 means there is a nested hash processing loop to
+	 * go through */
+
+	uint8_t hash_cfg_offset;
+	/* Quad word offset from the content descriptor parameters address to
+	 * the parameters for the auth processing */
+
+	uint8_t next_curr_id_auth;
+	/* This field combines the next and current id (each four bits) -
+	 * the next id is the most significant nibble.
+	 * Next Id: Set to the next slice to pass the authentication data
+	 * through. Set to FW_SLICE_DRAM_WR if the data is not to go through
+	 * any more slices after authentication.
+	 * Current Id: Initialised with the authentication slice type */
+
+	/* LW 29 */
+	uint8_t resrvd1;
+	/* Reserved field. MUST be set to 0 */
+
+	uint8_t outer_prefix_sz;
+	/* Size in bytes of the outer prefix data */
+
+	uint8_t final_sz;
+	/* Size in bytes of the digest to be returned to the client if
+	 * requested */
+
+	uint8_t inner_res_sz;
+	/* Size in bytes of the digest from the inner hash algorithm */
+
+	/* LW 30 */
+	uint8_t resrvd2;
+	/* Now a reserved field. MUST be set to zero. */
+
+	uint8_t inner_state1_sz;
+	/* Size in bytes of the inner hash state1 data. Must be a qword
+	 * multiple */
+
+	uint8_t inner_state2_offset;
+	/* Quad word offset from the content descriptor parameters pointer to
+	 * the inner state2 value */
+
+	uint8_t inner_state2_sz;
+	/* Size in bytes of the inner hash state2 data. Must be a qword
+	 * multiple */
+
+	/* LW 31 */
+	uint8_t outer_config_offset;
+	/* Quad word offset from the content descriptor parameters pointer to
+	 * the outer configuration information */
+
+	uint8_t outer_state1_sz;
+	/* Size in bytes of the outer state1 value */
+
+	uint8_t outer_res_sz;
+	/* Size in bytes of the digest from the outer auth algorithm */
+
+	uint8_t outer_prefix_offset;
+	/* Quad word offset from the start of the inner prefix data to the
+	 * outer prefix information. Should equal the rounded inner prefix
+	 * size, converted to qwords */
+
+};
+
+#define FW_AUTH_HDR_FLAG_DO_NESTED 1
+/* Definition of the hash_flags bit of the auth_hdr indicating that the
+ * request requires nested hashing */
+
+#define FW_AUTH_HDR_FLAG_NO_NESTED 0
+/* Definition of the hash_flags bit of the auth_hdr for no nested hashing
+ * required */
+
+#define FW_CCM_GCM_AAD_SZ_MAX 240
+/* Maximum size of AAD data allowed for CCM or GCM processing. The AAD data
+ * size is stored in an 8-bit field and must be a multiple of the hash block
+ * size.
240 is the
+ * largest value which satisfies both requirements. AAD_SZ_MAX is in byte
+ * units */
+
+/*
+ * request parameter #defines
+ */
+#define FW_HASH_REQUEST_PARAMETERS_OFFSET \
+	(sizeof(struct fw_la_cipher_req_params))
+/* Offset in bytes from the start of the request parameters block to the hash
+ * (auth) request parameters */
+
+#define FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+/* Offset in bytes from the start of the request parameters block to the
+ * cipher request parameters */
+
+/*
+ * Definition of the cipher request parameters block
+ *
+ * Definition of the cipher processing request parameters block
+ * structure, which forms part of the block incorporating LWs 14-26,
+ * as defined by the common base parameters structure.
+ * Unused fields must be set to 0.
+ */
+struct fw_la_cipher_req_params {
+	/* LW 14 */
+	uint32_t cipher_offset;
+	/* Cipher offset long word. */
+
+	/* LW 15 */
+	uint32_t cipher_length;
+	/* Cipher length long word. */
+
+	/* LWs 16-19 */
+	union {
+		uint32_t cipher_IV_array[FW_NUM_LONGWORDS_4];
+		/* Cipher IV array */
+
+		struct
+		{
+			uint64_t cipher_IV_ptr;
+			/* Cipher IV pointer or Partial State Pointer */
+
+			uint64_t resrvd1;
+			/* reserved */
+
+		} s;
+
+	} u;
+
+};
+
+/*
+ * Definition of the auth request parameters block
+ * Definition of the authentication processing request parameters block
+ * structure, which forms part of the block incorporating LWs 14-26,
+ * as defined by the common base parameters structure. Note:
+ * This structure is used by TLS only.
+ */
+struct fw_la_auth_req_params {
+	/* LW 20 */
+	uint32_t auth_off;
+	/* Byte offset from the start of packet to the auth data region */
+
+	/* LW 21 */
+	uint32_t auth_len;
+	/* Byte length of the auth data region */
+
+	/* LWs 22-23 */
+	union {
+		uint64_t auth_partial_st_prefix;
+		/* Address of the authentication partial state prefix
+		 * information */
+
+		uint64_t aad_adr;
+		/* Address of the AAD info in DRAM. Used for the CCM and GCM
+		 * protocols */
+
+	} u1;
+
+	/* LWs 24-25 */
+	uint64_t auth_res_addr;
+	/* Address of the authentication result information to validate, or
+	 * the location to which the digest information can be written back */
+
+	/* LW 26 */
+	union {
+		uint8_t inner_prefix_sz;
+		/* Size in bytes of the inner prefix data */
+
+		uint8_t aad_sz;
+		/* Size in bytes of padded AAD data to prefix to the packet
+		 * for CCM or GCM processing */
+	} u2;
+
+	uint8_t resrvd1;
+	/* reserved */
+
+	uint8_t hash_state_sz;
+	/* Number of quad words of inner and outer hash prefix data to
+	 * process. Maximum size is 240 */
+
+	uint8_t auth_res_sz;
+	/* Size in bytes of the authentication result */
+
+} __packed;
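+
+/*
+ * Illustrative sketch, not taken from the original sources: per the offsets
+ * above, the cipher request parameters sit at offset 0 of the LW 14-26 block
+ * and the auth parameters directly after them. 'req_params' is an assumed
+ * uint8_t pointer to the start of that block.
+ *
+ *	struct fw_la_cipher_req_params *cipher_params =
+ *	    (void *)(req_params + FW_CIPHER_REQUEST_PARAMETERS_OFFSET);
+ *	struct fw_la_auth_req_params *auth_params =
+ *	    (void *)(req_params + FW_HASH_REQUEST_PARAMETERS_OFFSET);
+ */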
+
+/*
+ * Definition of the auth request parameters block
+ * Definition of the authentication processing request parameters block
+ * structure, which forms part of the block incorporating LWs 14-26,
+ * as defined by the common base parameters structure. Note:
+ * This structure is used by SSL3 and MGF1 only. All fields other than the
+ * inner prefix / AAD size are unused and therefore reserved.
+ */
+struct fw_la_auth_req_params_resrvd_flds {
+	/* LWs 20-25 */
+	uint32_t resrvd[FW_NUM_LONGWORDS_6];
+
+	/* LW 26 */
+	union {
+		uint8_t inner_prefix_sz;
+		/* Size in bytes of the inner prefix data */
+
+		uint8_t aad_sz;
+		/* Size in bytes of padded AAD data to prefix to the packet
+		 * for CCM or GCM processing */
+	} u2;
+
+	uint8_t resrvd1;
+	/* reserved */
+
+	uint16_t resrvd2;
+	/* reserved */
+};
+
+/*
+ * Definition of the shared fields within the parameter block
+ * containing SSL, TLS or MGF information.
+ * This structure defines the shared fields for SSL, TLS or MGF
+ * within the parameter block incorporating LWs 14-26, as defined
+ * by the common base parameters structure.
+ * Unused fields must be set to 0.
+ */
+struct fw_la_key_gen_common {
+	/* LW 14 */
+	union {
+		/* SSL3 */
+		uint16_t secret_lgth_ssl;
+		/* Length of Secret information for SSL. In the case of TLS
+		 * the secret is supplied in the content descriptor */
+
+		/* MGF */
+		uint16_t mask_length;
+		/* Size in bytes of the desired output mask for MGF1 */
+
+		/* TLS */
+		uint16_t secret_lgth_tls;
+		/* TLS Secret length */
+
+	} u;
+
+	union {
+		/* SSL3 */
+		struct
+		{
+			uint8_t output_lgth_ssl;
+			/* Output length */
+
+			uint8_t label_lgth_ssl;
+			/* Label length */
+
+		} s1;
+
+		/* MGF */
+		struct
+		{
+			uint8_t hash_length;
+			/* Hash length */
+
+			uint8_t seed_length;
+			/* Seed length */
+
+		} s2;
+
+		/* TLS */
+		struct
+		{
+			uint8_t output_lgth_tls;
+			/* Output length */
+
+			uint8_t label_lgth_tls;
+			/* Label length */
+
+		} s3;
+
+	} u1;
+
+	/* LW 15 */
+	union {
+		/* SSL3 */
+		uint8_t iter_count;
+		/* Iteration count used by the SSL key gen request */
+
+		/* TLS */
+		uint8_t tls_seed_length;
+		/* TLS Seed length */
+
+		uint8_t resrvd1;
+		/* Reserved field set to 0 for MGF1 */
+
+	} u2;
+
+	uint8_t resrvd2;
+	uint16_t resrvd3;
+	/* Reserved space - unused */
+
+};
+
+/*
+ * Definition of the SSL3 request parameters block
+ * This structure contains the SSL3 processing request parameters
+ * incorporating LWs 14-26, as defined by the common base
+ * parameters structure. Unused fields must be set to 0.
+ */
+struct fw_la_ssl3_req_params {
+	/* LWs 14-15 */
+	struct fw_la_key_gen_common keygen_comn;
+	/* For key gen processing this field holds the ssl, tls or mgf
+	 * parameters */
+
+	/* LWs 16-25 */
+	uint32_t resrvd[FW_NUM_LONGWORDS_10];
+	/* Reserved */
+
+	/* LW 26 */
+	union {
+		uint8_t inner_prefix_sz;
+		/* Size in bytes of the inner prefix data */
+
+		uint8_t aad_sz;
+		/* Size in bytes of padded AAD data to prefix to the packet
+		 * for CCM or GCM processing */
+	} u2;
+
+	uint8_t resrvd1;
+	/* reserved */
+
+	uint16_t resrvd2;
+	/* reserved */
+
+};
+
+/*
+ * Definition of the MGF request parameters block
+ * This structure contains the MGF processing request parameters
+ * incorporating LWs 14-26, as defined by the common base parameters
+ * structure. Unused fields must be set to 0.
+ */
+struct fw_la_mgf_req_params {
+	/* LWs 14-15 */
+	struct fw_la_key_gen_common keygen_comn;
+	/* For key gen processing this field holds the ssl or mgf
+	 * parameters */
+
+	/* LWs 16-25 */
+	uint32_t resrvd[FW_NUM_LONGWORDS_10];
+	/* Reserved */
+
+	/* LW 26 */
+	union {
+		uint8_t inner_prefix_sz;
+		/* Size in bytes of the inner prefix data */
+
+		uint8_t aad_sz;
+		/* Size in bytes of padded AAD data to prefix to the packet
+		 * for CCM or GCM processing */
+	} u2;
+
+	uint8_t resrvd1;
+	/* reserved */
+
+	uint16_t resrvd2;
+	/* reserved */
+
+};
+
+/*
+ * Definition of the TLS request parameters block
+ * This structure contains the TLS processing request parameters
+ * incorporating LWs 14-26, as defined by the common base parameters
+ * structure. Unused fields must be set to 0.
+ */
+struct fw_la_tls_req_params {
+	/* LWs 14-15 */
+	struct fw_la_key_gen_common keygen_comn;
+	/* For key gen processing this field holds the ssl, tls or mgf
+	 * parameters */
+
+	/* LWs 16-19 */
+	uint32_t resrvd[FW_NUM_LONGWORDS_4];
+	/* Reserved */
+
+};
+
+/*
+ * Definition of the common QAT FW request middle block for TRNG.
+ * Common section of the request used across all of the services exposed
+ * by the QAT FW. Each of the services inherits these common fields. TRNG
+ * requires a specific implementation.
+ */
+struct fw_la_trng_req_mid {
+	/* LWs 6-13 */
+	uint64_t opaque_data;
+	/* Opaque data passed unmodified from the request to response messages
+	 * by firmware (fw) */
+
+	uint64_t resrvd1;
+	/* Reserved, unused for TRNG */
+
+	uint64_t dest_data_addr;
+	/* Generic definition of the destination data supplied to the QAT AE.
+	 * The common flags are used to further describe the attributes of
+	 * this field */
+
+	uint32_t resrvd2;
+	/* Reserved, unused for TRNG */
+
+	uint32_t entropy_length;
+	/* Size of the data in bytes to process. Used by the get_random
+	 * command. Set to 0 for commands that don't need a length
+	 * parameter */
+
+};
+
+/*
+ * Definition of the common LA QAT FW TRNG request,
+ * i.e. the TRNG processing request type.
+ */
+struct fw_la_trng_req {
+	/* LWs 0-1 */
+	struct fw_comn_req_hdr comn_hdr;
+	/* Common request header */
+
+	/* LWs 2-5 */
+	union fw_comn_req_hdr_cd_pars cd_pars;
+	/* Common request content descriptor field which points either to a
+	 * content descriptor parameter block or contains the
+	 * service-specific data itself. */
+
+	/* LWs 6-13 */
+	struct fw_la_trng_req_mid comn_mid;
+	/* TRNG request middle section - differs from the common
+	 * mid-section */
+
+	/* LWs 14-26 */
+	uint32_t resrvd1[FW_NUM_LONGWORDS_13];
+
+	/* LWs 27-31 */
+	uint32_t resrvd2[FW_NUM_LONGWORDS_5];
+
+};
+
+/*
+ * Definition of the Lookaside Eagle Tail Response
+ * This is the response delivered to the ET rings by the Lookaside
+ * QAT FW service for all commands
+ */
+struct fw_la_resp {
+	/* LWs 0-1 */
+	struct fw_comn_resp_hdr comn_resp;
+	/* Common interface response format, see fw.h */
+
+	/* LWs 2-3 */
+	uint64_t opaque_data;
+	/* Opaque data passed from the request to the response message */
+
+	/* LWs 4-7 */
+	uint32_t resrvd[FW_NUM_LONGWORDS_4];
+	/* Reserved */
+
+};
+
+/*
+ * Definition of the Lookaside TRNG Test Status Structure
+ * In addition to the FW_LA_TRNG_STATUS pass or fail information in the
+ * common response fields, the response to a TRNG_TEST request carries the
+ * test status, a counter of failed tests and four entropy counter values.
+ */
+struct fw_la_trng_test_result {
+	uint32_t test_status_info;
+	/* TRNG comparator health test status and validity information,
+	   see the Test Status Bit Fields below. */
+
+	uint32_t test_status_fail_count;
+	/* TRNG comparator health test status, 32-bit fail counter */
+
+	uint64_t r_ent_ones_cnt;
+	/* Raw Entropy ones counter */
+
+	uint64_t r_ent_zeros_cnt;
+	/* Raw Entropy zeros counter */
+
+	uint64_t c_ent_ones_cnt;
+	/* Conditioned Entropy ones counter */
+
+	uint64_t c_ent_zeros_cnt;
+	/* Conditioned Entropy zeros counter */
+
+	uint64_t resrvd;
+	/* Reserved field, must be set to zero */
+
+};
+
+/*
+ * Definition of the Lookaside SSL Key Material Input
+ * This struct defines the layout of input parameters for the
+ * SSL3 key generation (source flat buffer format)
+ */
+struct fw_la_ssl_key_material_input {
+	uint64_t seed_addr;
+	/* Pointer to the seed */
+
+	uint64_t label_addr;
+	/* Pointer to the label(s) */
+
+	uint64_t secret_addr;
+	/* Pointer to the secret */
+
+};
+
+/*
+ * Definition of the Lookaside TLS Key Material Input
+ * This struct defines the layout of input parameters for the
+ * TLS key generation (source flat buffer format)
+ * NOTE:
+ * The secret state value (S split into S1 and S2 parts) is supplied via
+ * the Content Descriptor. S1 is placed in the outer prefix buffer, and S2
+ * inside the inner prefix buffer.
+ */
+struct fw_la_tls_key_material_input {
+	uint64_t seed_addr;
+	/* Pointer to the seed */
+
+	uint64_t label_addr;
+	/* Pointer to the label(s) */
+
+};
+
+/*
+ * Macros using the bit position and mask to set/extract the next
+ * and current id nibbles within the next_curr_id field of the
+ * content descriptor header block, ONLY FOR CIPHER + AUTH COMBINED.
+ * Note that for cipher only or authentication only, the common macros
+ * need to be used. These are defined in the 'fw.h' common header
+ * file, as they are used by compression, cipher and authentication.
+ *
+ * cd_ctrl_hdr_t Content descriptor control block header.
+ * val Value of the field being set.
+ */
+/* Cipher fields within Cipher + Authentication structure */
+#define FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+	  FW_COMN_NEXT_ID_MASK) >> \
+	 (FW_COMN_NEXT_ID_BITPOS))
+
+#define FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+	(cd_ctrl_hdr_t)->next_curr_id_cipher = \
+	    ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+	      FW_COMN_CURR_ID_MASK) | \
+	     (((val) << FW_COMN_NEXT_ID_BITPOS) & \
+	      FW_COMN_NEXT_ID_MASK))
+
+#define FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id_cipher) & FW_COMN_CURR_ID_MASK)
+
+#define FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+	(cd_ctrl_hdr_t)->next_curr_id_cipher = \
+	    ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+	      FW_COMN_NEXT_ID_MASK) | \
+	     ((val)&FW_COMN_CURR_ID_MASK))
+
+/* Authentication fields within Cipher + Authentication structure */
+#define FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) & FW_COMN_NEXT_ID_MASK) >> \
+	 (FW_COMN_NEXT_ID_BITPOS))
+
+#define FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+	(cd_ctrl_hdr_t)->next_curr_id_auth = \
+	    ((((cd_ctrl_hdr_t)->next_curr_id_auth) & \
+	      FW_COMN_CURR_ID_MASK) | \
+	     (((val) << FW_COMN_NEXT_ID_BITPOS) & \
+	      FW_COMN_NEXT_ID_MASK))
+
+#define FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id_auth) & FW_COMN_CURR_ID_MASK)
+
+#define FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+	(cd_ctrl_hdr_t)->next_curr_id_auth = \
+	    ((((cd_ctrl_hdr_t)->next_curr_id_auth) & \
+	      FW_COMN_NEXT_ID_MASK) | \
+	     ((val)&FW_COMN_CURR_ID_MASK))
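+
+/*
+ * Illustrative sketch, not taken from the original sources: chaining the
+ * cipher slice into the auth slice for an encrypt-then-auth session, then
+ * terminating with a DRAM write. 'cd_ctrl' is an assumed pointer to a
+ * struct fw_cipher_auth_cd_ctrl_hdr; of the slice ids, only FW_SLICE_DRAM_WR
+ * is referenced in this header - FW_SLICE_CIPHER and FW_SLICE_AUTH are
+ * assumed names.
+ *
+ *	FW_CIPHER_CURR_ID_SET(cd_ctrl, FW_SLICE_CIPHER);
+ *	FW_CIPHER_NEXT_ID_SET(cd_ctrl, FW_SLICE_AUTH);
+ *	FW_AUTH_CURR_ID_SET(cd_ctrl, FW_SLICE_AUTH);
+ *	FW_AUTH_NEXT_ID_SET(cd_ctrl, FW_SLICE_DRAM_WR);
+ */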
+
+/* Definitions of the bits in the test_status_info of the TRNG_TEST response.
+ * The values returned by the Lookaside service are given below.
+ * The Test result and Test Fail Count values are only valid if the Test
+ * Results Valid (Tv) bit is set.
+ *
+ * TRNG Test Status Info
+ * + ===== + ------------------------------------------------ + --- + --- +
+ * | Bit   |                      31 - 2                      |  1  |  0  |
+ * + ===== + ------------------------------------------------ + --- + --- +
+ * | Flags |                   RESERVED = 0                   | Tv  | Ts  |
+ * + ===== + ------------------------------------------------------------ +
+ */
+/*
+ * Definition of the Lookaside TRNG Test Status Information received as
+ * part of fw_la_trng_test_result_t
+ */
+#define FW_LA_TRNG_TEST_STATUS_TS_BITPOS 0
+/* TRNG Test Result t_status field bit pos definition. */
+
+#define FW_LA_TRNG_TEST_STATUS_TS_MASK 0x1
+/* TRNG Test Result t_status field mask definition. */
+
+#define FW_LA_TRNG_TEST_STATUS_TV_BITPOS 1
+/* TRNG Test Result test results valid field bit pos definition. */
+
+#define FW_LA_TRNG_TEST_STATUS_TV_MASK 0x1
+/* TRNG Test Result test results valid field mask definition. */
+
+/*
+ * Definition of the Lookaside TRNG test_status values.
+ */
+#define FW_LA_TRNG_TEST_STATUS_TV_VALID 1
+/* TRNG TEST Response Test Results Valid value. */
+
+#define FW_LA_TRNG_TEST_STATUS_TV_NOT_VALID 0
+/* TRNG TEST Response Test Results are NOT Valid value. */
+
+#define FW_LA_TRNG_TEST_STATUS_TS_NO_FAILS 1
+/* Value for the TRNG Test status: tests have NO FAILs. */
+
+#define FW_LA_TRNG_TEST_STATUS_TS_HAS_FAILS 0
+/* Value for the TRNG Test status: tests have one or more FAILs. */
+
+/*
+ * Macro for extraction of the Test Status Field returned in the response
+ * to the TRNG TEST command.
+ *
+ * test_status 8 bit test_status value to extract the status bit from
+ */
+#define FW_LA_TRNG_TEST_STATUS_TS_FLD_GET(test_status) \
+	FIELD_GET(test_status, \
+	    FW_LA_TRNG_TEST_STATUS_TS_BITPOS, \
+	    FW_LA_TRNG_TEST_STATUS_TS_MASK)
+/*
+ * Macro for extraction of the Test Results Valid Field returned in the
+ * response to the TRNG TEST command.
+ *
+ * test_status 8 bit test_status value to extract the Test
+ *             Results Valid bit from
+ */
+#define FW_LA_TRNG_TEST_STATUS_TV_FLD_GET(test_status) \
+	FIELD_GET(test_status, \
+	    FW_LA_TRNG_TEST_STATUS_TV_BITPOS, \
+	    FW_LA_TRNG_TEST_STATUS_TV_MASK)
+
+/*
+ * MGF Max supported input parameters
+ */
+#define FW_LA_MGF_SEED_LEN_MAX 255
+/* Maximum seed length for an MGF1 request in bytes.
+ * Typical values may be 48, 64, 128 bytes (or any). */
+
+#define FW_LA_MGF_MASK_LEN_MAX 65528
+/* Maximum mask length for an MGF1 request in bytes.
+ * Typical values may be 8 (64-bit), 16 (128-bit). MUST be a quad word
+ * multiple */
+
+/*
+ * SSL Max supported input parameters
+ */
+#define FW_LA_SSL_SECRET_LEN_MAX 512
+/* Maximum secret length for an SSL3 Key Gen request (bytes) */
+
+#define FW_LA_SSL_ITERATES_LEN_MAX 16
+/* Maximum iterations for an SSL3 Key Gen request (integer) */
+
+#define FW_LA_SSL_LABEL_LEN_MAX 136
+/* Maximum label length for an SSL3 Key Gen request (bytes) */
+
+#define FW_LA_SSL_SEED_LEN_MAX 64
+/* Maximum seed length for an SSL3 Key Gen request (bytes) */
+
+#define FW_LA_SSL_OUTPUT_LEN_MAX 248
+/* Maximum output length for an SSL3 Key Gen request (bytes) */
+
+/*
+ * TLS Max supported input parameters
+ */
+#define FW_LA_TLS_SECRET_LEN_MAX 128
+/* Maximum secret length for a TLS Key Gen request (bytes) */
+
+#define FW_LA_TLS_V1_1_SECRET_LEN_MAX 128
+/* Maximum secret length for a TLS v1.1 Key Gen request (bytes) */
+
+#define FW_LA_TLS_V1_2_SECRET_LEN_MAX 64
+/* Maximum secret length for a TLS v1.2 Key Gen request (bytes) */
+
+#define FW_LA_TLS_LABEL_LEN_MAX 255
+/* Maximum label length for a TLS Key Gen request (bytes) */
+
+#define FW_LA_TLS_SEED_LEN_MAX 64
+/* Maximum seed length for a TLS Key Gen request (bytes) */
+
+#define FW_LA_TLS_OUTPUT_LEN_MAX 248
+/* Maximum output length for a TLS Key Gen request (bytes) */
+
+#endif
Property changes on: head/sys/dev/qat/qat_hw17reg.h
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Index: head/sys/dev/qat/qat_hw17var.h
===================================================================
--- head/sys/dev/qat/qat_hw17var.h	(nonexistent)
+++ head/sys/dev/qat/qat_hw17var.h	(revision 367386)
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
+/* $NetBSD: qat_hw17var.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
+
+/*
+ * Copyright (c) 2019 Internet Initiative Japan, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC.
AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2014 Intel Corporation. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QAT_HW17VAR_H_ +#define _DEV_PCI_QAT_HW17VAR_H_ + +CTASSERT(CONTENT_DESC_MAX_SIZE >= + roundup(sizeof(union hw_cipher_algo_blk), 8) + + roundup(sizeof(union hw_auth_algo_blk), 8)); + +int qat_adm_mailbox_init(struct qat_softc *); +int qat_adm_mailbox_send_init(struct qat_softc *); +int qat_arb_init(struct qat_softc *); +int qat_set_ssm_wdtimer(struct qat_softc *); +int qat_check_slice_hang(struct qat_softc *); + +void qat_hw17_crypto_setup_desc(struct qat_crypto *, + struct qat_session *, struct qat_crypto_desc *); +void qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *, + struct qat_session *, struct qat_crypto_desc const *, + struct qat_sym_cookie *, struct cryptop *); + +#endif Property changes on: head/sys/dev/qat/qat_hw17var.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qatreg.h =================================================================== --- head/sys/dev/qat/qatreg.h (nonexistent) +++ head/sys/dev/qat/qatreg.h (revision 367386) @@ -0,0 +1,1582 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qatreg.h,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2019 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QATREG_H_ +#define _DEV_PCI_QATREG_H_ + +#define __BIT(__n) \ + (((uintmax_t)(__n) >= NBBY * sizeof(uintmax_t)) ? 0 : \ + ((uintmax_t)1 << (uintmax_t)((__n) & (NBBY * sizeof(uintmax_t) - 1)))) +#define __BITS(__m, __n) \ + ((__BIT(MAX((__m), (__n)) + 1) - 1) ^ (__BIT(MIN((__m), (__n))) - 1)) + +#define __LOWEST_SET_BIT(__mask) ((((__mask) - 1) & (__mask)) ^ (__mask)) +#define __SHIFTOUT(__x, __mask) (((__x) & (__mask)) / __LOWEST_SET_BIT(__mask)) +#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask)) + +/* Limits */ +#define MAX_NUM_AE 0x10 +#define MAX_NUM_ACCEL 6 +#define MAX_AE 0x18 +#define MAX_AE_CTX 8 +#define MAX_ARB 4 + +#define MAX_USTORE_PER_SEG 0x8000 /* 16k * 2 */ +#define MAX_USTORE MAX_USTORE_PER_SEG + +#define MAX_AE_PER_ACCEL 4 /* XXX */ +#define MAX_BANK_PER_ACCEL 16 /* XXX */ +#define MAX_RING_PER_BANK 16 + +#define MAX_XFER_REG 128 +#define MAX_GPR_REG 128 +#define MAX_NN_REG 128 +#define MAX_LMEM_REG 1024 +#define MAX_INP_STATE 16 +#define MAX_CAM_REG 16 +#define MAX_FIFO_QWADDR 160 + +#define MAX_EXEC_INST 100 +#define UWORD_CPYBUF_SIZE 1024 /* micro-store copy buffer (bytes) */ +#define INVLD_UWORD 0xffffffffffull /* invalid micro-instruction */ +#define AEV2_PACKED_UWORD_BYTES 6 /* version 2 packed uword size */ +#define UWORD_MASK 0xbffffffffffull /* micro-word mask without parity */ + +#define AE_ALL_CTX 0xff + +/* PCIe configuration space parameter */ +#define NO_PCI_REG (-1) +#define NO_REG_OFFSET 0 + +#define MAX_BARS 3 + +/* Fuse Control */ +#define FUSECTL_REG 0x40 +#define FUSECTL_MASK __BIT(31) + +#define LEGFUSE_REG 0x4c +#define LEGFUSE_ACCEL_MASK_CIPHER_SLICE __BIT(0) +#define LEGFUSE_ACCEL_MASK_AUTH_SLICE __BIT(1) +#define LEGFUSE_ACCEL_MASK_PKE_SLICE __BIT(2) +#define LEGFUSE_ACCEL_MASK_COMPRESS_SLICE __BIT(3) +#define LEGFUSE_ACCEL_MASK_LZS_SLICE __BIT(4) +#define LEGFUSE_ACCEL_MASK_EIA3_SLICE __BIT(5) +#define LEGFUSE_ACCEL_MASK_SHA3_SLICE __BIT(6) + +/* -------------------------------------------------------------------------- */ +/* PETRINGCSR region */ + +/* ETR parameters */ +#define ETR_MAX_RINGS_PER_BANK 16 + +/* ETR registers */ +#define ETR_RING_CONFIG 0x0000 +#define ETR_RING_LBASE 0x0040 +#define ETR_RING_UBASE 0x0080 +#define ETR_RING_HEAD_OFFSET 0x00C0 +#define ETR_RING_TAIL_OFFSET 0x0100 +#define ETR_RING_STAT 0x0140 +#define ETR_UO_STAT 0x0148 +#define ETR_E_STAT 0x014C +#define ETR_NE_STAT 0x0150 +#define ETR_NF_STAT 0x0154 +#define ETR_F_STAT 0x0158 +#define ETR_C_STAT 0x015C +#define ETR_INT_EN 0x016C +#define ETR_INT_REG 0x0170 +#define ETR_INT_SRCSEL 0x0174 +#define ETR_INT_SRCSEL_2 0x0178 +#define ETR_INT_COL_EN 0x017C +#define ETR_INT_COL_CTL 0x0180 +#define ETR_AP_NF_MASK 0x2000 +#define 
ETR_AP_NF_DEST 0x2020 +#define ETR_AP_NE_MASK 0x2040 +#define ETR_AP_NE_DEST 0x2060 +#define ETR_AP_DELAY 0x2080 + +/* ARB registers */ +#define ARB_OFFSET 0x30000 +#define ARB_REG_SIZE 0x4 +#define ARB_WTR_SIZE 0x20 +#define ARB_REG_SLOT 0x1000 +#define ARB_WTR_OFFSET 0x010 +#define ARB_RO_EN_OFFSET 0x090 +#define ARB_WRK_2_SER_MAP_OFFSET 0x180 +#define ARB_RINGSRVARBEN_OFFSET 0x19c + +/* Ring Config */ +#define ETR_RING_CONFIG_LATE_HEAD_POINTER_MODE __BIT(31) +#define ETR_RING_CONFIG_NEAR_FULL_WM __BITS(14, 10) +#define ETR_RING_CONFIG_NEAR_EMPTY_WM __BITS(9, 5) +#define ETR_RING_CONFIG_RING_SIZE __BITS(4, 0) + +#define ETR_RING_CONFIG_NEAR_WM_0 0x00 +#define ETR_RING_CONFIG_NEAR_WM_4 0x01 +#define ETR_RING_CONFIG_NEAR_WM_8 0x02 +#define ETR_RING_CONFIG_NEAR_WM_16 0x03 +#define ETR_RING_CONFIG_NEAR_WM_32 0x04 +#define ETR_RING_CONFIG_NEAR_WM_64 0x05 +#define ETR_RING_CONFIG_NEAR_WM_128 0x06 +#define ETR_RING_CONFIG_NEAR_WM_256 0x07 +#define ETR_RING_CONFIG_NEAR_WM_512 0x08 +#define ETR_RING_CONFIG_NEAR_WM_1K 0x09 +#define ETR_RING_CONFIG_NEAR_WM_2K 0x0A +#define ETR_RING_CONFIG_NEAR_WM_4K 0x0B +#define ETR_RING_CONFIG_NEAR_WM_8K 0x0C +#define ETR_RING_CONFIG_NEAR_WM_16K 0x0D +#define ETR_RING_CONFIG_NEAR_WM_32K 0x0E +#define ETR_RING_CONFIG_NEAR_WM_64K 0x0F +#define ETR_RING_CONFIG_NEAR_WM_128K 0x10 +#define ETR_RING_CONFIG_NEAR_WM_256K 0x11 +#define ETR_RING_CONFIG_NEAR_WM_512K 0x12 +#define ETR_RING_CONFIG_NEAR_WM_1M 0x13 +#define ETR_RING_CONFIG_NEAR_WM_2M 0x14 +#define ETR_RING_CONFIG_NEAR_WM_4M 0x15 + +#define ETR_RING_CONFIG_SIZE_64 0x00 +#define ETR_RING_CONFIG_SIZE_128 0x01 +#define ETR_RING_CONFIG_SIZE_256 0x02 +#define ETR_RING_CONFIG_SIZE_512 0x03 +#define ETR_RING_CONFIG_SIZE_1K 0x04 +#define ETR_RING_CONFIG_SIZE_2K 0x05 +#define ETR_RING_CONFIG_SIZE_4K 0x06 +#define ETR_RING_CONFIG_SIZE_8K 0x07 +#define ETR_RING_CONFIG_SIZE_16K 0x08 +#define ETR_RING_CONFIG_SIZE_32K 0x09 +#define ETR_RING_CONFIG_SIZE_64K 0x0A +#define ETR_RING_CONFIG_SIZE_128K 0x0B +#define ETR_RING_CONFIG_SIZE_256K 0x0C +#define ETR_RING_CONFIG_SIZE_512K 0x0D +#define ETR_RING_CONFIG_SIZE_1M 0x0E +#define ETR_RING_CONFIG_SIZE_2M 0x0F +#define ETR_RING_CONFIG_SIZE_4M 0x10 + +/* Default Ring Config is Nearly Full = Full and Nearly Empty = Empty */ +#define ETR_RING_CONFIG_BUILD(size) \ + (__SHIFTIN(ETR_RING_CONFIG_NEAR_WM_0, \ + ETR_RING_CONFIG_NEAR_FULL_WM) | \ + __SHIFTIN(ETR_RING_CONFIG_NEAR_WM_0, \ + ETR_RING_CONFIG_NEAR_EMPTY_WM) | \ + __SHIFTIN((size), ETR_RING_CONFIG_RING_SIZE)) + +/* Response Ring Configuration */ +#define ETR_RING_CONFIG_BUILD_RESP(size, wm_nf, wm_ne) \ + (__SHIFTIN((wm_nf), ETR_RING_CONFIG_NEAR_FULL_WM) | \ + __SHIFTIN((wm_ne), ETR_RING_CONFIG_NEAR_EMPTY_WM) | \ + __SHIFTIN((size), ETR_RING_CONFIG_RING_SIZE)) + +/* Ring Base */ +#define ETR_RING_BASE_BUILD(addr, size) \ + (((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) + +#define ETR_INT_REG_CLEAR_MASK 0xffff + +/* Initial bank Interrupt Source mask */ +#define ETR_INT_SRCSEL_MASK 0x44444444UL + +#define ETR_INT_SRCSEL_NEXT_OFFSET 4 + +#define ETR_RINGS_PER_INT_SRCSEL 8 + +#define ETR_INT_COL_CTL_ENABLE __BIT(31) + +#define ETR_AP_NF_MASK_INIT 0xAAAAAAAA +#define ETR_AP_NE_MASK_INIT 0x55555555 + +/* Autopush destination AE bit */ +#define ETR_AP_DEST_ENABLE __BIT(7) +#define ETR_AP_DEST_AE __BITS(6, 2) +#define ETR_AP_DEST_MAILBOX __BITS(1, 0) + +/* Autopush destination enable bit */ + +/* Autopush CSR Offset */ +#define ETR_AP_BANK_OFFSET 4 + +/* Autopush maximum rings per bank */ +#define ETR_MAX_RINGS_PER_AP_BANK 32 + +/* 
Maximum mailbox per accelerator */ +#define ETR_MAX_MAILBOX_PER_ACCELERATOR 4 + +/* Maximum AEs per mailbox */ +#define ETR_MAX_AE_PER_MAILBOX 4 + +/* Macro to get the ring's autopush bank number */ +#define ETR_RING_AP_BANK_NUMBER(ring) ((ring) >> 5) + +/* Macro to get the ring's autopush mailbox number */ +#define ETR_RING_AP_MAILBOX_NUMBER(ring) \ + (ETR_RING_AP_BANK_NUMBER(ring) % ETR_MAX_MAILBOX_PER_ACCELERATOR) + +/* Macro to get the ring number in the autopush bank */ +#define ETR_RING_NUMBER_IN_AP_BANK(ring) \ + ((ring) % ETR_MAX_RINGS_PER_AP_BANK) + +#define ETR_RING_EMPTY_ENTRY_SIG (0x7F7F7F7F) + +/* -------------------------------------------------------------------------- */ +/* CAP_GLOBAL_CTL region */ + +#define FCU_CTRL 0x8c0 +#define FCU_CTRL_CMD_NOOP 0 +#define FCU_CTRL_CMD_AUTH 1 +#define FCU_CTRL_CMD_LOAD 2 +#define FCU_CTRL_CMD_START 3 +#define FCU_CTRL_AE __BITS(8, 31) + +#define FCU_STATUS 0x8c4 +#define FCU_STATUS_STS __BITS(0, 2) +#define FCU_STATUS_STS_NO 0 +#define FCU_STATUS_STS_VERI_DONE 1 +#define FCU_STATUS_STS_LOAD_DONE 2 +#define FCU_STATUS_STS_VERI_FAIL 3 +#define FCU_STATUS_STS_LOAD_FAIL 4 +#define FCU_STATUS_STS_BUSY 5 +#define FCU_STATUS_AUTHFWLD __BIT(8) +#define FCU_STATUS_DONE __BIT(9) +#define FCU_STATUS_LOADED_AE __BITS(22, 31) + +#define FCU_STATUS1 0x8c8 + +#define FCU_DRAM_ADDR_LO 0x8cc +#define FCU_DRAM_ADDR_HI 0x8d0 +#define FCU_RAMBASE_ADDR_HI 0x8d4 +#define FCU_RAMBASE_ADDR_LO 0x8d8 + +#define FW_AUTH_WAIT_PERIOD 10 +#define FW_AUTH_MAX_RETRY 300 + +#define CAP_GLOBAL_CTL_BASE 0xa00 +#define CAP_GLOBAL_CTL_MISC CAP_GLOBAL_CTL_BASE + 0x04 +#define CAP_GLOBAL_CTL_MISC_TIMESTAMP_EN __BIT(7) +#define CAP_GLOBAL_CTL_RESET CAP_GLOBAL_CTL_BASE + 0x0c +#define CAP_GLOBAL_CTL_RESET_MASK __BITS(31, 26) +#define CAP_GLOBAL_CTL_RESET_ACCEL_MASK __BITS(25, 20) +#define CAP_GLOBAL_CTL_RESET_AE_MASK __BITS(19, 0) +#define CAP_GLOBAL_CTL_CLK_EN CAP_GLOBAL_CTL_BASE + 0x50 +#define CAP_GLOBAL_CTL_CLK_EN_ACCEL_MASK __BITS(25, 20) +#define CAP_GLOBAL_CTL_CLK_EN_AE_MASK __BITS(19, 0)
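+
+/*
+ * Illustrative sketch only (not part of this commit): one plausible way to
+ * poll FCU_STATUS after issuing an FCU_CTRL_CMD_AUTH firmware-authentication
+ * command, kept under #if 0.  qat_cap_global_read_4() is a hypothetical CSR
+ * accessor standing in for the driver's real bus_space read helper.
+ */
+#if 0
+static int
+qat_fcu_wait_auth(struct qat_softc *sc)
+{
+	uint32_t status;
+	int retry;
+
+	for (retry = 0; retry < FW_AUTH_MAX_RETRY; retry++) {
+		status = qat_cap_global_read_4(sc, FCU_STATUS);
+		if (__SHIFTOUT(status, FCU_STATUS_STS) ==
+		    FCU_STATUS_STS_VERI_FAIL)
+			return (EIO);	/* signature verification failed */
+		if (status & FCU_STATUS_DONE)
+			return (0);	/* authentication complete */
+		DELAY(FW_AUTH_WAIT_PERIOD * 1000);
+	}
+	return (ETIMEDOUT);
+}
+#endif
+ +/* -------------------------------------------------------------------------- */ +/* AE region */ +#define UPC_MASK 0x1ffff +#define USTORE_SIZE QAT_16K + +#define AE_LOCAL_AE_MASK __BITS(31, 12) +#define AE_LOCAL_CSR_MASK __BITS(9, 0) + +/* AE_LOCAL registers */ +/* Control Store Address Register */ +#define USTORE_ADDRESS 0x000 +#define USTORE_ADDRESS_ECS __BIT(31) + +#define USTORE_ECC_BIT_0 44 +#define USTORE_ECC_BIT_1 45 +#define USTORE_ECC_BIT_2 46 +#define USTORE_ECC_BIT_3 47 +#define USTORE_ECC_BIT_4 48 +#define USTORE_ECC_BIT_5 49 +#define USTORE_ECC_BIT_6 50 + +/* Control Store Data Lower Register */ +#define USTORE_DATA_LOWER 0x004 +/* Control Store Data Upper Register */ +#define USTORE_DATA_UPPER 0x008 +/* Control Store Error Status Register */ +#define USTORE_ERROR_STATUS 0x00c +/* Arithmetic Logic Unit Output Register */ +#define ALU_OUT 0x010 +/* Context Arbiter Control Register */ +#define CTX_ARB_CNTL 0x014 +#define CTX_ARB_CNTL_INIT 0x00000000 +/* Context Enables Register */ +#define CTX_ENABLES 0x018 +#define CTX_ENABLES_INIT 0 +#define CTX_ENABLES_INUSE_CONTEXTS __BIT(31) +#define CTX_ENABLES_CNTL_STORE_PARITY_ERROR __BIT(29) +#define CTX_ENABLES_CNTL_STORE_PARITY_ENABLE __BIT(28) +#define CTX_ENABLES_BREAKPOINT __BIT(27) +#define CTX_ENABLES_PAR_ERR __BIT(25) +#define CTX_ENABLES_NN_MODE __BIT(20) +#define CTX_ENABLES_NN_RING_EMPTY __BIT(18) +#define CTX_ENABLES_LMADDR_1_GLOBAL __BIT(17) +#define CTX_ENABLES_LMADDR_0_GLOBAL __BIT(16) +#define 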
CTX_ENABLES_ENABLE __BITS(15, 8) + +#define CTX_ENABLES_IGNORE_W1C_MASK \ + (~(CTX_ENABLES_PAR_ERR | \ + CTX_ENABLES_BREAKPOINT | \ + CTX_ENABLES_CNTL_STORE_PARITY_ERROR)) + +/* cycles from CTX_ENABLE high to CTX entering executing state */ +#define CYCLES_FROM_READY2EXE 8 + +/* Condition Code Enable Register */ +#define CC_ENABLE 0x01c +#define CC_ENABLE_INIT 0x2000 + +/* CSR Context Pointer Register */ +#define CSR_CTX_POINTER 0x020 +#define CSR_CTX_POINTER_CONTEXT __BITS(2, 0) +/* Register Error Status Register */ +#define REG_ERROR_STATUS 0x030 +/* Indirect Context Status Register */ +#define CTX_STS_INDIRECT 0x040 +#define CTX_STS_INDIRECT_UPC_INIT 0x00000000 + +/* Active Context Status Register */ +#define ACTIVE_CTX_STATUS 0x044 +#define ACTIVE_CTX_STATUS_ABO __BIT(31) +#define ACTIVE_CTX_STATUS_ACNO __BITS(0, 2) +/* Indirect Context Signal Events Register */ +#define CTX_SIG_EVENTS_INDIRECT 0x048 +#define CTX_SIG_EVENTS_INDIRECT_INIT 0x00000001 +/* Active Context Signal Events Register */ +#define CTX_SIG_EVENTS_ACTIVE 0x04c +/* Indirect Context Wakeup Events Register */ +#define CTX_WAKEUP_EVENTS_INDIRECT 0x050 +#define CTX_WAKEUP_EVENTS_INDIRECT_VOLUNTARY 0x00000001 +#define CTX_WAKEUP_EVENTS_INDIRECT_SLEEP 0x00010000 + +#define CTX_WAKEUP_EVENTS_INDIRECT_INIT 0x00000001 + +/* Active Context Wakeup Events Register */ +#define CTX_WAKEUP_EVENTS_ACTIVE 0x054 +/* Indirect Context Future Count Register */ +#define CTX_FUTURE_COUNT_INDIRECT 0x058 +/* Active Context Future Count Register */ +#define CTX_FUTURE_COUNT_ACTIVE 0x05c +/* Indirect Local Memory Address 0 Register */ +#define LM_ADDR_0_INDIRECT 0x060 +/* Active Local Memory Address 0 Register */ +#define LM_ADDR_0_ACTIVE 0x064 +/* Indirect Local Memory Address 1 Register */ +#define LM_ADDR_1_INDIRECT 0x068 +/* Active Local Memory Address 1 Register */ +#define LM_ADDR_1_ACTIVE 0x06c +/* Byte Index Register */ +#define BYTE_INDEX 0x070 +/* Indirect Local Memory Address 0 Byte Index Register */ +#define INDIRECT_LM_ADDR_0_BYTE_INDEX 0x0e0 +/* Active Local Memory Address 0 Byte Index Register */ +#define ACTIVE_LM_ADDR_0_BYTE_INDEX 0x0e4 +/* Indirect Local Memory Address 1 Byte Index Register */ +#define INDIRECT_LM_ADDR_1_BYTE_INDEX 0x0e8 +/* Active Local Memory Address 1 Byte Index Register */ +#define ACTIVE_LM_ADDR_1_BYTE_INDEX 0x0ec +/* Transfer Index Concatenated with Byte Index Register */ +#define T_INDEX_BYTE_INDEX 0x0f4 +/* Transfer Index Register */ +#define T_INDEX 0x074 +/* Indirect Future Count Signal Register */ +#define FUTURE_COUNT_SIGNAL_INDIRECT 0x078 +/* Active Future Count Signal Register */ +#define FUTURE_COUNT_SIGNAL_ACTIVE 0x07c +/* Next Neighbor Put Register */ +#define NN_PUT 0x080 +/* Next Neighbor Get Register */ +#define NN_GET 0x084 +/* Timestamp Low Register */ +#define TIMESTAMP_LOW 0x0c0 +/* Timestamp High Register */ +#define TIMESTAMP_HIGH 0x0c4 +/* Next Neighbor Signal Register */ +#define NEXT_NEIGHBOR_SIGNAL 0x100 +/* Previous Neighbor Signal Register */ +#define PREV_NEIGHBOR_SIGNAL 0x104 +/* Same AccelEngine Signal Register */ +#define SAME_AE_SIGNAL 0x108 +/* Cyclic Redundancy Check Remainder Register */ +#define CRC_REMAINDER 0x140 +/* Profile Count Register */ +#define PROFILE_COUNT 0x144 +/* Pseudorandom Number Register */ +#define PSEUDO_RANDOM_NUMBER 0x148 +/* Signature Enable Register */ +#define SIGNATURE_ENABLE 0x150
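+
+/*
+ * Illustrative sketch only (not part of this commit): CTX_ENABLES mixes
+ * control bits with write-1-to-clear error bits, so a read-modify-write
+ * must strip the W1C bits with CTX_ENABLES_IGNORE_W1C_MASK, or an
+ * unrelated update would unintentionally clear a latched error.
+ * qat_ae_read_4() and qat_ae_write_4() are hypothetical per-AE CSR
+ * accessors.
+ */
+#if 0
+static void
+qat_ae_enable_ctx_sketch(struct qat_softc *sc, u_char ae, u_int ctx_mask)
+{
+	uint32_t val;
+
+	val = qat_ae_read_4(sc, ae, CTX_ENABLES);
+	val &= CTX_ENABLES_IGNORE_W1C_MASK;	/* never write W1C bits back */
+	val |= __SHIFTIN(ctx_mask, CTX_ENABLES_ENABLE);
+	qat_ae_write_4(sc, ae, CTX_ENABLES, val);
+}
+#endif
+ +/* Miscellaneous Control Register */ +#define AE_MISC_CONTROL 0x160 +#define AE_MISC_CONTROL_PARITY_ENABLE __BIT(24) +#define AE_MISC_CONTROL_FORCE_BAD_PARITY 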
__BIT(23) +#define AE_MISC_CONTROL_ONE_CTX_RELOAD __BIT(22) +#define AE_MISC_CONTROL_CS_RELOAD __BITS(21, 20) +#define AE_MISC_CONTROL_SHARE_CS __BIT(2) +/* Control Store Address 1 Register */ +#define USTORE_ADDRESS1 0x158 +/* Local CSR Status Register */ +#define LOCAL_CSR_STATUS 0x180 +#define LOCAL_CSR_STATUS_STATUS 0x1 +/* NULL Register */ +#define NULL_CSR 0x3fc + +/* AE_XFER macros */ +#define AE_XFER_AE_MASK __BITS(31, 12) +#define AE_XFER_CSR_MASK __BITS(9, 2) + +#define AEREG_BAD_REGADDR 0xffff /* bad register address */ + +/* -------------------------------------------------------------------------- */ + +#define SSMWDT(i) ((i) * 0x4000 + 0x54) +#define SSMWDTPKE(i) ((i) * 0x4000 + 0x58) +#define INTSTATSSM(i) ((i) * 0x4000 + 0x04) +#define INTSTATSSM_SHANGERR __BIT(13) +#define PPERR(i) ((i) * 0x4000 + 0x08) +#define PPERRID(i) ((i) * 0x4000 + 0x0C) +#define CERRSSMSH(i) ((i) * 0x4000 + 0x10) +#define UERRSSMSH(i) ((i) * 0x4000 + 0x18) +#define UERRSSMSHAD(i) ((i) * 0x4000 + 0x1C) +#define SLICEHANGSTATUS(i) ((i) * 0x4000 + 0x4C) +#define SLICE_HANG_AUTH0_MASK __BIT(0) +#define SLICE_HANG_AUTH1_MASK __BIT(1) +#define SLICE_HANG_CPHR0_MASK __BIT(4) +#define SLICE_HANG_CPHR1_MASK __BIT(5) +#define SLICE_HANG_CMP0_MASK __BIT(8) +#define SLICE_HANG_CMP1_MASK __BIT(9) +#define SLICE_HANG_XLT0_MASK __BIT(12) +#define SLICE_HANG_XLT1_MASK __BIT(13) +#define SLICE_HANG_MMP0_MASK __BIT(16) +#define SLICE_HANG_MMP1_MASK __BIT(17) +#define SLICE_HANG_MMP2_MASK __BIT(18) +#define SLICE_HANG_MMP3_MASK __BIT(19) +#define SLICE_HANG_MMP4_MASK __BIT(20) + +#define SHINTMASKSSM(i) ((i) * 0x4000 + 0x1018) +#define ENABLE_SLICE_HANG 0x000000 +#define MAX_MMP (5) +#define MMP_BASE(i) ((i) * 0x1000 % 0x3800) +#define CERRSSMMMP(i, n) ((i) * 0x4000 + MMP_BASE(n) + 0x380) +#define UERRSSMMMP(i, n) ((i) * 0x4000 + MMP_BASE(n) + 0x388) +#define UERRSSMMMPAD(i, n) ((i) * 0x4000 + MMP_BASE(n) + 0x38C) + +#define CPP_CFC_ERR_STATUS (0x30000 + 0xC04) +#define CPP_CFC_ERR_PPID (0x30000 + 0xC08) + +#define ERRSOU0 (0x3A000 + 0x00) +#define ERRSOU1 (0x3A000 + 0x04) +#define ERRSOU2 (0x3A000 + 0x08) +#define ERRSOU3 (0x3A000 + 0x0C) +#define ERRSOU4 (0x3A000 + 0xD0) +#define ERRSOU5 (0x3A000 + 0xD8) +#define ERRMSK0 (0x3A000 + 0x10) +#define ERRMSK1 (0x3A000 + 0x14) +#define ERRMSK2 (0x3A000 + 0x18) +#define ERRMSK3 (0x3A000 + 0x1C) +#define ERRMSK4 (0x3A000 + 0xD4) +#define ERRMSK5 (0x3A000 + 0xDC) +#define EMSK3_CPM0_MASK __BIT(2) +#define EMSK3_CPM1_MASK __BIT(3) +#define EMSK5_CPM2_MASK __BIT(16) +#define EMSK5_CPM3_MASK __BIT(17) +#define EMSK5_CPM4_MASK __BIT(18) +#define RICPPINTSTS (0x3A000 + 0x114) +#define RIERRPUSHID (0x3A000 + 0x118) +#define RIERRPULLID (0x3A000 + 0x11C) + +#define TICPPINTSTS (0x3A400 + 0x13C) +#define TIERRPUSHID (0x3A400 + 0x140) +#define TIERRPULLID (0x3A400 + 0x144) +#define SECRAMUERR (0x3AC00 + 0x04) +#define SECRAMUERRAD (0x3AC00 + 0x0C) +#define CPPMEMTGTERR (0x3AC00 + 0x10) +#define ERRPPID (0x3AC00 + 0x14) + +#define ADMINMSGUR 0x3a574 +#define ADMINMSGLR 0x3a578 +#define MAILBOX_BASE 0x20970 +#define MAILBOX_STRIDE 0x1000 +#define ADMINMSG_LEN 32 + +/* -------------------------------------------------------------------------- */ +static const uint8_t mailbox_const_tab[1024] __aligned(1024) = { +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, +0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, +0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, +0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0, +0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e, +0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39, +0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, +0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, +0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, +0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29, +0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17, +0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff, +0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, +0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, +0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, +0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, +0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52, +0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, +0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 
0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, +0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + +/* -------------------------------------------------------------------------- */ +/* Microcode */ + +/* Clear GPR of AE */ +static const uint64_t ae_clear_gprs_inst[] = { + 0x0F0000C0000ull, /* .0 l0000!val = 0 ; immed[l0000!val, 0x0] */ + 0x0F000000380ull, /* .1 l0000!count = 128 ; immed[l0000!count, 0x80] */ + 0x0D805000011ull, /* .2 br!=ctx[0, ctx_init#] */ + 0x0FC082C0300ull, /* .3 local_csr_wr[nn_put, 0] */ + 0x0F0000C0300ull, /* .4 nop */ + 0x0F0000C0300ull, /* .5 nop */ + 0x0F0000C0300ull, /* .6 nop */ + 0x0F0000C0300ull, /* .7 nop */ + 0x0A0643C0000ull, /* .8 init_nn#:alu[*n$index++, --, b, l0000!val] */ + 0x0BAC0000301ull, /* .9 alu[l0000!count, l0000!count, -, 1] */ + 0x0D802000101ull, /* .10 bne[init_nn#] */ + 0x0F0000C0001ull, /* .11 l0000!indx = 0 ; immed[l0000!indx, 0x0] */ + 0x0FC066C0001ull, /* .12 local_csr_wr[active_lm_addr_0, l0000!indx]; + * put indx to lm_addr */ + 0x0F0000C0300ull, /* .13 nop */ + 0x0F0000C0300ull, /* .14 nop */ + 0x0F0000C0300ull, /* .15 nop */ + 0x0F000400300ull, /* .16 l0000!count = 1024 ; immed[l0000!count, 0x400] */ + 0x0A0610C0000ull, /* .17 init_lm#:alu[*l$index0++, --, b, l0000!val] */ + 
0x0BAC0000301ull, /* .18 alu[l0000!count, l0000!count, -, 1] */ + 0x0D804400101ull, /* .19 bne[init_lm#] */ + 0x0A0580C0000ull, /* .20 ctx_init#:alu[$l0000!xfers[0], --, b, l0000!val] */ + 0x0A0581C0000ull, /* .21 alu[$l0000!xfers[1], --, b, l0000!val] */ + 0x0A0582C0000ull, /* .22 alu[$l0000!xfers[2], --, b, l0000!val] */ + 0x0A0583C0000ull, /* .23 alu[$l0000!xfers[3], --, b, l0000!val] */ + 0x0A0584C0000ull, /* .24 alu[$l0000!xfers[4], --, b, l0000!val] */ + 0x0A0585C0000ull, /* .25 alu[$l0000!xfers[5], --, b, l0000!val] */ + 0x0A0586C0000ull, /* .26 alu[$l0000!xfers[6], --, b, l0000!val] */ + 0x0A0587C0000ull, /* .27 alu[$l0000!xfers[7], --, b, l0000!val] */ + 0x0A0588C0000ull, /* .28 alu[$l0000!xfers[8], --, b, l0000!val] */ + 0x0A0589C0000ull, /* .29 alu[$l0000!xfers[9], --, b, l0000!val] */ + 0x0A058AC0000ull, /* .30 alu[$l0000!xfers[10], --, b, l0000!val] */ + 0x0A058BC0000ull, /* .31 alu[$l0000!xfers[11], --, b, l0000!val] */ + 0x0A058CC0000ull, /* .32 alu[$l0000!xfers[12], --, b, l0000!val] */ + 0x0A058DC0000ull, /* .33 alu[$l0000!xfers[13], --, b, l0000!val] */ + 0x0A058EC0000ull, /* .34 alu[$l0000!xfers[14], --, b, l0000!val] */ + 0x0A058FC0000ull, /* .35 alu[$l0000!xfers[15], --, b, l0000!val] */ + 0x0A05C0C0000ull, /* .36 alu[$l0000!xfers[16], --, b, l0000!val] */ + 0x0A05C1C0000ull, /* .37 alu[$l0000!xfers[17], --, b, l0000!val] */ + 0x0A05C2C0000ull, /* .38 alu[$l0000!xfers[18], --, b, l0000!val] */ + 0x0A05C3C0000ull, /* .39 alu[$l0000!xfers[19], --, b, l0000!val] */ + 0x0A05C4C0000ull, /* .40 alu[$l0000!xfers[20], --, b, l0000!val] */ + 0x0A05C5C0000ull, /* .41 alu[$l0000!xfers[21], --, b, l0000!val] */ + 0x0A05C6C0000ull, /* .42 alu[$l0000!xfers[22], --, b, l0000!val] */ + 0x0A05C7C0000ull, /* .43 alu[$l0000!xfers[23], --, b, l0000!val] */ + 0x0A05C8C0000ull, /* .44 alu[$l0000!xfers[24], --, b, l0000!val] */ + 0x0A05C9C0000ull, /* .45 alu[$l0000!xfers[25], --, b, l0000!val] */ + 0x0A05CAC0000ull, /* .46 alu[$l0000!xfers[26], --, b, l0000!val] */ + 0x0A05CBC0000ull, /* .47 alu[$l0000!xfers[27], --, b, l0000!val] */ + 0x0A05CCC0000ull, /* .48 alu[$l0000!xfers[28], --, b, l0000!val] */ + 0x0A05CDC0000ull, /* .49 alu[$l0000!xfers[29], --, b, l0000!val] */ + 0x0A05CEC0000ull, /* .50 alu[$l0000!xfers[30], --, b, l0000!val] */ + 0x0A05CFC0000ull, /* .51 alu[$l0000!xfers[31], --, b, l0000!val] */ + 0x0A0400C0000ull, /* .52 alu[l0000!gprega[0], --, b, l0000!val] */ + 0x0B0400C0000ull, /* .53 alu[l0000!gpregb[0], --, b, l0000!val] */ + 0x0A0401C0000ull, /* .54 alu[l0000!gprega[1], --, b, l0000!val] */ + 0x0B0401C0000ull, /* .55 alu[l0000!gpregb[1], --, b, l0000!val] */ + 0x0A0402C0000ull, /* .56 alu[l0000!gprega[2], --, b, l0000!val] */ + 0x0B0402C0000ull, /* .57 alu[l0000!gpregb[2], --, b, l0000!val] */ + 0x0A0403C0000ull, /* .58 alu[l0000!gprega[3], --, b, l0000!val] */ + 0x0B0403C0000ull, /* .59 alu[l0000!gpregb[3], --, b, l0000!val] */ + 0x0A0404C0000ull, /* .60 alu[l0000!gprega[4], --, b, l0000!val] */ + 0x0B0404C0000ull, /* .61 alu[l0000!gpregb[4], --, b, l0000!val] */ + 0x0A0405C0000ull, /* .62 alu[l0000!gprega[5], --, b, l0000!val] */ + 0x0B0405C0000ull, /* .63 alu[l0000!gpregb[5], --, b, l0000!val] */ + 0x0A0406C0000ull, /* .64 alu[l0000!gprega[6], --, b, l0000!val] */ + 0x0B0406C0000ull, /* .65 alu[l0000!gpregb[6], --, b, l0000!val] */ + 0x0A0407C0000ull, /* .66 alu[l0000!gprega[7], --, b, l0000!val] */ + 0x0B0407C0000ull, /* .67 alu[l0000!gpregb[7], --, b, l0000!val] */ + 0x0A0408C0000ull, /* .68 alu[l0000!gprega[8], --, b, l0000!val] */ + 0x0B0408C0000ull, /* .69 
alu[l0000!gpregb[8], --, b, l0000!val] */ + 0x0A0409C0000ull, /* .70 alu[l0000!gprega[9], --, b, l0000!val] */ + 0x0B0409C0000ull, /* .71 alu[l0000!gpregb[9], --, b, l0000!val] */ + 0x0A040AC0000ull, /* .72 alu[l0000!gprega[10], --, b, l0000!val] */ + 0x0B040AC0000ull, /* .73 alu[l0000!gpregb[10], --, b, l0000!val] */ + 0x0A040BC0000ull, /* .74 alu[l0000!gprega[11], --, b, l0000!val] */ + 0x0B040BC0000ull, /* .75 alu[l0000!gpregb[11], --, b, l0000!val] */ + 0x0A040CC0000ull, /* .76 alu[l0000!gprega[12], --, b, l0000!val] */ + 0x0B040CC0000ull, /* .77 alu[l0000!gpregb[12], --, b, l0000!val] */ + 0x0A040DC0000ull, /* .78 alu[l0000!gprega[13], --, b, l0000!val] */ + 0x0B040DC0000ull, /* .79 alu[l0000!gpregb[13], --, b, l0000!val] */ + 0x0A040EC0000ull, /* .80 alu[l0000!gprega[14], --, b, l0000!val] */ + 0x0B040EC0000ull, /* .81 alu[l0000!gpregb[14], --, b, l0000!val] */ + 0x0A040FC0000ull, /* .82 alu[l0000!gprega[15], --, b, l0000!val] */ + 0x0B040FC0000ull, /* .83 alu[l0000!gpregb[15], --, b, l0000!val] */ + 0x0D81581C010ull, /* .84 br=ctx[7, exit#] */ + 0x0E000010000ull, /* .85 ctx_arb[kill], any */ + 0x0E000010000ull, /* .86 exit#:ctx_arb[kill], any */ +}; + +static const uint64_t ae_inst_4b[] = { + 0x0F0400C0000ull, /* .0 immed_w0[l0000!indx, 0] */ + 0x0F4400C0000ull, /* .1 immed_w1[l0000!indx, 0] */ + 0x0F040000300ull, /* .2 immed_w0[l0000!myvalue, 0x0] */ + 0x0F440000300ull, /* .3 immed_w1[l0000!myvalue, 0x0] */ + 0x0FC066C0000ull, /* .4 local_csr_wr[active_lm_addr_0, + l0000!indx]; put indx to lm_addr */ + 0x0F0000C0300ull, /* .5 nop */ + 0x0F0000C0300ull, /* .6 nop */ + 0x0F0000C0300ull, /* .7 nop */ + 0x0A021000000ull, /* .8 alu[*l$index0++, --, b, l0000!myvalue] */ +}; + +static const uint64_t ae_inst_1b[] = { + 0x0F0400C0000ull, /* .0 immed_w0[l0000!indx, 0] */ + 0x0F4400C0000ull, /* .1 immed_w1[l0000!indx, 0] */ + 0x0F040000300ull, /* .2 immed_w0[l0000!myvalue, 0x0] */ + 0x0F440000300ull, /* .3 immed_w1[l0000!myvalue, 0x0] */ + 0x0FC066C0000ull, /* .4 local_csr_wr[active_lm_addr_0, + l0000!indx]; put indx to lm_addr */ + 0x0F0000C0300ull, /* .5 nop */ + 0x0F0000C0300ull, /* .6 nop */ + 0x0F0000C0300ull, /* .7 nop */ + 0x0A000180000ull, /* .8 alu[l0000!val, --, b, *l$index0] */ + 0x09080000200ull, /* .9 alu_shf[l0000!myvalue, --, b, + l0000!myvalue, <<24 ] */ + 0x08180280201ull, /* .10 alu_shf[l0000!val1, --, b, l0000!val, <<8 ] */ + 0x08080280102ull, /* .11 alu_shf[l0000!val1, --, b, l0000!val1 , >>8 ] */ + 0x0BA00100002ull, /* .12 alu[l0000!val2, l0000!val1, or, l0000!myvalue] */ + +}; + +static const uint64_t ae_inst_2b[] = { + 0x0F0400C0000ull, /* .0 immed_w0[l0000!indx, 0] */ + 0x0F4400C0000ull, /* .1 immed_w1[l0000!indx, 0] */ + 0x0F040000300ull, /* .2 immed_w0[l0000!myvalue, 0x0] */ + 0x0F440000300ull, /* .3 immed_w1[l0000!myvalue, 0x0] */ + 0x0FC066C0000ull, /* .4 local_csr_wr[active_lm_addr_0, + l0000!indx]; put indx to lm_addr */ + 0x0F0000C0300ull, /* .5 nop */ + 0x0F0000C0300ull, /* .6 nop */ + 0x0F0000C0300ull, /* .7 nop */ + 0x0A000180000ull, /* .8 alu[l0000!val, --, b, *l$index0] */ + 0x09100000200ull, /* .9 alu_shf[l0000!myvalue, --, b, + l0000!myvalue, <<16 ] */ + 0x08100280201ull, /* .10 alu_shf[l0000!val1, --, b, l0000!val, <<16 ] */ + 0x08100280102ull, /* .11 alu_shf[l0000!val1, --, b, l0000!val1 , >>16 ] */ + 0x0BA00100002ull, /* .12 alu[l0000!val2, l0000!val1, or, l0000!myvalue] */ +}; + +static const uint64_t ae_inst_3b[] = { + 0x0F0400C0000ull, /* .0 immed_w0[l0000!indx, 0] */ + 0x0F4400C0000ull, /* .1 immed_w1[l0000!indx, 0] */ + 0x0F040000300ull, /* .2 
immed_w0[l0000!myvalue, 0x0] */ + 0x0F440000300ull, /* .3 immed_w1[l0000!myvalue, 0x0] */ + 0x0FC066C0000ull, /* .4 local_csr_wr[active_lm_addr_0, + l0000!indx]; put indx to lm_addr */ + 0x0F0000C0300ull, /* .5 nop */ + 0x0F0000C0300ull, /* .6 nop */ + 0x0F0000C0300ull, /* .7 nop */ + 0x0A000180000ull, /* .8 alu[l0000!val, --, b, *l$index0] */ + 0x09180000200ull, /* .9 alu_shf[l0000!myvalue, --, + b, l0000!myvalue, <<8 ] */ + 0x08080280201ull, /* .10 alu_shf[l0000!val1, --, b, l0000!val, <<24 ] */ + 0x08180280102ull, /* .11 alu_shf[l0000!val1, --, b, l0000!val1 , >>24 ] */ + 0x0BA00100002ull, /* .12 alu[l0000!val2, l0000!val1, or, l0000!myvalue] */ +}; + +/* micro-instr fixup */ +#define INSERT_IMMED_GPRA_CONST(inst, const_val) \ + inst = (inst & 0xFFFF00C03FFull) | \ + ((((const_val) << 12) & 0x0FF00000ull) | \ + (((const_val) << 10) & 0x0003FC00ull)) +#define INSERT_IMMED_GPRB_CONST(inst, const_val) \ + inst = (inst & 0xFFFF00FFF00ull) | \ + ((((const_val) << 12) & 0x0FF00000ull) | \ + (((const_val) << 0) & 0x000000FFull)) + +enum aereg_type { + AEREG_NO_DEST, /* no destination */ + AEREG_GPA_REL, /* general-purpose A register under relative mode */ + AEREG_GPA_ABS, /* general-purpose A register under absolute mode */ + AEREG_GPB_REL, /* general-purpose B register under relative mode */ + AEREG_GPB_ABS, /* general-purpose B register under absolute mode */ + AEREG_SR_REL, /* sram register under relative mode */ + AEREG_SR_RD_REL, /* sram read register under relative mode */ + AEREG_SR_WR_REL, /* sram write register under relative mode */ + AEREG_SR_ABS, /* sram register under absolute mode */ + AEREG_SR_RD_ABS, /* sram read register under absolute mode */ + AEREG_SR_WR_ABS, /* sram write register under absolute mode */ + AEREG_SR0_SPILL, /* sram0 spill register */ + AEREG_SR1_SPILL, /* sram1 spill register */ + AEREG_SR2_SPILL, /* sram2 spill register */ + AEREG_SR3_SPILL, /* sram3 spill register */ + AEREG_SR0_MEM_ADDR, /* sram0 memory address register */ + AEREG_SR1_MEM_ADDR, /* sram1 memory address register */ + AEREG_SR2_MEM_ADDR, /* sram2 memory address register */ + AEREG_SR3_MEM_ADDR, /* sram3 memory address register */ + AEREG_DR_REL, /* dram register under relative mode */ + AEREG_DR_RD_REL, /* dram read register under relative mode */ + AEREG_DR_WR_REL, /* dram write register under relative mode */ + AEREG_DR_ABS, /* dram register under absolute mode */ + AEREG_DR_RD_ABS, /* dram read register under absolute mode */ + AEREG_DR_WR_ABS, /* dram write register under absolute mode */ + AEREG_DR_MEM_ADDR, /* dram memory address register */ + AEREG_LMEM, /* local memory */ + AEREG_LMEM0, /* local memory bank0 */ + AEREG_LMEM1, /* local memory bank1 */ + AEREG_LMEM_SPILL, /* local memory spill */ + AEREG_LMEM_ADDR, /* local memory address */ + AEREG_NEIGH_REL, /* next neighbour register under relative mode */ + AEREG_NEIGH_INDX, /* next neighbour register under index mode */ + AEREG_SIG_REL, /* signal register under relative mode */ + AEREG_SIG_INDX, /* signal register under index mode */ + AEREG_SIG_DOUBLE, /* signal register */ + AEREG_SIG_SINGLE, /* signal register */ + AEREG_SCRATCH_MEM_ADDR, /* scratch memory address */ + AEREG_UMEM0, /* ustore memory bank0 */ + AEREG_UMEM1, /* ustore memory bank1 */ + AEREG_UMEM_SPILL, /* ustore memory spill */ + AEREG_UMEM_ADDR, /* ustore memory address */ + AEREG_DR1_MEM_ADDR, /* dram segment1 address */ + AEREG_SR0_IMPORTED, /* sram segment0 imported data */ + AEREG_SR1_IMPORTED, /* sram segment1 imported data */ + AEREG_SR2_IMPORTED, /* sram 
segment2 imported data */ + AEREG_SR3_IMPORTED, /* sram segment3 imported data */ + AEREG_DR_IMPORTED, /* dram segment0 imported data */ + AEREG_DR1_IMPORTED, /* dram segment1 imported data */ + AEREG_SCRATCH_IMPORTED, /* scratch imported data */ + AEREG_XFER_RD_ABS, /* transfer read register under absolute mode */ + AEREG_XFER_WR_ABS, /* transfer write register under absolute mode */ + AEREG_CONST_VALUE, /* const value */ + AEREG_ADDR_TAKEN, /* address taken */ + AEREG_OPTIMIZED_AWAY, /* optimized away */ + AEREG_SHRAM_ADDR, /* shared ram0 address */ + AEREG_SHRAM1_ADDR, /* shared ram1 address */ + AEREG_SHRAM2_ADDR, /* shared ram2 address */ + AEREG_SHRAM3_ADDR, /* shared ram3 address */ + AEREG_SHRAM4_ADDR, /* shared ram4 address */ + AEREG_SHRAM5_ADDR, /* shared ram5 address */ + AEREG_ANY = 0xffff /* any register */ +}; +#define AEREG_SR_INDX AEREG_SR_ABS + /* sram transfer register under index mode */ +#define AEREG_DR_INDX AEREG_DR_ABS + /* dram transfer register under index mode */ +#define AEREG_NEIGH_ABS AEREG_NEIGH_INDX + /* next neighbor register under absolute mode */ + + +#define QAT_2K 0x0800 +#define QAT_4K 0x1000 +#define QAT_6K 0x1800 +#define QAT_8K 0x2000 +#define QAT_16K 0x4000 + +#define MOF_OBJ_ID_LEN 8 +#define MOF_FID 0x00666f6d +#define MOF_MIN_VER 0x1 +#define MOF_MAJ_VER 0x0 +#define SYM_OBJS "SYM_OBJS" /* symbol object string */ +#define UOF_OBJS "UOF_OBJS" /* uof object string */ +#define SUOF_OBJS "SUF_OBJS" /* suof object string */ +#define SUOF_IMAG "SUF_IMAG" /* suof chunk ID string */ + +#define UOF_STRT "UOF_STRT" /* string table section ID */ +#define UOF_GTID "UOF_GTID" /* GTID section ID */ +#define UOF_IMAG "UOF_IMAG" /* image section ID */ +#define UOF_IMEM "UOF_IMEM" /* import section ID */ +#define UOF_MSEG "UOF_MSEG" /* memory section ID */ + +#define CRC_POLY 0x1021 +#define CRC_WIDTH 16 +#define CRC_BITMASK(x) (1L << (x)) +#define CRC_WIDTHMASK(width) ((((1L<<(width-1))-1L)<<1)|1L) + +struct mof_file_hdr { + u_int mfh_fid; + u_int mfh_csum; + char mfh_min_ver; + char mfh_maj_ver; + u_short mfh_reserved; + u_short mfh_max_chunks; + u_short mfh_num_chunks; +}; + +struct mof_file_chunk_hdr { + char mfch_id[MOF_OBJ_ID_LEN]; + uint64_t mfch_offset; + uint64_t mfch_size; +}; + +struct mof_uof_hdr { + u_short muh_max_chunks; + u_short muh_num_chunks; + u_int muh_reserved; +}; + +struct mof_uof_chunk_hdr { + char much_id[MOF_OBJ_ID_LEN]; /* should be UOF_IMAG */ + uint64_t much_offset; /* uof image */ + uint64_t much_size; /* uof image size */ + u_int much_name; /* uof name string-table offset */ + u_int much_reserved; +};
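+
+/*
+ * Illustrative sketch only (not part of this commit): walking the chunk
+ * directory of a MOF firmware image to find a chunk such as UOF_OBJS.  The
+ * chunk headers are assumed to follow struct mof_file_hdr directly, as the
+ * layout above suggests; checksum validation is omitted and
+ * qat_mof_find_chunk_sketch() is a hypothetical helper name.
+ */
+#if 0
+static const struct mof_file_chunk_hdr *
+qat_mof_find_chunk_sketch(const void *mof, const char *id)
+{
+	const struct mof_file_hdr *mfh = mof;
+	const struct mof_file_chunk_hdr *mfch;
+	u_short i;
+
+	if (mfh->mfh_fid != MOF_FID)
+		return (NULL);		/* not a MOF image */
+	mfch = (const void *)(mfh + 1);	/* chunk directory follows header */
+	for (i = 0; i < mfh->mfh_num_chunks; i++, mfch++) {
+		if (strncmp(mfch->mfch_id, id, MOF_OBJ_ID_LEN) == 0)
+			return (mfch);
+	}
+	return (NULL);	/* e.g. qat_mof_find_chunk_sketch(fw, UOF_OBJS) */
+}
+#endif
+ +#define UOF_MAX_NUM_OF_AE 16 /* maximum number of AE */ + +#define UOF_OBJ_ID_LEN 8 /* length of object ID */ +#define UOF_FIELD_POS_SIZE 12 /* field position size */ +#define MIN_UOF_SIZE 24 /* minimum .uof file size */ +#define UOF_FID 0xc6c2 /* uof magic number */ +#define UOF_MIN_VER 0x11 +#define UOF_MAJ_VER 0x4 + +struct uof_file_hdr { + u_short ufh_id; /* file id and endian indicator */ + u_short ufh_reserved1; /* reserved for future use */ + char ufh_min_ver; /* file format minor version */ + char ufh_maj_ver; /* file format major version */ + u_short ufh_reserved2; /* reserved for future use */ + u_short ufh_max_chunks; /* max chunks in file */ + u_short ufh_num_chunks; /* num of actual chunks */ +}; + +struct uof_file_chunk_hdr { + char ufch_id[UOF_OBJ_ID_LEN]; /* chunk identifier */ + u_int ufch_csum; /* chunk checksum */ + u_int ufch_offset; /* offset of the chunk in the file */ + u_int ufch_size; /* size of the chunk */ 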
+}; + +struct uof_obj_hdr { + u_int uoh_cpu_type; /* CPU type */ + u_short uoh_min_cpu_ver; /* starting CPU version */ + u_short uoh_max_cpu_ver; /* ending CPU version */ + short uoh_max_chunks; /* max chunks in chunk obj */ + short uoh_num_chunks; /* num of actual chunks */ + u_int uoh_reserved1; + u_int uoh_reserved2; +}; + +struct uof_chunk_hdr { + char uch_id[UOF_OBJ_ID_LEN]; + u_int uch_offset; + u_int uch_size; +}; + +struct uof_str_tab { + u_int ust_table_len; /* length of table */ + u_int ust_reserved; /* reserved for future use */ + uint64_t ust_strings; /* pointer to string table. + * NULL terminated strings */ +}; + +#define AE_MODE_RELOAD_CTX_SHARED __BIT(12) +#define AE_MODE_SHARED_USTORE __BIT(11) +#define AE_MODE_LMEM1 __BIT(9) +#define AE_MODE_LMEM0 __BIT(8) +#define AE_MODE_NN_MODE __BITS(7, 4) +#define AE_MODE_CTX_MODE __BITS(3, 0) + +#define AE_MODE_NN_MODE_NEIGH 0 +#define AE_MODE_NN_MODE_SELF 1 +#define AE_MODE_NN_MODE_DONTCARE 0xff + +struct uof_image { + u_int ui_name; /* image name */ + u_int ui_ae_assigned; /* AccelEngines assigned */ + u_int ui_ctx_assigned; /* AccelEngine contexts assigned */ + u_int ui_cpu_type; /* cpu type */ + u_int ui_entry_address; /* entry uaddress */ + u_int ui_fill_pattern[2]; /* uword fill value */ + u_int ui_reloadable_size; /* size of reloadable ustore section */ + + u_char ui_sensitivity; /* + * case sensitivity: 0 = insensitive, + * 1 = sensitive + */ + u_char ui_reserved; /* reserved for future use */ + u_short ui_ae_mode; /* + * unused<15:14>, legacyMode<13>, + * reloadCtxShared<12>, sharedUstore<11>, + * ecc<10>, locMem1<9>, locMem0<8>, + * nnMode<7:4>, ctx<3:0> + */ + + u_short ui_max_ver; /* max cpu ver on which the image can run */ + u_short ui_min_ver; /* min cpu ver on which the image can run */ + + u_short ui_image_attrib; /* image attributes */ + u_short ui_reserved2; /* reserved for future use */ + + u_short ui_num_page_regions; /* number of page regions */ + u_short ui_num_pages; /* number of pages */ + + u_int ui_reg_tab; /* offset to register table */ + u_int ui_init_reg_sym_tab; /* reg/sym init table */ + u_int ui_sbreak_tab; /* offset to sbreak table */ + + u_int ui_app_metadata; /* application meta-data */ + /* ui_npages of code page follows this header */ +}; + +struct uof_obj_table { + u_int uot_nentries; /* number of table entries */ + /* uot_nentries of object follows */ +}; + +struct uof_ae_reg { + u_int uar_name; /* reg name string-table offset */ + u_int uar_vis_name; /* reg visible name string-table offset */ + u_short uar_type; /* reg type */ + u_short uar_addr; /* reg address */ + u_short uar_access_mode; /* uof_RegAccessMode_T: read/write/both/undef */ + u_char uar_visible; /* register visibility */ + u_char uar_reserved1; /* reserved for future use */ + u_short uar_ref_count; /* number of contiguous registers allocated */ + u_short uar_reserved2; /* reserved for future use */ + u_int uar_xoid; /* xfer order ID */ +}; + +enum uof_value_kind { + UNDEF_VAL, /* undefined value */ + CHAR_VAL, /* character value */ + SHORT_VAL, /* short value */ + INT_VAL, /* integer value */ + STR_VAL, /* string value */ + STRTAB_VAL, /* string table value */ + NUM_VAL, /* number value */ + EXPR_VAL /* expression value */ +}; + +enum uof_init_type { + INIT_EXPR, + INIT_REG, + INIT_REG_CTX, + INIT_EXPR_ENDIAN_SWAP +}; + +struct uof_init_reg_sym { + u_int uirs_name; /* symbol name */ + char uirs_init_type; /* 0=expr, 1=register, 2=ctxReg, + * 3=expr_endian_swap */ + char uirs_value_type; /* EXPR_VAL, STRTAB_VAL */ + char 
uirs_reg_type; /* register type: ae_reg_type */ + u_char uirs_ctx; /* AE context when initType=2 */ + u_int uirs_addr_offset; /* reg address, or sym-value offset */ + u_int uirs_value; /* integer value, or expression */ +}; + +struct uof_sbreak { + u_int us_page_num; /* page number */ + u_int us_virt_uaddr; /* virt uaddress */ + u_char us_sbreak_type; /* sbreak type */ + u_char us_reg_type; /* register type: ae_reg_type */ + u_short us_reserved1; /* reserved for future use */ + u_int us_addr_offset; /* branch target address or offset + * to be used with the reg value to + * calculate the target address */ + u_int us_reg_rddr; /* register address */ +}; +struct uof_code_page { + u_int ucp_page_region; /* page associated region */ + u_int ucp_page_num; /* code-page number */ + u_char ucp_def_page; /* default page indicator */ + u_char ucp_reserved2; /* reserved for future use */ + u_short ucp_reserved1; /* reserved for future use */ + u_int ucp_beg_vaddr; /* starting virtual uaddr */ + u_int ucp_beg_paddr; /* starting physical uaddr */ + u_int ucp_neigh_reg_tab; /* offset to neighbour-reg table */ + u_int ucp_uc_var_tab; /* offset to uC var table */ + u_int ucp_imp_var_tab; /* offset to import var table */ + u_int ucp_imp_expr_tab; /* offset to import expression table */ + u_int ucp_code_area; /* offset to code area */ +}; + +struct uof_code_area { + u_int uca_num_micro_words; /* number of micro words */ + u_int uca_uword_block_tab; /* offset to ublock table */ +}; + +struct uof_uword_block { + u_int uub_start_addr; /* start address */ + u_int uub_num_words; /* number of microwords */ + u_int uub_uword_offset; /* offset to the uwords */ + u_int uub_reserved; /* reserved for future use */ +}; + +struct uof_uword_fixup { + u_int uuf_name; /* offset to string table */ + u_int uuf_uword_address; /* micro word address */ + u_int uuf_expr_value; /* string table offset of expr string, or value */ + u_char uuf_val_type; /* VALUE_UNDEF, VALUE_NUM, VALUE_EXPR */ + u_char uuf_value_attrs; /* bit<0> (Scope: 0=global, 1=local), + * bit<1> (init: 0=no, 1=yes) */ + u_short uuf_reserved1; /* reserved for future use */ + char uuf_field_attrs[UOF_FIELD_POS_SIZE]; + /* field pos, size, and right shift value */ +}; + +struct uof_import_var { + u_int uiv_name; /* import var name string-table offset */ + u_char uiv_value_attrs; /* bit<0> (Scope: 0=global), + * bit<1> (init: 0=no, 1=yes) */ + u_char uiv_reserved1; /* reserved for future use */ + u_short uiv_reserved2; /* reserved for future use */ + uint64_t uiv_value; /* 64-bit imported value */ +}; + +struct uof_mem_val_attr { + u_int umva_byte_offset; /* byte-offset from the allocated memory */ + u_int umva_value; /* memory value */ +}; + +enum uof_mem_region { + SRAM_REGION, /* SRAM region */ + DRAM_REGION, /* DRAM0 region */ + DRAM1_REGION, /* DRAM1 region */ + LMEM_REGION, /* local memory region */ + SCRATCH_REGION, /* SCRATCH region */ + UMEM_REGION, /* micro-store region */ + RAM_REGION, /* RAM region */ + SHRAM_REGION, /* shared memory-0 region */ + SHRAM1_REGION, /* shared memory-1 region */ + SHRAM2_REGION, /* shared memory-2 region */ + SHRAM3_REGION, /* shared memory-3 region */ + SHRAM4_REGION, /* shared memory-4 region */ + SHRAM5_REGION /* shared memory-5 region */ +}; + +#define UOF_SCOPE_GLOBAL 0 +#define UOF_SCOPE_LOCAL 1 + +struct uof_init_mem { + u_int uim_sym_name; /* symbol name */ + char uim_region; /* memory region -- uof_mem_region */ + char uim_scope; /* visibility scope */ + u_short uim_reserved1; /* reserved for future use */ + u_int 
uim_addr; /* memory address */ + u_int uim_num_bytes; /* number of bytes */ + u_int uim_num_val_attr; /* number of values attributes */ + + /* uim_num_val_attr of uof_mem_val_attr follows this header */ +}; + +struct uof_var_mem_seg { + u_int uvms_sram_base; /* SRAM memory segment base addr */ + u_int uvms_sram_size; /* SRAM segment size bytes */ + u_int uvms_sram_alignment; /* SRAM segment alignment bytes */ + u_int uvms_sdram_base; /* DRAM0 memory segment base addr */ + u_int uvms_sdram_size; /* DRAM0 segment size bytes */ + u_int uvms_sdram_alignment; /* DRAM0 segment alignment bytes */ + u_int uvms_sdram1_base; /* DRAM1 memory segment base addr */ + u_int uvms_sdram1_size; /* DRAM1 segment size bytes */ + u_int uvms_sdram1_alignment; /* DRAM1 segment alignment bytes */ + u_int uvms_scratch_base; /* SCRATCH memory segment base addr */ + u_int uvms_scratch_size; /* SCRATCH segment size bytes */ + u_int uvms_scratch_alignment; /* SCRATCH segment alignment bytes */ +}; + +#define SUOF_OBJ_ID_LEN 8 +#define SUOF_FID 0x53554f46 +#define SUOF_MAJ_VER 0x0 +#define SUOF_MIN_VER 0x1 +#define SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long)) +#define SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long)) +#define CSS_FWSK_MODULUS_LEN 256 +#define CSS_FWSK_EXPONENT_LEN 4 +#define CSS_FWSK_PAD_LEN 252 +#define CSS_FWSK_PUB_LEN (CSS_FWSK_MODULUS_LEN + \ + CSS_FWSK_EXPONENT_LEN + \ + CSS_FWSK_PAD_LEN) +#define CSS_SIGNATURE_LEN 256 +#define CSS_AE_IMG_LEN (sizeof(struct simg_ae_mode) + \ + SIMG_AE_INIT_SEQ_LEN + \ + SIMG_AE_INSTS_LEN) +#define CSS_AE_SIMG_LEN (sizeof(struct css_hdr) + \ + CSS_FWSK_PUB_LEN + \ + CSS_SIGNATURE_LEN + \ + CSS_AE_IMG_LEN) +#define AE_IMG_OFFSET (sizeof(struct css_hdr) + \ + CSS_FWSK_MODULUS_LEN + \ + CSS_FWSK_EXPONENT_LEN + \ + CSS_SIGNATURE_LEN) +#define CSS_MAX_IMAGE_LEN 0x40000 + +struct fw_auth_desc { + u_int fad_img_len; + u_int fad_reserved; + u_int fad_css_hdr_high; + u_int fad_css_hdr_low; + u_int fad_img_high; + u_int fad_img_low; + u_int fad_signature_high; + u_int fad_signature_low; + u_int fad_fwsk_pub_high; + u_int fad_fwsk_pub_low; + u_int fad_img_ae_mode_data_high; + u_int fad_img_ae_mode_data_low; + u_int fad_img_ae_init_data_high; + u_int fad_img_ae_init_data_low; + u_int fad_img_ae_insts_high; + u_int fad_img_ae_insts_low; +}; + +struct auth_chunk { + struct fw_auth_desc ac_fw_auth_desc; + uint64_t ac_chunk_size; + uint64_t ac_chunk_bus_addr; +}; + +enum css_fwtype { + CSS_AE_FIRMWARE = 0, + CSS_MMP_FIRMWARE = 1 +}; + +struct css_hdr { + u_int css_module_type; + u_int css_header_len; + u_int css_header_ver; + u_int css_module_id; + u_int css_module_vendor; + u_int css_date; + u_int css_size; + u_int css_key_size; + u_int css_module_size; + u_int css_exponent_size; + u_int css_fw_type; + u_int css_reserved[21]; +}; + +struct simg_ae_mode { + u_int sam_file_id; + u_short sam_maj_ver; + u_short sam_min_ver; + u_int sam_dev_type; + u_short sam_devmax_ver; + u_short sam_devmin_ver; + u_int sam_ae_mask; + u_int sam_ctx_enables; + char sam_fw_type; + char sam_ctx_mode; + char sam_nn_mode; + char sam_lm0_mode; + char sam_lm1_mode; + char sam_scs_mode; + char sam_lm2_mode; + char sam_lm3_mode; + char sam_tindex_mode; + u_char sam_reserved[7]; + char sam_simg_name[256]; + char sam_appmeta_data[256]; +}; + +struct suof_file_hdr { + u_int sfh_file_id; + u_int sfh_check_sum; + char sfh_min_ver; + char sfh_maj_ver; + char sfh_fw_type; + char sfh_reserved; + u_short sfh_max_chunks; + u_short sfh_num_chunks; +}; + +struct suof_chunk_hdr { + char 
sch_chunk_id[SUOF_OBJ_ID_LEN]; + uint64_t sch_offset; + uint64_t sch_size; +}; + +struct suof_str_tab { + u_int sst_tab_length; + u_int sst_strings; +}; + +struct suof_obj_hdr { + u_int soh_img_length; + u_int soh_reserved; +}; + +/* -------------------------------------------------------------------------- */ +/* accel */ + +enum fw_slice { + FW_SLICE_NULL = 0, /* NULL slice type */ + FW_SLICE_CIPHER = 1, /* CIPHER slice type */ + FW_SLICE_AUTH = 2, /* AUTH slice type */ + FW_SLICE_DRAM_RD = 3, /* DRAM_RD Logical slice type */ + FW_SLICE_DRAM_WR = 4, /* DRAM_WR Logical slice type */ + FW_SLICE_COMP = 5, /* Compression slice type */ + FW_SLICE_XLAT = 6, /* Translator slice type */ + FW_SLICE_DELIMITER /* End delimiter */ +}; +#define MAX_FW_SLICE FW_SLICE_DELIMITER + +#define QAT_OPTIMAL_ALIGN_SHIFT 6 +#define QAT_OPTIMAL_ALIGN (1 << QAT_OPTIMAL_ALIGN_SHIFT) + +enum hw_auth_algo { + HW_AUTH_ALGO_NULL = 0, /* Null hashing */ + HW_AUTH_ALGO_SHA1 = 1, /* SHA1 hashing */ + HW_AUTH_ALGO_MD5 = 2, /* MD5 hashing */ + HW_AUTH_ALGO_SHA224 = 3, /* SHA-224 hashing */ + HW_AUTH_ALGO_SHA256 = 4, /* SHA-256 hashing */ + HW_AUTH_ALGO_SHA384 = 5, /* SHA-384 hashing */ + HW_AUTH_ALGO_SHA512 = 6, /* SHA-512 hashing */ + HW_AUTH_ALGO_AES_XCBC_MAC = 7, /* AES-XCBC-MAC hashing */ + HW_AUTH_ALGO_AES_CBC_MAC = 8, /* AES-CBC-MAC hashing */ + HW_AUTH_ALGO_AES_F9 = 9, /* AES F9 hashing */ + HW_AUTH_ALGO_GALOIS_128 = 10, /* Galois 128 bit hashing */ + HW_AUTH_ALGO_GALOIS_64 = 11, /* Galois 64 hashing */ + HW_AUTH_ALGO_KASUMI_F9 = 12, /* Kasumi F9 hashing */ + HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, /* UIA2/SNOW_3G F9 hashing */ + HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, + HW_AUTH_RESERVED_1 = 15, + HW_AUTH_RESERVED_2 = 16, + HW_AUTH_ALGO_SHA3_256 = 17, + HW_AUTH_RESERVED_3 = 18, + HW_AUTH_ALGO_SHA3_512 = 19, + HW_AUTH_ALGO_DELIMITER = 20 +}; + +enum hw_auth_mode { + HW_AUTH_MODE0, + HW_AUTH_MODE1, + HW_AUTH_MODE2, + HW_AUTH_MODE_DELIMITER +}; + +struct hw_auth_config { + uint32_t config; + /* Configuration used for setting up the slice */ + uint32_t reserved; + /* Reserved */ +}; + +#define HW_AUTH_CONFIG_SHA3_ALGO __BITS(22, 23) +#define HW_AUTH_CONFIG_SHA3_PADDING __BIT(16) +#define HW_AUTH_CONFIG_CMPLEN __BITS(14, 8) + /* Length of the digest when the hardware is asked to verify it */ +#define HW_AUTH_CONFIG_MODE __BITS(7, 4) +#define HW_AUTH_CONFIG_ALGO __BITS(3, 0) + +#define HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ + __SHIFTIN(mode, HW_AUTH_CONFIG_MODE) | \ + __SHIFTIN(algo, HW_AUTH_CONFIG_ALGO) | \ + __SHIFTIN(cmp_len, HW_AUTH_CONFIG_CMPLEN) + +struct hw_auth_counter { + uint32_t counter; /* Counter value */ + uint32_t reserved; /* Reserved */ +}; + +struct hw_auth_setup { + struct hw_auth_config auth_config; + /* Configuration word for the auth slice */ + struct hw_auth_counter auth_counter; + /* Auth counter value for this request */ +};
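+
+/*
+ * Illustrative sketch only (not part of this commit): filling in the auth
+ * configuration word for SHA-256 in mode 1 (HMAC-style).  The 32-byte
+ * SHA-256 digest length goes into the CMPLEN field so the hardware can
+ * verify the digest; whether the counter needs byte-swapping is left to
+ * the real driver.  qat_auth_setup_sha256_sketch() is a hypothetical name.
+ */
+#if 0
+static void
+qat_auth_setup_sha256_sketch(struct hw_auth_setup *setup)
+{
+	setup->auth_config.config = HW_AUTH_CONFIG_BUILD(HW_AUTH_MODE1,
+	    HW_AUTH_ALGO_SHA256, 32 /* SHA-256 digest bytes */);
+	setup->auth_config.reserved = 0;
+	setup->auth_counter.counter = 64;	/* SHA-256 block size */
+	setup->auth_counter.reserved = 0;
+}
+#endif
+ +#define HW_NULL_STATE1_SZ 32 +#define HW_MD5_STATE1_SZ 16 +#define HW_SHA1_STATE1_SZ 20 +#define HW_SHA224_STATE1_SZ 32 +#define HW_SHA256_STATE1_SZ 32 +#define HW_SHA3_256_STATE1_SZ 32 +#define HW_SHA384_STATE1_SZ 64 +#define HW_SHA512_STATE1_SZ 64 +#define HW_SHA3_512_STATE1_SZ 64 +#define HW_SHA3_224_STATE1_SZ 28 +#define HW_SHA3_384_STATE1_SZ 48 +#define HW_AES_XCBC_MAC_STATE1_SZ 16 +#define HW_AES_CBC_MAC_STATE1_SZ 16 +#define HW_AES_F9_STATE1_SZ 32 +#define HW_KASUMI_F9_STATE1_SZ 16 +#define HW_GALOIS_128_STATE1_SZ 16 +#define HW_SNOW_3G_UIA2_STATE1_SZ 8 +#define HW_ZUC_3G_EIA3_STATE1_SZ 8 +#define HW_NULL_STATE2_SZ 32 +#define HW_MD5_STATE2_SZ 16 +#define HW_SHA1_STATE2_SZ 20 +#define 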
HW_SHA224_STATE2_SZ 32 +#define HW_SHA256_STATE2_SZ 32 +#define HW_SHA3_256_STATE2_SZ 0 +#define HW_SHA384_STATE2_SZ 64 +#define HW_SHA512_STATE2_SZ 64 +#define HW_SHA3_512_STATE2_SZ 0 +#define HW_SHA3_224_STATE2_SZ 0 +#define HW_SHA3_384_STATE2_SZ 0 +#define HW_AES_XCBC_MAC_KEY_SZ 16 +#define HW_AES_CBC_MAC_KEY_SZ 16 +#define HW_AES_CCM_CBC_E_CTR0_SZ 16 +#define HW_F9_IK_SZ 16 +#define HW_F9_FK_SZ 16 +#define HW_KASUMI_F9_STATE2_SZ (HW_F9_IK_SZ + HW_F9_FK_SZ) +#define HW_AES_F9_STATE2_SZ HW_KASUMI_F9_STATE2_SZ +#define HW_SNOW_3G_UIA2_STATE2_SZ 24 +#define HW_ZUC_3G_EIA3_STATE2_SZ 32 +#define HW_GALOIS_H_SZ 16 +#define HW_GALOIS_LEN_A_SZ 8 +#define HW_GALOIS_E_CTR0_SZ 16 + +struct hw_auth_sha512 { + struct hw_auth_setup inner_setup; + /* Inner loop configuration word for the slice */ + uint8_t state1[HW_SHA512_STATE1_SZ]; + /* Slice state1 variable */ + struct hw_auth_setup outer_setup; + /* Outer configuration word for the slice */ + uint8_t state2[HW_SHA512_STATE2_SZ]; + /* Slice state2 variable */ +}; + +union hw_auth_algo_blk { + struct hw_auth_sha512 max; + /* This is the largest possible auth setup block size */ +}; + +enum hw_cipher_algo { + HW_CIPHER_ALGO_NULL = 0, /* Null ciphering */ + HW_CIPHER_ALGO_DES = 1, /* DES ciphering */ + HW_CIPHER_ALGO_3DES = 2, /* 3DES ciphering */ + HW_CIPHER_ALGO_AES128 = 3, /* AES-128 ciphering */ + HW_CIPHER_ALGO_AES192 = 4, /* AES-192 ciphering */ + HW_CIPHER_ALGO_AES256 = 5, /* AES-256 ciphering */ + HW_CIPHER_ALGO_ARC4 = 6, /* ARC4 ciphering */ + HW_CIPHER_ALGO_KASUMI = 7, /* Kasumi */ + HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, /* Snow_3G */ + HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, + HW_CIPHER_DELIMITER = 10 /* Delimiter type */ +}; + +enum hw_cipher_mode { + HW_CIPHER_ECB_MODE = 0, /* ECB mode */ + HW_CIPHER_CBC_MODE = 1, /* CBC mode */ + HW_CIPHER_CTR_MODE = 2, /* CTR mode */ + HW_CIPHER_F8_MODE = 3, /* F8 mode */ + HW_CIPHER_XTS_MODE = 6, + HW_CIPHER_MODE_DELIMITER = 7 /* Delimiter type */ +}; + +struct hw_cipher_config { + uint32_t val; /* Cipher slice configuration */ + uint32_t reserved; /* Reserved */ +}; + +#define CIPHER_CONFIG_CONVERT __BIT(9) +#define CIPHER_CONFIG_DIR __BIT(8) +#define CIPHER_CONFIG_MODE __BITS(7, 4) +#define CIPHER_CONFIG_ALGO __BITS(3, 0) +#define HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \ + __SHIFTIN(mode, CIPHER_CONFIG_MODE) | \ + __SHIFTIN(algo, CIPHER_CONFIG_ALGO) | \ + __SHIFTIN(convert, CIPHER_CONFIG_CONVERT) | \ + __SHIFTIN(dir, CIPHER_CONFIG_DIR) + +enum hw_cipher_dir { + HW_CIPHER_ENCRYPT = 0, /* encryption is required */ + HW_CIPHER_DECRYPT = 1, /* decryption is required */ +}; + +enum hw_cipher_convert { + HW_CIPHER_NO_CONVERT = 0, /* no key convert is required */ + HW_CIPHER_KEY_CONVERT = 1, /* key conversion is required */ +};
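+
+/*
+ * Illustrative sketch only (not part of this commit): building the cipher
+ * configuration word for AES-256-CBC encryption.  HW_CIPHER_KEY_CONVERT is
+ * typically used where the hardware must transform the key first (e.g. AES
+ * decryption), so plain encryption can use NO_CONVERT.
+ * qat_cipher_config_aes256_cbc_sketch() is a hypothetical name.
+ */
+#if 0
+static void
+qat_cipher_config_aes256_cbc_sketch(struct hw_cipher_config *cc)
+{
+	cc->val = HW_CIPHER_CONFIG_BUILD(HW_CIPHER_CBC_MODE,
+	    HW_CIPHER_ALGO_AES256, HW_CIPHER_NO_CONVERT, HW_CIPHER_ENCRYPT);
+	cc->reserved = 0;
+}
+#endif
+ +#define CIPHER_MODE_F8_KEY_SZ_MULT 2 +#define CIPHER_MODE_XTS_KEY_SZ_MULT 2 + +#define HW_DES_BLK_SZ 8 +#define HW_3DES_BLK_SZ 8 +#define HW_NULL_BLK_SZ 8 +#define HW_AES_BLK_SZ 16 +#define HW_KASUMI_BLK_SZ 8 +#define HW_SNOW_3G_BLK_SZ 8 +#define HW_ZUC_3G_BLK_SZ 8 +#define HW_NULL_KEY_SZ 256 +#define HW_DES_KEY_SZ 8 +#define HW_3DES_KEY_SZ 24 +#define HW_AES_128_KEY_SZ 16 +#define HW_AES_192_KEY_SZ 24 +#define HW_AES_256_KEY_SZ 32 +#define HW_AES_128_F8_KEY_SZ (HW_AES_128_KEY_SZ * \ + CIPHER_MODE_F8_KEY_SZ_MULT) +#define HW_AES_192_F8_KEY_SZ (HW_AES_192_KEY_SZ * \ + CIPHER_MODE_F8_KEY_SZ_MULT) +#define HW_AES_256_F8_KEY_SZ (HW_AES_256_KEY_SZ * \ + CIPHER_MODE_F8_KEY_SZ_MULT) +#define HW_AES_128_XTS_KEY_SZ (HW_AES_128_KEY_SZ * \ + CIPHER_MODE_XTS_KEY_SZ_MULT) +#define 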
HW_AES_256_XTS_KEY_SZ (HW_AES_256_KEY_SZ * \ + CIPHER_MODE_XTS_KEY_SZ_MULT) +#define HW_KASUMI_KEY_SZ 16 +#define HW_KASUMI_F8_KEY_SZ (HW_KASUMI_KEY_SZ * \ + CIPHER_MODE_F8_KEY_SZ_MULT) +#define HW_ARC4_KEY_SZ 256 +#define HW_SNOW_3G_UEA2_KEY_SZ 16 +#define HW_SNOW_3G_UEA2_IV_SZ 16 +#define HW_ZUC_3G_EEA3_KEY_SZ 16 +#define HW_ZUC_3G_EEA3_IV_SZ 16 +#define HW_MODE_F8_NUM_REG_TO_CLEAR 2 + +struct hw_cipher_aes256_f8 { + struct hw_cipher_config cipher_config; + /* Cipher configuration word for the slice set to + * AES-256 and the F8 mode */ + uint8_t key[HW_AES_256_F8_KEY_SZ]; + /* Cipher key */ +}; + +union hw_cipher_algo_blk { + struct hw_cipher_aes256_f8 max; /* AES-256 F8 Cipher */ + /* This is the largest possible cipher setup block size */ +}; + +struct flat_buffer_desc { + uint32_t data_len_in_bytes; + uint32_t reserved; + uint64_t phy_buffer; +}; + +struct buffer_list_desc { + uint64_t resrvd; + uint32_t num_buffers; + uint32_t reserved; +}; + +/* -------------------------------------------------------------------------- */ +/* look aside */ + +enum fw_la_cmd_id { + FW_LA_CMD_CIPHER, /* Cipher Request */ + FW_LA_CMD_AUTH, /* Auth Request */ + FW_LA_CMD_CIPHER_HASH, /* Cipher-Hash Request */ + FW_LA_CMD_HASH_CIPHER, /* Hash-Cipher Request */ + FW_LA_CMD_TRNG_GET_RANDOM, /* TRNG Get Random Request */ + FW_LA_CMD_TRNG_TEST, /* TRNG Test Request */ + FW_LA_CMD_SSL3_KEY_DERIVE, /* SSL3 Key Derivation Request */ + FW_LA_CMD_TLS_V1_1_KEY_DERIVE, /* TLS Key Derivation Request */ + FW_LA_CMD_TLS_V1_2_KEY_DERIVE, /* TLS Key Derivation Request */ + FW_LA_CMD_MGF1, /* MGF1 Request */ + FW_LA_CMD_AUTH_PRE_COMP, /* Auth Pre-Compute Request */ +#if 0 /* incompatible between qat 1.5 and 1.7 */ + FW_LA_CMD_CIPHER_CIPHER, /* Cipher-Cipher Request */ + FW_LA_CMD_HASH_HASH, /* Hash-Hash Request */ + FW_LA_CMD_CIPHER_PRE_COMP, /* Cipher Pre-Compute Request */ +#endif + FW_LA_CMD_DELIMITER, /* Delimiter type */ +}; + +#endif Property changes on: head/sys/dev/qat/qatreg.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/qat/qatvar.h =================================================================== --- head/sys/dev/qat/qatvar.h (nonexistent) +++ head/sys/dev/qat/qatvar.h (revision 367386) @@ -0,0 +1,1073 @@ +/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */ +/* $NetBSD: qatvar.h,v 1.2 2020/03/14 18:08:39 ad Exp $ */ + +/* + * Copyright (c) 2019 Internet Initiative Japan, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Copyright(c) 2007-2019 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* $FreeBSD$ */ + +#ifndef _DEV_PCI_QATVAR_H_ +#define _DEV_PCI_QATVAR_H_ + +#include <sys/counter.h> +#include <sys/malloc.h> + +#include <opencrypto/cryptodev.h> + +#define QAT_NSYMREQ 256 +#define QAT_NSYMCOOKIE ((QAT_NSYMREQ * 2 + 1) * 2) +#define QAT_NASYMREQ 64 +#define QAT_BATCH_SUBMIT_FREE_SPACE 2 + +#define QAT_EV_NAME_SIZE 32 +#define QAT_RING_NAME_SIZE 32 + +#define QAT_MAXSEG 32 /* max segments for sg dma */ +#define QAT_MAXLEN 65535 /* IP_MAXPACKET */ + +#define QAT_HB_INTERVAL 500 /* heartbeat msec */ +#define QAT_SSM_WDT 100 + +enum qat_chip_type { + QAT_CHIP_C2XXX = 0, /* NanoQAT: Atom C2000 */ + QAT_CHIP_C2XXX_IOV, + QAT_CHIP_C3XXX, /* Atom C3000 */ + QAT_CHIP_C3XXX_IOV, + QAT_CHIP_C62X, + QAT_CHIP_C62X_IOV, + QAT_CHIP_D15XX, + QAT_CHIP_D15XX_IOV, + QAT_CHIP_DH895XCC, + QAT_CHIP_DH895XCC_IOV, +}; + +enum qat_sku { + QAT_SKU_UNKNOWN = 0, + QAT_SKU_1, + QAT_SKU_2, + QAT_SKU_3, + QAT_SKU_4, + QAT_SKU_VF, +}; + +enum qat_ae_status { + QAT_AE_ENABLED = 1, + QAT_AE_ACTIVE, + QAT_AE_DISABLED +}; + +#define TIMEOUT_AE_RESET 100 +#define TIMEOUT_AE_CHECK 10000 +#define TIMEOUT_AE_CSR 500 +#define AE_EXEC_CYCLE 20 + +#define QAT_UOF_MAX_PAGE 1 +#define QAT_UOF_MAX_PAGE_REGION 1 + +struct qat_dmamem { + bus_dma_tag_t qdm_dma_tag; + bus_dmamap_t qdm_dma_map; + bus_size_t qdm_dma_size; + bus_dma_segment_t qdm_dma_seg; + void *qdm_dma_vaddr; +}; + +/* Valid internal ring size values */ +#define QAT_RING_SIZE_128 0x01 +#define QAT_RING_SIZE_256 0x02 +#define QAT_RING_SIZE_512 0x03 +#define QAT_RING_SIZE_4K 0x06 +#define QAT_RING_SIZE_16K 0x08 +#define QAT_RING_SIZE_4M 0x10 +#define QAT_MIN_RING_SIZE QAT_RING_SIZE_128 +#define QAT_MAX_RING_SIZE QAT_RING_SIZE_4M +#define QAT_DEFAULT_RING_SIZE QAT_RING_SIZE_16K + +/* Valid internal msg size values */ +#define QAT_MSG_SIZE_32 0x01 +#define QAT_MSG_SIZE_64 0x02 +#define QAT_MSG_SIZE_128 0x04 +#define QAT_MIN_MSG_SIZE QAT_MSG_SIZE_32 +#define QAT_MAX_MSG_SIZE QAT_MSG_SIZE_128 + +/* Size to bytes conversion macros for ring and msg size values */ +#define QAT_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5) +#define QAT_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5) +#define QAT_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7) +#define QAT_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) + +/* Minimum ring buffer size for memory allocation */ +#define QAT_RING_SIZE_BYTES_MIN(SIZE) \ + ((SIZE < QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K)) ? \ + QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K) : SIZE) +#define QAT_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) +#define QAT_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \ + SIZE) & ~0x4) +/* Max outstanding requests */ +#define QAT_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \ + ((((1 << (RING_SIZE - 1)) << 3) >> QAT_SIZE_TO_POW(MSG_SIZE)) - 1) + +#define QAT_RING_PATTERN 0x7f
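+ +/* + * Worked example of the conversions above: the default QAT_RING_SIZE_16K + * (0x08) ring is ((1 << (0x08 - 1)) << 7) = 16384 bytes, a QAT_MSG_SIZE_64 + * (0x02) message is (0x02 << 5) = 64 bytes, and QAT_SIZE_TO_POW(0x02) is 2, + * so QAT_MAX_INFLIGHTS(QAT_RING_SIZE_16K, QAT_MSG_SIZE_64) evaluates to + * (((1 << 7) << 3) >> 2) - 1 = 255, one less than the 16384 / 64 = 256 + * message slots in the ring. + */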
+ +struct qat_softc; + +typedef int (*qat_cb_t)(struct qat_softc *, void *, void *); + +struct qat_ring { + struct mtx qr_ring_mtx; /* Lock per ring */ + bool qr_need_wakeup; + void *qr_ring_vaddr; + uint32_t * volatile qr_inflight; /* tx/rx shared */ + uint32_t qr_head; + uint32_t qr_tail; + uint8_t qr_msg_size; + uint8_t qr_ring_size; + uint32_t qr_ring; /* ring number in bank */ + uint32_t qr_bank; /* bank number in device */ + uint32_t qr_ring_id; + uint32_t qr_ring_mask; + qat_cb_t qr_cb; + void *qr_cb_arg; + struct qat_dmamem qr_dma; + bus_addr_t qr_ring_paddr; + + const char *qr_name; +}; + +struct qat_bank { + struct qat_softc *qb_sc; /* back pointer to softc */ + uint32_t qb_intr_mask; /* current interrupt mask */ + uint32_t qb_allocated_rings; /* current allocated ring bitfield */ + uint32_t qb_coalescing_time; /* timer in nano sec, 0: disabled */ +#define COALESCING_TIME_INTERVAL_DEFAULT 10000 +#define COALESCING_TIME_INTERVAL_MIN 500 +#define COALESCING_TIME_INTERVAL_MAX 0xfffff + uint32_t qb_bank; /* bank index */ + struct mtx qb_bank_mtx; + struct resource *qb_ih; + void *qb_ih_cookie; + + struct qat_ring qb_et_rings[MAX_RING_PER_BANK]; + +}; + +struct qat_ap_bank { + uint32_t qab_nf_mask; + uint32_t qab_nf_dest; + uint32_t qab_ne_mask; + uint32_t qab_ne_dest; +}; + +struct qat_ae_page { + struct qat_ae_page *qap_next; + struct qat_uof_page *qap_page; + struct qat_ae_region *qap_region; + u_int qap_flags; +}; + +#define QAT_AE_PAGA_FLAG_WAITING (1 << 0) + +struct qat_ae_region { + struct qat_ae_page *qar_loaded_page; + STAILQ_HEAD(, qat_ae_page) qar_waiting_pages; +}; + +struct qat_ae_slice { + u_int qas_assigned_ctx_mask; + struct qat_ae_region qas_regions[QAT_UOF_MAX_PAGE_REGION]; + struct qat_ae_page qas_pages[QAT_UOF_MAX_PAGE]; + struct qat_ae_page *qas_cur_pages[MAX_AE_CTX]; + struct qat_uof_image *qas_image; +}; + +#define QAT_AE(sc, ae) \ + ((sc)->sc_ae[ae]) + +struct qat_ae { + u_int qae_state; /* AE state */ + u_int qae_ustore_size; /* micro-store size */ + u_int qae_free_addr; /* free micro-store address */ + u_int qae_free_size; /* free micro-store size */ + u_int qae_live_ctx_mask; /* live context mask */ + u_int qae_ustore_dram_addr; /* micro-store DRAM address */ + u_int qae_reload_size; /* reloadable code size */ + + /* aefw */ + u_int qae_num_slices; + struct qat_ae_slice qae_slices[MAX_AE_CTX]; + u_int qae_reloc_ustore_dram; /* reloadable ustore-dram address */ + u_int qae_effect_ustore_size; /* effective AE ustore size */ + u_int qae_shareable_ustore; +}; + +struct qat_mof { + void *qmf_sym; /* SYM_OBJS in sc_fw_mof */ + size_t qmf_sym_size; + void *qmf_uof_objs; /* UOF_OBJS in sc_fw_mof */ + size_t qmf_uof_objs_size; + void *qmf_suof_objs; /* SUOF_OBJS in sc_fw_mof */ + size_t qmf_suof_objs_size; +}; + +struct qat_ae_batch_init { + u_int qabi_ae; + u_int qabi_addr; + u_int *qabi_value; + u_int qabi_size; + STAILQ_ENTRY(qat_ae_batch_init) qabi_next; +}; + +STAILQ_HEAD(qat_ae_batch_init_list, qat_ae_batch_init); + +/* overwritten struct uof_uword_block */ +struct qat_uof_uword_block { + u_int quub_start_addr; /* start address */ + u_int quub_num_words; /* number of microwords */ + uint64_t quub_micro_words; /* pointer to the uwords */ +};
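+ +/* + * Note: quub_micro_words carries a host pointer to the microwords yet is + * declared uint64_t, presumably so that the in-memory layout keeps matching + * the struct uof_uword_block it overwrites (see the comment above). + */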
+ +struct qat_uof_page { + u_int qup_page_num; /* page number */ + u_int qup_def_page; /* default page */ + u_int qup_page_region; /* region of page */ + u_int qup_beg_vaddr; /* begin virtual address */ + u_int qup_beg_paddr; /* begin physical address */ + + u_int qup_num_uc_var; /* num of uC var in array */ + struct uof_uword_fixup *qup_uc_var; + /* array of uC variables */ + u_int qup_num_imp_var; /* num of import var in array */ + struct uof_import_var *qup_imp_var; + /* array of import variables */ + u_int qup_num_imp_expr; /* num of import expr in array */ + struct uof_uword_fixup *qup_imp_expr; + /* array of import expressions */ + u_int qup_num_neigh_reg; /* num of neigh-reg in array */ + struct uof_uword_fixup *qup_neigh_reg; + /* array of neigh-reg assignments */ + u_int qup_num_micro_words; /* number of microwords in the seg */ + + u_int qup_num_uw_blocks; /* number of uword blocks */ + struct qat_uof_uword_block *qup_uw_blocks; + /* array of uword blocks */ +}; + +struct qat_uof_image { + struct uof_image *qui_image; /* image pointer */ + struct qat_uof_page qui_pages[QAT_UOF_MAX_PAGE]; + /* array of pages */ + + u_int qui_num_ae_reg; /* num of registers */ + struct uof_ae_reg *qui_ae_reg; /* array of registers */ + + u_int qui_num_init_reg_sym; /* num of reg/sym init values */ + struct uof_init_reg_sym *qui_init_reg_sym; + /* array of reg/sym init values */ + + u_int qui_num_sbreak; /* num of sbreak values */ + struct qui_sbreak *qui_sbreak; /* array of sbreak values */ + + u_int qui_num_uwords_used; + /* highest uword address referenced + 1 */ +}; + +struct qat_aefw_uof { + size_t qafu_size; /* uof size */ + struct uof_obj_hdr *qafu_obj_hdr; /* UOF_OBJS */ + + void *qafu_str_tab; + size_t qafu_str_tab_size; + + u_int qafu_num_init_mem; + struct uof_init_mem *qafu_init_mem; + size_t qafu_init_mem_size; + + struct uof_var_mem_seg *qafu_var_mem_seg; + + struct qat_ae_batch_init_list qafu_lm_init[MAX_AE]; + size_t qafu_num_lm_init[MAX_AE]; + size_t qafu_num_lm_init_inst[MAX_AE]; + + u_int qafu_num_imgs; /* number of uof image */ + struct qat_uof_image qafu_imgs[MAX_NUM_AE * MAX_AE_CTX]; + /* uof images */ +}; + +#define QAT_SERVICE_CRYPTO_A (1 << 0) +#define QAT_SERVICE_CRYPTO_B (1 << 1) + +struct qat_admin_rings { + uint32_t qadr_active_aes_per_accel; + uint8_t qadr_srv_mask[MAX_AE_PER_ACCEL]; + + struct qat_dmamem qadr_dma; + struct fw_init_ring_table *qadr_master_ring_tbl; + struct fw_init_ring_table *qadr_cya_ring_tbl; + struct fw_init_ring_table *qadr_cyb_ring_tbl; + + struct qat_ring *qadr_admin_tx; + struct qat_ring *qadr_admin_rx; +}; + +struct qat_accel_init_cb { + int qaic_status; +}; + +struct qat_admin_comms { + struct qat_dmamem qadc_dma; + struct qat_dmamem qadc_const_tbl_dma; + struct qat_dmamem qadc_hb_dma; +}; + +#define QAT_PID_MINOR_REV 0xf +#define QAT_PID_MAJOR_REV (0xf << 4) + +struct qat_suof_image { + char *qsi_simg_buf; + u_long qsi_simg_len; + char *qsi_css_header; + char *qsi_css_key; + char *qsi_css_signature; + char *qsi_css_simg; + u_long qsi_simg_size; + u_int qsi_ae_num; + u_int qsi_ae_mask; + u_int qsi_fw_type; + u_long qsi_simg_name; + u_long qsi_appmeta_data; + struct qat_dmamem qsi_dma; +}; + +struct qat_aefw_suof { + u_int qafs_file_id; + u_int qafs_check_sum; + char qafs_min_ver; + char qafs_maj_ver; + char qafs_fw_type; + char *qafs_suof_buf; + u_int qafs_suof_size; + char *qafs_sym_str; + u_int qafs_sym_size; + u_int qafs_num_simgs; + struct qat_suof_image *qafs_simg; +};
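+ +/* + * The two containers above correspond to the two firmware formats the driver + * loads: plain UOF images (qat_aefw_uof) for qat 1.5 parts, and SUOF images + * (qat_aefw_suof) for qat 1.7 parts with qhw_fw_auth set, whose qsi_css_* + * fields apparently hold the header, key and signature used to authenticate + * each image before it is loaded. + */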
+ +enum qat_sym_hash_algorithm { + QAT_SYM_HASH_NONE = 0, + QAT_SYM_HASH_MD5 = 1, + QAT_SYM_HASH_SHA1 = 2, + QAT_SYM_HASH_SHA224 = 3, + QAT_SYM_HASH_SHA256 = 4, + QAT_SYM_HASH_SHA384 = 5, + QAT_SYM_HASH_SHA512 = 6, + QAT_SYM_HASH_AES_XCBC = 7, + QAT_SYM_HASH_AES_CCM = 8, + QAT_SYM_HASH_AES_GCM = 9, + QAT_SYM_HASH_KASUMI_F9 = 10, + QAT_SYM_HASH_SNOW3G_UIA2 = 11, + QAT_SYM_HASH_AES_CMAC = 12, + QAT_SYM_HASH_AES_GMAC = 13, + QAT_SYM_HASH_AES_CBC_MAC = 14, +}; + +#define QAT_HASH_MD5_BLOCK_SIZE 64 +#define QAT_HASH_MD5_DIGEST_SIZE 16 +#define QAT_HASH_MD5_STATE_SIZE 16 +#define QAT_HASH_SHA1_BLOCK_SIZE 64 +#define QAT_HASH_SHA1_DIGEST_SIZE 20 +#define QAT_HASH_SHA1_STATE_SIZE 20 +#define QAT_HASH_SHA224_BLOCK_SIZE 64 +#define QAT_HASH_SHA224_DIGEST_SIZE 28 +#define QAT_HASH_SHA224_STATE_SIZE 32 +#define QAT_HASH_SHA256_BLOCK_SIZE 64 +#define QAT_HASH_SHA256_DIGEST_SIZE 32 +#define QAT_HASH_SHA256_STATE_SIZE 32 +#define QAT_HASH_SHA384_BLOCK_SIZE 128 +#define QAT_HASH_SHA384_DIGEST_SIZE 48 +#define QAT_HASH_SHA384_STATE_SIZE 64 +#define QAT_HASH_SHA512_BLOCK_SIZE 128 +#define QAT_HASH_SHA512_DIGEST_SIZE 64 +#define QAT_HASH_SHA512_STATE_SIZE 64 +#define QAT_HASH_XCBC_PRECOMP_KEY_NUM 3 +#define QAT_HASH_XCBC_MAC_BLOCK_SIZE 16 +#define QAT_HASH_XCBC_MAC_128_DIGEST_SIZE 16 +#define QAT_HASH_CMAC_BLOCK_SIZE 16 +#define QAT_HASH_CMAC_128_DIGEST_SIZE 16 +#define QAT_HASH_AES_CCM_BLOCK_SIZE 16 +#define QAT_HASH_AES_CCM_DIGEST_SIZE 16 +#define QAT_HASH_AES_GCM_BLOCK_SIZE 16 +#define QAT_HASH_AES_GCM_DIGEST_SIZE 16 +#define QAT_HASH_AES_GCM_STATE_SIZE 16 +#define QAT_HASH_KASUMI_F9_BLOCK_SIZE 8 +#define QAT_HASH_KASUMI_F9_DIGEST_SIZE 4 +#define QAT_HASH_SNOW3G_UIA2_BLOCK_SIZE 8 +#define QAT_HASH_SNOW3G_UIA2_DIGEST_SIZE 4 +#define QAT_HASH_AES_CBC_MAC_BLOCK_SIZE 16 +#define QAT_HASH_AES_CBC_MAC_DIGEST_SIZE 16 +#define QAT_HASH_AES_GCM_ICV_SIZE_8 8 +#define QAT_HASH_AES_GCM_ICV_SIZE_12 12 +#define QAT_HASH_AES_GCM_ICV_SIZE_16 16 +#define QAT_HASH_AES_CCM_ICV_SIZE_MIN 4 +#define QAT_HASH_AES_CCM_ICV_SIZE_MAX 16 +#define QAT_HASH_IPAD_BYTE 0x36 +#define QAT_HASH_OPAD_BYTE 0x5c +#define QAT_HASH_IPAD_4_BYTES 0x36363636 +#define QAT_HASH_OPAD_4_BYTES 0x5c5c5c5c +#define QAT_HASH_KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA + +#define QAT_SYM_XCBC_STATE_SIZE ((QAT_HASH_XCBC_MAC_BLOCK_SIZE) * 3) +#define QAT_SYM_CMAC_STATE_SIZE ((QAT_HASH_CMAC_BLOCK_SIZE) * 3) + +struct qat_sym_hash_alg_info { + uint32_t qshai_digest_len; /* Digest length in bytes */ + uint32_t qshai_block_len; /* Block length in bytes */ + uint32_t qshai_state_size; /* size of above state in bytes */ + const uint8_t *qshai_init_state; /* Initial state */ + + const struct auth_hash *qshai_sah; /* software auth hash */ + uint32_t qshai_state_offset; /* offset to state in *_CTX */ + uint32_t qshai_state_word; +}; + +struct qat_sym_hash_qat_info { + uint32_t qshqi_algo_enc; /* QAT Algorithm encoding */ + uint32_t qshqi_auth_counter; /* Counter value for Auth */ + uint32_t qshqi_state1_len; /* QAT state1 length in bytes */ + uint32_t qshqi_state2_len; /* QAT state2 length in bytes */ +}; + +struct qat_sym_hash_def { + const struct qat_sym_hash_alg_info *qshd_alg; + const struct qat_sym_hash_qat_info *qshd_qat; +}; + +#define QAT_SYM_REQ_PARAMS_SIZE_MAX (24 + 32) +/* Reserve enough space for cipher and authentication request params */ +/* Basis of values are guaranteed in qat_hw*var.h with CTASSERT */ + +#define QAT_SYM_REQ_PARAMS_SIZE_PADDED \ + roundup(QAT_SYM_REQ_PARAMS_SIZE_MAX, QAT_OPTIMAL_ALIGN) +/* Pad out to 64-byte multiple to ensure optimal alignment of next field */
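+/* + * For example, QAT_SYM_REQ_PARAMS_SIZE_MAX is 24 + 32 = 56 bytes, so with + * QAT_OPTIMAL_ALIGN presumably 64 (per the 64-byte multiple noted above), + * QAT_SYM_REQ_PARAMS_SIZE_PADDED works out to 64 bytes. + */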
+ +#define QAT_SYM_KEY_TLS_PREFIX_SIZE (128) +/* Hash Prefix size in bytes for TLS (128 = MAX = SHA2 (384, 512)) */ + +#define QAT_SYM_KEY_MAX_HASH_STATE_BUFFER \ + (QAT_SYM_KEY_TLS_PREFIX_SIZE * 2) +/* hash state prefix buffer structure that holds the maximum sized secret */ + +#define QAT_SYM_HASH_BUFFER_LEN QAT_HASH_SHA512_STATE_SIZE +/* Buffer length to hold 16 byte MD5 key and 20 byte SHA1 key */ + +#define QAT_GCM_AAD_SIZE_MAX 240 +/* Maximum AAD size */ + +#define QAT_AES_GCM_AAD_ALIGN 16 + +struct qat_sym_bulk_cookie { + uint8_t qsbc_req_params_buf[QAT_SYM_REQ_PARAMS_SIZE_PADDED]; + /* memory block reserved for request params + * NOTE: Field must be correctly aligned in memory for access by QAT + * engine */ + struct qat_crypto *qsbc_crypto; + struct qat_session *qsbc_session; + /* Session context */ + void *qsbc_cb_tag; + /* correlator supplied by the client */ + uint8_t qsbc_msg[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)]; + /* QAT request message */ +} __aligned(QAT_OPTIMAL_ALIGN); + +/* Basis of values are guaranteed in qat_hw*var.h with CTASSERT */ +#define HASH_CONTENT_DESC_SIZE 176 +#define CIPHER_CONTENT_DESC_SIZE 64 + +#define CONTENT_DESC_MAX_SIZE roundup( \ + HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE, \ + QAT_OPTIMAL_ALIGN) + +struct qat_sym_cookie { + union qat_sym_cookie_u { + /* should be 64-byte aligned */ + struct qat_sym_bulk_cookie qsc_bulk_cookie; + /* symmetric bulk cookie */ +#ifdef notyet + struct qat_sym_key_cookie qsc_key_cookie; + /* symmetric key cookie */ + struct qat_sym_nrbg_cookie qsc_nrbg_cookie; + /* symmetric NRBG cookie */ +#endif + } u; + + /* should be 64-byte aligned */ + struct buffer_list_desc qsc_buf_list; + struct flat_buffer_desc qsc_flat_bufs[QAT_MAXSEG]; /* should be here */ + + bus_dmamap_t qsc_self_dmamap; /* self DMA mapping and + end of DMA region */ + bus_dma_tag_t qsc_self_dma_tag; + + uint8_t qsc_iv_buf[EALG_MAX_BLOCK_LEN]; + uint8_t qsc_auth_res[QAT_SYM_HASH_BUFFER_LEN]; + uint8_t qsc_gcm_aad[QAT_GCM_AAD_SIZE_MAX]; + uint8_t qsc_content_desc[CONTENT_DESC_MAX_SIZE]; + + bus_dmamap_t qsc_buf_dmamap; /* qsc_flat_bufs DMA mapping */ + bus_dma_tag_t qsc_buf_dma_tag; + void *qsc_buf; + + bus_addr_t qsc_bulk_req_params_buf_paddr; + bus_addr_t qsc_buffer_list_desc_paddr; + bus_addr_t qsc_iv_buf_paddr; + bus_addr_t qsc_auth_res_paddr; + bus_addr_t qsc_gcm_aad_paddr; + bus_addr_t qsc_content_desc_paddr; +}; + +CTASSERT(offsetof(struct qat_sym_cookie, + u.qsc_bulk_cookie.qsbc_req_params_buf) % QAT_OPTIMAL_ALIGN == 0); +CTASSERT(offsetof(struct qat_sym_cookie, qsc_buf_list) % QAT_OPTIMAL_ALIGN == 0); + +#define MAX_CIPHER_SETUP_BLK_SZ \ + (sizeof(struct hw_cipher_config) + \ + 2 * HW_KASUMI_KEY_SZ + 2 * HW_KASUMI_BLK_SZ) +#define MAX_HASH_SETUP_BLK_SZ sizeof(union hw_auth_algo_blk) + +struct qat_crypto_desc { + uint8_t qcd_content_desc[CONTENT_DESC_MAX_SIZE]; /* must be first */ + /* used only for qat 1.5 */ + uint8_t qcd_hash_state_prefix_buf[QAT_GCM_AAD_SIZE_MAX]; + + bus_addr_t qcd_desc_paddr; + bus_addr_t qcd_hash_state_paddr; + + enum fw_slice qcd_slices[MAX_FW_SLICE]; + enum fw_la_cmd_id qcd_cmd_id; + enum hw_cipher_dir qcd_cipher_dir; + + /* content desc info */ + uint8_t qcd_hdr_sz; /* in quad words */ + uint8_t qcd_hw_blk_sz; /* in quad words */ + uint32_t qcd_cipher_offset; + uint32_t qcd_auth_offset; + /* hash info */ + uint8_t qcd_state_storage_sz; /* in quad words */ + uint32_t qcd_gcm_aad_sz_offset1; + uint32_t qcd_gcm_aad_sz_offset2; + /* cipher info */ + uint16_t qcd_cipher_blk_sz;
/* in bytes */ + uint16_t qcd_auth_sz; /* in bytes */ + + uint8_t qcd_req_cache[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)]; +} __aligned(QAT_OPTIMAL_ALIGN); + +/* should be aligned to 64 bytes */ +struct qat_session { + struct qat_crypto_desc *qs_dec_desc; /* should be at top of struct */ + /* decrypt or auth then decrypt or auth */ + + struct qat_crypto_desc *qs_enc_desc; + /* encrypt or encrypt then auth */ + + struct qat_dmamem qs_desc_mem; + + enum hw_cipher_algo qs_cipher_algo; + enum hw_cipher_mode qs_cipher_mode; + enum hw_auth_algo qs_auth_algo; + enum hw_auth_mode qs_auth_mode; + + const uint8_t *qs_cipher_key; + int qs_cipher_klen; + const uint8_t *qs_auth_key; + int qs_auth_klen; + int qs_auth_mlen; + + uint32_t qs_status; +#define QAT_SESSION_STATUS_ACTIVE (1 << 0) +#define QAT_SESSION_STATUS_FREEING (1 << 1) + uint32_t qs_inflight; + int qs_aad_length; + bool qs_need_wakeup; + + struct mtx qs_session_mtx; +}; + +struct qat_crypto_bank { + uint16_t qcb_bank; + + struct qat_ring *qcb_sym_tx; + struct qat_ring *qcb_sym_rx; + + struct qat_dmamem qcb_symck_dmamems[QAT_NSYMCOOKIE]; + struct qat_sym_cookie *qcb_symck_free[QAT_NSYMCOOKIE]; + uint32_t qcb_symck_free_count; + + struct mtx qcb_bank_mtx; + + char qcb_ring_names[2][QAT_RING_NAME_SIZE]; /* sym tx,rx */ +}; + +struct qat_crypto { + struct qat_softc *qcy_sc; + uint32_t qcy_bank_mask; + uint16_t qcy_num_banks; + + int32_t qcy_cid; /* OpenCrypto driver ID */ + + struct qat_crypto_bank *qcy_banks; /* array of qat_crypto_bank */ + + uint32_t qcy_session_free_count; + + struct mtx qcy_crypto_mtx; +}; + +struct qat_hw { + int8_t qhw_sram_bar_id; + int8_t qhw_misc_bar_id; + int8_t qhw_etr_bar_id; + + bus_size_t qhw_cap_global_offset; + bus_size_t qhw_ae_offset; + bus_size_t qhw_ae_local_offset; + bus_size_t qhw_etr_bundle_size; + + /* crypto processing callbacks */ + size_t qhw_crypto_opaque_offset; + void (*qhw_crypto_setup_req_params)(struct qat_crypto_bank *, + struct qat_session *, struct qat_crypto_desc const *, + struct qat_sym_cookie *, struct cryptop *); + void (*qhw_crypto_setup_desc)(struct qat_crypto *, struct qat_session *, + struct qat_crypto_desc *); + + uint8_t qhw_num_banks; /* max number of banks */ + uint8_t qhw_num_ap_banks; /* max number of AutoPush banks */ + uint8_t qhw_num_rings_per_bank; /* rings per bank */ + uint8_t qhw_num_accel; /* max number of accelerators */ + uint8_t qhw_num_engines; /* max number of accelerator engines */ + uint8_t qhw_tx_rx_gap; + uint32_t qhw_tx_rings_mask; + uint32_t qhw_clock_per_sec; + bool qhw_fw_auth; + uint32_t qhw_fw_req_size; + uint32_t qhw_fw_resp_size; + + uint8_t qhw_ring_sym_tx; + uint8_t qhw_ring_sym_rx; + uint8_t qhw_ring_asym_tx; + uint8_t qhw_ring_asym_rx; + + /* MSIx */ + uint32_t qhw_msix_ae_vec_gap; /* gap to ae vec from bank */ + + const char *qhw_mof_fwname; + const char *qhw_mmp_fwname; + + uint32_t qhw_prod_type; /* cpu type */ + + /* setup callbacks */ + uint32_t (*qhw_get_accel_mask)(struct qat_softc *); + uint32_t (*qhw_get_ae_mask)(struct qat_softc *); + enum qat_sku (*qhw_get_sku)(struct qat_softc *); + uint32_t (*qhw_get_accel_cap)(struct qat_softc *); + const char *(*qhw_get_fw_uof_name)(struct qat_softc *); + void (*qhw_enable_intr)(struct qat_softc *); + void (*qhw_init_etr_intr)(struct qat_softc *, int); + int (*qhw_init_admin_comms)(struct qat_softc *); + int (*qhw_send_admin_init)(struct qat_softc *); + int (*qhw_init_arb)(struct qat_softc *); + void (*qhw_get_arb_mapping)(struct qat_softc *, const uint32_t **); + void
(*qhw_enable_error_correction)(struct qat_softc *); + int (*qhw_check_uncorrectable_error)(struct qat_softc *); + void (*qhw_print_err_registers)(struct qat_softc *); + void (*qhw_disable_error_interrupts)(struct qat_softc *); + int (*qhw_check_slice_hang)(struct qat_softc *); + int (*qhw_set_ssm_wdtimer)(struct qat_softc *); +}; + + +/* sc_flags */ +#define QAT_FLAG_ESRAM_ENABLE_AUTO_INIT (1 << 0) +#define QAT_FLAG_SHRAM_WAIT_READY (1 << 1) + +/* sc_accel_cap */ +#define QAT_ACCEL_CAP_CRYPTO_SYMMETRIC (1 << 0) +#define QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC (1 << 1) +#define QAT_ACCEL_CAP_CIPHER (1 << 2) +#define QAT_ACCEL_CAP_AUTHENTICATION (1 << 3) +#define QAT_ACCEL_CAP_REGEX (1 << 4) +#define QAT_ACCEL_CAP_COMPRESSION (1 << 5) +#define QAT_ACCEL_CAP_LZS_COMPRESSION (1 << 6) +#define QAT_ACCEL_CAP_RANDOM_NUMBER (1 << 7) +#define QAT_ACCEL_CAP_ZUC (1 << 8) +#define QAT_ACCEL_CAP_SHA3 (1 << 9) +#define QAT_ACCEL_CAP_KPT (1 << 10) + +#define QAT_ACCEL_CAP_BITS \ + "\177\020" \ + "b\x0a" "KPT\0" \ + "b\x09" "SHA3\0" \ + "b\x08" "ZUC\0" \ + "b\x07" "RANDOM_NUMBER\0" \ + "b\x06" "LZS_COMPRESSION\0" \ + "b\x05" "COMPRESSION\0" \ + "b\x04" "REGEX\0" \ + "b\x03" "AUTHENTICATION\0" \ + "b\x02" "CIPHER\0" \ + "b\x01" "CRYPTO_ASYMMETRIC\0" \ + "b\x00" "CRYPTO_SYMMETRIC\0" + +#define QAT_HI_PRIO_RING_WEIGHT 0xfc +#define QAT_LO_PRIO_RING_WEIGHT 0xfe +#define QAT_DEFAULT_RING_WEIGHT 0xff +#define QAT_DEFAULT_PVL 0 + +struct firmware; +struct resource; + +struct qat_softc { + device_t sc_dev; + + struct resource *sc_res[MAX_BARS]; + int sc_rid[MAX_BARS]; + bus_space_tag_t sc_csrt[MAX_BARS]; + bus_space_handle_t sc_csrh[MAX_BARS]; + + uint32_t sc_ae_num; + uint32_t sc_ae_mask; + + struct qat_crypto sc_crypto; /* crypto services */ + + struct qat_hw sc_hw; + + uint8_t sc_rev; + enum qat_sku sc_sku; + uint32_t sc_flags; + + uint32_t sc_accel_num; + uint32_t sc_accel_mask; + uint32_t sc_accel_cap; + + struct qat_admin_rings sc_admin_rings; /* use only for qat 1.5 */ + struct qat_admin_comms sc_admin_comms; /* use only for qat 1.7 */ + + /* ETR */ + struct qat_bank *sc_etr_banks; /* array of etr banks */ + struct qat_ap_bank *sc_etr_ap_banks; /* array of etr auto push banks */ + + /* AE */ + struct qat_ae sc_ae[MAX_NUM_AE]; + + /* Interrupt */ + struct resource *sc_ih; /* ae cluster ih */ + void *sc_ih_cookie; /* ae cluster ih cookie */ + + /* Counters */ + counter_u64_t sc_gcm_aad_restarts; + counter_u64_t sc_gcm_aad_updates; + counter_u64_t sc_ring_full_restarts; + + /* Firmware */ + void *sc_fw_mof; /* mof data */ + size_t sc_fw_mof_size; /* mof size */ + struct qat_mof sc_mof; /* mof sections */ + + const char *sc_fw_uof_name; /* uof/suof name in mof */ + + void *sc_fw_uof; /* uof head */ + size_t sc_fw_uof_size; /* uof size */ + struct qat_aefw_uof sc_aefw_uof; /* UOF_OBJS in uof */ + + void *sc_fw_suof; /* suof head */ + size_t sc_fw_suof_size; /* suof size */ + struct qat_aefw_suof sc_aefw_suof; /* suof context */ + + void *sc_fw_mmp; /* mmp data */ + size_t sc_fw_mmp_size; /* mmp size */ +}; + +static inline void +qat_bar_write_4(struct qat_softc *sc, int baroff, bus_size_t offset, + uint32_t value) +{ + + MPASS(baroff >= 0 && baroff < MAX_BARS); + + bus_space_write_4(sc->sc_csrt[baroff], + sc->sc_csrh[baroff], offset, value); +} + +static inline uint32_t +qat_bar_read_4(struct qat_softc *sc, int baroff, bus_size_t offset) +{ + + MPASS(baroff >= 0 && baroff < MAX_BARS); + + return bus_space_read_4(sc->sc_csrt[baroff], + sc->sc_csrh[baroff], offset); +} + +static inline void +qat_misc_write_4(struct 
qat_softc *sc, bus_size_t offset, uint32_t value) +{ + + qat_bar_write_4(sc, sc->sc_hw.qhw_misc_bar_id, offset, value); +} + +static inline uint32_t +qat_misc_read_4(struct qat_softc *sc, bus_size_t offset) +{ + + return qat_bar_read_4(sc, sc->sc_hw.qhw_misc_bar_id, offset); +} + +static inline void +qat_misc_read_write_or_4(struct qat_softc *sc, bus_size_t offset, + uint32_t value) +{ + uint32_t reg; + + reg = qat_misc_read_4(sc, offset); + reg |= value; + qat_misc_write_4(sc, offset, reg); +} + +static inline void +qat_misc_read_write_and_4(struct qat_softc *sc, bus_size_t offset, + uint32_t mask) +{ + uint32_t reg; + + reg = qat_misc_read_4(sc, offset); + reg &= mask; + qat_misc_write_4(sc, offset, reg); +} + +static inline void +qat_etr_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value) +{ + + qat_bar_write_4(sc, sc->sc_hw.qhw_etr_bar_id, offset, value); +} + +static inline uint32_t +qat_etr_read_4(struct qat_softc *sc, bus_size_t offset) +{ + + return qat_bar_read_4(sc, sc->sc_hw.qhw_etr_bar_id, offset); +} + +static inline void +qat_ae_local_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset, + uint32_t value) +{ + + offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) | + (offset & AE_LOCAL_CSR_MASK); + + qat_misc_write_4(sc, sc->sc_hw.qhw_ae_local_offset + offset, + value); +} + +static inline uint32_t +qat_ae_local_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset) +{ + + offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) | + (offset & AE_LOCAL_CSR_MASK); + + return qat_misc_read_4(sc, sc->sc_hw.qhw_ae_local_offset + offset); +} + +static inline void +qat_ae_xfer_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset, + uint32_t value) +{ + offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_XFER_AE_MASK) | + __SHIFTIN(offset, AE_XFER_CSR_MASK); + + qat_misc_write_4(sc, sc->sc_hw.qhw_ae_offset + offset, value); +} + +static inline void +qat_cap_global_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value) +{ + + qat_misc_write_4(sc, sc->sc_hw.qhw_cap_global_offset + offset, value); +} + +static inline uint32_t +qat_cap_global_read_4(struct qat_softc *sc, bus_size_t offset) +{ + + return qat_misc_read_4(sc, sc->sc_hw.qhw_cap_global_offset + offset); +} + + +static inline void +qat_etr_bank_write_4(struct qat_softc *sc, int bank, + bus_size_t offset, uint32_t value) +{ + + qat_etr_write_4(sc, sc->sc_hw.qhw_etr_bundle_size * bank + offset, + value); +} + +static inline uint32_t +qat_etr_bank_read_4(struct qat_softc *sc, int bank, + bus_size_t offset) +{ + + return qat_etr_read_4(sc, + sc->sc_hw.qhw_etr_bundle_size * bank + offset); +} + +static inline void +qat_etr_ap_bank_write_4(struct qat_softc *sc, int ap_bank, + bus_size_t offset, uint32_t value) +{ + + qat_etr_write_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset, value); +} + +static inline uint32_t +qat_etr_ap_bank_read_4(struct qat_softc *sc, int ap_bank, + bus_size_t offset) +{ + + return qat_etr_read_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset); +} + + +static inline void +qat_etr_bank_ring_write_4(struct qat_softc *sc, int bank, int ring, + bus_size_t offset, uint32_t value) +{ + + qat_etr_bank_write_4(sc, bank, (ring << 2) + offset, value); +} + +static inline uint32_t +qat_etr_bank_ring_read_4(struct qat_softc *sc, int bank, int ring, + bus_size_t offset) +{ + + return qat_etr_bank_read_4(sc, bank, (ring << 2) + offset); +} + +static inline void +qat_etr_bank_ring_base_write_8(struct qat_softc *sc, int bank, int ring, + uint64_t value) +{ + uint32_t lo, hi; + + lo = (uint32_t)(value & 0xffffffff); + hi = (uint32_t)((value & 0xffffffff00000000ULL) >> 32); + qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_LBASE, lo); + qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_UBASE, hi); +}
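+ +/* + * Taken together, the accessors above compose an ETR CSR address as + * qhw_etr_bundle_size * bank + (ring << 2) + register offset within the + * ETR BAR; writing ETR_RING_LBASE for bank 1, ring 3, for instance, lands + * at qhw_etr_bundle_size + 12 + ETR_RING_LBASE. + */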
+ +static inline void +qat_arb_ringsrvarben_write_4(struct qat_softc *sc, int index, uint32_t value) +{ + + qat_etr_write_4(sc, ARB_RINGSRVARBEN_OFFSET + + (ARB_REG_SLOT * index), value); +} + +static inline void +qat_arb_sarconfig_write_4(struct qat_softc *sc, int index, uint32_t value) +{ + + qat_etr_write_4(sc, ARB_OFFSET + + (ARB_REG_SIZE * index), value); +} + +static inline void +qat_arb_wrk_2_ser_map_write_4(struct qat_softc *sc, int index, uint32_t value) +{ + + qat_etr_write_4(sc, ARB_OFFSET + ARB_WRK_2_SER_MAP_OFFSET + + (ARB_REG_SIZE * index), value); +} + +void * qat_alloc_mem(size_t); +void qat_free_mem(void *); +void qat_free_dmamem(struct qat_softc *, struct qat_dmamem *); +int qat_alloc_dmamem(struct qat_softc *, struct qat_dmamem *, int, + bus_size_t, bus_size_t); + +int qat_etr_setup_ring(struct qat_softc *, int, uint32_t, uint32_t, + uint32_t, qat_cb_t, void *, const char *, + struct qat_ring **); +int qat_etr_put_msg(struct qat_softc *, struct qat_ring *, + uint32_t *); + +void qat_memcpy_htobe64(void *, const void *, size_t); +void qat_memcpy_htobe32(void *, const void *, size_t); +void qat_memcpy_htobe(void *, const void *, size_t, uint32_t); +void qat_crypto_gmac_precompute(const struct qat_crypto_desc *, + const uint8_t *key, int klen, + const struct qat_sym_hash_def *, uint8_t *); +void qat_crypto_hmac_precompute(const struct qat_crypto_desc *, + const uint8_t *, int, const struct qat_sym_hash_def *, + uint8_t *, uint8_t *); +uint16_t qat_crypto_load_cipher_session(const struct qat_crypto_desc *, + const struct qat_session *); +uint16_t qat_crypto_load_auth_session(const struct qat_crypto_desc *, + const struct qat_session *, + struct qat_sym_hash_def const **); + +#endif Property changes on: head/sys/dev/qat/qatvar.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/modules/Makefile =================================================================== --- head/sys/modules/Makefile (revision 367385) +++ head/sys/modules/Makefile (revision 367386) @@ -1,834 +1,836 @@ # $FreeBSD$ SYSDIR?=${SRCTOP}/sys .include "${SYSDIR}/conf/kern.opts.mk" SUBDIR_PARALLEL= # Modules that include binary-only blobs of microcode should be selectable by # MK_SOURCELESS_UCODE option (see below).
.include "${SYSDIR}/conf/config.mk" .if defined(MODULES_OVERRIDE) && !defined(ALL_MODULES) SUBDIR=${MODULES_OVERRIDE} .else SUBDIR= \ ${_3dfx} \ ${_3dfx_linux} \ ${_aac} \ ${_aacraid} \ accf_data \ accf_dns \ accf_http \ acl_nfs4 \ acl_posix1e \ ${_acpi} \ ae \ ${_aesni} \ age \ ${_agp} \ ahci \ aic7xxx \ alc \ ale \ alq \ ${_amd_ecc_inject} \ ${_amdgpio} \ ${_amdsbwd} \ ${_amdsmn} \ ${_amdtemp} \ amr \ ${_an} \ ${_aout} \ ${_arcmsr} \ ${_allwinner} \ ${_armv8crypto} \ ${_asmc} \ ata \ ath \ ath_dfs \ ath_hal \ ath_hal_ar5210 \ ath_hal_ar5211 \ ath_hal_ar5212 \ ath_hal_ar5416 \ ath_hal_ar9300 \ ath_main \ ath_rate \ ath_pci \ ${_autofs} \ axgbe \ backlight \ ${_bce} \ ${_bcm283x_clkman} \ ${_bcm283x_pwm} \ bfe \ bge \ bhnd \ ${_bxe} \ ${_bios} \ ${_blake2} \ bnxt \ bridgestp \ bwi \ bwn \ ${_bytgpio} \ ${_chvgpio} \ cam \ ${_cardbus} \ ${_carp} \ cas \ ${_cbb} \ cc \ ${_ccp} \ cd9660 \ cd9660_iconv \ ${_ce} \ ${_cfi} \ ${_chromebook_platform} \ ${_ciss} \ cloudabi \ ${_cloudabi32} \ ${_cloudabi64} \ ${_cmx} \ ${_coretemp} \ ${_cp} \ ${_cpsw} \ ${_cpuctl} \ ${_cpufreq} \ ${_crypto} \ ${_cryptodev} \ ctl \ ${_cxgb} \ ${_cxgbe} \ dc \ dcons \ dcons_crom \ ${_dpms} \ dummynet \ ${_efirt} \ ${_em} \ ${_ena} \ esp \ ${_et} \ evdev \ ${_exca} \ ext2fs \ fdc \ fdescfs \ ${_ffec} \ filemon \ firewire \ firmware \ fusefs \ ${_fxp} \ gem \ geom \ ${_glxiic} \ ${_glxsb} \ gpio \ hifn \ hme \ ${_hpt27xx} \ ${_hptiop} \ ${_hptmv} \ ${_hptnr} \ ${_hptrr} \ hwpmc \ ${_hwpmc_mips24k} \ ${_hwpmc_mips74k} \ ${_hyperv} \ i2c \ ${_iavf} \ ${_ibcore} \ ${_ichwd} \ ${_ice} \ ${_ice_ddp} \ ${_ida} \ if_bridge \ if_disc \ if_edsc \ ${_if_enc} \ if_epair \ ${_if_gif} \ ${_if_gre} \ ${_if_me} \ if_infiniband \ if_lagg \ ${_if_ndis} \ ${_if_stf} \ if_tuntap \ if_vlan \ if_vxlan \ iflib \ ${_iir} \ imgact_binmisc \ ${_intelspi} \ ${_io} \ ${_ioat} \ ${_ipoib} \ ${_ipdivert} \ ${_ipfilter} \ ${_ipfw} \ ipfw_nat \ ${_ipfw_nat64} \ ${_ipfw_nptv6} \ ${_ipfw_pmod} \ ${_ipmi} \ ip6_mroute_mod \ ip_mroute_mod \ ${_ips} \ ${_ipsec} \ ${_ipw} \ ${_ipwfw} \ ${_isci} \ ${_iser} \ isp \ ${_ispfw} \ ${_itwd} \ ${_iwi} \ ${_iwifw} \ ${_iwm} \ ${_iwmfw} \ ${_iwn} \ ${_iwnfw} \ ${_ix} \ ${_ixv} \ ${_ixl} \ jme \ kbdmux \ kgssapi \ kgssapi_krb5 \ khelp \ krpc \ ksyms \ ${_ktls_ocf} \ le \ lge \ libalias \ libiconv \ libmchain \ lindebugfs \ linuxkpi \ ${_lio} \ lpt \ mac_biba \ mac_bsdextended \ mac_ifoff \ mac_lomac \ mac_mls \ mac_none \ mac_ntpd \ mac_partition \ mac_portacl \ mac_seeotheruids \ mac_stub \ mac_test \ ${_malo} \ md \ mdio \ mem \ mfi \ mii \ mlx \ mlxfw \ ${_mlx4} \ ${_mlx4ib} \ ${_mlx4en} \ ${_mlx5} \ ${_mlx5en} \ ${_mlx5ib} \ ${_mly} \ mmc \ mmcsd \ ${_mpr} \ ${_mps} \ mpt \ mqueue \ mrsas \ msdosfs \ msdosfs_iconv \ msk \ ${_mthca} \ mvs \ mwl \ ${_mwlfw} \ mxge \ my \ ${_nctgpio} \ ${_ndis} \ ${_netgraph} \ ${_nfe} \ nfscl \ nfscommon \ nfsd \ nfslockd \ nfssvc \ nge \ nmdm \ nullfs \ ${_ntb} \ ${_nvd} \ ${_nvdimm} \ ${_nvme} \ ${_nvram} \ oce \ ${_ocs_fc} \ ${_ossl} \ otus \ ${_otusfw} \ ow \ ${_padlock} \ ${_padlock_rng} \ ${_pccard} \ ${_pchtherm} \ ${_pcfclock} \ ${_pf} \ ${_pflog} \ ${_pfsync} \ plip \ ${_pms} \ ppbus \ ppc \ ppi \ pps \ procfs \ proto \ pseudofs \ ${_pst} \ pty \ puc \ pwm \ + ${_qat} \ ${_qlxge} \ ${_qlxgb} \ ${_qlxgbe} \ ${_qlnx} \ ral \ ${_ralfw} \ ${_random_fortuna} \ ${_random_other} \ rc4 \ ${_rdma} \ ${_rdrand_rng} \ re \ rl \ ${_rockchip} \ rtwn \ rtwn_pci \ rtwn_usb \ ${_rtwnfw} \ ${_s3} \ ${_safe} \ safexcel \ ${_sbni} \ scc \ ${_sctp} \ sdhci \ ${_sdhci_acpi} \ sdhci_pci \ sdio 
\ sem \ send \ ${_sfxge} \ sge \ ${_sgx} \ ${_sgx_linux} \ siftr \ siis \ sis \ sk \ ${_smartpqi} \ smbfs \ snp \ sound \ ${_speaker} \ spi \ ${_splash} \ ${_sppp} \ ste \ stge \ ${_sume} \ ${_superio} \ ${_sym} \ ${_syscons} \ sysvipc \ tcp \ ${_ti} \ tmpfs \ ${_toecore} \ ${_tpm} \ ${_twa} \ twe \ tws \ uart \ udf \ udf_iconv \ ufs \ uinput \ unionfs \ usb \ ${_vesa} \ ${_virtio} \ vge \ ${_viawd} \ videomode \ vkbd \ ${_vmd} \ ${_vmm} \ ${_vmware} \ vr \ vte \ ${_wbwd} \ ${_wi} \ wlan \ wlan_acl \ wlan_amrr \ wlan_ccmp \ wlan_rssadapt \ wlan_tkip \ wlan_wep \ wlan_xauth \ ${_wpi} \ ${_wpifw} \ ${_x86bios} \ xdr \ xl \ xz \ zlib .if ${MK_AUTOFS} != "no" || defined(ALL_MODULES) _autofs= autofs .endif .if ${MK_CDDL} != "no" || defined(ALL_MODULES) .if (${MACHINE_CPUARCH} != "arm" || ${MACHINE_ARCH:Marmv[67]*} != "") && \ ${MACHINE_CPUARCH} != "mips" .if ${KERN_OPTS:MKDTRACE_HOOKS} SUBDIR+= dtrace .endif .endif SUBDIR+= opensolaris .endif .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) .if exists(${SRCTOP}/sys/opencrypto) _crypto= crypto _cryptodev= cryptodev _random_fortuna=random_fortuna _random_other= random_other _ktls_ocf= ktls_ocf .endif .endif .if ${MK_CUSE} != "no" || defined(ALL_MODULES) SUBDIR+= cuse .endif .if ${MK_EFI} != "no" .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" _efirt= efirt .endif .endif .if (${MK_INET_SUPPORT} != "no" || ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _carp= carp _toecore= toecore _if_enc= if_enc _if_gif= if_gif _if_gre= if_gre _ipfw_pmod= ipfw_pmod .if ${KERN_OPTS:MIPSEC_SUPPORT} && !${KERN_OPTS:MIPSEC} _ipsec= ipsec .endif .if ${KERN_OPTS:MSCTP_SUPPORT} || ${KERN_OPTS:MSCTP} _sctp= sctp .endif .endif .if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _if_stf= if_stf .endif .if ${MK_INET_SUPPORT} != "no" || defined(ALL_MODULES) _if_me= if_me _ipdivert= ipdivert _ipfw= ipfw .if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES) _ipfw_nat64= ipfw_nat64 .endif .endif .if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES) _ipfw_nptv6= ipfw_nptv6 .endif .if ${MK_IPFILTER} != "no" || defined(ALL_MODULES) _ipfilter= ipfilter .endif .if ${MK_ISCSI} != "no" || defined(ALL_MODULES) SUBDIR+= cfiscsi SUBDIR+= iscsi SUBDIR+= iscsi_initiator .endif .if !empty(OPT_FDT) SUBDIR+= fdt .endif # Linuxulator .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "i386" SUBDIR+= linprocfs SUBDIR+= linsysfs .endif .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" SUBDIR+= linux .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" SUBDIR+= linux64 SUBDIR+= linux_common .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "i386" _ena= ena .if ${MK_OFED} != "no" || defined(ALL_MODULES) _ibcore= ibcore _ipoib= ipoib _iser= iser .endif _mlx4= mlx4 _mlx5= mlx5 .if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _mlx4en= mlx4en _mlx5en= mlx5en .endif .if ${MK_OFED} != "no" || defined(ALL_MODULES) _mthca= mthca _mlx4ib= mlx4ib _mlx5ib= mlx5ib .endif _vmware= vmware .endif .if ${MK_NETGRAPH} != "no" || defined(ALL_MODULES) _netgraph= netgraph .endif .if (${MK_PF} != "no" && (${MK_INET_SUPPORT} != "no" || \ ${MK_INET6_SUPPORT} != "no")) || defined(ALL_MODULES) _pf= pf _pflog= pflog .if ${MK_INET_SUPPORT} != "no" _pfsync= pfsync .endif .endif .if ${MK_SOURCELESS_UCODE} != "no" _bce= bce _fxp= fxp _ispfw= ispfw _ti= ti .if ${MACHINE_CPUARCH} != 
"mips" _mwlfw= mwlfw _otusfw= otusfw _ralfw= ralfw _rtwnfw= rtwnfw .endif .endif .if ${MK_SOURCELESS_UCODE} != "no" && ${MACHINE_CPUARCH} != "arm" && \ ${MACHINE_CPUARCH} != "mips" && \ ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \ ${MACHINE_CPUARCH} != "riscv" _cxgbe= cxgbe .endif .if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "arm64" _ice= ice .if ${MK_SOURCELESS_UCODE} != "no" _ice_ddp= ice_ddp .endif .endif # These rely on 64bit atomics .if ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \ ${MACHINE_CPUARCH} != "mips" _mps= mps _mpr= mpr .endif .if ${MK_TESTS} != "no" || defined(ALL_MODULES) SUBDIR+= tests .endif .if ${MK_ZFS} != "no" || (defined(ALL_MODULES) && ${MACHINE_CPUARCH} != "powerpc") SUBDIR+= zfs .endif .if (${MACHINE_CPUARCH} == "mips" && ${MACHINE_ARCH:Mmips64} == "") _hwpmc_mips24k= hwpmc_mips24k _hwpmc_mips74k= hwpmc_mips74k .endif .if ${MACHINE_CPUARCH} != "aarch64" && ${MACHINE_CPUARCH} != "arm" && \ ${MACHINE_CPUARCH} != "mips" && ${MACHINE_CPUARCH} != "powerpc" && \ ${MACHINE_CPUARCH} != "riscv" _syscons= syscons .endif .if ${MACHINE_CPUARCH} != "mips" # no BUS_SPACE_UNSPECIFIED # No barrier instruction support (specific to this driver) _sym= sym # intr_disable() is a macro, causes problems .if ${MK_SOURCELESS_UCODE} != "no" _cxgb= cxgb .endif .endif .if ${MACHINE_CPUARCH} == "aarch64" _allwinner= allwinner _armv8crypto= armv8crypto _em= em _rockchip= rockchip .endif .if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64" _agp= agp _an= an _aout= aout _bios= bios .if ${MK_SOURCELESS_UCODE} != "no" _bxe= bxe .endif _cardbus= cardbus _cbb= cbb _cpuctl= cpuctl _cpufreq= cpufreq _dpms= dpms _em= em _et= et _exca= exca _if_ndis= if_ndis _io= io _itwd= itwd _ix= ix _ixv= ixv .if ${MK_SOURCELESS_UCODE} != "no" _lio= lio .endif _nctgpio= nctgpio _ndis= ndis _ntb= ntb _ocs_fc= ocs_fc _ossl= ossl _pccard= pccard +_qat= qat .if ${MK_OFED} != "no" || defined(ALL_MODULES) _rdma= rdma .endif _safe= safe _speaker= speaker _splash= splash _sppp= sppp _wbwd= wbwd _wi= wi _aac= aac _aacraid= aacraid _acpi= acpi .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _aesni= aesni .endif _amd_ecc_inject=amd_ecc_inject _amdsbwd= amdsbwd _amdsmn= amdsmn _amdtemp= amdtemp _arcmsr= arcmsr _asmc= asmc .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _blake2= blake2 .endif _bytgpio= bytgpio _chvgpio= chvgpio _ciss= ciss _chromebook_platform= chromebook_platform _cmx= cmx _coretemp= coretemp .if ${MK_SOURCELESS_HOST} != "no" && empty(KCSAN_ENABLED) _hpt27xx= hpt27xx .endif _hptiop= hptiop .if ${MK_SOURCELESS_HOST} != "no" && empty(KCSAN_ENABLED) _hptmv= hptmv _hptnr= hptnr _hptrr= hptrr .endif _hyperv= hyperv _ichwd= ichwd _ida= ida _iir= iir _intelspi= intelspi _ipmi= ipmi _ips= ips _isci= isci _ipw= ipw _iwi= iwi _iwm= iwm _iwn= iwn .if ${MK_SOURCELESS_UCODE} != "no" _ipwfw= ipwfw _iwifw= iwifw _iwmfw= iwmfw _iwnfw= iwnfw .endif _mly= mly _nfe= nfe _nvd= nvd _nvme= nvme _nvram= nvram .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _padlock= padlock _padlock_rng= padlock_rng _rdrand_rng= rdrand_rng .endif _pchtherm = pchtherm _s3= s3 _sdhci_acpi= sdhci_acpi _superio= superio _tpm= tpm _twa= twa _vesa= vesa _viawd= viawd _virtio= virtio _wpi= wpi .if ${MK_SOURCELESS_UCODE} != "no" _wpifw= wpifw .endif _x86bios= x86bios .endif .if ${MACHINE_CPUARCH} == "amd64" _amdgpio= amdgpio _ccp= ccp _iavf= iavf _ioat= ioat _ixl= ixl _nvdimm= nvdimm _pms= pms _qlxge= qlxge _qlxgb= qlxgb _sume= sume _vmd= vmd .if ${MK_SOURCELESS_UCODE} != "no" _qlxgbe= qlxgbe 
_qlnx= qlnx .endif _sfxge= sfxge _sgx= sgx _sgx_linux= sgx_linux _smartpqi= smartpqi .if ${MK_BHYVE} != "no" || defined(ALL_MODULES) .if ${KERN_OPTS:MSMP} _vmm= vmm .endif .endif .endif .if ${MACHINE_CPUARCH} == "i386" # XXX some of these can move to the general case when de-i386'ed # XXX some of these can move now, but are untested on other architectures. _3dfx= 3dfx _3dfx_linux= 3dfx_linux .if ${MK_SOURCELESS_HOST} != "no" _ce= ce .endif .if ${MK_SOURCELESS_UCODE} != "no" _cp= cp .endif _glxiic= glxiic _glxsb= glxsb _pcfclock= pcfclock _pst= pst _sbni= sbni .endif .if ${MACHINE_ARCH} == "armv7" _cfi= cfi _cpsw= cpsw .endif .if ${MACHINE_CPUARCH} == "powerpc" _aacraid= aacraid _agp= agp _an= an _cardbus= cardbus _cbb= cbb _cfi= cfi _cpufreq= cpufreq _exca= exca _ffec= ffec _nvd= nvd _nvme= nvme _pccard= pccard _wi= wi _virtio= virtio .endif .if ${MACHINE_ARCH:Mpowerpc64*} != "" _ipmi= ipmi _ixl= ixl _nvram= opal_nvram .endif .if ${MACHINE_CPUARCH} == "powerpc" && ${MACHINE_ARCH} != "powerpcspe" # Don't build powermac_nvram for powerpcspe, it's never supported. _nvram+= powermac_nvram .endif .if (${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_ARCH:Marmv[67]*} != "" || ${MACHINE_CPUARCH} == "i386") _cloudabi32= cloudabi32 .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" _cloudabi64= cloudabi64 .endif .endif .if ${MACHINE_ARCH:Marmv[67]*} != "" || ${MACHINE_CPUARCH} == "aarch64" _bcm283x_clkman= bcm283x_clkman _bcm283x_pwm= bcm283x_pwm .endif .if !(${COMPILER_TYPE} == "clang" && ${COMPILER_VERSION} < 110000) # LLVM 10 crashes when building if_malo_pci.c, fixed in LLVM11: # https://bugs.llvm.org/show_bug.cgi?id=44351 _malo= malo .endif SUBDIR+=${MODULES_EXTRA} .for reject in ${WITHOUT_MODULES} SUBDIR:= ${SUBDIR:N${reject}} .endfor # Calling kldxref(8) for each module is expensive. .if !defined(NO_XREF) .MAKEFLAGS+= -DNO_XREF afterinstall: .PHONY @if type kldxref >/dev/null 2>&1; then \ ${ECHO} ${KLDXREF_CMD} ${DESTDIR}${KMODDIR}; \ ${KLDXREF_CMD} ${DESTDIR}${KMODDIR}; \ fi .endif SUBDIR:= ${SUBDIR:u:O} .include <bsd.subdir.mk> Index: head/sys/modules/qat/Makefile =================================================================== --- head/sys/modules/qat/Makefile (nonexistent) +++ head/sys/modules/qat/Makefile (revision 367386) @@ -0,0 +1,19 @@ +# $FreeBSD$ + +.PATH: ${SRCTOP}/sys/dev/qat + +KMOD= qat + +SRCS= qat.c \ + qat_ae.c \ + qat_c2xxx.c \ + qat_c3xxx.c \ + qat_c62x.c \ + qat_d15xx.c \ + qat_dh895xcc.c \ + qat_hw15.c \ + qat_hw17.c + +SRCS+= bus_if.h cryptodev_if.h device_if.h pci_if.h + +.include <bsd.kmod.mk> Property changes on: head/sys/modules/qat/Makefile ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property