diff --git a/sys/conf/files.x86 b/sys/conf/files.x86 index 953da7dd1284..21a1b8046f12 100644 --- a/sys/conf/files.x86 +++ b/sys/conf/files.x86 @@ -1,397 +1,398 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # # This file contains all the x86 devices and such that are # common between i386 and amd64, but aren't applicable to # any other architecture we support. # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. # atkbdmap.h optional atkbd_dflt_keymap \ compile-with "${KEYMAP} -L ${ATKBD_DFLT_KEYMAP} | ${KEYMAP_FIX} > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "atkbdmap.h" cddl/dev/fbt/x86/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/dtrace/x86/dis_tables.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" cddl/dev/dtrace/x86/instr_size.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" crypto/aesni/aesni.c optional aesni aesni_ghash.o optional aesni \ dependency "$S/crypto/aesni/aesni_ghash.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_ghash.o" aesni_ccm.o optional aesni \ dependency "$S/crypto/aesni/aesni_ccm.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_ccm.o" aesni_wrap.o optional aesni \ dependency "$S/crypto/aesni/aesni_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} -mmmx -msse -msse4 -maes ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_wrap.o" intel_sha1.o optional aesni \ dependency "$S/crypto/aesni/intel_sha1.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} -mmmx -msse 
-msse4 -msha ${.IMPSRC}" \ no-implicit-rule \ clean "intel_sha1.o" intel_sha256.o optional aesni \ dependency "$S/crypto/aesni/intel_sha256.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} -mmmx -msse -msse4 -msha ${.IMPSRC}" \ no-implicit-rule \ clean "intel_sha256.o" crypto/openssl/ossl_x86.c optional ossl crypto/via/padlock.c optional padlock crypto/via/padlock_cipher.c optional padlock crypto/via/padlock_hash.c optional padlock dev/acpica/acpi_hpet.c optional acpi dev/acpica/acpi_if.m standard dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pcib_acpi.c optional acpi pci dev/acpica/acpi_pcib_pci.c optional acpi pci dev/acpica/acpi_pxm.c optional acpi dev/acpica/acpi_timer.c optional acpi dev/acpi_support/acpi_wmi_if.m standard dev/agp/agp_amd64.c optional agp dev/agp/agp_i810.c optional agp dev/agp/agp_via.c optional agp dev/amdsmu/amdsmu.c optional amdsmu pci dev/amdsbwd/amdsbwd.c optional amdsbwd dev/amdsmn/amdsmn.c optional amdsmn | amdtemp dev/amdtemp/amdtemp.c optional amdtemp dev/arcmsr/arcmsr.c optional arcmsr pci dev/asmc/asmc.c optional asmc isa dev/atkbdc/atkbd.c optional atkbd atkbdc dev/atkbdc/atkbd_atkbdc.c optional atkbd atkbdc dev/atkbdc/atkbdc.c optional atkbdc dev/atkbdc/atkbdc_isa.c optional atkbdc isa dev/atkbdc/atkbdc_subr.c optional atkbdc dev/atkbdc/psm.c optional psm atkbdc dev/atopcase/atopcase.c optional atopcase acpi hid spibus dev/atopcase/atopcase_acpi.c optional atopcase acpi hid spibus dev/bxe/bxe.c optional bxe pci dev/bxe/bxe_stats.c optional bxe pci dev/bxe/bxe_debug.c optional bxe pci dev/bxe/ecore_sp.c optional bxe pci dev/bxe/bxe_elink.c optional bxe pci dev/bxe/57710_init_values.c optional bxe pci dev/bxe/57711_init_values.c optional bxe pci dev/bxe/57712_init_values.c optional bxe pci dev/coretemp/coretemp.c optional coretemp dev/cpuctl/cpuctl.c optional cpuctl dev/dpms/dpms.c optional dpms dev/fb/fb.c 
optional fb | vga dev/fb/s3_pci.c optional s3pci dev/fb/vesa.c optional vga vesa dev/fb/vga.c optional vga dev/fdc/fdc.c optional fdc dev/fdc/fdc_acpi.c optional fdc dev/fdc/fdc_isa.c optional fdc isa dev/gpio/bytgpio.c optional bytgpio dev/gpio/chvgpio.c optional chvgpio dev/hpt27xx/hpt27xx_os_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_osm_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_config.c optional hpt27xx dev/hpt27xx/$M-elf.hpt27xx_lib.o optional hpt27xx dev/hptmv/entry.c optional hptmv dev/hptmv/mv.c optional hptmv dev/hptmv/gui_lib.c optional hptmv dev/hptmv/hptproc.c optional hptmv dev/hptmv/ioctl.c optional hptmv dev/hptmv/$M-elf.hptmvraid.o optional hptmv dev/hptnr/hptnr_os_bsd.c optional hptnr dev/hptnr/hptnr_osm_bsd.c optional hptnr dev/hptnr/hptnr_config.c optional hptnr dev/hptnr/$M-elf.hptnr_lib.o optional hptnr dev/hptrr/hptrr_os_bsd.c optional hptrr dev/hptrr/hptrr_osm_bsd.c optional hptrr dev/hptrr/hptrr_config.c optional hptrr dev/hptrr/$M-elf.hptrr_lib.o optional hptrr dev/hwpmc/hwpmc_amd.c optional hwpmc dev/hwpmc/hwpmc_intel.c optional hwpmc dev/hwpmc/hwpmc_core.c optional hwpmc dev/hwpmc/hwpmc_uncore.c optional hwpmc dev/hwpmc/hwpmc_tsc.c optional hwpmc dev/hwpmc/hwpmc_x86.c optional hwpmc dev/hyperv/hvsock/hv_sock.c optional hyperv dev/hyperv/input/hv_hid.c optional hyperv hvhid dev/hyperv/input/hv_kbd.c optional hyperv dev/hyperv/input/hv_kbdc.c optional hyperv dev/hyperv/pcib/vmbus_pcib.c optional hyperv pci dev/hyperv/netvsc/hn_nvs.c optional hyperv dev/hyperv/netvsc/hn_rndis.c optional hyperv dev/hyperv/netvsc/if_hn.c optional hyperv dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c optional hyperv dev/hyperv/utilities/hv_kvp.c optional hyperv dev/hyperv/utilities/hv_snapshot.c optional hyperv dev/hyperv/utilities/vmbus_heartbeat.c optional hyperv dev/hyperv/utilities/vmbus_ic.c optional hyperv dev/hyperv/utilities/vmbus_shutdown.c optional hyperv dev/hyperv/utilities/vmbus_timesync.c optional hyperv dev/hyperv/vmbus/hyperv.c optional hyperv 
dev/hyperv/vmbus/x86/hyperv_x86.c optional hyperv dev/hyperv/vmbus/x86/vmbus_x86.c optional hyperv dev/hyperv/vmbus/hyperv_busdma.c optional hyperv dev/hyperv/vmbus/vmbus.c optional hyperv pci dev/hyperv/vmbus/vmbus_br.c optional hyperv dev/hyperv/vmbus/vmbus_chan.c optional hyperv dev/hyperv/vmbus/vmbus_et.c optional hyperv dev/hyperv/vmbus/vmbus_if.m optional hyperv dev/hyperv/vmbus/vmbus_res.c optional hyperv dev/hyperv/vmbus/vmbus_xact.c optional hyperv dev/ichwd/i6300esbwd.c optional ichwd dev/ichwd/ichwd.c optional ichwd dev/imcsmb/imcsmb.c optional imcsmb dev/imcsmb/imcsmb_pci.c optional imcsmb pci dev/intel/pchtherm.c optional pchtherm dev/intel/spi.c optional intelspi dev/intel/spi_pci.c optional intelspi pci dev/intel/spi_acpi.c optional intelspi acpi dev/io/iodev.c optional io dev/iommu/busdma_iommu.c optional acpi iommu pci dev/iommu/iommu_gas.c optional acpi iommu pci dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_bt.c optional ipmi dev/ipmi/ipmi_isa.c optional ipmi isa dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_pci.c optional ipmi pci dev/ipmi/ipmi_smbios.c optional ipmi dev/ipmi/ipmi_smbus.c optional ipmi smbus dev/ipmi/ipmi_smic.c optional ipmi dev/ipmi/ipmi_ssif.c optional ipmi smbus dev/isci/isci.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/isci_controller.c optional isci dev/isci/isci_domain.c optional isci dev/isci/isci_interrupt.c optional isci dev/isci/isci_io_request.c optional isci dev/isci/isci_logger.c optional isci dev/isci/isci_oem_parameters.c optional isci dev/isci/isci_remote_device.c optional isci dev/isci/isci_sysctl.c optional isci dev/isci/isci_task_request.c optional isci dev/isci/isci_timer.c optional isci dev/isci/scil/sati.c optional isci dev/isci/scil/sati_abort_task_set.c optional isci dev/isci/scil/sati_atapi.c optional isci dev/isci/scil/sati_device.c optional isci dev/isci/scil/sati_inquiry.c optional isci dev/isci/scil/sati_log_sense.c 
optional isci dev/isci/scil/sati_lun_reset.c optional isci dev/isci/scil/sati_mode_pages.c optional isci dev/isci/scil/sati_mode_select.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/sati_mode_sense.c optional isci dev/isci/scil/sati_mode_sense_10.c optional isci dev/isci/scil/sati_mode_sense_6.c optional isci dev/isci/scil/sati_move.c optional isci dev/isci/scil/sati_passthrough.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/sati_read.c optional isci dev/isci/scil/sati_read_buffer.c optional isci dev/isci/scil/sati_read_capacity.c optional isci dev/isci/scil/sati_reassign_blocks.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/sati_report_luns.c optional isci dev/isci/scil/sati_request_sense.c optional isci dev/isci/scil/sati_start_stop_unit.c optional isci dev/isci/scil/sati_synchronize_cache.c optional isci dev/isci/scil/sati_test_unit_ready.c optional isci dev/isci/scil/sati_unmap.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/sati_util.c optional isci dev/isci/scil/sati_verify.c optional isci dev/isci/scil/sati_write.c optional isci dev/isci/scil/sati_write_and_verify.c optional isci dev/isci/scil/sati_write_buffer.c optional isci dev/isci/scil/sati_write_long.c optional isci dev/isci/scil/sci_abstract_list.c optional isci dev/isci/scil/sci_base_controller.c optional isci dev/isci/scil/sci_base_domain.c optional isci dev/isci/scil/sci_base_iterator.c optional isci dev/isci/scil/sci_base_library.c optional isci dev/isci/scil/sci_base_logger.c optional isci dev/isci/scil/sci_base_memory_descriptor_list.c optional isci dev/isci/scil/sci_base_memory_descriptor_list_decorator.c optional isci dev/isci/scil/sci_base_object.c optional isci dev/isci/scil/sci_base_observer.c optional isci dev/isci/scil/sci_base_phy.c optional isci dev/isci/scil/sci_base_port.c optional isci 
dev/isci/scil/sci_base_remote_device.c optional isci dev/isci/scil/sci_base_request.c optional isci dev/isci/scil/sci_base_state_machine.c optional isci dev/isci/scil/sci_base_state_machine_logger.c optional isci dev/isci/scil/sci_base_state_machine_observer.c optional isci dev/isci/scil/sci_base_subject.c optional isci dev/isci/scil/sci_util.c optional isci dev/isci/scil/scic_sds_controller.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/scic_sds_library.c optional isci dev/isci/scil/scic_sds_pci.c optional isci dev/isci/scil/scic_sds_phy.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/scic_sds_port.c optional isci dev/isci/scil/scic_sds_port_configuration_agent.c optional isci dev/isci/scil/scic_sds_remote_device.c optional isci dev/isci/scil/scic_sds_remote_node_context.c optional isci dev/isci/scil/scic_sds_remote_node_table.c optional isci dev/isci/scil/scic_sds_request.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/scic_sds_sgpio.c optional isci dev/isci/scil/scic_sds_smp_remote_device.c optional isci dev/isci/scil/scic_sds_smp_request.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/scic_sds_ssp_request.c optional isci dev/isci/scil/scic_sds_stp_packet_request.c optional isci dev/isci/scil/scic_sds_stp_remote_device.c optional isci dev/isci/scil/scic_sds_stp_request.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/scic_sds_unsolicited_frame_control.c optional isci dev/isci/scil/scif_sas_controller.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/scif_sas_controller_state_handlers.c optional isci dev/isci/scil/scif_sas_controller_states.c optional isci dev/isci/scil/scif_sas_domain.c optional isci dev/isci/scil/scif_sas_domain_state_handlers.c optional isci \ compile-with "${NORMAL_C} 
${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/scif_sas_domain_states.c optional isci dev/isci/scil/scif_sas_high_priority_request_queue.c optional isci dev/isci/scil/scif_sas_internal_io_request.c optional isci dev/isci/scil/scif_sas_io_request.c optional isci dev/isci/scil/scif_sas_io_request_state_handlers.c optional isci dev/isci/scil/scif_sas_io_request_states.c optional isci dev/isci/scil/scif_sas_library.c optional isci dev/isci/scil/scif_sas_remote_device.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substates.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/scif_sas_remote_device_starting_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substates.c optional isci dev/isci/scil/scif_sas_remote_device_state_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_states.c optional isci dev/isci/scil/scif_sas_request.c optional isci dev/isci/scil/scif_sas_smp_activity_clear_affiliation.c optional isci dev/isci/scil/scif_sas_smp_io_request.c optional isci dev/isci/scil/scif_sas_smp_phy.c optional isci dev/isci/scil/scif_sas_smp_remote_device.c optional isci \ compile-with "${NORMAL_C} ${NO_WUNUSED_BUT_SET_VARIABLE}" dev/isci/scil/scif_sas_stp_io_request.c optional isci dev/isci/scil/scif_sas_stp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_task_request.c optional isci dev/isci/scil/scif_sas_task_request.c optional isci dev/isci/scil/scif_sas_task_request_state_handlers.c optional isci dev/isci/scil/scif_sas_task_request_states.c optional isci dev/isci/scil/scif_sas_timer.c optional isci dev/itwd/itwd.c optional itwd dev/kvm_clock/kvm_clock.c optional kvm_clock dev/mana/gdma_main.c optional mana dev/mana/mana_en.c optional mana dev/mana/mana_sysctl.c optional mana dev/mana/shm_channel.c optional mana dev/mana/hw_channel.c optional mana dev/mana/gdma_util.c optional mana 
dev/nctgpio/nctgpio.c optional nctgpio dev/nfe/if_nfe.c optional nfe pci dev/ntb/if_ntb/if_ntb.c optional if_ntb dev/ntb/ntb_transport.c optional ntb_transport | if_ntb dev/ntb/ntb.c optional ntb | ntb_transport | if_ntb | ntb_hw_amd | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_if.m optional ntb | ntb_transport | if_ntb | ntb_hw_amd | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_hw/ntb_hw_amd.c optional ntb_hw_amd | ntb_hw dev/ntb/ntb_hw/ntb_hw_intel.c optional ntb_hw_intel | ntb_hw dev/ntb/ntb_hw/ntb_hw_plx.c optional ntb_hw_plx | ntb_hw dev/ntb/test/ntb_tool.c optional ntb_tool dev/nvram/nvram.c optional nvram isa dev/random/ivy.c optional rdrand_rng !random_loadable dev/random/nehemiah.c optional padlock_rng !random_loadable +dev/random/rdseed.c optional rdrand_rng !random_loadable dev/qat_c2xxx/qat.c optional qat_c2xxx dev/qat_c2xxx/qat_ae.c optional qat_c2xxx dev/qat_c2xxx/qat_c2xxx.c optional qat_c2xxx dev/qat_c2xxx/qat_hw15.c optional qat_c2xxx dev/smbios/smbios_subr.c standard dev/speaker/spkr.c optional speaker dev/superio/superio.c optional superio isa dev/syscons/scvesactl.c optional sc vga vesa dev/syscons/scvgarndr.c optional sc vga dev/tpm/tpm.c optional tpm dev/tpm/tpm_acpi.c optional tpm acpi dev/tpm/tpm_isa.c optional tpm isa dev/uart/uart_cpu_x86.c optional uart dev/viawd/viawd.c optional viawd dev/vmd/vmd.c optional vmd | vmd_bus dev/wbwd/wbwd.c optional wbwd dev/wdatwd/wdatwd.c optional wdatwd isa/syscons_isa.c optional sc isa/vga_isa.c optional vga libkern/strcmp.c standard libkern/strncmp.c standard libkern/x86/crc32_sse42.c standard kern/imgact_aout.c optional compat_aout # # x86 shared code between IA32 and AMD64 architectures # x86/acpica/OsdEnvironment.c optional acpi x86/acpica/acpi_apm.c optional acpi x86/acpica/srat.c optional acpi x86/bios/vpd.c optional vpd x86/cpufreq/est.c optional cpufreq x86/cpufreq/hwpstate_amd.c optional cpufreq x86/cpufreq/hwpstate_intel.c optional cpufreq x86/cpufreq/p4tcc.c optional cpufreq 
x86/cpufreq/powernow.c optional cpufreq x86/iommu/amd_cmd.c optional acpi iommu pci x86/iommu/amd_ctx.c optional acpi iommu pci x86/iommu/amd_drv.c optional acpi iommu pci x86/iommu/amd_event.c optional acpi iommu pci x86/iommu/amd_idpgtbl.c optional acpi iommu pci x86/iommu/amd_intrmap.c optional acpi iommu pci x86/iommu/intel_ctx.c optional acpi iommu pci x86/iommu/intel_drv.c optional acpi iommu pci x86/iommu/intel_fault.c optional acpi iommu pci x86/iommu/intel_idpgtbl.c optional acpi iommu pci x86/iommu/intel_intrmap.c optional acpi iommu pci x86/iommu/intel_qi.c optional acpi iommu pci x86/iommu/intel_quirks.c optional acpi iommu pci x86/iommu/intel_utils.c optional acpi iommu pci x86/iommu/iommu_utils.c optional acpi iommu pci x86/isa/atrtc.c standard x86/isa/clock.c standard x86/isa/isa.c optional isa x86/isa/isa_dma.c optional isa x86/isa/nmi.c standard x86/isa/orm.c optional isa x86/pci/pci_bus.c optional pci x86/pci/qpi.c optional pci x86/x86/autoconf.c standard x86/x86/bus_machdep.c standard x86/x86/busdma_bounce.c standard x86/x86/busdma_machdep.c standard x86/x86/cpu_machdep.c standard x86/x86/dbreg.c optional ddb | gdb x86/x86/dump_machdep.c standard x86/x86/fdt_machdep.c optional fdt x86/x86/identcpu.c standard x86/x86/intr_machdep.c standard x86/x86/legacy.c standard x86/x86/mca.c standard x86/x86/x86_mem.c optional mem x86/x86/mp_x86.c optional smp x86/x86/nexus.c standard x86/x86/pvclock.c optional kvm_clock | xenhvm x86/x86/sdt_machdep.c optional kdtrace_hooks x86/x86/stack_machdep.c optional ddb | stack x86/x86/tsc.c standard x86/x86/ucode.c standard x86/x86/ucode_subr.c standard x86/x86/vmware_guestrpc.c optional vmware_guestrpc x86/x86/delay.c standard x86/xen/hvm.c optional xenhvm x86/xen/xen_apic.c optional xenhvm smp x86/xen/xen_arch_intr.c optional xenhvm diff --git a/sys/dev/random/fenestrasX/fx_pool.c b/sys/dev/random/fenestrasX/fx_pool.c index ec59b97a2070..95c2d223e0de 100644 --- a/sys/dev/random/fenestrasX/fx_pool.c +++ 
b/sys/dev/random/fenestrasX/fx_pool.c @@ -1,616 +1,619 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Conrad Meyer * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Timer-based reseed interval growth factor and limit in seconds. (§ 3.2) */ #define FXENT_RESSED_INTVL_GFACT 3 #define FXENT_RESEED_INTVL_MAX 3600 /* * Pool reseed schedule. Initially, only pool 0 is active. Until the timer * interval reaches INTVL_MAX, only pool 0 is used. 
* * After reaching INTVL_MAX, pool k is either activated (if inactive) or used * (if active) every 3^k timer reseeds. (§ 3.3) * * (Entropy harvesting only round robins across active pools.) */ #define FXENT_RESEED_BASE 3 /* * Number of bytes from high quality sources to allocate to pool 0 before * normal round-robin allocation after each timer reseed. (§ 3.4) */ #define FXENT_HI_SRC_POOL0_BYTES 32 /* * § 3.1 * * Low sources provide unconditioned entropy, such as mouse movements; high * sources are assumed to provide high-quality random bytes. Pull sources are * those which can be polled, i.e., anything randomdev calls a "random_source." * * In the whitepaper, low sources are pull. For us, at least in the existing * design, low-quality sources push into some global ring buffer and then get * forwarded into the RNG by a thread that continually polls. Presumably their * design batches low entropy signals in some way (SHA512?) and only requests * them dynamically on reseed. I'm not sure what the benefit is vs feeding * into the pools directly. */ enum fxrng_ent_access_cls { FXRNG_PUSH, FXRNG_PULL, }; enum fxrng_ent_source_cls { FXRNG_HI, FXRNG_LO, FXRNG_GARBAGE, }; struct fxrng_ent_cls { enum fxrng_ent_access_cls entc_axx_cls; enum fxrng_ent_source_cls entc_src_cls; }; static const struct fxrng_ent_cls fxrng_hi_pull = { .entc_axx_cls = FXRNG_PULL, .entc_src_cls = FXRNG_HI, }; static const struct fxrng_ent_cls fxrng_hi_push = { .entc_axx_cls = FXRNG_PUSH, .entc_src_cls = FXRNG_HI, }; static const struct fxrng_ent_cls fxrng_lo_push = { .entc_axx_cls = FXRNG_PUSH, .entc_src_cls = FXRNG_LO, }; static const struct fxrng_ent_cls fxrng_garbage = { .entc_axx_cls = FXRNG_PUSH, .entc_src_cls = FXRNG_GARBAGE, }; /* * This table is a mapping of randomdev's current source abstractions to the * designations above; at some point, if the design seems reasonable, it would * make more sense to pull this up into the abstraction layer instead.
*/ static const struct fxrng_ent_char { const struct fxrng_ent_cls *entc_cls; } fxrng_ent_char[ENTROPYSOURCE] = { [RANDOM_CACHED] = { .entc_cls = &fxrng_hi_push, }, [RANDOM_ATTACH] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_KEYBOARD] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_MOUSE] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_NET_TUN] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_NET_ETHER] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_NET_NG] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_INTERRUPT] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_SWI] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_FS_ATIME] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_UMA] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_CALLOUT] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_RANDOMDEV] = { .entc_cls = &fxrng_lo_push, }, [RANDOM_PURE_SAFE] = { .entc_cls = &fxrng_hi_push, }, [RANDOM_PURE_GLXSB] = { .entc_cls = &fxrng_hi_push, }, [RANDOM_PURE_HIFN] = { .entc_cls = &fxrng_hi_push, }, [RANDOM_PURE_RDRAND] = { .entc_cls = &fxrng_hi_pull, }, + [RANDOM_PURE_RDSEED] = { + .entc_cls = &fxrng_hi_pull, + }, [RANDOM_PURE_NEHEMIAH] = { .entc_cls = &fxrng_hi_pull, }, [RANDOM_PURE_RNDTEST] = { .entc_cls = &fxrng_garbage, }, [RANDOM_PURE_VIRTIO] = { .entc_cls = &fxrng_hi_pull, }, [RANDOM_PURE_BROADCOM] = { .entc_cls = &fxrng_hi_push, }, [RANDOM_PURE_CCP] = { .entc_cls = &fxrng_hi_pull, }, [RANDOM_PURE_DARN] = { .entc_cls = &fxrng_hi_pull, }, [RANDOM_PURE_TPM] = { .entc_cls = &fxrng_hi_push, }, [RANDOM_PURE_VMGENID] = { .entc_cls = &fxrng_hi_push, }, }; /* Useful for single-bit-per-source state. */ BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE); /* XXX Borrowed from not-yet-committed D22702. */ #ifndef BIT_TEST_SET_ATOMIC_ACQ #define BIT_TEST_SET_ATOMIC_ACQ(_s, n, p) \ (atomic_testandset_acq_long( \ &(p)->__bits[__bitset_word((_s), (n))], (n)) != 0) #endif #define FXENT_TEST_SET_ATOMIC_ACQ(n, p) \ BIT_TEST_SET_ATOMIC_ACQ(ENTROPYSOURCE, n, p) /* For special behavior on first-time entropy sources. 
(§ 3.1) */ static struct fxrng_bits __read_mostly fxrng_seen; /* For special behavior for high-entropy sources after a reseed. (§ 3.4) */ _Static_assert(FXENT_HI_SRC_POOL0_BYTES <= UINT8_MAX, ""); static uint8_t __read_mostly fxrng_reseed_seen[ENTROPYSOURCE]; /* Entropy pools. Lock order is ENT -> RNG(root) -> RNG(leaf). */ static struct mtx fxent_pool_lk; MTX_SYSINIT(fx_pool, &fxent_pool_lk, "fx entropy pool lock", MTX_DEF); #define FXENT_LOCK() mtx_lock(&fxent_pool_lk) #define FXENT_UNLOCK() mtx_unlock(&fxent_pool_lk) #define FXENT_ASSERT(rng) mtx_assert(&fxent_pool_lk, MA_OWNED) #define FXENT_ASSERT_NOT(rng) mtx_assert(&fxent_pool_lk, MA_NOTOWNED) static struct fxrng_hash fxent_pool[FXRNG_NPOOLS]; static unsigned __read_mostly fxent_nactpools = 1; static struct timeout_task fxent_reseed_timer; static int __read_mostly fxent_timer_ready; /* * Track number of bytes of entropy harvested from high-quality sources prior * to initial keying. The idea is to collect more jitter entropy when fewer * high-quality bytes were available and less if we had other good sources. We * want to provide always-on availability but don't necessarily have *any* * great sources on some platforms. * * Like fxrng_ent_char: at some point, if the design seems reasonable, it would * make more sense to pull this up into the abstraction layer instead. * * Jitter entropy is unimplemented for now. */ static unsigned long fxrng_preseed_ent; void fxrng_pools_init(void) { size_t i; for (i = 0; i < nitems(fxent_pool); i++) fxrng_hash_init(&fxent_pool[i]); } static inline bool fxrng_hi_source(enum random_entropy_source src) { return (fxrng_ent_char[src].entc_cls->entc_src_cls == FXRNG_HI); } /* * A racy check that this high-entropy source's event should contribute to * pool0 on the basis of per-source byte count. 
The check is racy for two * reasons: * - Performance: The vast majority of the time, we've already taken 32 bytes * from any present high quality source and the racy check lets us avoid * dirtying the cache for the global array. * - Correctness: It's fine that the check is racy. The failure modes are: * • False positive: We will detect when we take the lock. * • False negative: We still collect the entropy; it just won't be * preferentially placed in pool0 in this case. */ static inline bool fxrng_hi_pool0_eligible_racy(enum random_entropy_source src) { return (atomic_load_acq_8(&fxrng_reseed_seen[src]) < FXENT_HI_SRC_POOL0_BYTES); } /* * Top level entropy processing API from randomdev. * * Invoked by the core randomdev subsystem both for preload entropy, "push" * sources (like interrupts, keyboard, etc) and pull sources (RDRAND, etc). */ void fxrng_event_processor(struct harvest_event *event) { enum random_entropy_source src; unsigned pool; bool first_time, first_32; src = event->he_source; ASSERT_DEBUG(event->he_size <= sizeof(event->he_entropy), "%s: he_size: %u > sizeof(he_entropy): %zu", __func__, (unsigned)event->he_size, sizeof(event->he_entropy)); /* * Zero bytes of source entropy doesn't count as observing this source * for the first time. We still harvest the counter entropy. */ first_time = event->he_size > 0 && !FXENT_TEST_SET_ATOMIC_ACQ(src, &fxrng_seen); if (__predict_false(first_time)) { /* * "The first time [any source] provides entropy, it is used to * directly reseed the root PRNG. The entropy pools are * bypassed." (§ 3.1) * * Unlike Windows, we cannot rely on loader(8) seed material * being present, so we perform initial keying in the kernel. * We use brng_generation 0 to represent an unkeyed state. * * Prior to initial keying, it doesn't make sense to try to mix * the entropy directly with the root PRNG state, as the root * PRNG is unkeyed. 
Instead, we collect pre-keying dynamic * entropy in pool0 and do not bump the root PRNG seed version * or set its key. Initial keying will incorporate pool0 and * bump the brng_generation (seed version). * * After initial keying, we do directly mix in first-time * entropy sources. We use the root BRNG to generate 32 bytes * and use fxrng_hash to mix it with the new entropy source and * re-key with the first 256 bits of hash output. */ FXENT_LOCK(); FXRNG_BRNG_LOCK(&fxrng_root); if (__predict_true(fxrng_root.brng_generation > 0)) { /* Bypass the pools: */ FXENT_UNLOCK(); fxrng_brng_src_reseed(event); FXRNG_BRNG_ASSERT_NOT(&fxrng_root); return; } /* * Keying the root PRNG requires both FXENT_LOCK and the PRNG's * lock, so we only need to hold on to the pool lock to prevent * initial keying without this entropy. */ FXRNG_BRNG_UNLOCK(&fxrng_root); /* Root PRNG hasn't been keyed yet, just accumulate event. */ fxrng_hash_update(&fxent_pool[0], &event->he_somecounter, sizeof(event->he_somecounter)); fxrng_hash_update(&fxent_pool[0], event->he_entropy, event->he_size); if (fxrng_hi_source(src)) { /* Prevent overflow. */ if (fxrng_preseed_ent <= ULONG_MAX - event->he_size) fxrng_preseed_ent += event->he_size; } FXENT_UNLOCK(); return; } /* !first_time */ /* * "The first 32 bytes produced by a high entropy source after a reseed * from the pools is always put in pool 0." (§ 3.4) * * The first-32-byte tracking data in fxrng_reseed_seen is reset in * fxent_timer_reseed_npools() below. */ first_32 = event->he_size > 0 && fxrng_hi_source(src) && atomic_load_acq_int(&fxent_nactpools) > 1 && fxrng_hi_pool0_eligible_racy(src); if (__predict_false(first_32)) { unsigned rem, seen; FXENT_LOCK(); seen = fxrng_reseed_seen[src]; if (seen == FXENT_HI_SRC_POOL0_BYTES) goto round_robin; rem = FXENT_HI_SRC_POOL0_BYTES - seen; rem = MIN(rem, event->he_size); fxrng_reseed_seen[src] = seen + rem; /* * We put 'rem' bytes in pool0, and any remaining bytes are * round-robin'd across other pools. 
*/ fxrng_hash_update(&fxent_pool[0], ((uint8_t *)event->he_entropy) + event->he_size - rem, rem); if (rem == event->he_size) { fxrng_hash_update(&fxent_pool[0], &event->he_somecounter, sizeof(event->he_somecounter)); FXENT_UNLOCK(); return; } /* * If fewer bytes were needed than this event provided, we only * take the last rem bytes of the entropy buffer and leave the * timecounter to be round-robin'd with the remaining entropy. */ event->he_size -= rem; goto round_robin; } /* !first_32 */ FXENT_LOCK(); round_robin: FXENT_ASSERT(); pool = event->he_destination % fxent_nactpools; fxrng_hash_update(&fxent_pool[pool], event->he_entropy, event->he_size); fxrng_hash_update(&fxent_pool[pool], &event->he_somecounter, sizeof(event->he_somecounter)); if (__predict_false(fxrng_hi_source(src) && atomic_load_acq_64(&fxrng_root_generation) == 0)) { /* Prevent overflow. */ if (fxrng_preseed_ent <= ULONG_MAX - event->he_size) fxrng_preseed_ent += event->he_size; } FXENT_UNLOCK(); } /* * Top level "seeded" API/signal from randomdev. * * This is our warning that a request is coming: we need to be seeded. In * fenestrasX, a request for random bytes _never_ fails. "We (ed: ditto) have * observed that there are many callers that never check for the error code, * even if they are generating cryptographic key material." (§ 1.6) * * If we returned 'false', both read_random(9) and chacha20_randomstir() * (arc4random(9)) will blindly charge on with something almost certainly worse * than what we've got, or are able to get quickly enough. */ bool fxrng_alg_seeded(void) { uint8_t hash[FXRNG_HASH_SZ]; sbintime_t sbt; /* The vast majority of the time, we expect to already be seeded. */ if (__predict_true(atomic_load_acq_64(&fxrng_root_generation) != 0)) return (true); /* * Take the lock and recheck; only one thread needs to do the initial * seeding work.
*/ FXENT_LOCK(); if (atomic_load_acq_64(&fxrng_root_generation) != 0) { FXENT_UNLOCK(); return (true); } /* XXX Any one-off initial seeding goes here. */ fxrng_hash_finish(&fxent_pool[0], hash, sizeof(hash)); fxrng_hash_init(&fxent_pool[0]); fxrng_brng_reseed(hash, sizeof(hash)); FXENT_UNLOCK(); randomdev_unblock(); explicit_bzero(hash, sizeof(hash)); /* * This may be called too early for taskqueue_thread to be initialized. * fxent_pool_timer_init will detect if we've already unblocked and * queue the first timer reseed at that point. */ if (atomic_load_acq_int(&fxent_timer_ready) != 0) { sbt = SBT_1S; taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2)); } return (true); } /* * Timer-based reseeds and pool expansion. */ static void fxent_timer_reseed_npools(unsigned n) { /* * 64 * 8 => moderately large 512 bytes. Could be static, as we are * only used in a static context. On the other hand, this is in * threadqueue TASK context and we're likely nearly at top of stack * already. */ uint8_t hash[FXRNG_HASH_SZ * FXRNG_NPOOLS]; unsigned i; ASSERT_DEBUG(n > 0 && n <= FXRNG_NPOOLS, "n:%u", n); FXENT_ASSERT(); /* * Collect entropy from pools 0..n-1 by concatenating the output hashes * and then feeding them into fxrng_brng_reseed, which will hash the * aggregate together with the current root PRNG keystate to produce a * new key. It will also bump the global generation counter * appropriately. */ for (i = 0; i < n; i++) { fxrng_hash_finish(&fxent_pool[i], hash + i * FXRNG_HASH_SZ, FXRNG_HASH_SZ); fxrng_hash_init(&fxent_pool[i]); } fxrng_brng_reseed(hash, n * FXRNG_HASH_SZ); explicit_bzero(hash, n * FXRNG_HASH_SZ); /* * "The first 32 bytes produced by a high entropy source after a reseed * from the pools is always put in pool 0." 
(§ 3.4) * * So here we reset the tracking (somewhat naively given the majority * of sources on most machines are not what we consider "high", but at * 32 bytes it's smaller than a cache line), so the next 32 bytes are * prioritized into pool0. * * See corresponding use of fxrng_reseed_seen in fxrng_event_processor. */ memset(fxrng_reseed_seen, 0, sizeof(fxrng_reseed_seen)); FXENT_ASSERT(); } static void fxent_timer_reseed(void *ctx __unused, int pending __unused) { static unsigned reseed_intvl_sec = 1; /* Only reseeds after FXENT_RESEED_INTVL_MAX is achieved. */ static uint64_t reseed_number = 1; unsigned next_ival, i, k; sbintime_t sbt; if (reseed_intvl_sec < FXENT_RESEED_INTVL_MAX) { next_ival = FXENT_RESSED_INTVL_GFACT * reseed_intvl_sec; if (next_ival > FXENT_RESEED_INTVL_MAX) next_ival = FXENT_RESEED_INTVL_MAX; FXENT_LOCK(); fxent_timer_reseed_npools(1); FXENT_UNLOCK(); } else { /* * The creation of entropy pools beyond 0 is enabled when the * reseed interval hits the maximum. (§ 3.3) */ next_ival = reseed_intvl_sec; /* * Pool 0 is used every reseed; pool 1..0 every 3rd reseed; and in * general, pool n..0 every 3^n reseeds. */ k = reseed_number; reseed_number++; /* Count how many pools, from [0, i), to use for reseed. */ for (i = 1; i < MIN(fxent_nactpools + 1, FXRNG_NPOOLS); i++) { if ((k % FXENT_RESEED_BASE) != 0) break; k /= FXENT_RESEED_BASE; } /* * If we haven't activated pool i yet, activate it and only * reseed from [0, i-1). (§ 3.3) */ FXENT_LOCK(); if (i == fxent_nactpools + 1) { fxent_timer_reseed_npools(fxent_nactpools); fxent_nactpools++; } else { /* Just reseed from [0, i). */ fxent_timer_reseed_npools(i); } FXENT_UNLOCK(); } /* Schedule the next reseed. 
*/ sbt = next_ival * SBT_1S; taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2)); reseed_intvl_sec = next_ival; } static void fxent_pool_timer_init(void *dummy __unused) { sbintime_t sbt; TIMEOUT_TASK_INIT(taskqueue_thread, &fxent_reseed_timer, 0, fxent_timer_reseed, NULL); if (atomic_load_acq_64(&fxrng_root_generation) != 0) { sbt = SBT_1S; taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2)); } atomic_store_rel_int(&fxent_timer_ready, 1); } /* After taskqueue_thread is initialized in SI_SUB_TASKQ:SI_ORDER_SECOND. */ SYSINIT(fxent_pool_timer_init, SI_SUB_TASKQ, SI_ORDER_ANY, fxent_pool_timer_init, NULL); diff --git a/sys/dev/random/ivy.c b/sys/dev/random/ivy.c index fa1e4831f1b9..3eb0f261e6dc 100644 --- a/sys/dev/random/ivy.c +++ b/sys/dev/random/ivy.c @@ -1,196 +1,165 @@ /*- + * Copyright (c) 2013, 2025, David E. O'Brien * Copyright (c) 2013 The FreeBSD Foundation - * Copyright (c) 2013 David E. O'Brien * Copyright (c) 2012 Konstantin Belousov * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define RETRY_COUNT 10 -static bool has_rdrand, has_rdseed; static u_int random_ivy_read(void *, u_int); static const struct random_source random_ivy = { .rs_ident = "Intel Secure Key RNG", .rs_source = RANDOM_PURE_RDRAND, .rs_read = random_ivy_read }; -SYSCTL_NODE(_kern_random, OID_AUTO, rdrand, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, - "rdrand (ivy) entropy source"); static bool acquire_independent_seed_samples = false; -SYSCTL_BOOL(_kern_random_rdrand, OID_AUTO, rdrand_independent_seed, - CTLFLAG_RWTUN, &acquire_independent_seed_samples, 0, - "If non-zero, use more expensive and slow, but safer, seeded samples " - "where RDSEED is not present."); static bool x86_rdrand_store(u_long *buf) { u_long rndval, seed_iterations, i; int retry; /* Per [1], "§ 5.2.6 Generating Seeds from RDRAND," * machines lacking RDSEED will guarantee RDRAND is reseeded every 8kB * of generated output. 
* * [1]: https://software.intel.com/en-us/articles/intel-digital-random-number-generator-drng-software-implementation-guide#inpage-nav-6-8 */ if (acquire_independent_seed_samples) seed_iterations = 8 * 1024 / sizeof(*buf); else seed_iterations = 1; for (i = 0; i < seed_iterations; i++) { retry = RETRY_COUNT; __asm __volatile( "1:\n\t" "rdrand %1\n\t" /* read randomness into rndval */ "jc 2f\n\t" /* CF is set on success, exit retry loop */ "dec %0\n\t" /* otherwise, retry-- */ "jne 1b\n\t" /* and loop if retries are not exhausted */ "2:" : "+r" (retry), "=r" (rndval) : : "cc"); if (retry == 0) return (false); } *buf = rndval; return (true); } -static bool -x86_rdseed_store(u_long *buf) -{ - u_long rndval; - int retry; - - retry = RETRY_COUNT; - __asm __volatile( - "1:\n\t" - "rdseed %1\n\t" /* read randomness into rndval */ - "jc 2f\n\t" /* CF is set on success, exit retry loop */ - "dec %0\n\t" /* otherwise, retry-- */ - "jne 1b\n\t" /* and loop if retries are not exhausted */ - "2:" - : "+r" (retry), "=r" (rndval) : : "cc"); - *buf = rndval; - return (retry != 0); -} - -static bool -x86_unimpl_store(u_long *buf __unused) -{ - - panic("%s called", __func__); -} - -DEFINE_IFUNC(static, bool, x86_rng_store, (u_long *buf)) -{ - has_rdrand = (cpu_feature2 & CPUID2_RDRAND); - has_rdseed = (cpu_stdext_feature & CPUID_STDEXT_RDSEED); - - if (has_rdseed) - return (x86_rdseed_store); - else if (has_rdrand) - return (x86_rdrand_store); - else - return (x86_unimpl_store); -} - /* It is required that buf length is a multiple of sizeof(u_long). 
*/ static u_int random_ivy_read(void *buf, u_int c) { u_long *b, rndval; u_int count; KASSERT(c % sizeof(*b) == 0, ("partial read %d", c)); b = buf; for (count = c; count > 0; count -= sizeof(*b)) { - if (!x86_rng_store(&rndval)) + if (!x86_rdrand_store(&rndval)) break; *b++ = rndval; } return (c - count); } static int rdrand_modevent(module_t mod, int type, void *unused) { + struct sysctl_ctx_list ctx; + struct sysctl_oid *o; + bool has_rdrand, has_rdseed; int error = 0; + has_rdrand = (cpu_feature2 & CPUID2_RDRAND); + has_rdseed = (cpu_stdext_feature & CPUID_STDEXT_RDSEED); + switch (type) { case MOD_LOAD: - if (has_rdrand || has_rdseed) { + if (has_rdrand && !has_rdseed) { + sysctl_ctx_init(&ctx); + o = SYSCTL_ADD_NODE(&ctx, SYSCTL_STATIC_CHILDREN(_kern_random), + OID_AUTO, "rdrand", CTLFLAG_RW | CTLFLAG_MPSAFE, 0, + "rdrand (ivy) entropy source"); + SYSCTL_ADD_BOOL(&ctx, SYSCTL_CHILDREN(o), OID_AUTO, + "rdrand_independent_seed", CTLFLAG_RDTUN, + &acquire_independent_seed_samples, 0, + "If non-zero, use more expensive and slow, but safer, seeded samples " + "where RDSEED is not present."); random_source_register(&random_ivy); printf("random: fast provider: \"%s\"\n", random_ivy.rs_ident); } break; case MOD_UNLOAD: - if (has_rdrand || has_rdseed) + if (has_rdrand && !has_rdseed) random_source_deregister(&random_ivy); break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } static moduledata_t rdrand_mod = { "rdrand", rdrand_modevent, 0 }; DECLARE_MODULE(rdrand, rdrand_mod, SI_SUB_RANDOM, SI_ORDER_FOURTH); MODULE_VERSION(rdrand, 1); MODULE_DEPEND(rdrand, random_harvestq, 1, 1, 1); diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c index 3b68e41b053a..e38fd38c310b 100644 --- a/sys/dev/random/random_harvestq.c +++ b/sys/dev/random/random_harvestq.c @@ -1,982 +1,983 @@ /*- * Copyright (c) 2017 Oliver Pinter * Copyright (c) 2017 W. 
Dean Freeman * Copyright (c) 2000-2015 Mark R V Murray * Copyright (c) 2013 Arthur Mesh * Copyright (c) 2004 Robert N. M. Watson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(RANDOM_ENABLE_ETHER) #define _RANDOM_HARVEST_ETHER_OFF 0 #else #define _RANDOM_HARVEST_ETHER_OFF (1u << RANDOM_NET_ETHER) #endif #if defined(RANDOM_ENABLE_UMA) #define _RANDOM_HARVEST_UMA_OFF 0 #else #define _RANDOM_HARVEST_UMA_OFF (1u << RANDOM_UMA) #endif /* * Note that random_sources_feed() will also use this to try and split up * entropy into a subset of pools per iteration with the goal of feeding * HARVESTSIZE into every pool at least once per second. */ #define RANDOM_KTHREAD_HZ 10 static void random_kthread(void); static void random_sources_feed(void); /* * Random must initialize much earlier than epoch, but we can initialize the * epoch code before SMP starts. Prior to SMP, we can safely bypass * concurrency primitives. */ static __read_mostly bool epoch_inited; static __read_mostly epoch_t rs_epoch; static const char *random_source_descr[ENTROPYSOURCE]; /* * How many events to queue up. We create this many items in * an 'empty' queue, then transfer them to the 'harvest' queue with * supplied junk. When used, they are transferred back to the * 'empty' queue. */ #define RANDOM_RING_MAX 1024 #define RANDOM_ACCUM_MAX 8 /* 1 to let the kernel thread run, 0 to terminate, -1 to mark completion */ volatile int random_kthread_control; /* * Allow the sysadmin to select the broad category of entropy types to harvest. * * Updates are synchronized by the harvest mutex. 
*/ __read_frequently u_int hc_source_mask; struct random_sources { CK_LIST_ENTRY(random_sources) rrs_entries; const struct random_source *rrs_source; }; static CK_LIST_HEAD(sources_head, random_sources) source_list = CK_LIST_HEAD_INITIALIZER(source_list); SYSCTL_NODE(_kern_random, OID_AUTO, harvest, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Entropy Device Parameters"); /* * Put all the harvest queue context stuff in one place. * this make is a bit easier to lock and protect. */ static struct harvest_context { /* The harvest mutex protects all of harvest_context and * the related data. */ struct mtx hc_mtx; /* Round-robin destination cache. */ u_int hc_destination[ENTROPYSOURCE]; /* The context of the kernel thread processing harvested entropy */ struct proc *hc_kthread_proc; /* * A pair of buffers for queued events. New events are added to the * active queue while the kthread processes the other one in parallel. */ struct entropy_buffer { struct harvest_event ring[RANDOM_RING_MAX]; u_int pos; } hc_entropy_buf[2]; u_int hc_active_buf; struct fast_entropy_accumulator { volatile u_int pos; uint32_t buf[RANDOM_ACCUM_MAX]; } hc_entropy_fast_accumulator; } harvest_context; #define RANDOM_HARVEST_INIT_LOCK() mtx_init(&harvest_context.hc_mtx, \ "entropy harvest mutex", NULL, MTX_SPIN) #define RANDOM_HARVEST_LOCK() mtx_lock_spin(&harvest_context.hc_mtx) #define RANDOM_HARVEST_UNLOCK() mtx_unlock_spin(&harvest_context.hc_mtx) static struct kproc_desc random_proc_kp = { "rand_harvestq", random_kthread, &harvest_context.hc_kthread_proc, }; /* Pass the given event straight through to Fortuna/Whatever. 
*/ static __inline void random_harvestq_fast_process_event(struct harvest_event *event) { p_random_alg_context->ra_event_processor(event); explicit_bzero(event, sizeof(*event)); } static void random_kthread(void) { struct harvest_context *hc; hc = &harvest_context; for (random_kthread_control = 1; random_kthread_control;) { struct entropy_buffer *buf; u_int entries; /* Deal with queued events. */ RANDOM_HARVEST_LOCK(); buf = &hc->hc_entropy_buf[hc->hc_active_buf]; entries = buf->pos; buf->pos = 0; hc->hc_active_buf = (hc->hc_active_buf + 1) % nitems(hc->hc_entropy_buf); RANDOM_HARVEST_UNLOCK(); for (u_int i = 0; i < entries; i++) random_harvestq_fast_process_event(&buf->ring[i]); /* Poll sources of noise. */ random_sources_feed(); /* XXX: FIX!! Increase the high-performance data rate? Need some measurements first. */ for (u_int i = 0; i < RANDOM_ACCUM_MAX; i++) { if (hc->hc_entropy_fast_accumulator.buf[i]) { random_harvest_direct(&hc->hc_entropy_fast_accumulator.buf[i], sizeof(hc->hc_entropy_fast_accumulator.buf[0]), RANDOM_UMA); hc->hc_entropy_fast_accumulator.buf[i] = 0; } } /* XXX: FIX!! This is a *great* place to pass hardware/live entropy to random(9) */ tsleep_sbt(&hc->hc_kthread_proc, 0, "-", SBT_1S/RANDOM_KTHREAD_HZ, 0, C_PREL(1)); } random_kthread_control = -1; wakeup(&hc->hc_kthread_proc); kproc_exit(0); /* NOTREACHED */ } SYSINIT(random_device_h_proc, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, kproc_start, &random_proc_kp); _Static_assert(SI_SUB_KICK_SCHEDULER > SI_SUB_RANDOM, "random kthread starting before subsystem initialization"); static void rs_epoch_init(void *dummy __unused) { rs_epoch = epoch_alloc("Random Sources", EPOCH_PREEMPT); epoch_inited = true; } SYSINIT(rs_epoch_init, SI_SUB_EPOCH, SI_ORDER_ANY, rs_epoch_init, NULL); /* * Run through all fast sources reading entropy for the given * number of rounds, which should be a multiple of the number * of entropy accumulation pools in use; it is 32 for Fortuna. 
*/ static void random_sources_feed(void) { uint32_t entropy[HARVESTSIZE]; struct epoch_tracker et; struct random_sources *rrs; u_int i, n, npools; bool rse_warm; rse_warm = epoch_inited; /* * Evenly-ish distribute pool population across the second based on how * frequently random_kthread iterates. * * For Fortuna, the math currently works out as such: * * 64 bits * 4 pools = 256 bits per iteration * 256 bits * 10 Hz = 2560 bits per second, 320 B/s * */ npools = howmany(p_random_alg_context->ra_poolcount, RANDOM_KTHREAD_HZ); /*- * If we're not seeded yet, attempt to perform a "full seed", filling * all of the PRNG's pools with entropy; if there is enough entropy * available from "fast" entropy sources this will allow us to finish * seeding and unblock the boot process immediately rather than being * stuck for a few seconds with random_kthread gradually collecting a * small chunk of entropy every 1 / RANDOM_KTHREAD_HZ seconds. * * We collect RANDOM_FORTUNA_DEFPOOLSIZE bytes per pool, i.e. enough * to fill Fortuna's pools in the default configuration. With another * PRNG or smaller pools for Fortuna, we might collect more entropy * than needed to fill the pools, but this is harmless; alternatively, * a different PRNG, larger pools, or fast entropy sources which are * not able to provide as much entropy as we request may result in the * not being fully seeded (and thus remaining blocked) but in that * case we will return here after 1 / RANDOM_KTHREAD_HZ seconds and * try again for a large amount of entropy. */ if (!p_random_alg_context->ra_seeded()) npools = howmany(p_random_alg_context->ra_poolcount * RANDOM_FORTUNA_DEFPOOLSIZE, sizeof(entropy)); /* * Step over all of live entropy sources, and feed their output * to the system-wide RNG. */ if (rse_warm) epoch_enter_preempt(rs_epoch, &et); CK_LIST_FOREACH(rrs, &source_list, rrs_entries) { for (i = 0; i < npools; i++) { if (rrs->rrs_source->rs_read == NULL) { /* Source pushes entropy asynchronously. 
*/ continue; } n = rrs->rrs_source->rs_read(entropy, sizeof(entropy)); KASSERT((n <= sizeof(entropy)), ("%s: rs_read returned too much data (%u > %zu)", __func__, n, sizeof(entropy))); /* * Sometimes the HW entropy source doesn't have anything * ready for us. This isn't necessarily untrustworthy. * We don't perform any other verification of an entropy * source (i.e., length is allowed to be anywhere from 1 * to sizeof(entropy), quality is unchecked, etc), so * don't balk verbosely at slow random sources either. * There are reports that RDSEED on x86 metal falls * behind the rate at which we query it, for example. * But it's still a better entropy source than RDRAND. */ if (n == 0) continue; random_harvest_direct(entropy, n, rrs->rrs_source->rs_source); } } if (rse_warm) epoch_exit_preempt(rs_epoch, &et); explicit_bzero(entropy, sizeof(entropy)); } /* * State used for conducting NIST SP 800-90B health tests on entropy sources. */ static struct health_test_softc { uint32_t ht_rct_value[HARVESTSIZE + 1]; u_int ht_rct_count; /* number of samples with the same value */ u_int ht_rct_limit; /* constant after init */ uint32_t ht_apt_value[HARVESTSIZE + 1]; u_int ht_apt_count; /* number of samples with the same value */ u_int ht_apt_seq; /* sequence number of the last sample */ u_int ht_apt_cutoff; /* constant after init */ uint64_t ht_total_samples; bool ondemand; /* Set to true to restart the state machine */ enum { INIT = 0, /* initial state */ DISABLED, /* health checking is disabled */ STARTUP, /* doing startup tests, samples are discarded */ STEADY, /* steady-state operation */ FAILED, /* health check failed, discard samples */ } ht_state; } healthtest[ENTROPYSOURCE]; #define RANDOM_SELFTEST_STARTUP_SAMPLES 1024 /* 4.3, requirement 4 */ #define RANDOM_SELFTEST_APT_WINDOW 512 /* 4.4.2 */ static void copy_event(uint32_t dst[static HARVESTSIZE + 1], const struct harvest_event *event) { memset(dst, 0, sizeof(uint32_t) * (HARVESTSIZE + 1)); memcpy(dst, event->he_entropy, 
event->he_size); if (event->he_source <= RANDOM_ENVIRONMENTAL_END) { /* * For pure entropy sources the timestamp counter is generally * quite deterministic since samples are taken at regular * intervals, so does not contribute much to the entropy. To * make health tests more effective, exclude it from the sample, * since it might otherwise defeat the health tests in a * scenario where the source is stuck. */ dst[HARVESTSIZE] = event->he_somecounter; } } static void random_healthtest_rct_init(struct health_test_softc *ht, const struct harvest_event *event) { ht->ht_rct_count = 1; copy_event(ht->ht_rct_value, event); } /* * Apply the repetition count test to a sample. * * Return false if the test failed, i.e., we observed >= C consecutive samples * with the same value, and true otherwise. */ static bool random_healthtest_rct_next(struct health_test_softc *ht, const struct harvest_event *event) { uint32_t val[HARVESTSIZE + 1]; copy_event(val, event); if (memcmp(val, ht->ht_rct_value, sizeof(ht->ht_rct_value)) != 0) { ht->ht_rct_count = 1; memcpy(ht->ht_rct_value, val, sizeof(ht->ht_rct_value)); return (true); } else { ht->ht_rct_count++; return (ht->ht_rct_count < ht->ht_rct_limit); } } static void random_healthtest_apt_init(struct health_test_softc *ht, const struct harvest_event *event) { ht->ht_apt_count = 1; ht->ht_apt_seq = 1; copy_event(ht->ht_apt_value, event); } static bool random_healthtest_apt_next(struct health_test_softc *ht, const struct harvest_event *event) { uint32_t val[HARVESTSIZE + 1]; if (ht->ht_apt_seq == 0) { random_healthtest_apt_init(ht, event); return (true); } copy_event(val, event); if (memcmp(val, ht->ht_apt_value, sizeof(ht->ht_apt_value)) == 0) { ht->ht_apt_count++; if (ht->ht_apt_count >= ht->ht_apt_cutoff) return (false); } ht->ht_apt_seq++; if (ht->ht_apt_seq == RANDOM_SELFTEST_APT_WINDOW) ht->ht_apt_seq = 0; return (true); } /* * Run the health tests for the given event. This is assumed to be called from * a serialized context.
*/ bool random_harvest_healthtest(const struct harvest_event *event) { struct health_test_softc *ht; ht = &healthtest[event->he_source]; /* * Was on-demand testing requested? Restart the state machine if so, * restarting the startup tests. */ if (atomic_load_bool(&ht->ondemand)) { atomic_store_bool(&ht->ondemand, false); ht->ht_state = INIT; } switch (ht->ht_state) { case __predict_false(INIT): /* Store the first sample and initialize test state. */ random_healthtest_rct_init(ht, event); random_healthtest_apt_init(ht, event); ht->ht_total_samples = 0; ht->ht_state = STARTUP; return (false); case DISABLED: /* No health testing for this source. */ return (true); case STEADY: case STARTUP: ht->ht_total_samples++; if (random_healthtest_rct_next(ht, event) && random_healthtest_apt_next(ht, event)) { if (ht->ht_state == STARTUP && ht->ht_total_samples >= RANDOM_SELFTEST_STARTUP_SAMPLES) { printf( "random: health test passed for source %s\n", random_source_descr[event->he_source]); ht->ht_state = STEADY; } return (ht->ht_state == STEADY); } ht->ht_state = FAILED; printf( "random: health test failed for source %s, discarding samples\n", random_source_descr[event->he_source]); /* FALLTHROUGH */ case FAILED: return (false); } } static bool nist_healthtest_enabled = false; SYSCTL_BOOL(_kern_random, OID_AUTO, nist_healthtest_enabled, CTLFLAG_RDTUN, &nist_healthtest_enabled, 0, "Enable NIST SP 800-90B health tests for noise sources"); static void random_healthtest_init(enum random_entropy_source source, int min_entropy) { struct health_test_softc *ht; ht = &healthtest[source]; memset(ht, 0, sizeof(*ht)); KASSERT(ht->ht_state == INIT, ("%s: health test state is %d for source %d", __func__, ht->ht_state, source)); /* * If health-testing is enabled, validate all sources except CACHED and * VMGENID: they are deterministic sources used only a small, fixed * number of times, so statistical testing is not applicable. 
if (!nist_healthtest_enabled || source == RANDOM_CACHED || source == RANDOM_PURE_VMGENID) { ht->ht_state = DISABLED; return; } /* * Set cutoff values for the two tests, given a min-entropy estimate for * the source and allowing for an error rate of 1 in 2^{34}. With a * min-entropy estimate of 1 bit and a sample rate of RANDOM_KTHREAD_HZ, * we expect to see a false positive once in ~54.5 years. * * The RCT limit comes from the formula in section 4.4.1. * * The APT cutoffs are calculated using the formula in section 4.4.2 * footnote 10 with the number of Bernoulli trials changed from W to * W-1, since the test as written counts the number of samples equal to * the first sample in the window, and thus tests W-1 samples. We * provide cutoffs for estimates up to sizeof(uint32_t)*HARVESTSIZE*8 * bits. */ const int apt_cutoffs[] = { [1] = 329, [2] = 195, [3] = 118, [4] = 73, [5] = 48, [6] = 33, [7] = 23, [8] = 17, [9] = 13, [10] = 11, [11] = 9, [12] = 8, [13] = 7, [14] = 6, [15] = 5, [16] = 5, [17 ... 19] = 4, [20 ... 25] = 3, [26 ... 42] = 2, [43 ... 64] = 1, }; const int error_rate = 34; if (min_entropy == 0) { /* * For environmental sources, the main source of entropy is the * associated timecounter value. Since these sources can be * influenced by unprivileged users, we conservatively use a * min-entropy estimate of 1 bit per sample. For "pure" * sources, we assume 8 bits per sample, as such sources provide * a variable amount of data per read and in particular might * only provide a single byte at a time. */ min_entropy = source >= RANDOM_PURE_START ?
8 : 1; } else if (min_entropy < 0 || min_entropy >= nitems(apt_cutoffs)) { panic("invalid min_entropy %d for %s", min_entropy, random_source_descr[source]); } ht->ht_rct_limit = 1 + howmany(error_rate, min_entropy); ht->ht_apt_cutoff = apt_cutoffs[min_entropy]; } static int random_healthtest_ondemand(SYSCTL_HANDLER_ARGS) { u_int mask, source; int error; mask = 0; error = sysctl_handle_int(oidp, &mask, 0, req); if (error != 0 || req->newptr == NULL) return (error); while (mask != 0) { source = ffs(mask) - 1; if (source < nitems(healthtest)) atomic_store_bool(&healthtest[source].ondemand, true); mask &= ~(1u << source); } return (0); } SYSCTL_PROC(_kern_random, OID_AUTO, nist_healthtest_ondemand, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, random_healthtest_ondemand, "I", "Re-run NIST SP 800-90B startup health tests for a noise source"); static int random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS) { static const u_int user_immutable_mask = (((1 << ENTROPYSOURCE) - 1) & (-1UL << RANDOM_PURE_START)) | _RANDOM_HARVEST_ETHER_OFF | _RANDOM_HARVEST_UMA_OFF; int error; u_int value; value = atomic_load_int(&hc_source_mask); error = sysctl_handle_int(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (flsl(value) > ENTROPYSOURCE) return (EINVAL); /* * Disallow userspace modification of pure entropy sources. 
*/ RANDOM_HARVEST_LOCK(); hc_source_mask = (value & ~user_immutable_mask) | (hc_source_mask & user_immutable_mask); RANDOM_HARVEST_UNLOCK(); return (0); } SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask, CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, random_check_uint_harvestmask, "IU", "Entropy harvesting mask"); static int random_print_harvestmask(SYSCTL_HANDLER_ARGS) { struct sbuf sbuf; int error, i; error = sysctl_wire_old_buffer(req, 0); if (error == 0) { u_int mask; sbuf_new_for_sysctl(&sbuf, NULL, 128, req); mask = atomic_load_int(&hc_source_mask); for (i = ENTROPYSOURCE - 1; i >= 0; i--) { bool present; present = (mask & (1u << i)) != 0; sbuf_cat(&sbuf, present ? "1" : "0"); } error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); } return (error); } SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask_bin, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, random_print_harvestmask, "A", "Entropy harvesting mask (printable)"); static const char *random_source_descr[ENTROPYSOURCE] = { [RANDOM_CACHED] = "CACHED", [RANDOM_ATTACH] = "ATTACH", [RANDOM_KEYBOARD] = "KEYBOARD", [RANDOM_MOUSE] = "MOUSE", [RANDOM_NET_TUN] = "NET_TUN", [RANDOM_NET_ETHER] = "NET_ETHER", [RANDOM_NET_NG] = "NET_NG", [RANDOM_INTERRUPT] = "INTERRUPT", [RANDOM_SWI] = "SWI", [RANDOM_FS_ATIME] = "FS_ATIME", [RANDOM_UMA] = "UMA", [RANDOM_CALLOUT] = "CALLOUT", [RANDOM_RANDOMDEV] = "RANDOMDEV", /* ENVIRONMENTAL_END */ [RANDOM_PURE_SAFE] = "PURE_SAFE", /* PURE_START */ [RANDOM_PURE_GLXSB] = "PURE_GLXSB", [RANDOM_PURE_HIFN] = "PURE_HIFN", [RANDOM_PURE_RDRAND] = "PURE_RDRAND", + [RANDOM_PURE_RDSEED] = "PURE_RDSEED", [RANDOM_PURE_NEHEMIAH] = "PURE_NEHEMIAH", [RANDOM_PURE_RNDTEST] = "PURE_RNDTEST", [RANDOM_PURE_VIRTIO] = "PURE_VIRTIO", [RANDOM_PURE_BROADCOM] = "PURE_BROADCOM", [RANDOM_PURE_CCP] = "PURE_CCP", [RANDOM_PURE_DARN] = "PURE_DARN", [RANDOM_PURE_TPM] = "PURE_TPM", [RANDOM_PURE_VMGENID] = "PURE_VMGENID", [RANDOM_PURE_QUALCOMM] = "PURE_QUALCOMM", [RANDOM_PURE_ARMV8] = "PURE_ARMV8", 
[RANDOM_PURE_ARM_TRNG] = "PURE_ARM_TRNG", /* "ENTROPYSOURCE" */ }; static int random_print_harvestmask_symbolic(SYSCTL_HANDLER_ARGS) { struct sbuf sbuf; int error, i; bool first; first = true; error = sysctl_wire_old_buffer(req, 0); if (error == 0) { u_int mask; sbuf_new_for_sysctl(&sbuf, NULL, 128, req); mask = atomic_load_int(&hc_source_mask); for (i = ENTROPYSOURCE - 1; i >= 0; i--) { bool present; present = (mask & (1u << i)) != 0; if (i >= RANDOM_PURE_START && !present) continue; if (!first) sbuf_cat(&sbuf, ","); sbuf_cat(&sbuf, !present ? "[" : ""); sbuf_cat(&sbuf, random_source_descr[i]); sbuf_cat(&sbuf, !present ? "]" : ""); first = false; } error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); } return (error); } SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask_symbolic, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, random_print_harvestmask_symbolic, "A", "Entropy harvesting mask (symbolic)"); static void random_harvestq_init(void *unused __unused) { static const u_int almost_everything_mask = (((1 << (RANDOM_ENVIRONMENTAL_END + 1)) - 1) & ~_RANDOM_HARVEST_ETHER_OFF & ~_RANDOM_HARVEST_UMA_OFF); hc_source_mask = almost_everything_mask; RANDOM_HARVEST_INIT_LOCK(); harvest_context.hc_active_buf = 0; for (int i = RANDOM_START; i <= RANDOM_ENVIRONMENTAL_END; i++) random_healthtest_init(i, 0); } SYSINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_init, NULL); /* * Subroutine to slice up a contiguous chunk of 'entropy' and feed it into the * underlying algorithm. Returns number of bytes actually fed into underlying * algorithm. 
*/ static size_t random_early_prime(char *entropy, size_t len) { struct harvest_event event; size_t i; len = rounddown(len, sizeof(event.he_entropy)); if (len == 0) return (0); for (i = 0; i < len; i += sizeof(event.he_entropy)) { event.he_somecounter = random_get_cyclecount(); event.he_size = sizeof(event.he_entropy); event.he_source = RANDOM_CACHED; event.he_destination = harvest_context.hc_destination[RANDOM_CACHED]++; memcpy(event.he_entropy, entropy + i, sizeof(event.he_entropy)); random_harvestq_fast_process_event(&event); } explicit_bzero(entropy, len); return (len); } /* * Subroutine to search for known loader-loaded files in memory and feed them * into the underlying algorithm early in boot. Returns the number of bytes * loaded (zero if none were loaded). */ static size_t random_prime_loader_file(const char *type) { uint8_t *keyfile, *data; size_t size; keyfile = preload_search_by_type(type); if (keyfile == NULL) return (0); data = preload_fetch_addr(keyfile); size = preload_fetch_size(keyfile); if (data == NULL) return (0); return (random_early_prime(data, size)); } /* * This is used to prime the RNG by grabbing any early random stuff * known to the kernel, and inserting it directly into the hashing * module, currently Fortuna. */ static void random_harvestq_prime(void *unused __unused) { size_t size; /* * Get entropy that may have been preloaded by loader(8) * and use it to pre-charge the entropy harvest queue. 
*/ size = random_prime_loader_file(RANDOM_CACHED_BOOT_ENTROPY_MODULE); if (bootverbose) { if (size > 0) printf("random: read %zu bytes from preloaded cache\n", size); else printf("random: no preloaded entropy cache\n"); } size = random_prime_loader_file(RANDOM_PLATFORM_BOOT_ENTROPY_MODULE); if (bootverbose) { if (size > 0) printf("random: read %zu bytes from platform bootloader\n", size); else printf("random: no platform bootloader entropy\n"); } } SYSINIT(random_device_prime, SI_SUB_RANDOM, SI_ORDER_MIDDLE, random_harvestq_prime, NULL); static void random_harvestq_deinit(void *unused __unused) { /* Command the hash/reseed thread to end and wait for it to finish */ random_kthread_control = 0; while (random_kthread_control >= 0) tsleep(&harvest_context.hc_kthread_proc, 0, "harvqterm", hz/5); } SYSUNINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_deinit, NULL); /*- * Entropy harvesting queue routine. * * This is supposed to be fast; do not do anything slow in here! * It is also illegal (and morally reprehensible) to insert any * high-rate data here. "High-rate" is defined as a data source * that is likely to fill up the buffer in much less than 100ms. * This includes the "always-on" sources like the Intel "rdrand" * or the VIA Nehamiah "xstore" sources. */ /* XXXRW: get_cyclecount() is cheap on most modern hardware, where cycle * counters are built in, but on older hardware it will do a real time clock * read which can be quite expensive. 
*/ void random_harvest_queue_(const void *entropy, u_int size, enum random_entropy_source origin) { struct harvest_context *hc; struct entropy_buffer *buf; struct harvest_event *event; KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid", __func__, origin)); hc = &harvest_context; RANDOM_HARVEST_LOCK(); buf = &hc->hc_entropy_buf[hc->hc_active_buf]; if (buf->pos < RANDOM_RING_MAX) { event = &buf->ring[buf->pos++]; event->he_somecounter = random_get_cyclecount(); event->he_source = origin; event->he_destination = hc->hc_destination[origin]++; if (size <= sizeof(event->he_entropy)) { event->he_size = size; memcpy(event->he_entropy, entropy, size); } else { /* Big event, so squash it */ event->he_size = sizeof(event->he_entropy[0]); event->he_entropy[0] = jenkins_hash(entropy, size, (uint32_t)(uintptr_t)event); } } RANDOM_HARVEST_UNLOCK(); } /*- * Entropy harvesting fast routine. * * This is supposed to be very fast; do not do anything slow in here! * This is the right place for high-rate harvested data. */ void random_harvest_fast_(const void *entropy, u_int size) { u_int pos; pos = harvest_context.hc_entropy_fast_accumulator.pos; harvest_context.hc_entropy_fast_accumulator.buf[pos] ^= jenkins_hash(entropy, size, random_get_cyclecount()); harvest_context.hc_entropy_fast_accumulator.pos = (pos + 1)%RANDOM_ACCUM_MAX; } /*- * Entropy harvesting direct routine. * * This is not supposed to be fast, but will only be used during * (e.g.) booting when initial entropy is being gathered. 
*/ void random_harvest_direct_(const void *entropy, u_int size, enum random_entropy_source origin) { struct harvest_event event; KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid\n", __func__, origin)); size = MIN(size, sizeof(event.he_entropy)); event.he_somecounter = random_get_cyclecount(); event.he_size = size; event.he_source = origin; event.he_destination = harvest_context.hc_destination[origin]++; memcpy(event.he_entropy, entropy, size); random_harvestq_fast_process_event(&event); } void random_source_register(const struct random_source *rsource) { struct random_sources *rrs; KASSERT(rsource != NULL, ("invalid input to %s", __func__)); rrs = malloc(sizeof(*rrs), M_ENTROPY, M_WAITOK); rrs->rrs_source = rsource; printf("random: registering fast source %s\n", rsource->rs_ident); random_healthtest_init(rsource->rs_source, rsource->rs_min_entropy); RANDOM_HARVEST_LOCK(); hc_source_mask |= (1 << rsource->rs_source); CK_LIST_INSERT_HEAD(&source_list, rrs, rrs_entries); RANDOM_HARVEST_UNLOCK(); } void random_source_deregister(const struct random_source *rsource) { struct random_sources *rrs = NULL; KASSERT(rsource != NULL, ("invalid input to %s", __func__)); RANDOM_HARVEST_LOCK(); hc_source_mask &= ~(1 << rsource->rs_source); CK_LIST_FOREACH(rrs, &source_list, rrs_entries) if (rrs->rrs_source == rsource) { CK_LIST_REMOVE(rrs, rrs_entries); break; } RANDOM_HARVEST_UNLOCK(); if (rrs != NULL && epoch_inited) epoch_wait_preempt(rs_epoch); free(rrs, M_ENTROPY); } static int random_source_handler(SYSCTL_HANDLER_ARGS) { struct epoch_tracker et; struct random_sources *rrs; struct sbuf sbuf; int error, count; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 64, req); count = 0; epoch_enter_preempt(rs_epoch, &et); CK_LIST_FOREACH(rrs, &source_list, rrs_entries) { sbuf_cat(&sbuf, (count++ ? 
",'" : "'")); sbuf_cat(&sbuf, rrs->rrs_source->rs_ident); sbuf_cat(&sbuf, "'"); } epoch_exit_preempt(rs_epoch, &et); error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } SYSCTL_PROC(_kern_random, OID_AUTO, random_sources, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, random_source_handler, "A", "List of active fast entropy sources."); MODULE_VERSION(random_harvestq, 1); diff --git a/sys/dev/random/ivy.c b/sys/dev/random/rdseed.c similarity index 53% copy from sys/dev/random/ivy.c copy to sys/dev/random/rdseed.c index fa1e4831f1b9..af084aab4ed9 100644 --- a/sys/dev/random/ivy.c +++ b/sys/dev/random/rdseed.c @@ -1,196 +1,169 @@ /*- + * Copyright (c) 2013, 2025, David E. O'Brien * Copyright (c) 2013 The FreeBSD Foundation - * Copyright (c) 2013 David E. O'Brien * Copyright (c) 2012 Konstantin Belousov * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #define RETRY_COUNT 10 -static bool has_rdrand, has_rdseed; -static u_int random_ivy_read(void *, u_int); +static u_int random_rdseed_read(void *, u_int); -static const struct random_source random_ivy = { - .rs_ident = "Intel Secure Key RNG", - .rs_source = RANDOM_PURE_RDRAND, - .rs_read = random_ivy_read +static struct random_source random_rdseed = { + .rs_ident = "Intel Secure Key Seed", + .rs_source = RANDOM_PURE_RDSEED, + .rs_read = random_rdseed_read }; -SYSCTL_NODE(_kern_random, OID_AUTO, rdrand, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, - "rdrand (ivy) entropy source"); -static bool acquire_independent_seed_samples = false; -SYSCTL_BOOL(_kern_random_rdrand, OID_AUTO, rdrand_independent_seed, - CTLFLAG_RWTUN, &acquire_independent_seed_samples, 0, - "If non-zero, use more expensive and slow, but safer, seeded samples " - "where RDSEED is not present."); - -static bool -x86_rdrand_store(u_long *buf) -{ - u_long rndval, seed_iterations, i; - int retry; - - /* Per [1], "§ 5.2.6 Generating Seeds from RDRAND," - * machines lacking RDSEED will guarantee RDRAND is reseeded every 8kB - * of generated output. 
- * - * [1]: https://software.intel.com/en-us/articles/intel-digital-random-number-generator-drng-software-implementation-guide#inpage-nav-6-8 - */ - if (acquire_independent_seed_samples) - seed_iterations = 8 * 1024 / sizeof(*buf); - else - seed_iterations = 1; - - for (i = 0; i < seed_iterations; i++) { - retry = RETRY_COUNT; - __asm __volatile( - "1:\n\t" - "rdrand %1\n\t" /* read randomness into rndval */ - "jc 2f\n\t" /* CF is set on success, exit retry loop */ - "dec %0\n\t" /* otherwise, retry-- */ - "jne 1b\n\t" /* and loop if retries are not exhausted */ - "2:" - : "+r" (retry), "=r" (rndval) : : "cc"); - if (retry == 0) - return (false); - } - *buf = rndval; - return (true); -} +SYSCTL_NODE(_kern_random, OID_AUTO, rdseed, CTLFLAG_RW, 0, + "rdseed (x86) entropy source"); +/* XXX: kern.random.rdseed.enabled=0 also disables RDRAND */ +static bool enabled = true; +SYSCTL_BOOL(_kern_random_rdseed, OID_AUTO, enabled, CTLFLAG_RDTUN, &enabled, 0, + "If zero, disable the use of RDSEED."); static bool x86_rdseed_store(u_long *buf) { u_long rndval; int retry; retry = RETRY_COUNT; __asm __volatile( "1:\n\t" "rdseed %1\n\t" /* read randomness into rndval */ "jc 2f\n\t" /* CF is set on success, exit retry loop */ "dec %0\n\t" /* otherwise, retry-- */ "jne 1b\n\t" /* and loop if retries are not exhausted */ "2:" : "+r" (retry), "=r" (rndval) : : "cc"); *buf = rndval; return (retry != 0); } -static bool -x86_unimpl_store(u_long *buf __unused) -{ - - panic("%s called", __func__); -} - -DEFINE_IFUNC(static, bool, x86_rng_store, (u_long *buf)) -{ - has_rdrand = (cpu_feature2 & CPUID2_RDRAND); - has_rdseed = (cpu_stdext_feature & CPUID_STDEXT_RDSEED); - - if (has_rdseed) - return (x86_rdseed_store); - else if (has_rdrand) - return (x86_rdrand_store); - else - return (x86_unimpl_store); -} - /* It is required that buf length is a multiple of sizeof(u_long). 
*/ static u_int -random_ivy_read(void *buf, u_int c) +random_rdseed_read(void *buf, u_int c) { u_long *b, rndval; u_int count; KASSERT(c % sizeof(*b) == 0, ("partial read %d", c)); b = buf; for (count = c; count > 0; count -= sizeof(*b)) { - if (!x86_rng_store(&rndval)) + if (!x86_rdseed_store(&rndval)) break; *b++ = rndval; } return (c - count); } static int -rdrand_modevent(module_t mod, int type, void *unused) +rdseed_modevent(module_t mod, int type, void *unused) { + bool has_rdseed; int error = 0; + has_rdseed = (cpu_stdext_feature & CPUID_STDEXT_RDSEED); + switch (type) { case MOD_LOAD: - if (has_rdrand || has_rdseed) { - random_source_register(&random_ivy); - printf("random: fast provider: \"%s\"\n", random_ivy.rs_ident); + if (has_rdseed && enabled) { + random_source_register(&random_rdseed); + printf("random: fast provider: \"%s\"\n", random_rdseed.rs_ident); } break; case MOD_UNLOAD: - if (has_rdrand || has_rdseed) - random_source_deregister(&random_ivy); + if (has_rdseed) + random_source_deregister(&random_rdseed); break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } -static moduledata_t rdrand_mod = { - "rdrand", - rdrand_modevent, +static moduledata_t rdseed_mod = { + "rdseed", + rdseed_modevent, 0 }; -DECLARE_MODULE(rdrand, rdrand_mod, SI_SUB_RANDOM, SI_ORDER_FOURTH); -MODULE_VERSION(rdrand, 1); -MODULE_DEPEND(rdrand, random_harvestq, 1, 1, 1); +DECLARE_MODULE(rdseed, rdseed_mod, SI_SUB_RANDOM, SI_ORDER_FOURTH); +MODULE_VERSION(rdseed, 1); +MODULE_DEPEND(rdseed, random_harvestq, 1, 1, 1); + +/* + * Intel's RDSEED Entropy Assessment Report min-entropy claim is 0.6 Shannons + * per bit of data output. 
Refer to the following Entropy Source Validation + * (ESV) certificates: + * + * E#87: Junos OS Physical Entropy Source - Broadwell EP 10-Core Die + * Broadwell-EP-10 FCLGA2011 Intel(R) Xeon(R) E5-2620 V4 Processor + * https://csrc.nist.gov/projects/cryptographic-module-validation-program/entropy-validations/certificate/87 + * (URLs below omitted for brevity but follow same format.) + * + * E#121: Junos OS Physical Entropy Source - Intel Atom C3000 Series + * (Denverton) 16 Core Die with FCBGA1310 Package + * + * E#122: Junos OS Physical Entropy Source - Intel Xeon D-1500 Family + * (Broadwell) 8 Core Die with FCBGA1667 Package + * + * E#123: Junos OS Physical Entropy Source - Intel Xeon D-2100 Series + * (Skylake) 18 Core Die with FCBGA2518 Package + * + * E#141: Junos OS Physical Entropy Source - Intel Xeon D-10 Series + * (Ice Lake-D-10) Die with FCBGA2227 Package + * + * E#169: Junos OS Physical Entropy Source - Intel Xeon AWS-1000 v4 and + * E5 v4 (Broadwell EP) 15 Core Die with FCLGA2011 Package + */ diff --git a/sys/modules/Makefile b/sys/modules/Makefile index efe1e7ba3ab6..d877167a7eae 100644 --- a/sys/modules/Makefile +++ b/sys/modules/Makefile @@ -1,955 +1,957 @@ SYSDIR?=${SRCTOP}/sys .include "${SYSDIR}/conf/kern.opts.mk" SUBDIR_PARALLEL= # Modules that include binary-only blobs of microcode should be selectable by # MK_SOURCELESS_UCODE option (see below). 
.include "${SYSDIR}/conf/config.mk" .if defined(MODULES_OVERRIDE) && !defined(ALL_MODULES) SUBDIR=${MODULES_OVERRIDE} .else SUBDIR= \ ${_3dfx} \ ${_3dfx_linux} \ ${_aac} \ ${_aacraid} \ accf_data \ accf_dns \ accf_http \ accf_tls \ acl_nfs4 \ acl_posix1e \ ${_acpi} \ ae \ ${_aesni} \ age \ ${_agp} \ ahci \ aic7xxx \ alc \ ale \ alq \ ${_amd_ecc_inject} \ ${_amdgpio} \ ${_amdsmu} \ ${_amdsbwd} \ ${_amdsmn} \ ${_amdtemp} \ ${_aout} \ ${_arcmsr} \ ${_allwinner} \ ${_armv8crypto} \ ${_armv8_rng} \ ${_asmc} \ ata \ ath \ ath_dfs \ ath_hal \ ath_hal_ar5210 \ ath_hal_ar5211 \ ath_hal_ar5212 \ ath_hal_ar5416 \ ath_hal_ar9300 \ ath_main \ ath_rate \ ${_autofs} \ axgbe \ backlight \ ${_bce} \ ${_bcm283x_clkman} \ ${_bcm283x_pwm} \ bfe \ bge \ bhnd \ ${_bxe} \ ${_bios} \ ${_blake2} \ ${_bnxt} \ bridgestp \ bwi \ bwn \ ${_bytgpio} \ ${_chvgpio} \ cam \ ${_cardbus} \ ${_carp} \ cas \ ${_cbb} \ cc \ ${_ccp} \ cd9660 \ cd9660_iconv \ ${_cfi} \ ${_chromebook_platform} \ ${_ciss} \ ${_coretemp} \ ${_cpsw} \ ${_cpuctl} \ ${_cpufreq} \ ${_crypto} \ ${_cryptodev} \ ctl \ ${_cxgb} \ ${_cxgbe} \ dc \ dcons \ dcons_crom \ ${_dpaa2} \ ${_dpdk_lpm4} \ ${_dpdk_lpm6} \ ${_dpms} \ dummymbuf \ dummynet \ ${_dwwdt} \ e6000sw \ ${_efirt} \ ${_em} \ ${_ena} \ ${_enic} \ ${_enetc} \ ${_et} \ etherswitch \ evdev \ ${_exca} \ ext2fs \ fdc \ fdescfs \ ${_felix} \ ${_ffec} \ ${_fib_dxr} \ filemon \ firewire \ firmware \ flash \ ${_ftgpio} \ ${_ftwd} \ fusefs \ ${_fxp} \ gem \ geom \ ${_glxiic} \ ${_glxsb} \ gpio \ ${_gve} \ hid \ hifn \ ${_hpt27xx} \ ${_hptiop} \ ${_hptmv} \ ${_hptnr} \ ${_hptrr} \ hwpmc \ ${_hwt} \ ${_hyperv} \ i2c \ ${_iavf} \ ${_ibcore} \ ${_ichwd} \ ${_ice} \ ${_ice_ddp} \ ${_irdma} \ ${_ida} \ if_bridge \ ${_if_cgem} \ if_disc \ if_edsc \ ${_if_enc} \ if_epair \ ${_genet} \ ${_if_gif} \ ${_if_gre} \ ${_if_me} \ if_infiniband \ if_lagg \ if_ovpn \ ${_if_stf} \ if_tuntap \ if_vlan \ if_vxlan \ ${_if_wg} \ iflib \ ${_igc} \ imgact_binmisc \ ${_imx} \ ${_intelspi} \ ${_io} \ ${_ioat} 
\ ${_ipoib} \ ipdivert \ ${_ipfilter} \ ${_ipfw} \ ipfw_nat \ ${_ipfw_nat64} \ ${_ipfw_nptv6} \ ${_ipfw_pmod} \ ${_ipmi} \ ip6_mroute_mod \ ip_mroute_mod \ ${_ips} \ ${_ipsec} \ ${_ipw} \ ${_ipwfw} \ ${_isci} \ ${_iser} \ isp \ ${_ispfw} \ ${_itwd} \ ${_iwi} \ ${_iwifw} \ ${_iwlwifi} \ ${_iwm} \ ${_iwn} \ ${_iwnfw} \ ${_iwx} \ ${_ix} \ ${_ixv} \ ${_ixl} \ jme \ kbdmux \ kgssapi \ kgssapi_krb5 \ khelp \ krpc \ ksyms \ le \ lge \ libalias \ libiconv \ libmchain \ lindebugfs \ linuxkpi \ linuxkpi_hdmi \ linuxkpi_video \ linuxkpi_wlan \ ${_lio} \ lpt \ ${_mac_biba} \ ${_mac_bsdextended} \ ${_mac_ddb} \ ${_mac_do} \ ${_mac_ifoff} \ ${_mac_ipacl} \ ${_mac_lomac} \ ${_mac_mls} \ ${_mac_none} \ ${_mac_ntpd} \ ${_mac_partition} \ ${_mac_pimd} \ ${_mac_portacl} \ ${_mac_priority} \ ${_mac_seeotheruids} \ ${_mac_stub} \ ${_mac_test} \ ${_mac_veriexec} \ ${_mac_veriexec_sha1} \ ${_mac_veriexec_sha256} \ ${_mac_veriexec_sha384} \ ${_mac_veriexec_sha512} \ ${_malo} \ ${_mana} \ md \ mdio \ ${_mgb} \ mem \ mfi \ mii \ miiproxy \ mlx \ mlxfw \ ${_mlx4} \ ${_mlx4ib} \ ${_mlx4en} \ ${_mlx5} \ ${_mlx5en} \ ${_mlx5ib} \ mmc \ mmcsd \ ${_mpi3mr} \ ${_mpr} \ ${_mps} \ mpt \ mqueue \ mrsas \ msdosfs \ msdosfs_iconv \ msk \ ${_mthca} \ mvs \ mwl \ ${_mwlfw} \ mxge \ my \ ${_nctgpio} \ ${_ncthwm} \ ${_neta} \ netlink \ ${_netgraph} \ ${_nfe} \ nfscl \ nfscommon \ nfsd \ nfslockd \ nfssvc \ nlsysevent \ nge \ nmdm \ nullfs \ ${_ntb} \ nvd \ ${_nvdimm} \ nvme \ nvmf \ ${_nvram} \ oce \ ${_ocs_fc} \ ${_ossl} \ otus \ ${_otusfw} \ ow \ p9fs \ ${_padlock} \ ${_padlock_rng} \ ${_pchtherm} \ ${_pcfclock} \ ${_pf} \ ${_pflog} \ ${_pflow} \ ${_pfsync} \ plip \ ${_pms} \ ppbus \ ppc \ ppi \ pps \ procfs \ proto \ pseudofs \ ${_pst} \ ${_pt} \ pty \ puc \ pwm \ ${_qat} \ ${_qatfw} \ ${_qat_c2xxx} \ ${_qat_c2xxxfw} \ ${_qlxge} \ ${_qlxgb} \ ${_qlxgbe} \ ${_qlnx} \ ral \ ${_ralfw} \ ${_random_fortuna} \ ${_random_other} \ rc4 \ ${_rdma} \ ${_rdrand_rng} \ + ${_rdseed_rng} \ re \ rl \ ${_rockchip} \ 
rtsx \ ${_rtw88} \ ${_rtw89} \ rtwn \ rtwn_pci \ rtwn_usb \ ${_rtwnfw} \ ${_s3} \ ${_safe} \ safexcel \ ${_sbni} \ scc \ ${_sctp} \ sdhci \ ${_sdhci_acpi} \ ${_sdhci_fdt} \ sdhci_pci \ sdio \ ${_sff} \ sem \ send \ ${_sfxge} \ sge \ ${_sgx} \ ${_sgx_linux} \ siftr \ siis \ sis \ sk \ ${_smartpqi} \ smbfs \ snp \ sound \ ${_speaker} \ spi \ ${_splash} \ ste \ stge \ ${_sume} \ ${_superio} \ ${_p2sb} \ sym \ ${_syscons} \ sysvipc \ tarfs \ tcp \ ${_ti} \ tmpfs \ ${_toecore} \ tpm \ tws \ uart \ udf \ udf_iconv \ ufs \ ${_ufshci} \ uinput \ unionfs \ usb \ ${_vesa} \ ${_vf_i2c} \ virtio \ vge \ ${_viawd} \ videomode \ vkbd \ ${_vmd} \ ${_vmm} \ ${_vmware} \ vr \ vte \ ${_wbwd} \ ${_wdatwd} \ wlan \ wlan_acl \ wlan_amrr \ wlan_ccmp \ wlan_gcmp \ wlan_rssadapt \ wlan_tkip \ wlan_wep \ wlan_xauth \ ${_wpi} \ ${_wpifw} \ ${_wtap} \ ${_x86bios} \ xdr \ xl \ xz \ zlib .if ${MK_AUTOFS} != "no" || defined(ALL_MODULES) _autofs= autofs .endif .if ${MK_DTRACE} != "no" || defined(ALL_MODULES) .if ${KERN_OPTS:MKDTRACE_HOOKS} SUBDIR+= dtrace .endif SUBDIR+= opensolaris .endif .if !${MACHINE_ABI:Mlong32} _bnxt= bnxt .endif .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) .if exists(${SRCTOP}/sys/opencrypto) _crypto= crypto _cryptodev= cryptodev _random_fortuna=random_fortuna _random_other= random_other .endif .endif .if ${MK_CUSE} != "no" || defined(ALL_MODULES) SUBDIR+= cuse .endif .if ${MK_EFI} != "no" .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" _efirt= efirt .endif .endif .if (${MK_INET_SUPPORT} != "no" || ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _carp= carp _toecore= toecore _if_enc= if_enc _if_gif= if_gif _if_gre= if_gre .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _if_wg= if_wg .endif _ipfw_pmod= ipfw_pmod .if ${KERN_OPTS:MIPSEC_SUPPORT} && !${KERN_OPTS:MIPSEC} _ipsec= ipsec .endif .if ${KERN_OPTS:MSCTP_SUPPORT} || ${KERN_OPTS:MSCTP} _sctp= sctp .endif .endif .if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \ 
defined(ALL_MODULES) _if_stf= if_stf .endif .if (${KERN_OPTS:MINET} && ${MK_INET_SUPPORT} != "no") || defined(ALL_MODULES) _if_me= if_me _ipfw= ipfw .if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES) _ipfw_nat64= ipfw_nat64 .endif .endif .if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES) _ipfw_nptv6= ipfw_nptv6 .endif .if ${MK_IPFILTER} != "no" || defined(ALL_MODULES) _ipfilter= ipfilter .endif .if ${MK_INET_SUPPORT} != "no" && ${KERN_OPTS:MFIB_ALGO} && ${KERN_OPTS:MINET} _dpdk_lpm4= dpdk_lpm4 _fib_dxr= fib_dxr .endif .if ${MK_INET6_SUPPORT} != "no" && ${KERN_OPTS:MFIB_ALGO} _dpdk_lpm6= dpdk_lpm6 .endif .if ${MK_ISCSI} != "no" || defined(ALL_MODULES) SUBDIR+= cfiscsi SUBDIR+= iscsi .endif .if !empty(OPT_FDT) SUBDIR+= fdt .endif # Linuxulator .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "i386" SUBDIR+= linprocfs SUBDIR+= linsysfs .endif .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" SUBDIR+= linux .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" SUBDIR+= linux64 SUBDIR+= linux_common .endif .if ${MACHINE_CPUARCH} != "arm" .if ${MK_OFED} != "no" || defined(ALL_MODULES) _ibcore= ibcore _ipoib= ipoib _iser= iser _mthca= mthca _rdma= rdma .endif .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "i386" || ${MACHINE_ARCH:Mpowerpc64*} != "" _ipmi= ipmi _mlx4= mlx4 _mlx5= mlx5 .if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _mlx4en= mlx4en _mlx5en= mlx5en .endif .if ${MK_OFED} != "no" || defined(ALL_MODULES) _mlx4ib= mlx4ib _mlx5ib= mlx5ib .endif .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "i386" _ena= ena _gve= gve _igc= igc _iwlwifi= iwlwifi _rtw88= rtw88 _rtw89= rtw89 _vmware= vmware .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "i386" || ${MACHINE_ARCH} == 
"armv7" || \ ${MACHINE_ARCH:Mpowerpc64*} != "" _ossl= ossl .endif # MAC framework .if ${KERN_OPTS:MMAC} || defined(ALL_MODULES) _mac_biba= mac_biba _mac_bsdextended= mac_bsdextended .if ${KERN_OPTS:MDDB} || defined(ALL_MODULES) _mac_ddb= mac_ddb .endif _mac_do= mac_do _mac_ifoff= mac_ifoff _mac_ipacl= mac_ipacl _mac_lomac= mac_lomac _mac_mls= mac_mls _mac_none= mac_none _mac_ntpd= mac_ntpd _mac_partition= mac_partition _mac_pimd= mac_pimd _mac_portacl= mac_portacl _mac_priority= mac_priority _mac_seeotheruids= mac_seeotheruids _mac_stub= mac_stub _mac_test= mac_test .if ${MK_VERIEXEC} != "no" || defined(ALL_MODULES) _mac_veriexec= mac_veriexec _mac_veriexec_sha1= mac_veriexec_sha1 _mac_veriexec_sha256= mac_veriexec_sha256 _mac_veriexec_sha384= mac_veriexec_sha384 _mac_veriexec_sha512= mac_veriexec_sha512 .endif .endif .if ${MK_NETGRAPH} != "no" || defined(ALL_MODULES) _netgraph= netgraph .endif .if (${MK_PF} != "no" && (${MK_INET_SUPPORT} != "no" || \ ${MK_INET6_SUPPORT} != "no")) || defined(ALL_MODULES) _pf= pf _pflog= pflog _pflow= pflow .if ${MK_INET_SUPPORT} != "no" _pfsync= pfsync .endif .endif .if ${MK_SOURCELESS_UCODE} != "no" _bce= bce _fxp= fxp _ispfw= ispfw _ti= ti _mwlfw= mwlfw _otusfw= otusfw _ralfw= ralfw _rtwnfw= rtwnfw .endif .if ${MK_SOURCELESS_UCODE} != "no" && ${MACHINE_CPUARCH} != "arm" && \ ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \ ${MACHINE_CPUARCH} != "riscv" _cxgbe= cxgbe .endif # This has only been tested on amd64 and arm64 .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "aarch64" _mpi3mr=mpi3mr .endif # Specific to the Raspberry Pi. 
.if ${MACHINE_CPUARCH} == "aarch64" _genet= genet .endif .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "aarch64" || \ ${MACHINE_ARCH:Mpowerpc64*} _ice= ice .if ${MK_SOURCELESS_UCODE} != "no" _ice_ddp= ice_ddp .endif .if ${MK_OFED} != "no" || defined(ALL_MODULES) .if ${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no" _irdma= irdma .endif .endif .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "arm" || \ ${MACHINE_CPUARCH} == "riscv" .if !empty(OPT_FDT) _allwinner= allwinner _if_cgem= if_cgem _sdhci_fdt= sdhci_fdt .endif .endif # These rely on 64bit atomics .if ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" _mps= mps _mpr= mpr .endif .if ${MK_TESTS} != "no" || defined(ALL_MODULES) SUBDIR+= ktest SUBDIR+= tests .endif .if ${MK_ZFS} != "no" || defined(ALL_MODULES) SUBDIR+= zfs .endif .if ${MK_SOURCELESS_UCODE} != "no" _cxgb= cxgb .endif .if ${MACHINE_CPUARCH} == "aarch64" _armv8crypto= armv8crypto _armv8_rng= armv8_rng _dpaa2= dpaa2 _sff= sff _em= em _hyperv= hyperv _vf_i2c= vf_i2c .if !empty(OPT_FDT) _dwwdt= dwwdt _enetc= enetc _felix= felix _rockchip= rockchip .endif .endif .if ${MACHINE_CPUARCH} == "arm" _imx= imx .endif .if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64" _agp= agp .if ${MACHINE_CPUARCH} == "i386" || !empty(COMPAT_FREEBSD32_ENABLED) _aout= aout .endif _bios= bios .if ${MK_SOURCELESS_UCODE} != "no" _bxe= bxe .endif _cardbus= cardbus _cbb= cbb _cpuctl= cpuctl _cpufreq= cpufreq _dpms= dpms _em= em _et= et _ftgpio= ftgpio _ftwd= ftwd _exca= exca _io= io _itwd= itwd _ix= ix _ixv= ixv .if ${MK_SOURCELESS_UCODE} != "no" _lio= lio .endif _mana= mana _mgb= mgb _nctgpio= nctgpio _ncthwm= ncthwm _ntb= ntb _ocs_fc= ocs_fc _p2sb= p2sb _qat_c2xxx= qat_c2xxx _qat_c2xxxfw= qat_c2xxxfw _safe= safe _speaker= speaker _splash= splash _syscons= syscons _wbwd= wbwd _wdatwd= wdatwd _aac= aac _aacraid= aacraid _acpi= acpi .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _aesni= aesni .endif 
_amd_ecc_inject=amd_ecc_inject _amdsmu= amdsmu _amdsbwd= amdsbwd _amdsmn= amdsmn _amdtemp= amdtemp _arcmsr= arcmsr _asmc= asmc .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _blake2= blake2 .endif _bytgpio= bytgpio _chvgpio= chvgpio _ciss= ciss _chromebook_platform= chromebook_platform _coretemp= coretemp .if ${MK_SOURCELESS_HOST} != "no" && empty(KCSAN_ENABLED) _hpt27xx= hpt27xx .endif _hptiop= hptiop .if ${MK_SOURCELESS_HOST} != "no" && empty(KCSAN_ENABLED) _hptmv= hptmv _hptnr= hptnr _hptrr= hptrr .endif _hyperv= hyperv _ichwd= ichwd _ida= ida _intelspi= intelspi _ips= ips _isci= isci _ipw= ipw _iwi= iwi _iwm= iwm _iwn= iwn .if ${MK_SOURCELESS_UCODE} != "no" _ipwfw= ipwfw _iwifw= iwifw _iwnfw= iwnfw .endif _nfe= nfe _nvram= nvram .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _padlock= padlock _padlock_rng= padlock_rng _rdrand_rng= rdrand_rng +_rdseed_rng= rdseed_rng .endif _pchtherm = pchtherm _s3= s3 _sdhci_acpi= sdhci_acpi _superio= superio _vesa= vesa _viawd= viawd _vmd= vmd _wpi= wpi .if ${MK_SOURCELESS_UCODE} != "no" _wpifw= wpifw .endif .if ${KERN_OPTS:MVIMAGE} _wtap= wtap .endif _x86bios= x86bios .endif .if ${MACHINE_CPUARCH} == "amd64" _amdgpio= amdgpio _ccp= ccp _enic= enic _iavf= iavf _ioat= ioat _iwx= iwx _ixl= ixl _nvdimm= nvdimm _pms= pms _pt= pt _qat= qat .if ${MK_SOURCELESS_UCODE} != "no" _qatfw= qatfw .endif _qlxge= qlxge _qlxgb= qlxgb _sume= sume .if ${MK_SOURCELESS_UCODE} != "no" _qlxgbe= qlxgbe _qlnx= qlnx .endif _sfxge= sfxge _sgx= sgx _sgx_linux= sgx_linux _smartpqi= smartpqi _p2sb= p2sb .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" _hwt= hwt .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "riscv" .if ${MK_BHYVE} != "no" || defined(ALL_MODULES) .if ${KERN_OPTS:MSMP} _vmm= vmm .endif .endif .endif .if ${MACHINE_CPUARCH} == "i386" # XXX some of these can move to the general case when de-i386'ed # XXX some of these can move now, but are untested on other 
architectures. _3dfx= 3dfx _3dfx_linux= 3dfx_linux _glxiic= glxiic _glxsb= glxsb _pcfclock= pcfclock _pst= pst _sbni= sbni .endif .if ${MACHINE_ARCH} == "armv7" _cfi= cfi _cpsw= cpsw .endif .if ${MACHINE_CPUARCH} == "powerpc" _aacraid= aacraid _agp= agp _an= an _cardbus= cardbus _cbb= cbb _cfi= cfi _cpufreq= cpufreq _exca= exca _ffec= ffec .endif .if ${MACHINE_ARCH:Mpowerpc64*} != "" _ixl= ixl _nvram= opal_nvram .endif .if ${MACHINE_CPUARCH} == "powerpc" && ${MACHINE_ARCH} != "powerpcspe" # Don't build powermac_nvram for powerpcspe, it's never supported. _nvram+= powermac_nvram .endif .if ${MACHINE_CPUARCH} == "arm" || ${MACHINE_CPUARCH} == "aarch64" _bcm283x_clkman= bcm283x_clkman _bcm283x_pwm= bcm283x_pwm _neta= neta .endif .if !(${COMPILER_TYPE} == "clang" && ${COMPILER_VERSION} < 110000) # LLVM 10 crashes when building if_malo_pci.c, fixed in LLVM11: # https://bugs.llvm.org/show_bug.cgi?id=44351 _malo= malo .endif .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "aarch64" _ufshci=ufshci .endif SUBDIR+=${MODULES_EXTRA} .for reject in ${WITHOUT_MODULES} SUBDIR:= ${SUBDIR:N${reject}} .endfor .endif # MODULES_OVERRIDE -- Keep last # Calling kldxref(8) for each module is expensive. 
.if !defined(NO_XREF) .MAKEFLAGS+= -DNO_XREF afterinstall: .PHONY ${KLDXREF_CMD} ${DESTDIR}${KMODDIR} .if defined(NO_ROOT) && defined(METALOG) echo ".${DISTBASE}${KMODDIR}/linker.hints type=file mode=0644 uname=root gname=wheel" | \ cat -l >> ${METALOG} .endif .endif SUBDIR:= ${SUBDIR:u:O} .include diff --git a/sys/modules/rdrand_rng/Makefile b/sys/modules/rdrand_rng/Makefile index 7fa7a8bb8fb9..496fc863033f 100644 --- a/sys/modules/rdrand_rng/Makefile +++ b/sys/modules/rdrand_rng/Makefile @@ -1,14 +1,9 @@ .PATH: ${SRCTOP}/sys/dev/random KMOD= rdrand_rng SRCS= ivy.c SRCS+= bus_if.h device_if.h CFLAGS+= -I${SRCTOP}/sys -# ld.bfd doesn't support ifuncs invoked non-PIC -.if ${MACHINE_CPUARCH} == "i386" -CFLAGS.gcc= -fPIC -.endif - .include diff --git a/sys/modules/rdseed_rng/Makefile b/sys/modules/rdseed_rng/Makefile new file mode 100644 index 000000000000..6593505546dd --- /dev/null +++ b/sys/modules/rdseed_rng/Makefile @@ -0,0 +1,9 @@ +.PATH: ${SRCTOP}/sys/dev/random + +KMOD= rdseed_rng +SRCS= rdseed.c +SRCS+= bus_if.h device_if.h + +CFLAGS+= -I${SRCTOP}/sys + +.include diff --git a/sys/sys/random.h b/sys/sys/random.h index 4dc5b74abe4f..af6b1e117423 100644 --- a/sys/sys/random.h +++ b/sys/sys/random.h @@ -1,172 +1,173 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000-2015, 2017 Mark R. V. Murray * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _SYS_RANDOM_H_ #define _SYS_RANDOM_H_ #include #ifdef _KERNEL struct uio; /* * In the loadable random world, there are set of dangling pointers left in the * core kernel: * * read_random, read_random_uio, is_random_seeded are function pointers, * rather than functions. * * p_random_alg_context is a true pointer in loadable random kernels. * * These are initialized at SI_SUB_RANDOM:SI_ORDER_SECOND during boot. The * read-type pointers are initialized by random_alg_context_init() in * randomdev.c and p_random_alg_context in the algorithm, e.g., fortuna.c's * random_fortuna_init_alg(). The nice thing about function pointers is they * have a similar calling convention to ordinary functions. * * (In !loadable, the read_random, etc, routines are just plain functions; * p_random_alg_context is a macro for the public visibility * &random_alg_context.) 
*/ #if defined(RANDOM_LOADABLE) extern void (*_read_random)(void *, u_int); extern int (*_read_random_uio)(struct uio *, bool); extern bool (*_is_random_seeded)(void); #define read_random(a, b) (*_read_random)(a, b) #define read_random_uio(a, b) (*_read_random_uio)(a, b) #define is_random_seeded() (*_is_random_seeded)() #else void read_random(void *, u_int); int read_random_uio(struct uio *, bool); bool is_random_seeded(void); #endif /* * Note: if you add or remove members of random_entropy_source, remember to * also update the strings in the static array random_source_descr[] in * random_harvestq.c. */ enum random_entropy_source { RANDOM_START = 0, RANDOM_CACHED = 0, /* Environmental sources */ RANDOM_ATTACH, RANDOM_KEYBOARD, RANDOM_MOUSE, RANDOM_NET_TUN, RANDOM_NET_ETHER, RANDOM_NET_NG, RANDOM_INTERRUPT, RANDOM_SWI, RANDOM_FS_ATIME, RANDOM_UMA, /* Special!! UMA/SLAB Allocator */ RANDOM_CALLOUT, RANDOM_RANDOMDEV, RANDOM_ENVIRONMENTAL_END = RANDOM_RANDOMDEV, /* Fast hardware random-number sources from here on. 
*/ RANDOM_PURE_START, RANDOM_PURE_SAFE = RANDOM_PURE_START, RANDOM_PURE_GLXSB, RANDOM_PURE_HIFN, RANDOM_PURE_RDRAND, + RANDOM_PURE_RDSEED, RANDOM_PURE_NEHEMIAH, RANDOM_PURE_RNDTEST, RANDOM_PURE_VIRTIO, RANDOM_PURE_BROADCOM, RANDOM_PURE_CCP, RANDOM_PURE_DARN, RANDOM_PURE_TPM, RANDOM_PURE_VMGENID, RANDOM_PURE_QUALCOMM, RANDOM_PURE_ARMV8, RANDOM_PURE_ARM_TRNG, ENTROPYSOURCE }; _Static_assert(ENTROPYSOURCE <= 32, "hardcoded assumption that values fit in a typical word-sized bitset"); #define RANDOM_CACHED_BOOT_ENTROPY_MODULE "boot_entropy_cache" #define RANDOM_PLATFORM_BOOT_ENTROPY_MODULE "boot_entropy_platform" extern u_int hc_source_mask; void random_harvest_queue_(const void *, u_int, enum random_entropy_source); void random_harvest_fast_(const void *, u_int); void random_harvest_direct_(const void *, u_int, enum random_entropy_source); static __inline void random_harvest_queue(const void *entropy, u_int size, enum random_entropy_source origin) { if (hc_source_mask & (1 << origin)) random_harvest_queue_(entropy, size, origin); } static __inline void random_harvest_fast(const void *entropy, u_int size, enum random_entropy_source origin) { if (hc_source_mask & (1 << origin)) random_harvest_fast_(entropy, size); } static __inline void random_harvest_direct(const void *entropy, u_int size, enum random_entropy_source origin) { if (hc_source_mask & (1 << origin)) random_harvest_direct_(entropy, size, origin); } #if defined(RANDOM_ENABLE_UMA) #define random_harvest_fast_uma(a, b, c) random_harvest_fast(a, b, c) #else /* !defined(RANDOM_ENABLE_UMA) */ #define random_harvest_fast_uma(a, b, c) do {} while (0) #endif /* defined(RANDOM_ENABLE_UMA) */ #if defined(RANDOM_ENABLE_ETHER) #define random_harvest_queue_ether(a, b) random_harvest_queue(a, b, RANDOM_NET_ETHER) #else /* !defined(RANDOM_ENABLE_ETHER) */ #define random_harvest_queue_ether(a, b) do {} while (0) #endif /* defined(RANDOM_ENABLE_ETHER) */ #else /* !_KERNEL */ #if defined(_FORTIFY_SOURCE) && _FORTIFY_SOURCE > 0 
#include #endif #endif /* _KERNEL */ #define GRND_NONBLOCK 0x1 #define GRND_RANDOM 0x2 #define GRND_INSECURE 0x4 __BEGIN_DECLS ssize_t getrandom(void *buf, size_t buflen, unsigned int flags); __END_DECLS #endif /* _SYS_RANDOM_H_ */