diff --git a/share/man/man9/Makefile b/share/man/man9/Makefile
index a284eaca5ecb..6af9880b8d57 100644
--- a/share/man/man9/Makefile
+++ b/share/man/man9/Makefile
@@ -1,2509 +1,2510 @@
.include <src.opts.mk>
MAN= accept_filter.9 \ accf_data.9 \ accf_dns.9 \ accf_http.9 \ accf_tls.9 \ acl.9 \ alq.9 \ altq.9 \ atomic.9 \ backlight.9 \ bhnd.9 \ bhnd_erom.9 \ bios.9 \ bitset.9 \ bpf.9 \ buf.9 \ buf_ring.9 \ BUF_ISLOCKED.9 \ BUF_LOCK.9 \ BUF_LOCKFREE.9 \ BUF_LOCKINIT.9 \ BUF_RECURSED.9 \ BUF_TIMELOCK.9 \ BUF_UNLOCK.9 \ bus_activate_resource.9 \ BUS_ADD_CHILD.9 \ bus_adjust_resource.9 \ bus_alloc_resource.9 \ BUS_BIND_INTR.9 \ bus_child_present.9 \ BUS_CHILD_DELETED.9 \ BUS_CHILD_DETACHED.9 \ BUS_CHILD_LOCATION.9 \ BUS_CHILD_PNPINFO.9 \ BUS_CONFIG_INTR.9 \ bus_delayed_attach_children.9 \ BUS_DESCRIBE_INTR.9 \ bus_dma.9 \ bus_generic_attach.9 \ bus_generic_detach.9 \ bus_generic_new_pass.9 \ bus_generic_print_child.9 \ bus_generic_read_ivar.9 \ bus_generic_shutdown.9 \ BUS_GET_CPUS.9 \ BUS_GET_PROPERTY.9 \ bus_get_resource.9 \ BUS_HINTED_CHILD.9 \ bus_map_resource.9 \ BUS_NEW_PASS.9 \ BUS_PRINT_CHILD.9 \ BUS_READ_IVAR.9 \ BUS_RESCAN.9 \ bus_release_resource.9 \ bus_set_pass.9 \ bus_set_resource.9 \ BUS_SETUP_INTR.9 \ bus_space.9 \ byteorder.9 \ callout.9 \ casuword.9 \ cd.9 \ cdefs.9 \ cnv.9 \ condvar.9 \ config_intrhook.9 \ contigmalloc.9 \ copy.9 \ counter.9 \ cpu_machdep.9 \ cpuset.9 \ cr_bsd_visible.9 \ cr_cansee.9 \ cr_canseejailproc.9 \ cr_canseeothergids.9 \ cr_canseeotheruids.9 \ critical_enter.9 \ crypto.9 \ crypto_buffer.9 \ crypto_driver.9 \ crypto_request.9 \ crypto_session.9 \ CTASSERT.9 \ DB_COMMAND.9 \ DECLARE_GEOM_CLASS.9 \ DECLARE_MODULE.9 \ DEFINE_IFUNC.9 \ DELAY.9 \ devclass.9 \ devclass_find.9 \ devclass_get_device.9 \ devclass_get_devices.9 \ devclass_get_drivers.9 \ devclass_get_maxunit.9 \ devclass_get_name.9 \ devclass_get_softc.9 \ dev_clone.9 \ devfs_set_cdevpriv.9 \ device.9 \ device_add_child.9 \ DEVICE_ATTACH.9 \ device_delete_child.9 \ device_delete_children.9 \ DEVICE_DETACH.9 \ device_enable.9 \ device_find_child.9 \ device_get_children.9 \ device_get_devclass.9 \ device_get_driver.9 \ device_get_ivars.9 \ device_get_name.9 \ device_get_parent.9 \ device_get_property.9 \ device_get_softc.9 \ device_get_state.9 \ device_get_sysctl.9 \ device_get_unit.9 \ DEVICE_IDENTIFY.9 \ device_printf.9 \ DEVICE_PROBE.9 \ device_probe_and_attach.9 \ device_quiet.9 \ device_set_desc.9 \ device_set_driver.9 \ device_set_flags.9 \ DEVICE_SHUTDOWN.9 \ DEV_MODULE.9 \ dev_refthread.9 \ devctl_notify.9 \ devctl_process_running.9 \ devctl_safe_quote_sb.9 \ devstat.9 \ devtoname.9 \ disk.9 \ dnv.9 \ domain.9 \ domainset.9 \ dpcpu.9 \ drbr.9 \ driver.9 \ DRIVER_MODULE.9 \ efirt.9 \ epoch.9 \ ether_gen_addr.9 \ EVENTHANDLER.9 \ eventtimers.9 \ extattr.9 \ fail.9 \ fdt_pinctrl.9 \ fetch.9 \ firmware.9 \ fpu_kern.9 \ g_access.9 \ g_attach.9 \ g_bio.9 \ g_consumer.9 \ g_data.9 \ get_cyclecount.9 \ getenv.9 \ getnewvnode.9 \ g_event.9 \ g_geom.9 \ g_provider.9 \ g_provider_by_name.9 \ groupmember.9 \ g_wither_geom.9 \ gone_in.9 \ hardclock.9 \ hash.9 \ hashinit.9 \ hexdump.9 \ hhook.9 \ hz.9 \ ieee80211.9 \ ieee80211_amrr.9 \ ieee80211_beacon.9 \ ieee80211_bmiss.9 \ ieee80211_crypto.9 \ ieee80211_ddb.9 \ ieee80211_input.9 \ ieee80211_node.9 \ ieee80211_output.9 \ ieee80211_proto.9 \ ieee80211_radiotap.9 \ ieee80211_regdomain.9 \ ieee80211_scan.9 \ ieee80211_vap.9 \ iflib.9 \ iflibdd.9 \ iflibdi.9 \ iflibtxrx.9 \ ifnet.9 \ inittodr.9 \ insmntque.9 \ intr_event.9 \ intro.9 \ kasan.9 \ KASSERT.9 \ kern_reboot.9
\ kern_testfrwk.9 \ kern_yield.9 \ kernacc.9 \ kernel_mount.9 \ khelp.9 \ kmsan.9 \ kobj.9 \ kproc.9 \ kqueue.9 \ kstack_contains.9 \ kthread.9 \ ktr.9 \ lock.9 \ locking.9 \ LOCK_PROFILING.9 \ mac.9 \ make_dev.9 \ malloc.9 \ mbchain.9 \ mbuf.9 \ mbuf_tags.9 \ mdchain.9 \ memcchr.9 \ memguard.9 \ microseq.9 \ microtime.9 \ microuptime.9 \ mi_switch.9 \ mod_cc.9 \ module.9 \ MODULE_DEPEND.9 \ MODULE_PNP_INFO.9 \ MODULE_VERSION.9 \ mtx_pool.9 \ mutex.9 \ namei.9 \ netisr.9 \ nv.9 \ nvmem.9 \ OF_child.9 \ OF_device_from_xref.9 \ OF_finddevice.9 \ OF_getprop.9 \ OF_node_from_xref.9 \ OF_package_to_path.9 \ ofw_bus_is_compatible.9 \ ofw_bus_status_okay.9 \ ofw_graph.9 \ osd.9 \ owll.9 \ own.9 \ panic.9 \ p_candebug.9 \ p_cansee.9 \ pci.9 \ PCI_IOV_ADD_VF.9 \ PCI_IOV_INIT.9 \ pci_iov_schema.9 \ PCI_IOV_UNINIT.9 \ pfil.9 \ pfind.9 \ pget.9 \ pgfind.9 \ PHOLD.9 \ physio.9 \ pmap.9 \ pmap_activate.9 \ pmap_clear_modify.9 \ pmap_copy.9 \ pmap_enter.9 \ pmap_extract.9 \ pmap_kextract.9 \ pmap_growkernel.9 \ pmap_init.9 \ pmap_is_modified.9 \ pmap_is_prefaultable.9 \ pmap_map.9 \ pmap_mincore.9 \ pmap_object_init_pt.9 \ pmap_page_exists_quick.9 \ pmap_page_init.9 \ pmap_pinit.9 \ pmap_protect.9 \ pmap_qenter.9 \ pmap_quick_enter_page.9 \ pmap_release.9 \ pmap_remove.9 \ pmap_resident_count.9 \ pmap_unwire.9 \ pmap_zero_page.9 \ printf.9 \ prison_check.9 \ priv.9 \ prng.9 \ proc_rwmem.9 \ pseudofs.9 \ psignal.9 \ pwmbus.9 \ random.9 \ random_harvest.9 \ ratecheck.9 \ redzone.9 \ refcount.9 \ regulator.9 \ resettodr.9 \ resource_int_value.9 \ rijndael.9 \ rman.9 \ rmlock.9 \ rtentry.9 \ runqueue.9 \ rwlock.9 \ sbuf.9 \ scheduler.9 \ SDT.9 \ securelevel_gt.9 \ selrecord.9 \ sema.9 \ seqc.9 \ sf_buf.9 \ sglist.9 \ shm_map.9 \ signal.9 \ sleep.9 \ sleepqueue.9 \ smr.9 \ socket.9 \ stack.9 \ store.9 \ style.9 \ style.lua.9 \ ${_superio.9} \ swi.9 \ sx.9 \ syscall_helper_register.9 \ SYSCALL_MODULE.9 \ sysctl.9 \ sysctl_add_oid.9 \ sysctl_ctx_init.9 \ SYSINIT.9 \ taskqueue.9 \ tcp_functions.9 \ thread_exit.9 \ time.9 \ tvtohz.9 \ ucred.9 \ uidinfo.9 \ uio.9 \ unr.9 \ vaccess.9 \ vaccess_acl_nfs4.9 \ vaccess_acl_posix1e.9 \ vflush.9 \ VFS.9 \ vfs_busy.9 \ VFS_CHECKEXP.9 \ vfsconf.9 \ VFS_FHTOVP.9 \ vfs_getnewfsid.9 \ vfs_getopt.9 \ vfs_getvfs.9 \ VFS_MOUNT.9 \ vfs_mountedfrom.9 \ VFS_QUOTACTL.9 \ VFS_ROOT.9 \ vfs_rootmountalloc.9 \ VFS_SET.9 \ VFS_STATFS.9 \ vfs_suser.9 \ VFS_SYNC.9 \ vfs_timestamp.9 \ vfs_unbusy.9 \ VFS_UNMOUNT.9 \ vfs_unmountall.9 \ VFS_VGET.9 \ vget.9 \ vgone.9 \ vhold.9 \ vinvalbuf.9 \ vm_fault_prefault.9 \ vm_map.9 \ vm_map_check_protection.9 \ vm_map_delete.9 \ vm_map_entry_resize_free.9 \ vm_map_find.9 \ vm_map_findspace.9 \ vm_map_inherit.9 \ vm_map_init.9 \ vm_map_insert.9 \ vm_map_lock.9 \ vm_map_lookup.9 \ vm_map_madvise.9 \ vm_map_max.9 \ vm_map_protect.9 \ vm_map_remove.9 \ vm_map_simplify_entry.9 \ vm_map_stack.9 \ vm_map_submap.9 \ vm_map_sync.9 \ vm_map_wire.9 \ vm_page_alloc.9 \ vm_page_bits.9 \ vm_page_busy.9 \ vm_page_deactivate.9 \ vm_page_dontneed.9 \ vm_page_aflag.9 \ vm_page_free.9 \ vm_page_grab.9 \ vm_page_insert.9 \ vm_page_lookup.9 \ vm_page_rename.9 \ vm_page_wire.9 \ vm_set_page_size.9 \ vmem.9 \ vn_deallocate.9 \ vn_fullpath.9 \ vn_isdisk.9 \ vnet.9 \ vnode.9 \ vnode_pager_setsize.9 \ vnode_pager_purge_range.9 \ VOP_ACCESS.9 \ VOP_ACLCHECK.9 \ VOP_ADVISE.9 \ VOP_ADVLOCK.9 \ VOP_ALLOCATE.9 \ VOP_ATTRIB.9 \ VOP_BMAP.9 \ VOP_BWRITE.9 \ VOP_COPY_FILE_RANGE.9 \ VOP_CREATE.9 \ VOP_DEALLOCATE.9 \ VOP_FSYNC.9 \ VOP_GETACL.9 \ VOP_GETEXTATTR.9 \ VOP_GETPAGES.9 \ 
VOP_INACTIVE.9 \ VOP_IOCTL.9 \ VOP_LINK.9 \ VOP_LISTEXTATTR.9 \ VOP_LOCK.9 \ VOP_LOOKUP.9 \ VOP_OPENCLOSE.9 \ VOP_PATHCONF.9 \ VOP_PRINT.9 \ VOP_RDWR.9 \ VOP_READ_PGCACHE.9 \ VOP_READDIR.9 \ VOP_READLINK.9 \ VOP_REALLOCBLKS.9 \ VOP_REMOVE.9 \ VOP_RENAME.9 \ VOP_REVOKE.9 \ VOP_SETACL.9 \ VOP_SETEXTATTR.9 \ VOP_STRATEGY.9 \ VOP_VPTOCNP.9 \ VOP_VPTOFH.9 \ vref.9 \ vrefcnt.9 \ vrele.9 \ vslock.9 \ watchdog.9 \ zero_region.9 \ zone.9 MLINKS= accept_filter.9 accept_filt_add.9 \ accept_filter.9 accept_filt_del.9 \ accept_filter.9 accept_filt_generic_mod_event.9 \ accept_filter.9 accept_filt_get.9 MLINKS+=alq.9 ALQ.9 \ alq.9 alq_close.9 \ alq.9 alq_flush.9 \ alq.9 alq_get.9 \ alq.9 alq_getn.9 \ alq.9 alq_open.9 \ alq.9 alq_open_flags.9 \ alq.9 alq_post.9 \ alq.9 alq_post_flags.9 \ alq.9 alq_write.9 \ alq.9 alq_writen.9 MLINKS+=altq.9 ALTQ.9 MLINKS+=atomic.9 atomic_add.9 \ atomic.9 atomic_clear.9 \ atomic.9 atomic_cmpset.9 \ atomic.9 atomic_fcmpset.9 \ atomic.9 atomic_fetchadd.9 \ atomic.9 atomic_load.9 \ atomic.9 atomic_readandclear.9 \ atomic.9 atomic_set.9 \ atomic.9 atomic_store.9 \ atomic.9 atomic_subtract.9 \ atomic.9 atomic_swap.9 \ atomic.9 atomic_testandclear.9 \ atomic.9 atomic_testandset.9 \ atomic.9 atomic_thread_fence.9 MLINKS+=bhnd.9 BHND_MATCH_BOARD_TYPE.9 \ bhnd.9 BHND_MATCH_BOARD_VENDOR.9 \ bhnd.9 BHND_MATCH_CHIP_ID.9 \ bhnd.9 BHND_MATCH_CHIP_PKG.9 \ bhnd.9 BHND_MATCH_CHIP_REV.9 \ bhnd.9 BHND_MATCH_CORE_ID.9 \ bhnd.9 BHND_MATCH_CORE_VENDOR.9 \ bhnd.9 bhnd_activate_resource.9 \ bhnd.9 bhnd_alloc_pmu.9 \ bhnd.9 bhnd_alloc_resource.9 \ bhnd.9 bhnd_alloc_resource_any.9 \ bhnd.9 bhnd_alloc_resources.9 \ bhnd.9 bhnd_board_matches.9 \ bhnd.9 bhnd_bus_match_child.9 \ bhnd.9 bhnd_bus_read_1.9 \ bhnd.9 bhnd_bus_read_2.9 \ bhnd.9 bhnd_bus_read_4.9 \ bhnd.9 bhnd_bus_read_stream_1.9 \ bhnd.9 bhnd_bus_read_stream_2.9 \ bhnd.9 bhnd_bus_read_stream_4.9 \ bhnd.9 bhnd_bus_write_1.9 \ bhnd.9 bhnd_bus_write_2.9 \ bhnd.9 bhnd_bus_write_4.9 \ bhnd.9 bhnd_bus_write_stream_1.9 \ bhnd.9 bhnd_bus_write_stream_2.9 \ bhnd.9 bhnd_bus_write_stream_4.9 \ bhnd.9 bhnd_chip_matches.9 \ bhnd.9 bhnd_core_class.9 \ bhnd.9 bhnd_core_get_match_desc.9 \ bhnd.9 bhnd_core_matches.9 \ bhnd.9 bhnd_core_name.9 \ bhnd.9 bhnd_cores_equal.9 \ bhnd.9 bhnd_deactivate_resource.9 \ bhnd.9 bhnd_decode_port_rid.9 \ bhnd.9 bhnd_deregister_provider.9 \ bhnd.9 bhnd_device_lookup.9 \ bhnd.9 bhnd_device_matches.9 \ bhnd.9 bhnd_device_quirks.9 \ bhnd.9 bhnd_driver_get_erom_class.9 \ bhnd.9 bhnd_enable_clocks.9 \ bhnd.9 bhnd_find_core_class.9 \ bhnd.9 bhnd_find_core_name.9 \ bhnd.9 bhnd_format_chip_id.9 \ bhnd.9 bhnd_get_attach_type.9 \ bhnd.9 bhnd_get_chipid.9 \ bhnd.9 bhnd_get_class.9 \ bhnd.9 bhnd_get_clock_freq.9 \ bhnd.9 bhnd_get_clock_latency.9 \ bhnd.9 bhnd_get_core_index.9 \ bhnd.9 bhnd_get_core_info.9 \ bhnd.9 bhnd_get_core_unit.9 \ bhnd.9 bhnd_get_device.9 \ bhnd.9 bhnd_get_device_name.9 \ bhnd.9 bhnd_get_dma_translation.9 \ bhnd.9 bhnd_get_hwrev.9 \ bhnd.9 bhnd_get_intr_count.9 \ bhnd.9 bhnd_get_intr_ivec.9 \ bhnd.9 bhnd_get_port_count.9 \ bhnd.9 bhnd_get_port_rid.9 \ bhnd.9 bhnd_get_region_addr.9 \ bhnd.9 bhnd_get_region_count.9 \ bhnd.9 bhnd_get_vendor.9 \ bhnd.9 bhnd_get_vendor_name.9 \ bhnd.9 bhnd_hwrev_matches.9 \ bhnd.9 bhnd_is_hw_suspended.9 \ bhnd.9 bhnd_is_region_valid.9 \ bhnd.9 bhnd_map_intr.9 \ bhnd.9 bhnd_match_core.9 \ bhnd.9 bhnd_nvram_getvar.9 \ bhnd.9 bhnd_nvram_getvar_array.9 \ bhnd.9 bhnd_nvram_getvar_int.9 \ bhnd.9 bhnd_nvram_getvar_int16.9 \ bhnd.9 bhnd_nvram_getvar_int32.9 \ bhnd.9 bhnd_nvram_getvar_int8.9 \ 
bhnd.9 bhnd_nvram_getvar_str.9 \ bhnd.9 bhnd_nvram_getvar_uint.9 \ bhnd.9 bhnd_nvram_getvar_uint16.9 \ bhnd.9 bhnd_nvram_getvar_uint32.9 \ bhnd.9 bhnd_nvram_getvar_uint8.9 \ bhnd.9 bhnd_nvram_string_array_next.9 \ bhnd.9 bhnd_read_board_info.9 \ bhnd.9 bhnd_read_config.9 \ bhnd.9 bhnd_read_ioctl.9 \ bhnd.9 bhnd_read_iost.9 \ bhnd.9 bhnd_register_provider.9 \ bhnd.9 bhnd_release_ext_rsrc.9 \ bhnd.9 bhnd_release_pmu.9 \ bhnd.9 bhnd_release_provider.9 \ bhnd.9 bhnd_release_resource.9 \ bhnd.9 bhnd_release_resources.9 \ bhnd.9 bhnd_request_clock.9 \ bhnd.9 bhnd_request_ext_rsrc.9 \ bhnd.9 bhnd_reset_hw.9 \ bhnd.9 bhnd_retain_provider.9 \ bhnd.9 bhnd_set_custom_core_desc.9 \ bhnd.9 bhnd_set_default_core_desc.9 \ bhnd.9 bhnd_suspend_hw.9 \ bhnd.9 bhnd_unmap_intr.9 \ bhnd.9 bhnd_vendor_name.9 \ bhnd.9 bhnd_write_config.9 \ bhnd.9 bhnd_write_ioctl.9 MLINKS+=bhnd_erom.9 bhnd_erom_alloc.9 \ bhnd_erom.9 bhnd_erom_dump.9 \ bhnd_erom.9 bhnd_erom_fini_static.9 \ bhnd_erom.9 bhnd_erom_free.9 \ bhnd_erom.9 bhnd_erom_free_core_table.9 \ bhnd_erom.9 bhnd_erom_get_core_table.9 \ bhnd_erom.9 bhnd_erom_init_static.9 \ bhnd_erom.9 bhnd_erom_io.9 \ bhnd_erom.9 bhnd_erom_io_fini.9 \ bhnd_erom.9 bhnd_erom_io_map.9 \ bhnd_erom.9 bhnd_erom_io_read.9 \ bhnd_erom.9 bhnd_erom_iobus_init.9 \ bhnd_erom.9 bhnd_erom_iores_new.9 \ bhnd_erom.9 bhnd_erom_lookup_core.9 \ bhnd_erom.9 bhnd_erom_lookup_core_addr.9 \ bhnd_erom.9 bhnd_erom_probe.9 \ bhnd_erom.9 bhnd_erom_probe_driver_classes.9 MLINKS+=bitset.9 BITSET_DEFINE.9 \ bitset.9 BITSET_T_INITIALIZER.9 \ bitset.9 BITSET_FSET.9 \ bitset.9 BIT_CLR.9 \ bitset.9 BIT_COPY.9 \ bitset.9 BIT_ISSET.9 \ bitset.9 BIT_SET.9 \ bitset.9 BIT_ZERO.9 \ bitset.9 BIT_FILL.9 \ bitset.9 BIT_SETOF.9 \ bitset.9 BIT_EMPTY.9 \ bitset.9 BIT_ISFULLSET.9 \ bitset.9 BIT_FFS.9 \ bitset.9 BIT_FFS_AT.9 \ bitset.9 BIT_FLS.9 \ bitset.9 BIT_FOREACH_ISSET.9 \ bitset.9 BIT_FOREACH_ISCLR.9 \ bitset.9 BIT_COUNT.9 \ bitset.9 BIT_SUBSET.9 \ bitset.9 BIT_OVERLAP.9 \ bitset.9 BIT_CMP.9 \ bitset.9 BIT_OR.9 \ bitset.9 BIT_OR2.9 \ bitset.9 BIT_ORNOT.9 \ bitset.9 BIT_ORNOT2.9 \ bitset.9 BIT_AND.9 \ bitset.9 BIT_AND2.9 \ bitset.9 BIT_ANDNOT.9 \ bitset.9 BIT_ANDNOT2.9 \ bitset.9 BIT_XOR.9 \ bitset.9 BIT_XOR2.9 \ bitset.9 BIT_CLR_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC_ACQ.9 \ bitset.9 BIT_TEST_SET_ATOMIC.9 \ bitset.9 BIT_TEST_CLR_ATOMIC.9 \ bitset.9 BIT_AND_ATOMIC.9 \ bitset.9 BIT_OR_ATOMIC.9 \ bitset.9 BIT_COPY_STORE_REL.9 MLINKS+=bpf.9 bpfattach.9 \ bpf.9 bpfattach2.9 \ bpf.9 bpfdetach.9 \ bpf.9 bpf_filter.9 \ bpf.9 bpf_mtap.9 \ bpf.9 bpf_mtap2.9 \ bpf.9 bpf_tap.9 \ bpf.9 bpf_validate.9 MLINKS+=buf.9 bp.9 MLINKS+=buf_ring.9 buf_ring_alloc.9 \ buf_ring.9 buf_ring_free.9 \ buf_ring.9 buf_ring_enqueue.9 \ buf_ring.9 buf_ring_enqueue_bytes.9 \ buf_ring.9 buf_ring_dequeue_mc.9 \ buf_ring.9 buf_ring_dequeue_sc.9 \ buf_ring.9 buf_ring_count.9 \ buf_ring.9 buf_ring_empty.9 \ buf_ring.9 buf_ring_full.9 \ buf_ring.9 buf_ring_peek.9 MLINKS+=bus_activate_resource.9 bus_deactivate_resource.9 MLINKS+=bus_alloc_resource.9 bus_alloc_resource_any.9 MLINKS+=BUS_BIND_INTR.9 bus_bind_intr.9 MLINKS+=BUS_CONFIG_INTR.9 bus_config_intr.9 MLINKS+=BUS_DESCRIBE_INTR.9 bus_describe_intr.9 MLINKS+=bus_dma.9 busdma.9 \ bus_dma.9 bus_dmamap_create.9 \ bus_dma.9 bus_dmamap_destroy.9 \ bus_dma.9 bus_dmamap_load.9 \ bus_dma.9 bus_dmamap_load_bio.9 \ bus_dma.9 bus_dmamap_load_ccb.9 \ bus_dma.9 bus_dmamap_load_crp.9 \ bus_dma.9 bus_dmamap_load_crp_buffer.9 \ bus_dma.9 bus_dmamap_load_mbuf.9 \ bus_dma.9 bus_dmamap_load_mbuf_sg.9 \ 
bus_dma.9 bus_dmamap_load_uio.9 \ bus_dma.9 bus_dmamap_sync.9 \ bus_dma.9 bus_dmamap_unload.9 \ bus_dma.9 bus_dmamem_alloc.9 \ bus_dma.9 bus_dmamem_free.9 \ bus_dma.9 bus_dma_tag_create.9 \ bus_dma.9 bus_dma_tag_destroy.9 MLINKS+=bus_generic_print_child.9 bus_print_child_footer.9 \ bus_generic_print_child.9 bus_print_child_header.9 MLINKS+=bus_generic_read_ivar.9 bus_generic_write_ivar.9 MLINKS+=BUS_GET_CPUS.9 bus_get_cpus.9 MLINKS+=bus_map_resource.9 bus_unmap_resource.9 \ bus_map_resource.9 resource_init_map_request.9 MLINKS+=BUS_READ_IVAR.9 BUS_WRITE_IVAR.9 MLINKS+=BUS_SETUP_INTR.9 bus_setup_intr.9 \ BUS_SETUP_INTR.9 BUS_TEARDOWN_INTR.9 \ BUS_SETUP_INTR.9 bus_teardown_intr.9 MLINKS+=bus_space.9 bus_space_alloc.9 \ bus_space.9 bus_space_barrier.9 \ bus_space.9 bus_space_copy_region_1.9 \ bus_space.9 bus_space_copy_region_2.9 \ bus_space.9 bus_space_copy_region_4.9 \ bus_space.9 bus_space_copy_region_8.9 \ bus_space.9 bus_space_copy_region_stream_1.9 \ bus_space.9 bus_space_copy_region_stream_2.9 \ bus_space.9 bus_space_copy_region_stream_4.9 \ bus_space.9 bus_space_copy_region_stream_8.9 \ bus_space.9 bus_space_free.9 \ bus_space.9 bus_space_map.9 \ bus_space.9 bus_space_peek_1.9 \ bus_space.9 bus_space_peek_2.9 \ bus_space.9 bus_space_peek_4.9 \ bus_space.9 bus_space_peek_8.9 \ bus_space.9 bus_space_poke_1.9 \ bus_space.9 bus_space_poke_2.9 \ bus_space.9 bus_space_poke_4.9 \ bus_space.9 bus_space_poke_8.9 \ bus_space.9 bus_space_read_1.9 \ bus_space.9 bus_space_read_2.9 \ bus_space.9 bus_space_read_4.9 \ bus_space.9 bus_space_read_8.9 \ bus_space.9 bus_space_read_multi_1.9 \ bus_space.9 bus_space_read_multi_2.9 \ bus_space.9 bus_space_read_multi_4.9 \ bus_space.9 bus_space_read_multi_8.9 \ bus_space.9 bus_space_read_multi_stream_1.9 \ bus_space.9 bus_space_read_multi_stream_2.9 \ bus_space.9 bus_space_read_multi_stream_4.9 \ bus_space.9 bus_space_read_multi_stream_8.9 \ bus_space.9 bus_space_read_region_1.9 \ bus_space.9 bus_space_read_region_2.9 \ bus_space.9 bus_space_read_region_4.9 \ bus_space.9 bus_space_read_region_8.9 \ bus_space.9 bus_space_read_region_stream_1.9 \ bus_space.9 bus_space_read_region_stream_2.9 \ bus_space.9 bus_space_read_region_stream_4.9 \ bus_space.9 bus_space_read_region_stream_8.9 \ bus_space.9 bus_space_read_stream_1.9 \ bus_space.9 bus_space_read_stream_2.9 \ bus_space.9 bus_space_read_stream_4.9 \ bus_space.9 bus_space_read_stream_8.9 \ bus_space.9 bus_space_set_multi_1.9 \ bus_space.9 bus_space_set_multi_2.9 \ bus_space.9 bus_space_set_multi_4.9 \ bus_space.9 bus_space_set_multi_8.9 \ bus_space.9 bus_space_set_multi_stream_1.9 \ bus_space.9 bus_space_set_multi_stream_2.9 \ bus_space.9 bus_space_set_multi_stream_4.9 \ bus_space.9 bus_space_set_multi_stream_8.9 \ bus_space.9 bus_space_set_region_1.9 \ bus_space.9 bus_space_set_region_2.9 \ bus_space.9 bus_space_set_region_4.9 \ bus_space.9 bus_space_set_region_8.9 \ bus_space.9 bus_space_set_region_stream_1.9 \ bus_space.9 bus_space_set_region_stream_2.9 \ bus_space.9 bus_space_set_region_stream_4.9 \ bus_space.9 bus_space_set_region_stream_8.9 \ bus_space.9 bus_space_subregion.9 \ bus_space.9 bus_space_unmap.9 \ bus_space.9 bus_space_write_1.9 \ bus_space.9 bus_space_write_2.9 \ bus_space.9 bus_space_write_4.9 \ bus_space.9 bus_space_write_8.9 \ bus_space.9 bus_space_write_multi_1.9 \ bus_space.9 bus_space_write_multi_2.9 \ bus_space.9 bus_space_write_multi_4.9 \ bus_space.9 bus_space_write_multi_8.9 \ bus_space.9 bus_space_write_multi_stream_1.9 \ bus_space.9 bus_space_write_multi_stream_2.9 \ 
bus_space.9 bus_space_write_multi_stream_4.9 \ bus_space.9 bus_space_write_multi_stream_8.9 \ bus_space.9 bus_space_write_region_1.9 \ bus_space.9 bus_space_write_region_2.9 \ bus_space.9 bus_space_write_region_4.9 \ bus_space.9 bus_space_write_region_8.9 \ bus_space.9 bus_space_write_region_stream_1.9 \ bus_space.9 bus_space_write_region_stream_2.9 \ bus_space.9 bus_space_write_region_stream_4.9 \ bus_space.9 bus_space_write_region_stream_8.9 \ bus_space.9 bus_space_write_stream_1.9 \ bus_space.9 bus_space_write_stream_2.9 \ bus_space.9 bus_space_write_stream_4.9 \ bus_space.9 bus_space_write_stream_8.9 MLINKS+=byteorder.9 be16dec.9 \ byteorder.9 be16enc.9 \ byteorder.9 be16toh.9 \ byteorder.9 be32dec.9 \ byteorder.9 be32enc.9 \ byteorder.9 be32toh.9 \ byteorder.9 be64dec.9 \ byteorder.9 be64enc.9 \ byteorder.9 be64toh.9 \ byteorder.9 bswap16.9 \ byteorder.9 bswap32.9 \ byteorder.9 bswap64.9 \ byteorder.9 htobe16.9 \ byteorder.9 htobe32.9 \ byteorder.9 htobe64.9 \ byteorder.9 htole16.9 \ byteorder.9 htole32.9 \ byteorder.9 htole64.9 \ byteorder.9 le16dec.9 \ byteorder.9 le16enc.9 \ byteorder.9 le16toh.9 \ byteorder.9 le32dec.9 \ byteorder.9 le32enc.9 \ byteorder.9 le32toh.9 \ byteorder.9 le64dec.9 \ byteorder.9 le64enc.9 \ byteorder.9 le64toh.9 MLINKS+=callout.9 callout_active.9 \ callout.9 callout_deactivate.9 \ callout.9 callout_drain.9 \ callout.9 callout_init.9 \ callout.9 callout_init_mtx.9 \ callout.9 callout_init_rm.9 \ callout.9 callout_init_rw.9 \ callout.9 callout_pending.9 \ callout.9 callout_reset.9 \ callout.9 callout_reset_curcpu.9 \ callout.9 callout_reset_on.9 \ callout.9 callout_reset_sbt.9 \ callout.9 callout_reset_sbt_curcpu.9 \ callout.9 callout_reset_sbt_on.9 \ callout.9 callout_schedule.9 \ callout.9 callout_schedule_curcpu.9 \ callout.9 callout_schedule_on.9 \ callout.9 callout_schedule_sbt.9 \ callout.9 callout_schedule_sbt_curcpu.9 \ callout.9 callout_schedule_sbt_on.9 \ callout.9 callout_stop.9 \ callout.9 callout_when.9 MLINKS+=cnv.9 cnvlist.9 \ cnv.9 cnvlist_free_binary.9 \ cnv.9 cnvlist_free_bool.9 \ cnv.9 cnvlist_free_bool_array.9 \ cnv.9 cnvlist_free_descriptor.9 \ cnv.9 cnvlist_free_descriptor_array.9 \ cnv.9 cnvlist_free_null.9 \ cnv.9 cnvlist_free_number.9 \ cnv.9 cnvlist_free_number_array.9 \ cnv.9 cnvlist_free_nvlist.9 \ cnv.9 cnvlist_free_nvlist_array.9 \ cnv.9 cnvlist_free_string.9 \ cnv.9 cnvlist_free_string_array.9 \ cnv.9 cnvlist_get_binary.9 \ cnv.9 cnvlist_get_bool.9 \ cnv.9 cnvlist_get_bool_array.9 \ cnv.9 cnvlist_get_descriptor.9 \ cnv.9 cnvlist_get_descriptor_array.9 \ cnv.9 cnvlist_get_number.9 \ cnv.9 cnvlist_get_number_array.9 \ cnv.9 cnvlist_get_nvlist.9 \ cnv.9 cnvlist_get_nvlist_array.9 \ cnv.9 cnvlist_get_string.9 \ cnv.9 cnvlist_get_string_array.9 \ cnv.9 cnvlist_take_binary.9 \ cnv.9 cnvlist_take_bool.9 \ cnv.9 cnvlist_take_bool_array.9 \ cnv.9 cnvlist_take_descriptor.9 \ cnv.9 cnvlist_take_descriptor_array.9 \ cnv.9 cnvlist_take_number.9 \ cnv.9 cnvlist_take_number_array.9 \ cnv.9 cnvlist_take_nvlist.9 \ cnv.9 cnvlist_take_nvlist_array.9 \ cnv.9 cnvlist_take_string.9 \ cnv.9 cnvlist_take_string_array.9 MLINKS+=condvar.9 cv_broadcast.9 \ condvar.9 cv_broadcastpri.9 \ condvar.9 cv_destroy.9 \ condvar.9 cv_init.9 \ condvar.9 cv_signal.9 \ condvar.9 cv_timedwait.9 \ condvar.9 cv_timedwait_sig.9 \ condvar.9 cv_timedwait_sig_sbt.9 \ condvar.9 cv_wait.9 \ condvar.9 cv_wait_sig.9 \ condvar.9 cv_wait_unlock.9 \ condvar.9 cv_wmesg.9 MLINKS+=config_intrhook.9 config_intrhook_disestablish.9 \ config_intrhook.9 config_intrhook_drain.9 \ 
config_intrhook.9 config_intrhook_establish.9 \ config_intrhook.9 config_intrhook_oneshot.9 MLINKS+=contigmalloc.9 contigmalloc_domainset.9 MLINKS+=casuword.9 casueword.9 \ casuword.9 casueword32.9 \ casuword.9 casuword32.9 MLINKS+=copy.9 copyin.9 \ copy.9 copyin_nofault.9 \ copy.9 copyinstr.9 \ copy.9 copyout.9 \ copy.9 copyout_nofault.9 \ copy.9 copystr.9 MLINKS+=counter.9 counter_u64_alloc.9 \ counter.9 counter_u64_free.9 \ counter.9 counter_u64_add.9 \ counter.9 counter_enter.9 \ counter.9 counter_exit.9 \ counter.9 counter_ratecheck.9 \ counter.9 counter_u64_add_protected.9 \ counter.9 counter_u64_fetch.9 \ counter.9 counter_u64_zero.9 \ counter.9 SYSCTL_COUNTER_U64.9 \ counter.9 SYSCTL_ADD_COUNTER_U64.9 \ counter.9 SYSCTL_COUNTER_U64_ARRAY.9 \ counter.9 SYSCTL_ADD_COUNTER_U64_ARRAY.9 MLINKS+=cpu_machdep.9 cpu_copy_thread.9 \ cpu_machdep.9 cpu_exec_vmspace_reuse.9 \ cpu_machdep.9 cpu_exit.9 \ cpu_machdep.9 cpu_fetch_syscall_args.9 \ cpu_machdep.9 cpu_fork.9 \ cpu_machdep.9 cpu_fork_kthread_handler.9 \ cpu_machdep.9 cpu_idle.9 \ cpu_machdep.9 cpu_idle_wakeup.9 \ cpu_machdep.9 cpu_procctl.9 \ cpu_machdep.9 cpu_ptrace.9 \ cpu_machdep.9 cpu_set_syscall_retval.9 \ cpu_machdep.9 cpu_set_upcall.9 \ cpu_machdep.9 cpu_set_user_tls.9 \ cpu_machdep.9 cpu_switch.9 \ cpu_machdep.9 cpu_sync_core.9 \ cpu_machdep.9 cpu_thread_alloc.9 \ cpu_machdep.9 cpu_thread_clean.9 \ cpu_machdep.9 cpu_thread_exit.9 \ cpu_machdep.9 cpu_thread_free.9 \
-	cpu_machdep.9 cpu_throw.9
+	cpu_machdep.9 cpu_throw.9 \
+	cpu_machdep.9 cpu_update_pcb.9
MLINKS+=cpuset.9 CPUSET_T_INITIALIZER.9 \ cpuset.9 CPUSET_FSET.9 \ cpuset.9 CPU_CLR.9 \ cpuset.9 CPU_COPY.9 \ cpuset.9 CPU_ISSET.9 \ cpuset.9 CPU_SET.9 \ cpuset.9 CPU_ZERO.9 \ cpuset.9 CPU_FILL.9 \ cpuset.9 CPU_SETOF.9 \ cpuset.9 CPU_EMPTY.9 \ cpuset.9 CPU_ISFULLSET.9 \ cpuset.9 CPU_FFS.9 \ cpuset.9 CPU_COUNT.9 \ cpuset.9 CPU_SUBSET.9 \ cpuset.9 CPU_OVERLAP.9 \ cpuset.9 CPU_CMP.9 \ cpuset.9 CPU_OR.9 \ cpuset.9 CPU_ORNOT.9 \ cpuset.9 CPU_AND.9 \ cpuset.9 CPU_ANDNOT.9 \ cpuset.9 CPU_CLR_ATOMIC.9 \ cpuset.9 CPU_SET_ATOMIC.9 \ cpuset.9 CPU_SET_ATOMIC_ACQ.9 \ cpuset.9 CPU_AND_ATOMIC.9 \ cpuset.9 CPU_OR_ATOMIC.9 \ cpuset.9 CPU_COPY_STORE_REL.9 MLINKS+=critical_enter.9 critical.9 \ critical_enter.9 critical_exit.9 \ critical_enter.9 CRITICAL_ASSERT.9 MLINKS+=crypto_buffer.9 crypto_apply.9 \ crypto_buffer.9 crypto_apply_buf.9 \ crypto_buffer.9 crypto_buffer_contiguous_segment.9 \ crypto_buffer.9 crypto_buffer_len.9 \ crypto_buffer.9 crypto_contiguous_segment.9 \ crypto_buffer.9 crypto_cursor_init.9 \ crypto_buffer.9 crypto_cursor_advance.9 \ crypto_buffer.9 crypto_cursor_copyback.9 \ crypto_buffer.9 crypto_cursor_copydata.9 \ crypto_buffer.9 crypto_cursor_copydata_noadv.9 \ crypto_buffer.9 crypto_cursor_segment.9 \ crypto_buffer.9 CRYPTO_HAS_OUTPUT_BUFFER.9 MLINKS+=crypto_driver.9 crypto_copyback.9 \ crypto_driver.9 crypto_copydata.9 \ crypto_driver.9 crypto_done.9 \ crypto_driver.9 crypto_get_driverid.9 \ crypto_driver.9 crypto_get_driver_session.9 \ crypto_driver.9 crypto_read_iv.9 \ crypto_driver.9 crypto_unblock.9 \ crypto_driver.9 crypto_unregister_all.9 \ crypto_driver.9 CRYPTODEV_FREESESSION.9 \ crypto_driver.9 CRYPTODEV_NEWSESSION.9 \ crypto_driver.9 CRYPTODEV_PROBESESSION.9 \ crypto_driver.9 CRYPTODEV_PROCESS.9 \ crypto_driver.9 hmac_init_ipad.9 \ crypto_driver.9 hmac_init_opad.9 MLINKS+=crypto_request.9 crypto_clonereq.9 \ crypto_request.9 crypto_destroyreq.9 \ crypto_request.9 crypto_dispatch.9 \ crypto_request.9 crypto_freereq.9 \ crypto_request.9 crypto_getreq.9 \
crypto_request.9 crypto_initreq.9 \ crypto_request.9 crypto_use_buf.9 \ crypto_request.9 crypto_use_mbuf.9 \ crypto_request.9 crypto_use_output_buf.9 \ crypto_request.9 crypto_use_output_mbuf.9 \ crypto_request.9 crypto_use_output_uio.9 \ crypto_request.9 crypto_use_uio.9 MLINKS+=crypto_session.9 crypto_auth_hash.9 \ crypto_session.9 crypto_cipher.9 \ crypto_session.9 crypto_get_params.9 \ crypto_session.9 crypto_newsession.9 \ crypto_session.9 crypto_freesession.9 MLINKS+=DB_COMMAND.9 DB_ALIAS.9 \ DB_COMMAND.9 DB_ALIAS_FLAGS.9 \ DB_COMMAND.9 DB_COMMAND_FLAGS.9 \ DB_COMMAND.9 DB_SHOW_ALIAS.9 \ DB_COMMAND.9 DB_SHOW_ALIAS_FLAGS.9 \ DB_COMMAND.9 DB_SHOW_ALL_ALIAS.9 \ DB_COMMAND.9 DB_SHOW_ALL_COMMAND.9 \ DB_COMMAND.9 DB_SHOW_COMMAND.9 \ DB_COMMAND.9 DB_SHOW_COMMAND_FLAGS.9 \ DB_COMMAND.9 DB_DECLARE_TABLE.9 \ DB_COMMAND.9 DB_DEFINE_TABLE.9 \ DB_COMMAND.9 DB_TABLE_COMMAND.9 \ DB_COMMAND.9 DB_TABLE_COMMAND_FLAGS.9 \ DB_COMMAND.9 DB_TABLE_ALIAS.9 \ DB_COMMAND.9 DB_TABLE_ALIAS_FLAGS.9 MLINKS+=DECLARE_MODULE.9 DECLARE_MODULE_TIED.9 MLINKS+=dev_clone.9 drain_dev_clone_events.9 MLINKS+=dev_refthread.9 devvn_refthread.9 \ dev_refthread.9 dev_relthread.9 MLINKS+=devfs_set_cdevpriv.9 devfs_clear_cdevpriv.9 \ devfs_set_cdevpriv.9 devfs_get_cdevpriv.9 \ devfs_set_cdevpriv.9 devfs_foreach_cdevpriv.9 MLINKS+=device_add_child.9 device_add_child_ordered.9 MLINKS+=device_enable.9 device_disable.9 \ device_enable.9 device_is_enabled.9 MLINKS+=device_get_ivars.9 device_set_ivars.9 MLINKS+=device_get_name.9 device_get_nameunit.9 MLINKS+=device_get_state.9 device_busy.9 \ device_get_state.9 device_is_alive.9 \ device_get_state.9 device_is_attached.9 \ device_get_state.9 device_unbusy.9 MLINKS+=device_get_sysctl.9 device_get_sysctl_ctx.9 \ device_get_sysctl.9 device_get_sysctl_tree.9 MLINKS+=device_quiet.9 device_is_quiet.9 \ device_quiet.9 device_verbose.9 MLINKS+=device_set_desc.9 device_get_desc.9 \ device_set_desc.9 device_set_descf.9 \ device_set_desc.9 device_set_desc_copy.9 MLINKS+=device_set_flags.9 device_get_flags.9 MLINKS+=devstat.9 devicestat.9 \ devstat.9 devstat_new_entry.9 \ devstat.9 devstat_end_transaction.9 \ devstat.9 devstat_end_transaction_bio.9 \ devstat.9 devstat_remove_entry.9 \ devstat.9 devstat_start_transaction.9 \ devstat.9 devstat_start_transaction_bio.9 MLINKS+=disk.9 disk_add_alias.9 \ disk.9 disk_alloc.9 \ disk.9 disk_create.9 \ disk.9 disk_destroy.9 \ disk.9 disk_gone.9 \ disk.9 disk_resize.9 MLINKS+=dnv.9 dnvlist.9 \ dnv.9 dnvlist_get_binary.9 \ dnv.9 dnvlist_get_bool.9 \ dnv.9 dnvlist_get_descriptor.9 \ dnv.9 dnvlist_get_number.9 \ dnv.9 dnvlist_get_nvlist.9 \ dnv.9 dnvlist_get_string.9 \ dnv.9 dnvlist_take_binary.9 \ dnv.9 dnvlist_take_bool.9 \ dnv.9 dnvlist_take_descriptor.9 \ dnv.9 dnvlist_take_number.9 \ dnv.9 dnvlist_take_nvlist.9 \ dnv.9 dnvlist_take_string.9 MLINKS+=domain.9 protosw.9 \ domain.9 domain_add.9 \ domain.9 protosw_register.9 \ domain.9 protosw_unregister.9 MLINKS+=dpcpu.9 DPCPU_DECLARE.9 \ dpcpu.9 DPCPU_DEFINE.9 \ dpcpu.9 DPCPU_DEFINE_STATIC.9 \ dpcpu.9 DPCPU_GET.9 \ dpcpu.9 DPCPU_ID_PTR.9 \ dpcpu.9 DPCPU_ID_GET.9 \ dpcpu.9 DPCPU_ID_SET.9 \ dpcpu.9 DPCPU_PTR.9 \ dpcpu.9 DPCPU_SET.9 MLINKS+=drbr.9 drbr_free.9 \ drbr.9 drbr_enqueue.9 \ drbr.9 drbr_dequeue.9 \ drbr.9 drbr_dequeue_cond.9 \ drbr.9 drbr_flush.9 \ drbr.9 drbr_empty.9 \ drbr.9 drbr_inuse.9 \ drbr.9 drbr_stats_update.9 MLINKS+=DRIVER_MODULE.9 DRIVER_MODULE_ORDERED.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE_ORDERED.9 MLINKS+=epoch.9 epoch_context.9 \ epoch.9 
epoch_alloc.9 \ epoch.9 epoch_free.9 \ epoch.9 epoch_enter.9 \ epoch.9 epoch_exit.9 \ epoch.9 epoch_wait.9 \ epoch.9 epoch_call.9 \ epoch.9 epoch_drain_callbacks.9 \ epoch.9 in_epoch.9 MLINKS+=EVENTHANDLER.9 EVENTHANDLER_DECLARE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEFINE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEREGISTER.9 \ EVENTHANDLER.9 eventhandler_deregister.9 \ EVENTHANDLER.9 eventhandler_find_list.9 \ EVENTHANDLER.9 EVENTHANDLER_INVOKE.9 \ EVENTHANDLER.9 eventhandler_prune_list.9 \ EVENTHANDLER.9 EVENTHANDLER_REGISTER.9 \ EVENTHANDLER.9 eventhandler_register.9 MLINKS+=eventtimers.9 et_register.9 \ eventtimers.9 et_deregister.9 \ eventtimers.9 et_ban.9 \ eventtimers.9 et_find.9 \ eventtimers.9 et_free.9 \ eventtimers.9 et_init.9 \ eventtimers.9 ET_LOCK.9 \ eventtimers.9 ET_UNLOCK.9 \ eventtimers.9 et_start.9 \ eventtimers.9 et_stop.9 MLINKS+=fail.9 KFAIL_POINT_CODE.9 \ fail.9 KFAIL_POINT_ERROR.9 \ fail.9 KFAIL_POINT_GOTO.9 \ fail.9 KFAIL_POINT_RETURN.9 \ fail.9 KFAIL_POINT_RETURN_VOID.9 MLINKS+=fdt_pinctrl.9 fdt_pinctrl_configure.9 \ fdt_pinctrl.9 fdt_pinctrl_configure_by_name.9 \ fdt_pinctrl.9 fdt_pinctrl_configure_tree.9 \ fdt_pinctrl.9 fdt_pinctrl_register.9 MLINKS+=fetch.9 fubyte.9 \ fetch.9 fuword.9 \ fetch.9 fuword16.9 \ fetch.9 fuword32.9 \ fetch.9 fuword64.9 \ fetch.9 fueword.9 \ fetch.9 fueword32.9 \ fetch.9 fueword64.9 MLINKS+=firmware.9 firmware_get.9 \ firmware.9 firmware_put.9 \ firmware.9 firmware_register.9 \ firmware.9 firmware_unregister.9 MLINKS+=fpu_kern.9 fpu_kern_alloc_ctx.9 \ fpu_kern.9 fpu_kern_free_ctx.9 \ fpu_kern.9 fpu_kern_enter.9 \ fpu_kern.9 fpu_kern_leave.9 \ fpu_kern.9 fpu_kern_thread.9 \ fpu_kern.9 is_fpu_kern_thread.9 MLINKS+=g_attach.9 g_detach.9 MLINKS+=g_bio.9 bio.9 \ g_bio.9 g_alloc_bio.9 \ g_bio.9 g_clone_bio.9 \ g_bio.9 g_destroy_bio.9 \ g_bio.9 g_duplicate_bio.9 \ g_bio.9 g_format_bio.9 \ g_bio.9 g_new_bio.9 \ g_bio.9 g_print_bio.9 \ g_bio.9 g_reset_bio.9 MLINKS+=g_consumer.9 g_destroy_consumer.9 \ g_consumer.9 g_new_consumer.9 MLINKS+=g_data.9 g_read_data.9 \ g_data.9 g_write_data.9 MLINKS+=getenv.9 freeenv.9 \ getenv.9 getenv_int.9 \ getenv.9 getenv_long.9 \ getenv.9 getenv_string.9 \ getenv.9 getenv_quad.9 \ getenv.9 getenv_uint.9 \ getenv.9 getenv_ulong.9 \ getenv.9 getenv_bool.9 \ getenv.9 getenv_is_true.9 \ getenv.9 getenv_is_false.9 \ getenv.9 kern_getenv.9 \ getenv.9 kern_setenv.9 \ getenv.9 kern_unsetenv.9 \ getenv.9 setenv.9 \ getenv.9 testenv.9 \ getenv.9 unsetenv.9 MLINKS+=g_event.9 g_cancel_event.9 \ g_event.9 g_post_event.9 \ g_event.9 g_waitfor_event.9 MLINKS+=g_geom.9 g_destroy_geom.9 \ g_geom.9 g_new_geomf.9 MLINKS+=g_provider.9 g_destroy_provider.9 \ g_provider.9 g_error_provider.9 \ g_provider.9 g_new_providerf.9 MLINKS+=gone_in.9 gone_in_dev.9 MLINKS+=groupmember.9 realgroupmember.9 MLINKS+=hash.9 hash32.9 \ hash.9 hash32_buf.9 \ hash.9 hash32_str.9 \ hash.9 hash32_stre.9 \ hash.9 hash32_strn.9 \ hash.9 hash32_strne.9 \ hash.9 jenkins_hash.9 \ hash.9 jenkins_hash32.9 MLINKS+=hashinit.9 hashdestroy.9 \ hashinit.9 hashinit_flags.9 \ hashinit.9 phashinit.9 MLINKS+=hhook.9 hhook_head_register.9 \ hhook.9 hhook_head_deregister.9 \ hhook.9 hhook_head_deregister_lookup.9 \ hhook.9 hhook_run_hooks.9 \ hhook.9 HHOOKS_RUN_IF.9 \ hhook.9 HHOOKS_RUN_LOOKUP_IF.9 MLINKS+=hz.9 tick.9 MLINKS+=ieee80211.9 ieee80211_ifattach.9 \ ieee80211.9 ieee80211_ifdetach.9 MLINKS+=ieee80211_amrr.9 ieee80211_amrr_choose.9 \ ieee80211_amrr.9 ieee80211_amrr_cleanup.9 \ ieee80211_amrr.9 ieee80211_amrr_init.9 \ ieee80211_amrr.9 ieee80211_amrr_node_init.9 \ 
ieee80211_amrr.9 ieee80211_amrr_setinterval.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_complete.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_update.9 MLINKS+=ieee80211_beacon.9 ieee80211_beacon_alloc.9 \ ieee80211_beacon.9 ieee80211_beacon_notify.9 \ ieee80211_beacon.9 ieee80211_beacon_update.9 MLINKS+=ieee80211_bmiss.9 ieee80211_beacon_miss.9 MLINKS+=ieee80211_crypto.9 ieee80211_crypto_available.9 \ ieee80211_crypto.9 ieee80211_crypto_decap.9 \ ieee80211_crypto.9 ieee80211_crypto_delglobalkeys.9 \ ieee80211_crypto.9 ieee80211_crypto_delkey.9 \ ieee80211_crypto.9 ieee80211_crypto_demic.9 \ ieee80211_crypto.9 ieee80211_crypto_encap.9 \ ieee80211_crypto.9 ieee80211_crypto_enmic.9 \ ieee80211_crypto.9 ieee80211_crypto_newkey.9 \ ieee80211_crypto.9 ieee80211_crypto_register.9 \ ieee80211_crypto.9 ieee80211_crypto_reload_keys.9 \ ieee80211_crypto.9 ieee80211_crypto_setkey.9 \ ieee80211_crypto.9 ieee80211_crypto_unregister.9 \ ieee80211_crypto.9 ieee80211_key_update_begin.9 \ ieee80211_crypto.9 ieee80211_key_update_end.9 \ ieee80211_crypto.9 ieee80211_notify_michael_failure.9 \ ieee80211_crypto.9 ieee80211_notify_replay_failure.9 MLINKS+=ieee80211_input.9 ieee80211_input_all.9 MLINKS+=ieee80211_node.9 ieee80211_dump_node.9 \ ieee80211_node.9 ieee80211_dump_nodes.9 \ ieee80211_node.9 ieee80211_find_rxnode.9 \ ieee80211_node.9 ieee80211_find_rxnode_withkey.9 \ ieee80211_node.9 ieee80211_free_node.9 \ ieee80211_node.9 ieee80211_iterate_nodes.9 \ ieee80211_node.9 ieee80211_ref_node.9 MLINKS+=ieee80211_output.9 ieee80211_process_callback.9 \ ieee80211_output.9 M_SEQNO_GET.9 \ ieee80211_output.9 M_WME_GETAC.9 MLINKS+=ieee80211_proto.9 ieee80211_new_state.9 \ ieee80211_proto.9 ieee80211_resume_all.9 \ ieee80211_proto.9 ieee80211_start_all.9 \ ieee80211_proto.9 ieee80211_stop_all.9 \ ieee80211_proto.9 ieee80211_suspend_all.9 \ ieee80211_proto.9 ieee80211_waitfor_parent.9 MLINKS+=ieee80211_radiotap.9 ieee80211_radiotap_active.9 \ ieee80211_radiotap.9 ieee80211_radiotap_active_vap.9 \ ieee80211_radiotap.9 ieee80211_radiotap_attach.9 \ ieee80211_radiotap.9 ieee80211_radiotap_tx.9 \ ieee80211_radiotap.9 radiotap.9 MLINKS+=ieee80211_regdomain.9 ieee80211_alloc_countryie.9 \ ieee80211_regdomain.9 ieee80211_init_channels.9 \ ieee80211_regdomain.9 ieee80211_sort_channels.9 MLINKS+=ieee80211_scan.9 ieee80211_add_scan.9 \ ieee80211_scan.9 ieee80211_bg_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan_any.9 \ ieee80211_scan.9 ieee80211_check_scan.9 \ ieee80211_scan.9 ieee80211_check_scan_current.9 \ ieee80211_scan.9 ieee80211_flush.9 \ ieee80211_scan.9 ieee80211_probe_curchan.9 \ ieee80211_scan.9 ieee80211_scan_assoc_fail.9 \ ieee80211_scan.9 ieee80211_scan_done.9 \ ieee80211_scan.9 ieee80211_scan_dump_channels.9 \ ieee80211_scan.9 ieee80211_scan_flush.9 \ ieee80211_scan.9 ieee80211_scan_iterate.9 \ ieee80211_scan.9 ieee80211_scan_next.9 \ ieee80211_scan.9 ieee80211_scan_timeout.9 \ ieee80211_scan.9 ieee80211_scanner_get.9 \ ieee80211_scan.9 ieee80211_scanner_register.9 \ ieee80211_scan.9 ieee80211_scanner_unregister.9 \ ieee80211_scan.9 ieee80211_scanner_unregister_all.9 \ ieee80211_scan.9 ieee80211_start_scan.9 MLINKS+=ieee80211_vap.9 ieee80211_vap_attach.9 \ ieee80211_vap.9 ieee80211_vap_detach.9 \ ieee80211_vap.9 ieee80211_vap_setup.9 MLINKS+=iflibdd.9 ifdi_attach_pre.9 \ iflibdd.9 ifdi_attach_post.9 \ iflibdd.9 ifdi_detach.9 \ iflibdd.9 ifdi_get_counter.9 \ iflibdd.9 ifdi_i2c_req.9 \ iflibdd.9 ifdi_init.9 \ iflibdd.9 ifdi_intr_enable.9 \ iflibdd.9 ifdi_intr_disable.9 \ iflibdd.9 
ifdi_led_func.9 \ iflibdd.9 ifdi_link_intr_enable.9 \ iflibdd.9 ifdi_media_set.9 \ iflibdd.9 ifdi_media_status.9 \ iflibdd.9 ifdi_media_change.9 \ iflibdd.9 ifdi_mtu_set.9 \ iflibdd.9 ifdi_multi_set.9 \ iflibdd.9 ifdi_promisc_set.9 \ iflibdd.9 ifdi_queues_alloc.9 \ iflibdd.9 ifdi_queues_free.9 \ iflibdd.9 ifdi_queue_intr_enable.9 \ iflibdd.9 ifdi_resume.9 \ iflibdd.9 ifdi_rxq_setup.9 \ iflibdd.9 ifdi_stop.9 \ iflibdd.9 ifdi_suspend.9 \ iflibdd.9 ifdi_sysctl_int_delay.9 \ iflibdd.9 ifdi_timer.9 \ iflibdd.9 ifdi_txq_setup.9 \ iflibdd.9 ifdi_update_admin_status.9 \ iflibdd.9 ifdi_vf_add.9 \ iflibdd.9 ifdi_vflr_handle.9 \ iflibdd.9 ifdi_vlan_register.9 \ iflibdd.9 ifdi_vlan_unregister.9 \ iflibdd.9 ifdi_watchdog_reset.9 \ iflibdd.9 iov_init.9 \ iflibdd.9 iov_uinit.9 MLINKS+=iflibdi.9 iflib_add_int_delay_sysctl.9 \ iflibdi.9 iflib_device_attach.9 \ iflibdi.9 iflib_device_deregister.9 \ iflibdi.9 iflib_device_detach.9 \ iflibdi.9 iflib_device_suspend.9 \ iflibdi.9 iflib_device_register.9 \ iflibdi.9 iflib_device_resume.9 \ iflibdi.9 iflib_led_create.9 \ iflibdi.9 iflib_irq_alloc.9 \ iflibdi.9 iflib_irq_alloc_generic.9 \ iflibdi.9 iflib_link_intr_deferred.9 \ iflibdi.9 iflib_link_state_change.9 \ iflibdi.9 iflib_rx_intr_deferred.9 \ iflibdi.9 iflib_tx_intr_deferred.9 MLINKS+=iflibtxrx.9 isc_rxd_available.9 \ iflibtxrx.9 isc_rxd_refill.9 \ iflibtxrx.9 isc_rxd_flush.9 \ iflibtxrx.9 isc_rxd_pkt_get.9 \ iflibtxrx.9 isc_txd_credits_update.9 \ iflibtxrx.9 isc_txd_encap.9 \ iflibtxrx.9 isc_txd_flush.9 MLINKS+=ifnet.9 if_addmulti.9 \ ifnet.9 if_alloc.9 \ ifnet.9 if_alloc_dev.9 \ ifnet.9 if_alloc_domain.9 \ ifnet.9 if_allmulti.9 \ ifnet.9 if_attach.9 \ ifnet.9 if_data.9 \ ifnet.9 IF_DEQUEUE.9 \ ifnet.9 if_delmulti.9 \ ifnet.9 if_detach.9 \ ifnet.9 if_down.9 \ ifnet.9 if_findmulti.9 \ ifnet.9 if_free.9 \ ifnet.9 if_free_type.9 \ ifnet.9 if_up.9 \ ifnet.9 ifa_free.9 \ ifnet.9 ifa_ifwithaddr.9 \ ifnet.9 ifa_ifwithdstaddr.9 \ ifnet.9 ifa_ifwithnet.9 \ ifnet.9 ifa_ref.9 \ ifnet.9 ifaddr.9 \ ifnet.9 ifaof_ifpforaddr.9 \ ifnet.9 ifioctl.9 \ ifnet.9 ifpromisc.9 \ ifnet.9 ifqueue.9 \ ifnet.9 ifunit.9 \ ifnet.9 ifunit_ref.9 MLINKS+=insmntque.9 insmntque1.9 MLINKS+=intr_event.9 intr_event_add_handler.9 \ intr_event.9 intr_event_create.9 \ intr_event.9 intr_event_destroy.9 \ intr_event.9 intr_event_handle.9 \ intr_event.9 intr_event_remove_handler.9 \ intr_event.9 intr_priority.9 MLINKS+=KASSERT.9 MPASS.9 MLINKS+=kern_reboot.9 reboot.9 \ kern_reboot.9 shutdown_nice.9 MLINKS+=kern_yield.9 maybe_yield.9 \ kern_yield.9 should_yield.9 MLINKS+=kernacc.9 useracc.9 MLINKS+=kernel_mount.9 free_mntarg.9 \ kernel_mount.9 mount_arg.9 \ kernel_mount.9 mount_argb.9 \ kernel_mount.9 mount_argf.9 \ kernel_mount.9 mount_argsu.9 MLINKS+=kasan.9 KASAN.9 \ kasan.9 kasan_mark.9 MLINKS+=khelp.9 khelp_add_hhook.9 \ khelp.9 KHELP_DECLARE_MOD.9 \ khelp.9 KHELP_DECLARE_MOD_UMA.9 \ khelp.9 khelp_destroy_osd.9 \ khelp.9 khelp_get_id.9 \ khelp.9 khelp_get_osd.9 \ khelp.9 khelp_init_osd.9 \ khelp.9 khelp_remove_hhook.9 MLINKS+=kmsan.9 KMSAN.9 \ kmsan.9 kmsan_check.9 \ kmsan.9 kmsan_check_bio.9 \ kmsan.9 kmsan_check_ccb.9 \ kmsan.9 kmsan_check_mbuf.9 \ kmsan.9 kmsan_check_uio.9 \ kmsan.9 kmsan_mark.9 \ kmsan.9 kmsan_oirg.9 MLINKS+=kobj.9 DEFINE_CLASS.9 \ kobj.9 kobj_class_compile.9 \ kobj.9 kobj_class_compile_static.9 \ kobj.9 kobj_class_free.9 \ kobj.9 kobj_create.9 \ kobj.9 kobj_delete.9 \ kobj.9 kobj_init.9 \ kobj.9 kobj_init_static.9 MLINKS+=kproc.9 kproc_create.9 \ kproc.9 kproc_exit.9 \ kproc.9 kproc_kthread_add.9 \ kproc.9 kproc_resume.9 \ 
kproc.9 kproc_shutdown.9 \ kproc.9 kproc_start.9 \ kproc.9 kproc_suspend.9 \ kproc.9 kproc_suspend_check.9 \ kproc.9 kthread_create.9 MLINKS+=kqueue.9 knlist_add.9 \ kqueue.9 knlist_clear.9 \ kqueue.9 knlist_delete.9 \ kqueue.9 knlist_destroy.9 \ kqueue.9 knlist_empty.9 \ kqueue.9 knlist_init.9 \ kqueue.9 knlist_init_mtx.9 \ kqueue.9 knlist_remove.9 \ kqueue.9 knote_fdclose.9 \ kqueue.9 KNOTE_LOCKED.9 \ kqueue.9 KNOTE_UNLOCKED.9 \ kqueue.9 kqfd_register.9 \ kqueue.9 kqueue_add_filteropts.9 \ kqueue.9 kqueue_del_filteropts.9 MLINKS+=kthread.9 kthread_add.9 \ kthread.9 kthread_exit.9 \ kthread.9 kthread_resume.9 \ kthread.9 kthread_shutdown.9 \ kthread.9 kthread_start.9 \ kthread.9 kthread_suspend.9 \ kthread.9 kthread_suspend_check.9 MLINKS+=ktr.9 CTR0.9 \ ktr.9 CTR1.9 \ ktr.9 CTR2.9 \ ktr.9 CTR3.9 \ ktr.9 CTR4.9 \ ktr.9 CTR5.9 \ ktr.9 CTR6.9 MLINKS+=lock.9 lockdestroy.9 \ lock.9 lockinit.9 \ lock.9 lockmgr.9 \ lock.9 lockmgr_args.9 \ lock.9 lockmgr_args_rw.9 \ lock.9 lockmgr_assert.9 \ lock.9 lockmgr_disown.9 \ lock.9 lockmgr_disowned.9 \ lock.9 lockmgr_lock_flags.9 \ lock.9 lockmgr_printinfo.9 \ lock.9 lockmgr_recursed.9 \ lock.9 lockmgr_rw.9 \ lock.9 lockmgr_slock.9 \ lock.9 lockmgr_unlock.9 \ lock.9 lockmgr_xlock.9 \ lock.9 lockstatus.9 MLINKS+=LOCK_PROFILING.9 MUTEX_PROFILING.9 MLINKS+=make_dev.9 destroy_dev.9 \ make_dev.9 destroy_dev_drain.9 \ make_dev.9 destroy_dev_sched.9 \ make_dev.9 destroy_dev_sched_cb.9 \ make_dev.9 dev_depends.9 \ make_dev.9 make_dev_alias.9 \ make_dev.9 make_dev_alias_p.9 \ make_dev.9 make_dev_cred.9 \ make_dev.9 make_dev_credf.9 \ make_dev.9 make_dev_p.9 \ make_dev.9 make_dev_s.9 MLINKS+=malloc.9 free.9 \ malloc.9 malloc_aligned.9 \ malloc.9 malloc_domainset.9 \ malloc.9 malloc_domainset_aligned.9 \ malloc.9 malloc_domainset_exec.9 \ malloc.9 malloc_exec.9 \ malloc.9 malloc_usable_size.9 \ malloc.9 mallocarray.9 \ malloc.9 mallocarray_domainset.9 \ malloc.9 MALLOC_DECLARE.9 \ malloc.9 MALLOC_DEFINE.9 \ malloc.9 realloc.9 \ malloc.9 reallocf.9 \ malloc.9 zfree.9 MLINKS+=mbchain.9 mb_detach.9 \ mbchain.9 mb_done.9 \ mbchain.9 mb_fixhdr.9 \ mbchain.9 mb_init.9 \ mbchain.9 mb_initm.9 \ mbchain.9 mb_put_int64be.9 \ mbchain.9 mb_put_int64le.9 \ mbchain.9 mb_put_mbuf.9 \ mbchain.9 mb_put_mem.9 \ mbchain.9 mb_put_uint16be.9 \ mbchain.9 mb_put_uint16le.9 \ mbchain.9 mb_put_uint32be.9 \ mbchain.9 mb_put_uint32le.9 \ mbchain.9 mb_put_uint8.9 \ mbchain.9 mb_put_uio.9 \ mbchain.9 mb_reserve.9 MLINKS+=\ mbuf.9 m_adj.9 \ mbuf.9 m_align.9 \ mbuf.9 M_ALIGN.9 \ mbuf.9 m_append.9 \ mbuf.9 m_apply.9 \ mbuf.9 m_cat.9 \ mbuf.9 m_catpkt.9 \ mbuf.9 MCHTYPE.9 \ mbuf.9 MCLGET.9 \ mbuf.9 m_collapse.9 \ mbuf.9 m_copyback.9 \ mbuf.9 m_copydata.9 \ mbuf.9 m_copym.9 \ mbuf.9 m_copypacket.9 \ mbuf.9 m_copyup.9 \ mbuf.9 m_defrag.9 \ mbuf.9 m_devget.9 \ mbuf.9 m_dup.9 \ mbuf.9 m_dup_pkthdr.9 \ mbuf.9 MEXTADD.9 \ mbuf.9 m_fixhdr.9 \ mbuf.9 m_free.9 \ mbuf.9 m_freem.9 \ mbuf.9 MGET.9 \ mbuf.9 m_get.9 \ mbuf.9 m_get2.9 \ mbuf.9 m_get3.9 \ mbuf.9 m_getjcl.9 \ mbuf.9 m_getcl.9 \ mbuf.9 MGETHDR.9 \ mbuf.9 m_gethdr.9 \ mbuf.9 m_getm.9 \ mbuf.9 m_getptr.9 \ mbuf.9 MH_ALIGN.9 \ mbuf.9 M_LEADINGSPACE.9 \ mbuf.9 m_length.9 \ mbuf.9 M_MOVE_PKTHDR.9 \ mbuf.9 m_move_pkthdr.9 \ mbuf.9 M_PREPEND.9 \ mbuf.9 m_prepend.9 \ mbuf.9 m_pulldown.9 \ mbuf.9 m_pullup.9 \ mbuf.9 m_split.9 \ mbuf.9 mtod.9 \ mbuf.9 mtodo.9 \ mbuf.9 M_TRAILINGSPACE.9 \ mbuf.9 m_unshare.9 \ mbuf.9 M_WRITABLE.9 MLINKS+=\ mbuf_tags.9 m_tag_alloc.9 \ mbuf_tags.9 m_tag_copy.9 \ mbuf_tags.9 m_tag_copy_chain.9 \ mbuf_tags.9 m_tag_delete.9 \ 
mbuf_tags.9 m_tag_delete_chain.9 \ mbuf_tags.9 m_tag_delete_nonpersistent.9 \ mbuf_tags.9 m_tag_find.9 \ mbuf_tags.9 m_tag_first.9 \ mbuf_tags.9 m_tag_free.9 \ mbuf_tags.9 m_tag_get.9 \ mbuf_tags.9 m_tag_init.9 \ mbuf_tags.9 m_tag_locate.9 \ mbuf_tags.9 m_tag_next.9 \ mbuf_tags.9 m_tag_prepend.9 \ mbuf_tags.9 m_tag_unlink.9 MLINKS+=mdchain.9 md_append_record.9 \ mdchain.9 md_done.9 \ mdchain.9 md_get_int64.9 \ mdchain.9 md_get_int64be.9 \ mdchain.9 md_get_int64le.9 \ mdchain.9 md_get_mbuf.9 \ mdchain.9 md_get_mem.9 \ mdchain.9 md_get_uint16.9 \ mdchain.9 md_get_uint16be.9 \ mdchain.9 md_get_uint16le.9 \ mdchain.9 md_get_uint32.9 \ mdchain.9 md_get_uint32be.9 \ mdchain.9 md_get_uint32le.9 \ mdchain.9 md_get_uint8.9 \ mdchain.9 md_get_uio.9 \ mdchain.9 md_initm.9 \ mdchain.9 md_next_record.9 MLINKS+=microtime.9 bintime.9 \ microtime.9 getbintime.9 \ microtime.9 getmicrotime.9 \ microtime.9 getnanotime.9 \ microtime.9 nanotime.9 MLINKS+=microuptime.9 binuptime.9 \ microuptime.9 getbinuptime.9 \ microuptime.9 getmicrouptime.9 \ microuptime.9 getnanouptime.9 \ microuptime.9 getsbinuptime.9 \ microuptime.9 nanouptime.9 \ microuptime.9 sbinuptime.9 MLINKS+=mod_cc.9 CCV.9 \ mod_cc.9 DECLARE_CC_MODULE.9 MLINKS+=mtx_pool.9 mtx_pool_alloc.9 \ mtx_pool.9 mtx_pool_create.9 \ mtx_pool.9 mtx_pool_destroy.9 \ mtx_pool.9 mtx_pool_find.9 \ mtx_pool.9 mtx_pool_lock.9 \ mtx_pool.9 mtx_pool_lock_spin.9 \ mtx_pool.9 mtx_pool_unlock.9 \ mtx_pool.9 mtx_pool_unlock_spin.9 MLINKS+=mutex.9 mtx_assert.9 \ mutex.9 mtx_destroy.9 \ mutex.9 mtx_init.9 \ mutex.9 mtx_initialized.9 \ mutex.9 mtx_lock.9 \ mutex.9 mtx_lock_flags.9 \ mutex.9 mtx_lock_spin.9 \ mutex.9 mtx_lock_spin_flags.9 \ mutex.9 mtx_owned.9 \ mutex.9 mtx_recursed.9 \ mutex.9 mtx_sleep.9 \ mutex.9 MTX_SYSINIT.9 \ mutex.9 mtx_trylock.9 \ mutex.9 mtx_trylock_flags.9 \ mutex.9 mtx_trylock_spin.9 \ mutex.9 mtx_trylock_spin_flags.9 \ mutex.9 mtx_unlock.9 \ mutex.9 mtx_unlock_flags.9 \ mutex.9 mtx_unlock_spin.9 \ mutex.9 mtx_unlock_spin_flags.9 MLINKS+=namei.9 NDFREE.9 \ namei.9 NDINIT.9 MLINKS+=netisr.9 netisr_clearqdrops.9 \ netisr.9 netisr_default_flow2cpu.9 \ netisr.9 netisr_dispatch.9 \ netisr.9 netisr_dispatch_src.9 \ netisr.9 netisr_get_cpucount.9 \ netisr.9 netisr_get_cpuid.9 \ netisr.9 netisr_getqdrops.9 \ netisr.9 netisr_getqlimit.9 \ netisr.9 netisr_queue.9 \ netisr.9 netisr_queue_src.9 \ netisr.9 netisr_register.9 \ netisr.9 netisr_setqlimit.9 \ netisr.9 netisr_unregister.9 MLINKS+=nv.9 libnv.9 \ nv.9 nvlist.9 \ nv.9 nvlist_add_binary.9 \ nv.9 nvlist_add_bool.9 \ nv.9 nvlist_add_bool_array.9 \ nv.9 nvlist_add_descriptor.9 \ nv.9 nvlist_add_descriptor_array.9 \ nv.9 nvlist_add_null.9 \ nv.9 nvlist_add_number.9 \ nv.9 nvlist_add_number_array.9 \ nv.9 nvlist_add_nvlist.9 \ nv.9 nvlist_add_nvlist_array.9 \ nv.9 nvlist_add_string.9 \ nv.9 nvlist_add_stringf.9 \ nv.9 nvlist_add_stringv.9 \ nv.9 nvlist_add_string_array.9 \ nv.9 nvlist_append_bool_array.9 \ nv.9 nvlist_append_descriptor_array.9 \ nv.9 nvlist_append_nvlist_array.9 \ nv.9 nvlist_append_number_array.9 \ nv.9 nvlist_append_string_array.9 \ nv.9 nvlist_clone.9 \ nv.9 nvlist_create.9 \ nv.9 nvlist_destroy.9 \ nv.9 nvlist_dump.9 \ nv.9 nvlist_empty.9 \ nv.9 nvlist_error.9 \ nv.9 nvlist_exists.9 \ nv.9 nvlist_exists_binary.9 \ nv.9 nvlist_exists_bool.9 \ nv.9 nvlist_exists_bool_array.9 \ nv.9 nvlist_exists_descriptor.9 \ nv.9 nvlist_exists_descriptor_array.9 \ nv.9 nvlist_exists_null.9 \ nv.9 nvlist_exists_number.9 \ nv.9 nvlist_exists_number_array.9 \ nv.9 nvlist_exists_nvlist.9 \ nv.9 
nvlist_exists_nvlist_array.9 \ nv.9 nvlist_exists_string.9 \ nv.9 nvlist_exists_type.9 \ nv.9 nvlist_fdump.9 \ nv.9 nvlist_flags.9 \ nv.9 nvlist_free.9 \ nv.9 nvlist_free_binary.9 \ nv.9 nvlist_free_bool.9 \ nv.9 nvlist_free_bool_array.9 \ nv.9 nvlist_free_descriptor.9 \ nv.9 nvlist_free_descriptor_array.9 \ nv.9 nvlist_free_null.9 \ nv.9 nvlist_free_number.9 \ nv.9 nvlist_free_number_array.9 \ nv.9 nvlist_free_nvlist.9 \ nv.9 nvlist_free_nvlist_array.9 \ nv.9 nvlist_free_string.9 \ nv.9 nvlist_free_string_array.9 \ nv.9 nvlist_free_type.9 \ nv.9 nvlist_get_binary.9 \ nv.9 nvlist_get_bool.9 \ nv.9 nvlist_get_bool_array.9 \ nv.9 nvlist_get_descriptor.9 \ nv.9 nvlist_get_descriptor_array.9 \ nv.9 nvlist_get_number.9 \ nv.9 nvlist_get_number_array.9 \ nv.9 nvlist_get_nvlist.9 \ nv.9 nvlist_get_nvlist_array.9 \ nv.9 nvlist_get_parent.9 \ nv.9 nvlist_get_string.9 \ nv.9 nvlist_get_string_array.9 \ nv.9 nvlist_move_binary.9 \ nv.9 nvlist_move_descriptor.9 \ nv.9 nvlist_move_descriptor_array.9 \ nv.9 nvlist_move_nvlist.9 \ nv.9 nvlist_move_nvlist_array.9 \ nv.9 nvlist_move_string.9 \ nv.9 nvlist_move_string_array.9 \ nv.9 nvlist_next.9 \ nv.9 nvlist_pack.9 \ nv.9 nvlist_recv.9 \ nv.9 nvlist_send.9 \ nv.9 nvlist_set_error.9 \ nv.9 nvlist_size.9 \ nv.9 nvlist_take_binary.9 \ nv.9 nvlist_take_bool.9 \ nv.9 nvlist_take_bool_array.9 \ nv.9 nvlist_take_descriptor.9 \ nv.9 nvlist_take_descriptor_array.9 \ nv.9 nvlist_take_number.9 \ nv.9 nvlist_take_number_array.9 \ nv.9 nvlist_take_nvlist.9 \ nv.9 nvlist_take_nvlist_array.9 \ nv.9 nvlist_take_string.9 \ nv.9 nvlist_take_string_array.9 \ nv.9 nvlist_unpack.9 \ nv.9 nvlist_xfer.9 MLINKS+=OF_child.9 OF_parent.9 \ OF_child.9 OF_peer.9 MLINKS+=OF_device_from_xref.9 OF_device_register_xref.9 \ OF_device_from_xref.9 OF_xref_from_device.9 MLINKS+=OF_getprop.9 OF_getencprop.9 \ OF_getprop.9 OF_getencprop_alloc.9 \ OF_getprop.9 OF_getencprop_alloc_multi.9 \ OF_getprop.9 OF_getprop_alloc.9 \ OF_getprop.9 OF_getprop_alloc_multi.9 \ OF_getprop.9 OF_getproplen.9 \ OF_getprop.9 OF_hasprop.9 \ OF_getprop.9 OF_nextprop.9 \ OF_getprop.9 OF_prop_free.9 \ OF_getprop.9 OF_searchencprop.9 \ OF_getprop.9 OF_searchprop.9 \ OF_getprop.9 OF_setprop.9 MLINKS+=OF_node_from_xref.9 OF_xref_from_node.9 MLINKS+=ofw_bus_is_compatible.9 ofw_bus_is_compatible_strict.9 \ ofw_bus_is_compatible.9 ofw_bus_node_is_compatible.9 \ ofw_bus_is_compatible.9 ofw_bus_search_compatible.9 MLINKS+= ofw_bus_status_okay.9 ofw_bus_get_status.9 \ ofw_bus_status_okay.9 ofw_bus_node_status_okay.9 MLINKS+=ofw_graph.9 ofw_graph_get_device_by_port_ep.9 \ ofw_graph.9 ofw_graph_get_endpoint_by_idx.9 \ ofw_graph.9 ofw_graph_get_port_by_idx.9 \ ofw_graph.9 ofw_graph_get_remove_endpoint.9 \ ofw_graph.9 ofw_graph_get_remove_parent.9 \ ofw_graph.9 ofw_graph_port_get_num_endpoints.9 MLINKS+=osd.9 osd_call.9 \ osd.9 osd_del.9 \ osd.9 osd_deregister.9 \ osd.9 osd_exit.9 \ osd.9 osd_free_reserved.9 \ osd.9 osd_get.9 \ osd.9 osd_register.9 \ osd.9 osd_reserve.9 \ osd.9 osd_set.9 \ osd.9 osd_set_reserved.9 MLINKS+=panic.9 vpanic.9 \ panic.9 KERNEL_PANICKED.9 MLINKS+=pci.9 pci_alloc_msi.9 \ pci.9 pci_alloc_msix.9 \ pci.9 pci_disable_busmaster.9 \ pci.9 pci_disable_io.9 \ pci.9 pci_enable_busmaster.9 \ pci.9 pci_enable_io.9 \ pci.9 pci_find_bsf.9 \ pci.9 pci_find_cap.9 \ pci.9 pci_find_dbsf.9 \ pci.9 pci_find_device.9 \ pci.9 pci_find_extcap.9 \ pci.9 pci_find_htcap.9 \ pci.9 pci_find_pcie_root_port.9 \ pci.9 pci_get_id.9 \ pci.9 pci_get_max_read_req.9 \ pci.9 pci_get_powerstate.9 \ pci.9 pci_get_vpd_ident.9 \ pci.9 
pci_get_vpd_readonly.9 \ pci.9 pci_iov_attach.9 \ pci.9 pci_iov_attach_name.9 \ pci.9 pci_iov_detach.9 \ pci.9 pci_msi_count.9 \ pci.9 pci_msix_count.9 \ pci.9 pci_msix_pba_bar.9 \ pci.9 pci_msix_table_bar.9 \ pci.9 pci_pending_msix.9 \ pci.9 pci_read_config.9 \ pci.9 pci_release_msi.9 \ pci.9 pci_remap_msix.9 \ pci.9 pci_restore_state.9 \ pci.9 pci_save_state.9 \ pci.9 pci_set_powerstate.9 \ pci.9 pci_set_max_read_req.9 \ pci.9 pci_write_config.9 \ pci.9 pcie_adjust_config.9 \ pci.9 pcie_flr.9 \ pci.9 pcie_max_completion_timeout.9 \ pci.9 pcie_read_config.9 \ pci.9 pcie_wait_for_pending_transactions.9 \ pci.9 pcie_write_config.9 MLINKS+=pci_iov_schema.9 pci_iov_schema_alloc_node.9 \ pci_iov_schema.9 pci_iov_schema_add_bool.9 \ pci_iov_schema.9 pci_iov_schema_add_string.9 \ pci_iov_schema.9 pci_iov_schema_add_uint8.9 \ pci_iov_schema.9 pci_iov_schema_add_uint16.9 \ pci_iov_schema.9 pci_iov_schema_add_uint32.9 \ pci_iov_schema.9 pci_iov_schema_add_uint64.9 \ pci_iov_schema.9 pci_iov_schema_add_unicast_mac.9 MLINKS+=pfil.9 pfil_add_hook.9 \ pfil.9 pfil_head_register.9 \ pfil.9 pfil_head_unregister.9 \ pfil.9 pfil_remove_hook.9 \ pfil.9 pfil_run_hooks.9 \ pfil.9 pfil_link.9 MLINKS+=pfind.9 zpfind.9 MLINKS+=PHOLD.9 PRELE.9 \ PHOLD.9 _PHOLD.9 \ PHOLD.9 _PRELE.9 \ PHOLD.9 PROC_ASSERT_HELD.9 \ PHOLD.9 PROC_ASSERT_NOT_HELD.9 MLINKS+=pmap_copy.9 pmap_copy_page.9 MLINKS+=pmap_extract.9 pmap_extract_and_hold.9 MLINKS+=pmap_kextract.9 vtophys.9 MLINKS+=pmap_is_modified.9 pmap_ts_referenced.9 MLINKS+=pmap_pinit.9 pmap_pinit0.9 MLINKS+=pmap_qenter.9 pmap_qremove.9 MLINKS+=pmap_quick_enter_page.9 pmap_quick_remove_page.9 MLINKS+=pmap_remove.9 pmap_remove_all.9 \ pmap_remove.9 pmap_remove_pages.9 MLINKS+=pmap_resident_count.9 pmap_wired_count.9 MLINKS+=pmap_zero_page.9 pmap_zero_area.9 MLINKS+=printf.9 log.9 \ printf.9 tprintf.9 \ printf.9 uprintf.9 \ printf.9 vlog.9 \ printf.9 vprintf.9 MLINKS+=priv.9 priv_check.9 \ priv.9 priv_check_cred.9 MLINKS+=prng.9 prng32.9 \ prng.9 prng32_bounded.9 \ prng.9 prng64.9 \ prng.9 prng64_bounded.9 MLINKS+=proc_rwmem.9 proc_readmem.9 \ proc_rwmem.9 proc_writemem.9 MLINKS+=psignal.9 pgsignal.9 \ psignal.9 tdsignal.9 MLINKS+=pwmbus.9 pwm.9 MLINKS+=random.9 arc4rand.9 \ random.9 arc4random.9 \ random.9 is_random_seeded.9 \ random.9 read_random.9 \ random.9 read_random_uio.9 \ random.9 srandom.9 MLINKS+=random_harvest.9 random_harvest_direct.9 \ random_harvest.9 random_harvest_fast.9 \ random_harvest.9 random_harvest_queue.9 MLINKS+=ratecheck.9 ppsratecheck.9 MLINKS+=refcount.9 refcount_acquire.9 \ refcount.9 refcount_acquire_checked.9 \ refcount.9 refcount_acquire_if_not_zero.9 \ refcount.9 refcount_init.9 \ refcount.9 refcount_load.9 \ refcount.9 refcount_release.9 \ refcount.9 refcount_release_if_last.9 \ refcount.9 refcount_release_if_not_last.9 MLINKS+=resource_int_value.9 resource_long_value.9 \ resource_int_value.9 resource_string_value.9 MLINKS+=rman.9 rman_activate_resource.9 \ rman.9 rman_adjust_resource.9 \ rman.9 rman_deactivate_resource.9 \ rman.9 rman_fini.9 \ rman.9 rman_first_free_region.9 \ rman.9 rman_get_bushandle.9 \ rman.9 rman_get_bustag.9 \ rman.9 rman_get_device.9 \ rman.9 rman_get_end.9 \ rman.9 rman_get_flags.9 \ rman.9 rman_get_mapping.9 \ rman.9 rman_get_rid.9 \ rman.9 rman_get_size.9 \ rman.9 rman_get_start.9 \ rman.9 rman_get_type.9 \ rman.9 rman_get_virtual.9 \ rman.9 rman_init.9 \ rman.9 rman_init_from_resource.9 \ rman.9 rman_is_region_manager.9 \ rman.9 rman_last_free_region.9 \ rman.9 rman_make_alignment_flags.9 \ rman.9 
rman_manage_region.9 \ rman.9 rman_release_resource.9 \ rman.9 rman_reserve_resource.9 \ rman.9 rman_reserve_resource_bound.9 \ rman.9 rman_set_bushandle.9 \ rman.9 rman_set_bustag.9 \ rman.9 rman_set_mapping.9 \ rman.9 rman_set_rid.9 \ rman.9 rman_set_type.9 \ rman.9 rman_set_virtual.9 MLINKS+=rmlock.9 rm_assert.9 \ rmlock.9 rm_destroy.9 \ rmlock.9 rm_init.9 \ rmlock.9 rm_init_flags.9 \ rmlock.9 rm_rlock.9 \ rmlock.9 rm_runlock.9 \ rmlock.9 rm_sleep.9 \ rmlock.9 RM_SYSINIT.9 \ rmlock.9 RM_SYSINIT_FLAGS.9 \ rmlock.9 rm_try_rlock.9 \ rmlock.9 rm_wlock.9 \ rmlock.9 rm_wowned.9 \ rmlock.9 rm_wunlock.9 MLINKS+=runqueue.9 choosethread.9 \ runqueue.9 procrunnable.9 \ runqueue.9 remrunqueue.9 \ runqueue.9 setrunqueue.9 MLINKS+=rwlock.9 rw_assert.9 \ rwlock.9 rw_destroy.9 \ rwlock.9 rw_downgrade.9 \ rwlock.9 rw_init.9 \ rwlock.9 rw_init_flags.9 \ rwlock.9 rw_initialized.9 \ rwlock.9 rw_rlock.9 \ rwlock.9 rw_runlock.9 \ rwlock.9 rw_unlock.9 \ rwlock.9 rw_sleep.9 \ rwlock.9 RW_SYSINIT.9 \ rwlock.9 RW_SYSINIT_FLAGS.9 \ rwlock.9 rw_try_rlock.9 \ rwlock.9 rw_try_upgrade.9 \ rwlock.9 rw_try_wlock.9 \ rwlock.9 rw_wlock.9 \ rwlock.9 rw_wowned.9 \ rwlock.9 rw_wunlock.9 MLINKS+=sbuf.9 sbuf_bcat.9 \ sbuf.9 sbuf_bcopyin.9 \ sbuf.9 sbuf_bcpy.9 \ sbuf.9 sbuf_cat.9 \ sbuf.9 sbuf_clear.9 \ sbuf.9 sbuf_clear_flags.9 \ sbuf.9 sbuf_copyin.9 \ sbuf.9 sbuf_cpy.9 \ sbuf.9 sbuf_data.9 \ sbuf.9 sbuf_delete.9 \ sbuf.9 sbuf_done.9 \ sbuf.9 sbuf_error.9 \ sbuf.9 sbuf_finish.9 \ sbuf.9 sbuf_get_flags.9 \ sbuf.9 sbuf_hexdump.9 \ sbuf.9 sbuf_len.9 \ sbuf.9 sbuf_new.9 \ sbuf.9 sbuf_new_auto.9 \ sbuf.9 sbuf_new_for_sysctl.9 \ sbuf.9 sbuf_nl_terminate.9 \ sbuf.9 sbuf_printf.9 \ sbuf.9 sbuf_printf_drain.9 \ sbuf.9 sbuf_putbuf.9 \ sbuf.9 sbuf_putc.9 \ sbuf.9 sbuf_set_drain.9 \ sbuf.9 sbuf_set_flags.9 \ sbuf.9 sbuf_setpos.9 \ sbuf.9 sbuf_start_section.9 \ sbuf.9 sbuf_end_section.9 \ sbuf.9 sbuf_trim.9 \ sbuf.9 sbuf_vprintf.9 MLINKS+=scheduler.9 curpriority_cmp.9 \ scheduler.9 maybe_resched.9 \ scheduler.9 propagate_priority.9 \ scheduler.9 resetpriority.9 \ scheduler.9 roundrobin.9 \ scheduler.9 roundrobin_interval.9 \ scheduler.9 schedclock.9 \ scheduler.9 schedcpu.9 \ scheduler.9 sched_setup.9 \ scheduler.9 setrunnable.9 \ scheduler.9 updatepri.9 MLINKS+=SDT.9 SDT_PROVIDER_DECLARE.9 \ SDT.9 SDT_PROVIDER_DEFINE.9 \ SDT.9 SDT_PROBE_DECLARE.9 \ SDT.9 SDT_PROBE_DEFINE.9 \ SDT.9 SDT_PROBE.9 MLINKS+=securelevel_gt.9 securelevel_ge.9 MLINKS+=selrecord.9 seldrain.9 \ selrecord.9 selwakeup.9 MLINKS+=sema.9 sema_destroy.9 \ sema.9 sema_init.9 \ sema.9 sema_post.9 \ sema.9 sema_timedwait.9 \ sema.9 sema_trywait.9 \ sema.9 sema_value.9 \ sema.9 sema_wait.9 MLINKS+=seqc.9 seqc_consistent.9 \ seqc.9 seqc_read.9 \ seqc.9 seqc_write_begin.9 \ seqc.9 seqc_write_end.9 MLINKS+=sf_buf.9 sf_buf_alloc.9 \ sf_buf.9 sf_buf_free.9 \ sf_buf.9 sf_buf_kva.9 \ sf_buf.9 sf_buf_page.9 MLINKS+=sglist.9 sglist_alloc.9 \ sglist.9 sglist_append.9 \ sglist.9 sglist_append_bio.9 \ sglist.9 sglist_append_mbuf.9 \ sglist.9 sglist_append_mbuf_epg.9 \ sglist.9 sglist_append_phys.9 \ sglist.9 sglist_append_sglist.9 \ sglist.9 sglist_append_single_mbuf.9 \ sglist.9 sglist_append_uio.9 \ sglist.9 sglist_append_user.9 \ sglist.9 sglist_append_vmpages.9 \ sglist.9 sglist_build.9 \ sglist.9 sglist_clone.9 \ sglist.9 sglist_consume_uio.9 \ sglist.9 sglist_count.9 \ sglist.9 sglist_count_mbuf_epg.9 \ sglist.9 sglist_count_vmpages.9 \ sglist.9 sglist_free.9 \ sglist.9 sglist_hold.9 \ sglist.9 sglist_init.9 \ sglist.9 sglist_join.9 \ sglist.9 sglist_length.9 \ sglist.9 
sglist_reset.9 \ sglist.9 sglist_slice.9 \ sglist.9 sglist_split.9 MLINKS+=shm_map.9 shm_unmap.9 MLINKS+=signal.9 cursig.9 \ signal.9 execsigs.9 \ signal.9 issignal.9 \ signal.9 killproc.9 \ signal.9 pgsigio.9 \ signal.9 postsig.9 \ signal.9 SETSETNEQ.9 \ signal.9 SETSETOR.9 \ signal.9 SIGADDSET.9 \ signal.9 SIG_CONTSIGMASK.9 \ signal.9 SIGDELSET.9 \ signal.9 SIGEMPTYSET.9 \ signal.9 sigexit.9 \ signal.9 SIGFILLSET.9 \ signal.9 siginit.9 \ signal.9 SIGISEMPTY.9 \ signal.9 SIGISMEMBER.9 \ signal.9 SIGNOTEMPTY.9 \ signal.9 signotify.9 \ signal.9 SIGPENDING.9 \ signal.9 SIGSETAND.9 \ signal.9 SIGSETCANTMASK.9 \ signal.9 SIGSETEQ.9 \ signal.9 SIGSETNAND.9 \ signal.9 SIG_STOPSIGMASK.9 \ signal.9 trapsignal.9 MLINKS+=sleep.9 msleep.9 \ sleep.9 msleep_sbt.9 \ sleep.9 msleep_spin.9 \ sleep.9 msleep_spin_sbt.9 \ sleep.9 pause.9 \ sleep.9 pause_sig.9 \ sleep.9 pause_sbt.9 \ sleep.9 tsleep.9 \ sleep.9 tsleep_sbt.9 \ sleep.9 wakeup.9 \ sleep.9 wakeup_one.9 \ sleep.9 wakeup_any.9 MLINKS+=sleepqueue.9 init_sleepqueues.9 \ sleepqueue.9 sleepq_abort.9 \ sleepqueue.9 sleepq_add.9 \ sleepqueue.9 sleepq_alloc.9 \ sleepqueue.9 sleepq_broadcast.9 \ sleepqueue.9 sleepq_free.9 \ sleepqueue.9 sleepq_lookup.9 \ sleepqueue.9 sleepq_lock.9 \ sleepqueue.9 sleepq_release.9 \ sleepqueue.9 sleepq_remove.9 \ sleepqueue.9 sleepq_set_timeout.9 \ sleepqueue.9 sleepq_set_timeout_sbt.9 \ sleepqueue.9 sleepq_signal.9 \ sleepqueue.9 sleepq_sleepcnt.9 \ sleepqueue.9 sleepq_timedwait.9 \ sleepqueue.9 sleepq_timedwait_sig.9 \ sleepqueue.9 sleepq_type.9 \ sleepqueue.9 sleepq_wait.9 \ sleepqueue.9 sleepq_wait_sig.9 MLINKS+=smr.9 smr_advance.9 \ smr.9 smr_create.9 \ smr.9 smr_destroy.9 \ smr.9 smr_enter.9 \ smr.9 smr_exit.9 \ smr.9 smr_poll.9 \ smr.9 smr_synchronize.9 \ smr.9 smr_wait.9 MLINKS+=socket.9 soabort.9 \ socket.9 soaccept.9 \ socket.9 sobind.9 \ socket.9 socheckuid.9 \ socket.9 soclose.9 \ socket.9 soconnect.9 \ socket.9 socreate.9 \ socket.9 sodisconnect.9 \ socket.9 sodtor_set.9 \ socket.9 sodupsockaddr.9 \ socket.9 sofree.9 \ socket.9 sogetopt.9 \ socket.9 sohasoutofband.9 \ socket.9 solisten.9 \ socket.9 solisten_proto.9 \ socket.9 solisten_proto_check.9 \ socket.9 sonewconn.9 \ socket.9 sooptcopyin.9 \ socket.9 sooptcopyout.9 \ socket.9 sopoll.9 \ socket.9 sopoll_generic.9 \ socket.9 soreceive.9 \ socket.9 soreceive_dgram.9 \ socket.9 soreceive_generic.9 \ socket.9 soreceive_stream.9 \ socket.9 soreserve.9 \ socket.9 sorflush.9 \ socket.9 sosend.9 \ socket.9 sosend_dgram.9 \ socket.9 sosend_generic.9 \ socket.9 sosetopt.9 \ socket.9 soshutdown.9 \ socket.9 sotoxsocket.9 \ socket.9 soupcall_clear.9 \ socket.9 soupcall_set.9 \ socket.9 sowakeup.9 MLINKS+=stack.9 stack_copy.9 \ stack.9 stack_create.9 \ stack.9 stack_destroy.9 \ stack.9 stack_print.9 \ stack.9 stack_print_ddb.9 \ stack.9 stack_print_short.9 \ stack.9 stack_print_short_ddb.9 \ stack.9 stack_put.9 \ stack.9 stack_save.9 \ stack.9 stack_sbuf_print.9 \ stack.9 stack_sbuf_print_ddb.9 \ stack.9 stack_zero.9 MLINKS+=store.9 subyte.9 \ store.9 suword.9 \ store.9 suword16.9 \ store.9 suword32.9 \ store.9 suword64.9 MLINKS+=swi.9 swi_add.9 \ swi.9 swi_remove.9 \ swi.9 swi_sched.9 MLINKS+=sx.9 sx_assert.9 \ sx.9 sx_destroy.9 \ sx.9 sx_downgrade.9 \ sx.9 sx_init.9 \ sx.9 sx_init_flags.9 \ sx.9 sx_sleep.9 \ sx.9 sx_slock.9 \ sx.9 sx_slock_sig.9 \ sx.9 sx_sunlock.9 \ sx.9 SX_SYSINIT.9 \ sx.9 SX_SYSINIT_FLAGS.9 \ sx.9 sx_try_slock.9 \ sx.9 sx_try_upgrade.9 \ sx.9 sx_try_xlock.9 \ sx.9 sx_unlock.9 \ sx.9 sx_xholder.9 \ sx.9 sx_xlock.9 \ sx.9 sx_xlock_sig.9 \ sx.9 
sx_xlocked.9 \ sx.9 sx_xunlock.9 MLINKS+=syscall_helper_register.9 syscall_helper_unregister.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_COMPAT.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_COMPAT_F.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_F.9 MLINKS+=sysctl.9 SYSCTL_DECL.9 \ sysctl.9 SYSCTL_ADD_CONST_STRING.9 \ sysctl.9 SYSCTL_ADD_INT.9 \ sysctl.9 SYSCTL_ADD_LONG.9 \ sysctl.9 SYSCTL_ADD_NODE.9 \ sysctl.9 SYSCTL_ADD_NODE_WITH_LABEL.9 \ sysctl.9 SYSCTL_ADD_OPAQUE.9 \ sysctl.9 SYSCTL_ADD_PROC.9 \ sysctl.9 SYSCTL_ADD_QUAD.9 \ sysctl.9 SYSCTL_ADD_ROOT_NODE.9 \ sysctl.9 SYSCTL_ADD_S8.9 \ sysctl.9 SYSCTL_ADD_S16.9 \ sysctl.9 SYSCTL_ADD_S32.9 \ sysctl.9 SYSCTL_ADD_S64.9 \ sysctl.9 SYSCTL_ADD_STRING.9 \ sysctl.9 SYSCTL_ADD_STRUCT.9 \ sysctl.9 SYSCTL_ADD_TIMEVAL_SEC.9 \ sysctl.9 SYSCTL_ADD_U8.9 \ sysctl.9 SYSCTL_ADD_U16.9 \ sysctl.9 SYSCTL_ADD_U32.9 \ sysctl.9 SYSCTL_ADD_U64.9 \ sysctl.9 SYSCTL_ADD_UAUTO.9 \ sysctl.9 SYSCTL_ADD_UINT.9 \ sysctl.9 SYSCTL_ADD_ULONG.9 \ sysctl.9 SYSCTL_ADD_UMA_CUR.9 \ sysctl.9 SYSCTL_ADD_UMA_MAX.9 \ sysctl.9 SYSCTL_ADD_UQUAD.9 \ sysctl.9 SYSCTL_CHILDREN.9 \ sysctl.9 SYSCTL_STATIC_CHILDREN.9 \ sysctl.9 SYSCTL_NODE_CHILDREN.9 \ sysctl.9 SYSCTL_PARENT.9 \ sysctl.9 SYSCTL_INT.9 \ sysctl.9 SYSCTL_INT_WITH_LABEL.9 \ sysctl.9 SYSCTL_LONG.9 \ sysctl.9 sysctl_msec_to_ticks.9 \ sysctl.9 SYSCTL_NODE.9 \ sysctl.9 SYSCTL_NODE_WITH_LABEL.9 \ sysctl.9 SYSCTL_OPAQUE.9 \ sysctl.9 SYSCTL_PROC.9 \ sysctl.9 SYSCTL_QUAD.9 \ sysctl.9 SYSCTL_ROOT_NODE.9 \ sysctl.9 SYSCTL_S8.9 \ sysctl.9 SYSCTL_S16.9 \ sysctl.9 SYSCTL_S32.9 \ sysctl.9 SYSCTL_S64.9 \ sysctl.9 SYSCTL_STRING.9 \ sysctl.9 SYSCTL_STRUCT.9 \ sysctl.9 SYSCTL_TIMEVAL_SEC.9 \ sysctl.9 SYSCTL_U8.9 \ sysctl.9 SYSCTL_U16.9 \ sysctl.9 SYSCTL_U32.9 \ sysctl.9 SYSCTL_U64.9 \ sysctl.9 SYSCTL_UINT.9 \ sysctl.9 SYSCTL_ULONG.9 \ sysctl.9 SYSCTL_UMA_CUR.9 \ sysctl.9 SYSCTL_UMA_MAX.9 \ sysctl.9 SYSCTL_UQUAD.9 MLINKS+=sysctl_add_oid.9 sysctl_move_oid.9 \ sysctl_add_oid.9 sysctl_remove_oid.9 \ sysctl_add_oid.9 sysctl_remove_name.9 MLINKS+=sysctl_ctx_init.9 sysctl_ctx_entry_add.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_del.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_find.9 \ sysctl_ctx_init.9 sysctl_ctx_free.9 MLINKS+=SYSINIT.9 SYSUNINIT.9 MLINKS+=taskqueue.9 TASK_INIT.9 \ taskqueue.9 TASK_INITIALIZER.9 \ taskqueue.9 taskqueue_block.9 \ taskqueue.9 taskqueue_cancel.9 \ taskqueue.9 taskqueue_cancel_timeout.9 \ taskqueue.9 taskqueue_create.9 \ taskqueue.9 taskqueue_create_fast.9 \ taskqueue.9 TASKQUEUE_DECLARE.9 \ taskqueue.9 TASKQUEUE_DEFINE.9 \ taskqueue.9 TASKQUEUE_DEFINE_THREAD.9 \ taskqueue.9 taskqueue_drain.9 \ taskqueue.9 taskqueue_drain_all.9 \ taskqueue.9 taskqueue_drain_timeout.9 \ taskqueue.9 taskqueue_enqueue.9 \ taskqueue.9 taskqueue_enqueue_timeout.9 \ taskqueue.9 taskqueue_enqueue_timeout_sbt.9 \ taskqueue.9 TASKQUEUE_FAST_DEFINE.9 \ taskqueue.9 TASKQUEUE_FAST_DEFINE_THREAD.9 \ taskqueue.9 taskqueue_free.9 \ taskqueue.9 taskqueue_member.9 \ taskqueue.9 taskqueue_quiesce.9 \ taskqueue.9 taskqueue_run.9 \ taskqueue.9 taskqueue_set_callback.9 \ taskqueue.9 taskqueue_start_threads.9 \ taskqueue.9 taskqueue_start_threads_cpuset.9 \ taskqueue.9 taskqueue_start_threads_in_proc.9 \ taskqueue.9 taskqueue_unblock.9 \ taskqueue.9 TIMEOUT_TASK_INIT.9 MLINKS+=tcp_functions.9 register_tcp_functions.9 \ tcp_functions.9 register_tcp_functions_as_name.9 \ tcp_functions.9 register_tcp_functions_as_names.9 \ tcp_functions.9 deregister_tcp_functions.9 MLINKS+=time.9 boottime.9 \ time.9 
time_second.9 \ time.9 time_uptime.9 MLINKS+=ucred.9 crcopy.9 \ ucred.9 crcopysafe.9 \ ucred.9 crdup.9 \ ucred.9 crfree.9 \ ucred.9 crget.9 \ ucred.9 crhold.9 \ ucred.9 crsetgroups.9 \ ucred.9 cru2x.9 MLINKS+=uidinfo.9 uifind.9 \ uidinfo.9 uifree.9 \ uidinfo.9 uihashinit.9 \ uidinfo.9 uihold.9 MLINKS+=uio.9 uiomove.9 \ uio.9 uiomove_frombuf.9 \ uio.9 uiomove_nofault.9 MLINKS+=unr.9 alloc_unr.9 \ unr.9 alloc_unrl.9 \ unr.9 alloc_unr_specific.9 \ unr.9 clear_unrhdr.9 \ unr.9 delete_unrhdr.9 \ unr.9 free_unr.9 \ unr.9 new_unrhdr.9 .if ${MK_USB} != "no" MAN+= usbdi.9 MLINKS+=usbdi.9 usbd_do_request.9 \ usbdi.9 usbd_do_request_flags.9 \ usbdi.9 usbd_errstr.9 \ usbdi.9 usbd_lookup_id_by_info.9 \ usbdi.9 usbd_lookup_id_by_uaa.9 \ usbdi.9 usbd_transfer_clear_stall.9 \ usbdi.9 usbd_transfer_drain.9 \ usbdi.9 usbd_transfer_pending.9 \ usbdi.9 usbd_transfer_poll.9 \ usbdi.9 usbd_transfer_setup.9 \ usbdi.9 usbd_transfer_start.9 \ usbdi.9 usbd_transfer_stop.9 \ usbdi.9 usbd_transfer_submit.9 \ usbdi.9 usbd_transfer_unsetup.9 \ usbdi.9 usbd_xfer_clr_flag.9 \ usbdi.9 usbd_xfer_frame_data.9 \ usbdi.9 usbd_xfer_frame_len.9 \ usbdi.9 usbd_xfer_get_frame.9 \ usbdi.9 usbd_xfer_get_priv.9 \ usbdi.9 usbd_xfer_is_stalled.9 \ usbdi.9 usbd_xfer_max_framelen.9 \ usbdi.9 usbd_xfer_max_frames.9 \ usbdi.9 usbd_xfer_max_len.9 \ usbdi.9 usbd_xfer_set_flag.9 \ usbdi.9 usbd_xfer_set_frame_data.9 \ usbdi.9 usbd_xfer_set_frame_len.9 \ usbdi.9 usbd_xfer_set_frame_offset.9 \ usbdi.9 usbd_xfer_set_frames.9 \ usbdi.9 usbd_xfer_set_interval.9 \ usbdi.9 usbd_xfer_set_priv.9 \ usbdi.9 usbd_xfer_set_stall.9 \ usbdi.9 usbd_xfer_set_timeout.9 \ usbdi.9 usbd_xfer_softc.9 \ usbdi.9 usbd_xfer_state.9 \ usbdi.9 usbd_xfer_status.9 \ usbdi.9 usb_fifo_alloc_buffer.9 \ usbdi.9 usb_fifo_attach.9 \ usbdi.9 usb_fifo_detach.9 \ usbdi.9 usb_fifo_free_buffer.9 \ usbdi.9 usb_fifo_get_data.9 \ usbdi.9 usb_fifo_get_data_buffer.9 \ usbdi.9 usb_fifo_get_data_error.9 \ usbdi.9 usb_fifo_get_data_linear.9 \ usbdi.9 usb_fifo_put_bytes_max.9 \ usbdi.9 usb_fifo_put_data.9 \ usbdi.9 usb_fifo_put_data_buffer.9 \ usbdi.9 usb_fifo_put_data_error.9 \ usbdi.9 usb_fifo_put_data_linear.9 \ usbdi.9 usb_fifo_reset.9 \ usbdi.9 usb_fifo_softc.9 \ usbdi.9 usb_fifo_wakeup.9 .endif MLINKS+=vfsconf.9 vfs_modevent.9 \ vfsconf.9 vfs_register.9 \ vfsconf.9 vfs_unregister.9 MLINKS+=vfs_getopt.9 vfs_copyopt.9 \ vfs_getopt.9 vfs_filteropt.9 \ vfs_getopt.9 vfs_flagopt.9 \ vfs_getopt.9 vfs_getopts.9 \ vfs_getopt.9 vfs_scanopt.9 \ vfs_getopt.9 vfs_setopt.9 \ vfs_getopt.9 vfs_setopt_part.9 \ vfs_getopt.9 vfs_setopts.9 MLINKS+=vhold.9 vdrop.9 \ vhold.9 vdropl.9 \ vhold.9 vholdl.9 MLINKS+=vmem.9 vmem_add.9 \ vmem.9 vmem_alloc.9 \ vmem.9 vmem_create.9 \ vmem.9 vmem_destroy.9 \ vmem.9 vmem_free.9 \ vmem.9 vmem_xalloc.9 \ vmem.9 vmem_xfree.9 MLINKS+=vm_map_lock.9 vm_map_lock_downgrade.9 \ vm_map_lock.9 vm_map_lock_read.9 \ vm_map_lock.9 vm_map_lock_upgrade.9 \ vm_map_lock.9 vm_map_trylock.9 \ vm_map_lock.9 vm_map_trylock_read.9 \ vm_map_lock.9 vm_map_unlock.9 \ vm_map_lock.9 vm_map_unlock_read.9 MLINKS+=vm_map_lookup.9 vm_map_lookup_done.9 MLINKS+=vm_map_max.9 vm_map_min.9 \ vm_map_max.9 vm_map_pmap.9 MLINKS+=vm_map_stack.9 vm_map_growstack.9 MLINKS+=vm_map_wire.9 vm_map_unwire.9 MLINKS+=vm_page_alloc.9 vm_page_alloc_after.9 \ vm_page_alloc.9 vm_page_alloc_contig.9 \ vm_page_alloc.9 vm_page_alloc_contig_domain.9 \ vm_page_alloc.9 vm_page_alloc_domain.9 \ vm_page_alloc.9 vm_page_alloc_domain_after.9 \ vm_page_alloc.9 vm_page_alloc_noobj.9 \ vm_page_alloc.9 vm_page_alloc_noobj_contig.9 \ 
vm_page_alloc.9 vm_page_alloc_noobj_contig_domain.9 \ vm_page_alloc.9 vm_page_alloc_noobj_domain.9 MLINKS+=vm_page_bits.9 vm_page_clear_dirty.9 \ vm_page_bits.9 vm_page_dirty.9 \ vm_page_bits.9 vm_page_is_valid.9 \ vm_page_bits.9 vm_page_set_invalid.9 \ vm_page_bits.9 vm_page_set_validclean.9 \ vm_page_bits.9 vm_page_test_dirty.9 \ vm_page_bits.9 vm_page_undirty.9 \ vm_page_bits.9 vm_page_zero_invalid.9 MLINKS+=vm_page_busy.9 vm_page_busied.9 \ vm_page_busy.9 vm_page_busy_downgrade.9 \ vm_page_busy.9 vm_page_busy_sleep.9 \ vm_page_busy.9 vm_page_sbusied.9 \ vm_page_busy.9 vm_page_sunbusy.9 \ vm_page_busy.9 vm_page_trysbusy.9 \ vm_page_busy.9 vm_page_tryxbusy.9 \ vm_page_busy.9 vm_page_xbusied.9 \ vm_page_busy.9 vm_page_xunbusy.9 \ vm_page_busy.9 vm_page_assert_sbusied.9 \ vm_page_busy.9 vm_page_assert_unbusied.9 \ vm_page_busy.9 vm_page_assert_xbusied.9 MLINKS+=vm_page_aflag.9 vm_page_aflag_clear.9 \ vm_page_aflag.9 vm_page_aflag_set.9 \ vm_page_aflag.9 vm_page_reference.9 MLINKS+=vm_page_free.9 vm_page_free_toq.9 \ vm_page_free.9 vm_page_free_zero.9 \ vm_page_free.9 vm_page_try_to_free.9 MLINKS+=vm_page_insert.9 vm_page_remove.9 MLINKS+=vm_page_wire.9 vm_page_unwire.9 \ vm_page_wire.9 vm_page_unwire_noq.9 \ vm_page_wire.9 vm_page_wire_mapped.9 MLINKS+=VOP_ACCESS.9 VOP_ACCESSX.9 MLINKS+=VOP_ATTRIB.9 VOP_GETATTR.9 \ VOP_ATTRIB.9 VOP_SETATTR.9 \ VOP_ATTRIB.9 VOP_STAT.9 MLINKS+=VOP_CREATE.9 VOP_MKDIR.9 \ VOP_CREATE.9 VOP_MKNOD.9 \ VOP_CREATE.9 VOP_SYMLINK.9 MLINKS+=VOP_FSYNC.9 VOP_FDATASYNC.9 MLINKS+=VOP_GETPAGES.9 VOP_PUTPAGES.9 MLINKS+=VOP_INACTIVE.9 VOP_RECLAIM.9 MLINKS+=VOP_LOCK.9 vn_lock.9 \ VOP_LOCK.9 VOP_ISLOCKED.9 \ VOP_LOCK.9 VOP_UNLOCK.9 MLINKS+=VOP_OPENCLOSE.9 VOP_CLOSE.9 \ VOP_OPENCLOSE.9 VOP_OPEN.9 MLINKS+=VOP_RDWR.9 VOP_READ.9 \ VOP_RDWR.9 VOP_WRITE.9 MLINKS+=VOP_REMOVE.9 VOP_RMDIR.9 MLINKS+=vnet.9 vimage.9 MLINKS+=vref.9 VREF.9 \ vref.9 vrefl.9 MLINKS+=vrele.9 vput.9 \ vrele.9 vunref.9 MLINKS+=vslock.9 vsunlock.9 MLINKS+=zone.9 uma.9 \ zone.9 uma_prealloc.9 \ zone.9 uma_reclaim.9 \ zone.9 uma_zalloc.9 \ zone.9 uma_zalloc_arg.9 \ zone.9 uma_zalloc_domain.9 \ zone.9 uma_zalloc_pcpu.9 \ zone.9 uma_zalloc_pcpu_arg.9 \ zone.9 uma_zcache_create.9 \ zone.9 uma_zcreate.9 \ zone.9 uma_zdestroy.9 \ zone.9 uma_zfree.9 \ zone.9 uma_zfree_arg.9 \ zone.9 uma_zfree_pcpu.9 \ zone.9 uma_zfree_pcpu_arg.9 \ zone.9 uma_zone_get_cur.9 \ zone.9 uma_zone_get_max.9 \ zone.9 uma_zone_reclaim.9 \ zone.9 uma_zone_reserve.9 \ zone.9 uma_zone_reserve_kva.9 \ zone.9 uma_zone_set_allocf.9 \ zone.9 uma_zone_set_freef.9 \ zone.9 uma_zone_set_max.9 \ zone.9 uma_zone_set_maxaction.9 \ zone.9 uma_zone_set_maxcache.9 \ zone.9 uma_zone_set_warning.9 \ zone.9 uma_zsecond_create.9 # This makes more sense for amd64 and i386 but # we decide to install all manpages in all architectures _superio.9= superio.9 MLINKS+=superio.9 superio_devid.9 \ superio.9 superio_dev_disable.9 \ superio.9 superio_dev_enable.9 \ superio.9 superio_dev_enabled.9 \ superio.9 superio_find_dev.9 \ superio.9 superio_get_dma.9 \ superio.9 superio_get_iobase.9 \ superio.9 superio_get_irq.9 \ superio.9 superio_get_ldn.9 \ superio.9 superio_get_type.9 \ superio.9 superio_read.9 \ superio.9 superio_revid.9 \ superio.9 superio_vendor.9 \ superio.9 superio_write.9 .include diff --git a/share/man/man9/cpu_machdep.9 b/share/man/man9/cpu_machdep.9 index 9ab42807eac1..30ac5ea36642 100644 --- a/share/man/man9/cpu_machdep.9 +++ b/share/man/man9/cpu_machdep.9 @@ -1,397 +1,416 @@ .\"- .\" SPDX-License-Identifier: BSD-2-Clause .\" .\" Copyright (c) 2024 (holder) 
.\" .\" This software was developed by SRI International, the University of .\" Cambridge Computer Laboratory (Department of Computer Science and .\" Technology), and Capabilities Limited under Defense Advanced Research .\" Projects Agency (DARPA) Contract No. FA8750-24-C-B047 ("DEC"). .\" -.Dd January 3, 2025 +.Dd January 31, 2025 .Dt cpu_machdep 9 .Os .Sh NAME .Nm cpu_machdep , .Nm cpu_copy_thread , .Nm cpu_exec_vmspace_reuse , .Nm cpu_exit , .Nm cpu_fetch_syscall_args , .Nm cpu_fork , .Nm cpu_fork_kthread_handler , .Nm cpu_idle , .Nm cpu_idle_wakeup , .Nm cpu_procctl , .Nm cpu_set_syscall_retval , .Nm cpu_set_upcall , .Nm cpu_set_user_tls , .Nm cpu_switch , .Nm cpu_sync_core , .Nm cpu_thread_alloc , .Nm cpu_thread_clean , .Nm cpu_thread_exit , .Nm cpu_thread_free , -.Nm cpu_throw +.Nm cpu_throw , +.Nm cpu_update_pcb .Nd machine-dependent interfaces to handle CPU and thread state .Sh SYNOPSIS .In sys/proc.h .In sys/ptrace.h .Ft void .Fn cpu_copy_thread "struct thread *td" "struct thread *td0" .Ft bool .Fn cpu_exec_vmspace_reuse "struct proc *p" "struct vm_map *map" .Ft void .Fn cpu_exit "struct thread *td" .Ft int .Fn cpu_fetch_syscall_args "struct thread *td" .Ft void .Fo cpu_fork .Fa "struct thread *td1" "struct proc *p2" "struct thread *td2" "int flags" .Fc .Ft void .Fo cpu_fork_kthread_handler .Fa "struct thread *td" "void (*func)(void *)" "void *arg" .Fc .Ft void .Fn cpu_idle "int busy" .Ft int .Fn cpu_idle_wakeup "int cpu" .Ft int .Fo cpu_procctl .Fa "struct thread *td" "int idtype" "id_t id" "int com" "void *data" .Fc .Ft int .Fn cpu_ptrace "struct thread *_td" "int req" "void *addr" "int data" .Ft void .Fn cpu_set_syscall_retval "struct thread *td" "int error" .Ft int .Fo cpu_set_upcall .Fa "struct thread *td" "void (*entry)(void *)" "void *arg" "stack_t *stack" .Fc .Ft int .Fn cpu_set_user_tls "struct thread *td" "void *tls_base" .Ft void .Fn cpu_switch "struct thread *old" "struct thread *new" "struct mtx *mtx" .Ft void .Fn cpu_sync_core "void" .Ft void .Fn cpu_thread_alloc "struct thread *td" .Ft void .Fn cpu_thread_clean "struct thread *td" .Ft void .Fn cpu_thread_exit "struct thread *td" .Ft void .Fn cpu_thread_free "struct thread *td" .Ft void .Fn cpu_throw "struct thread *old" "struct thread *new" +.Ft void +.Fn cpu_update_pcb "struct thread *td" .Sh DESCRIPTION These functions provide architecture-specific implementations of machine-independent abstractions. .Pp .Fn cpu_exec_vmspace_reuse returns true if .Fn exec_new_vmspace can reuse an existing .Vt struct vmspace .Pq Fa map for the process .Fa p during .Xr execve 2 . This is only invoked if .Fa map is not shared with any other consumers. If this returns false, .Fn exec_new_vmspace will create a new .Vt struct vmspace . .Pp .Fn cpu_exit releases machine-dependent resources other than the address space for the process containing .Fa td during process exit. .Pp .Fn cpu_fork copies and updates machine-dependent state (for example, the pcb and user registers) from the forking thread .Fa td1 in an existing process to the new thread .Fa td2 in the new process .Fa p2 . This function must set up the new thread's kernel stack and pcb so that .Fa td2 calls .Fn fork_exit when it begins execution passing a pointer to .Fn fork_return as the .Fa callout argument and .Fa td2 as the .Fa arg argument. .Pp .Fn cpu_fork_kthread_handler adjusts a new thread's initial pcb and/or kernel stack to pass .Fa func and .Fa arg as the .Fa callout and .Fa arg arguments to .Fn fork_exit . 
This must be called before a new thread is scheduled to run and is used to set the .Dq main function for kernel threads. .Pp .Fn cpu_copy_thread copies machine-dependent state (for example, the pcb and user registers) from .Fa td to .Fa td0 when creating a new thread in the same process. This function must set up the new thread's kernel stack and pcb so that .Fa td0 calls .Fn fork_exit when it begins execution passing a pointer to .Fn fork_return as the .Fa callout argument and .Fa td0 as the .Fa arg argument. .Pp .Fn cpu_set_upcall updates a new thread's initial user register state to call .Fa entry with .Fa arg as the sole argument using the user stack described in .Fa stack . .Pp .Fn cpu_set_user_tls sets a new thread's initial user thread pointer register to reference the user TLS base pointer .Fa tls_base . .Pp +.Fn cpu_update_pcb +updates the pcb of the current thread with current user register values. +This is invoked before writing out register notes in a core dump. +Typically, this function only has to update those user registers that are +saved in the pcb during context switches rather than in the trapframe on +kernel entry. +.Pp +Note that when +.Fn cpu_update_pcb +is used, +threads in a process other than the current thread are stopped, +typically by +.Fn thread_single . +The pcbs of those stopped threads should already be updated by +.Fn cpu_switch . +.Pp .Fn cpu_fetch_syscall_args fetches the current system call arguments for the native FreeBSD ABI from the current thread's user register state and/or user stack. The arguments are saved in the .Fa td_sa member of .Fa td . .Pp .Fn cpu_set_syscall_retval updates the user register state for .Fa td to store system call error and return values. If .Fa error is 0, indicate success and return the two values in .Fa td_retval . If .Fa error is .Dv ERESTART , adjust the user PC to re-invoke the current system call after returning to user mode. If .Fa error is .Dv EJUSTRETURN , leave the current user register state unchanged. For any other value of .Fa error , indicate error and return .Fa error as the error code. .Pp .Fn cpu_idle waits for the next interrupt to occur on the current CPU. If an architecture supports low power idling, this function should place the CPU into a low power state while waiting. .Fa busy is a hint from the scheduler. If .Fa busy is non-zero, the scheduler expects a short sleep, so the CPU should prefer low latency over maximum power savings. If .Fa busy is zero, the CPU should maximize power savings, including deferring unnecessary clock interrupts via .Fn cpu_idleclock . .Pp .Fn cpu_idle_wakeup awakens the idle CPU with the ID .Fa cpu from a low-power state. .Pp .Fn cpu_procctl handles any machine-dependent .Xr procctl 2 requests. .Pp .Fn cpu_ptrace handles any machine-dependent .Xr ptrace 2 requests. .Pp .Fn cpu_switch switches the current CPU between threads by swapping register state. This function saves the current CPU register state in the pcb of .Fa old and loads register values from the pcb of .Fa new before returning. While the pcb generally contains caller-save kernel register state, it can also contain user registers that are not saved in the trapframe. .Pp After saving the current CPU register state of .Fa old , .Fn cpu_switch stores .Fa mtx in the .Fa td_lock member of .Fa old , transferring ownership of the old thread. No data belonging to .Fa old can be accessed after that store. Specifically, the old thread's kernel stack must not be accessed after this point.
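As an illustration of the ownership handoff just described, the following minimal sketch shows the ordering an implementation must respect. It is not code from the tree: cpu_switch is written in assembly on every architecture, and savectx_sketch()/loadctx_sketch() are hypothetical placeholders for the machine-dependent register save and restore steps.

    /*
     * Illustrative sketch (not kernel source) of the cpu_switch()
     * ordering contract described above.
     */
    void
    cpu_switch_sketch(struct thread *old, struct thread *new, struct mtx *mtx)
    {
            /* 1. Save old's kernel register state in its pcb. */
            savectx_sketch(old->td_pcb);

            /*
             * 2. Publish ownership of old by storing mtx into td_lock.
             *    After this release store, neither old nor its kernel
             *    stack may be touched by this CPU.
             */
            atomic_store_rel_ptr((volatile uintptr_t *)&old->td_lock,
                (uintptr_t)mtx);

            /* (The SCHED_ULE spin on blocked_lock, described next, is omitted.) */

            /* 3. Resume execution in new's previously saved context. */
            loadctx_sketch(new->td_pcb);
    }

The release semantics on the td_lock store are what let another CPU safely take over the old thread as soon as the store becomes visible.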
.Pp When .Dv SCHED_ULE is being used, this function must wait (via spinning) for the .Fa td_lock member of .Fa new to change to a value not equal to .Va &blocked_lock before loading register values from .Fa new or accessing its kernel stack. .Pp From the caller's perspective, .Fn cpu_switch returns when .Fa old is rescheduled in the future, possibly on a different CPU. However, the implementation of .Fn cpu_switch returns immediately on the same CPU into the previously saved context of .Fa new . .Pp .Fn cpu_throw is similar to .Fn cpu_switch but does not save any state for .Fa old or write to the old thread's .Fa td_lock member. .Pp .Fn cpu_sync_core ensures that all possible speculation and out-of-order execution is serialized on the current CPU. Note that this is called from an IPI handler so it only has to handle additional serialization beyond that provided by handling an IPI. .Ss Thread Object Lifecycle These functions support the management of machine-dependent thread state in conjunction with a thread object's lifecycle. .Pp The general model is that a thread object is allocated each time a new kernel thread is created either by system calls like .Xr fork 2 or .Xr thr_new 2 , or when kernel-only threads are created via .Xr kproc_create 9 , .Xr kproc_kthread_add 9 , or .Xr kthread_add 9 . When a kernel thread exits, the thread object is freed. However, there is one special case to support an optimization where each free process object caches a thread object. When a process exits, the last thread object is not freed but remains attached to the process. When the process object is later reused for a new process in .Xr fork 2 , the kernel recycles that last thread object and uses it as the initial thread in the new process. When a thread is recycled, some of the steps in the thread allocation and free cycle are skipped as an optimization. .Pp .Fn cpu_thread_alloc initializes machine-dependent fields in .Fa td after allocating a new kernel stack. This function typically sets the .Fa td_pcb and initial .Fa td_frame pointers. .Fn cpu_thread_alloc is called both when allocating a new thread object and when a recycled thread allocates a new kernel stack. Note that this function is .Em not called if a recycled thread reuses its existing kernel stack. .Pp .Fn cpu_thread_clean releases any machine-dependent resources for the last thread in a process during .Xr wait 2 . The thread is a candidate for recycling so it should be reset to run as a new thread in case it is recycled by a future .Xr fork 2 . .Pp .Fn cpu_thread_exit cleans any machine-dependent state in .Fa td while it is exiting. This is called by the exiting thread so it cannot free state needed during in-kernel execution. .Pp .Fn cpu_thread_free releases any machine-dependent state in .Fa td when it is being freed. This is called for any thread that was not the last thread in a process once it has finished execution. .Sh SEE ALSO .Xr fork 2 , .Xr procctl 2 , .Xr ptrace 2 , .Xr thr_new 2 , .Xr wait 2 , .Xr kproc_create 9 , .Xr kproc_kthread_add 9 , .Xr kthread_add 9 , .Xr mi_switch 9 .Sh AUTHORS This manual page was developed by SRI International, the University of Cambridge Computer Laboratory (Department of Computer Science and Technology), and Capabilities Limited under contract .Pq FA8750-24-C-B047 .Pq Do DEC Dc .
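For orientation before the per-architecture changes that follow, here is a hedged sketch of the consumer side of cpu_update_pcb() as described in the manual page above. The function name dump_register_notes_sketch and the note-emission loop are hypothetical stand-ins for the machine-independent core dump code, not actual kernel source.

    /*
     * Hypothetical sketch of the core-dump path that motivates
     * cpu_update_pcb().  The other threads in the process are assumed
     * to already be stopped (e.g. via thread_single()), so cpu_switch()
     * has already saved their pcbs; only curthread needs a refresh.
     */
    static void
    dump_register_notes_sketch(struct proc *p)
    {
            struct thread *td;

            cpu_update_pcb(curthread);
            FOREACH_THREAD_IN_PROC(p, td) {
                    /* ... emit register notes from td's trapframe/pcb ... */
            }
    }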
diff --git a/sys/amd64/amd64/ptrace_machdep.c b/sys/amd64/amd64/ptrace_machdep.c index 3b05fded585f..715f0749bda9 100644 --- a/sys/amd64/amd64/ptrace_machdep.c +++ b/sys/amd64/amd64/ptrace_machdep.c @@ -1,412 +1,408 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 Konstantin Belousov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef COMPAT_FREEBSD32 struct ptrace_xstate_info32 { uint32_t xsave_mask1, xsave_mask2; uint32_t xsave_len; }; #endif static bool get_segbases(struct regset *rs, struct thread *td, void *buf, size_t *sizep) { struct segbasereg *reg; struct pcb *pcb; if (buf != NULL) { KASSERT(*sizep == sizeof(*reg), ("%s: invalid size", __func__)); reg = buf; pcb = td->td_pcb; - if (td == curthread) - update_pcb_bases(pcb); reg->r_fsbase = pcb->pcb_fsbase; reg->r_gsbase = pcb->pcb_gsbase; } *sizep = sizeof(*reg); return (true); } static bool set_segbases(struct regset *rs, struct thread *td, void *buf, size_t size) { struct segbasereg *reg; struct pcb *pcb; KASSERT(size == sizeof(*reg), ("%s: invalid size", __func__)); reg = buf; pcb = td->td_pcb; set_pcb_flags(pcb, PCB_FULL_IRET); pcb->pcb_fsbase = reg->r_fsbase; td->td_frame->tf_fs = _ufssel; pcb->pcb_gsbase = reg->r_gsbase; td->td_frame->tf_gs = _ugssel; return (true); } static struct regset regset_segbases = { .note = NT_X86_SEGBASES, .size = sizeof(struct segbasereg), .get = get_segbases, .set = set_segbases, }; ELF_REGSET(regset_segbases); #ifdef COMPAT_FREEBSD32 static bool get_segbases32(struct regset *rs, struct thread *td, void *buf, size_t *sizep) { struct segbasereg32 *reg; struct pcb *pcb; if (buf != NULL) { KASSERT(*sizep == sizeof(*reg), ("%s: invalid size", __func__)); reg = buf; pcb = td->td_pcb; - if (td == curthread) - update_pcb_bases(pcb); reg->r_fsbase = (uint32_t)pcb->pcb_fsbase; reg->r_gsbase = (uint32_t)pcb->pcb_gsbase; } *sizep = sizeof(*reg); return (true); } static bool set_segbases32(struct regset *rs, struct thread *td, void *buf, size_t size) { struct segbasereg32 *reg; struct pcb *pcb; KASSERT(size == sizeof(*reg), ("%s: invalid size", __func__)); reg = buf; pcb = td->td_pcb; set_pcb_flags(pcb, PCB_FULL_IRET); 
pcb->pcb_fsbase = reg->r_fsbase; td->td_frame->tf_fs = _ufssel; pcb->pcb_gsbase = reg->r_gsbase; td->td_frame->tf_gs = _ugssel; return (true); } static struct regset regset_segbases32 = { .note = NT_X86_SEGBASES, .size = sizeof(struct segbasereg32), .get = get_segbases32, .set = set_segbases32, }; ELF32_REGSET(regset_segbases32); #endif static int cpu_ptrace_xstate(struct thread *td, int req, void *addr, int data) { struct ptrace_xstate_info info; #ifdef COMPAT_FREEBSD32 struct ptrace_xstate_info32 info32; #endif char *savefpu; int error; if (!use_xsave) return (EOPNOTSUPP); switch (req) { case PT_GETXSTATE_OLD: fpugetregs(td); savefpu = (char *)(get_pcb_user_save_td(td) + 1); error = copyout(savefpu, addr, cpu_max_ext_state_size - sizeof(struct savefpu)); break; case PT_SETXSTATE_OLD: if (data > cpu_max_ext_state_size - sizeof(struct savefpu)) { error = EINVAL; break; } savefpu = malloc(data, M_TEMP, M_WAITOK); error = copyin(addr, savefpu, data); if (error == 0) { fpugetregs(td); error = fpusetxstate(td, savefpu, data); } free(savefpu, M_TEMP); break; case PT_GETXSTATE_INFO: #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) { if (data != sizeof(info32)) { error = EINVAL; } else { info32.xsave_len = cpu_max_ext_state_size; info32.xsave_mask1 = xsave_mask; info32.xsave_mask2 = xsave_mask >> 32; error = copyout(&info32, addr, data); } } else #endif { if (data != sizeof(info)) { error = EINVAL; } else { bzero(&info, sizeof(info)); info.xsave_len = cpu_max_ext_state_size; info.xsave_mask = xsave_mask; error = copyout(&info, addr, data); } } break; case PT_GETXSTATE: fpugetregs(td); savefpu = (char *)(get_pcb_user_save_td(td)); error = copyout(savefpu, addr, cpu_max_ext_state_size); break; case PT_SETXSTATE: if (data < sizeof(struct savefpu) || data > cpu_max_ext_state_size) { error = EINVAL; break; } savefpu = malloc(data, M_TEMP, M_WAITOK); error = copyin(addr, savefpu, data); if (error == 0) error = fpusetregs(td, (struct savefpu *)savefpu, savefpu + sizeof(struct savefpu), data - sizeof(struct savefpu)); free(savefpu, M_TEMP); break; default: error = EINVAL; break; } return (error); } static void cpu_ptrace_setbase(struct thread *td, int req, register_t r) { struct pcb *pcb; pcb = td->td_pcb; set_pcb_flags(pcb, PCB_FULL_IRET); if (req == PT_SETFSBASE) { pcb->pcb_fsbase = r; td->td_frame->tf_fs = _ufssel; } else { pcb->pcb_gsbase = r; td->td_frame->tf_gs = _ugssel; } } #ifdef COMPAT_FREEBSD32 #define PT_I386_GETXMMREGS (PT_FIRSTMACH + 0) #define PT_I386_SETXMMREGS (PT_FIRSTMACH + 1) static int cpu32_ptrace(struct thread *td, int req, void *addr, int data) { struct savefpu *fpstate; struct pcb *pcb; uint32_t r; int error; switch (req) { case PT_I386_GETXMMREGS: fpugetregs(td); error = copyout(get_pcb_user_save_td(td), addr, sizeof(*fpstate)); break; case PT_I386_SETXMMREGS: fpugetregs(td); fpstate = get_pcb_user_save_td(td); error = copyin(addr, fpstate, sizeof(*fpstate)); fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask; break; case PT_GETXSTATE_OLD: case PT_SETXSTATE_OLD: case PT_GETXSTATE_INFO: case PT_GETXSTATE: case PT_SETXSTATE: error = cpu_ptrace_xstate(td, req, addr, data); break; case PT_GETFSBASE: case PT_GETGSBASE: if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) { error = EINVAL; break; } pcb = td->td_pcb; if (td == curthread) update_pcb_bases(pcb); r = req == PT_GETFSBASE ? 
pcb->pcb_fsbase : pcb->pcb_gsbase; error = copyout(&r, addr, sizeof(r)); break; case PT_SETFSBASE: case PT_SETGSBASE: if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) { error = EINVAL; break; } error = copyin(addr, &r, sizeof(r)); if (error != 0) break; cpu_ptrace_setbase(td, req, r); break; default: error = EINVAL; break; } return (error); } #endif int cpu_ptrace(struct thread *td, int req, void *addr, int data) { register_t *r, rv; struct pcb *pcb; int error; #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) return (cpu32_ptrace(td, req, addr, data)); #endif /* Support old values of PT_GETXSTATE_OLD and PT_SETXSTATE_OLD. */ if (req == PT_FIRSTMACH + 0) req = PT_GETXSTATE_OLD; if (req == PT_FIRSTMACH + 1) req = PT_SETXSTATE_OLD; switch (req) { case PT_GETXSTATE_OLD: case PT_SETXSTATE_OLD: case PT_GETXSTATE_INFO: case PT_GETXSTATE: case PT_SETXSTATE: error = cpu_ptrace_xstate(td, req, addr, data); break; case PT_GETFSBASE: case PT_GETGSBASE: pcb = td->td_pcb; if (td == curthread) update_pcb_bases(pcb); r = req == PT_GETFSBASE ? &pcb->pcb_fsbase : &pcb->pcb_gsbase; error = copyout(r, addr, sizeof(*r)); break; case PT_SETFSBASE: case PT_SETGSBASE: error = copyin(addr, &rv, sizeof(rv)); if (error != 0) break; if (rv >= td->td_proc->p_sysent->sv_maxuser) { error = EINVAL; break; } cpu_ptrace_setbase(td, req, rv); break; default: error = EINVAL; break; } return (error); } int ptrace_set_pc(struct thread *td, unsigned long addr) { td->td_frame->tf_rip = addr; set_pcb_flags(td->td_pcb, PCB_FULL_IRET); return (0); } int ptrace_single_step(struct thread *td) { PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); if ((td->td_frame->tf_rflags & PSL_T) == 0) { td->td_frame->tf_rflags |= PSL_T; td->td_dbgflags |= TDB_STEP; } return (0); } int ptrace_clear_single_step(struct thread *td) { PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); td->td_frame->tf_rflags &= ~PSL_T; td->td_dbgflags &= ~TDB_STEP; return (0); } diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c index 1c6b1549422b..2b280e960b2e 100644 --- a/sys/amd64/amd64/vm_machdep.c +++ b/sys/amd64/amd64/vm_machdep.c @@ -1,675 +1,682 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ #include #include "opt_isa.h" #include "opt_cpu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include _Static_assert(OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf), "OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf."); void set_top_of_stack_td(struct thread *td) { td->td_md.md_stack_base = td->td_kstack + td->td_kstack_pages * PAGE_SIZE; } struct savefpu * get_pcb_user_save_td(struct thread *td) { KASSERT(((vm_offset_t)td->td_md.md_usr_fpu_save % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area ptr %p td %p", td->td_md.md_usr_fpu_save, td)); return (td->td_md.md_usr_fpu_save); } struct pcb * get_pcb_td(struct thread *td) { return (&td->td_md.md_pcb); } struct savefpu * get_pcb_user_save_pcb(struct pcb *pcb) { struct thread *td; td = __containerof(pcb, struct thread, td_md.md_pcb); return (get_pcb_user_save_td(td)); } void * alloc_fpusave(int flags) { void *res; struct savefpu_ymm *sf; res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags); if (use_xsave) { sf = (struct savefpu_ymm *)res; bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd)); sf->sv_xstate.sx_hd.xstate_bv = xsave_mask; } return (res); } /* * Common code shared between cpu_fork() and cpu_copy_thread() for * initializing a thread. */ static void copy_thread(struct thread *td1, struct thread *td2) { struct pcb *pcb2; pcb2 = td2->td_pcb; /* Ensure that td1's pcb is up to date for user threads. */ if ((td2->td_pflags & TDP_KTHREAD) == 0) { MPASS(td1 == curthread); fpuexit(td1); update_pcb_bases(td1->td_pcb); } /* Copy td1's pcb */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Properly initialize pcb_save */ pcb2->pcb_save = get_pcb_user_save_pcb(pcb2); /* Kernel threads start with clean FPU and segment bases. */ if ((td2->td_pflags & TDP_KTHREAD) != 0) { pcb2->pcb_fsbase = 0; pcb2->pcb_gsbase = 0; clear_pcb_flags(pcb2, PCB_FPUINITDONE | PCB_USERFPUINITDONE | PCB_KERNFPU | PCB_KERNFPU_THR); } else { MPASS((pcb2->pcb_flags & (PCB_KERNFPU | PCB_KERNFPU_THR)) == 0); bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2), cpu_max_ext_state_size); } td2->td_frame = (struct trapframe *)td2->td_md.md_stack_base - 1; /* * Set registers for trampoline to user mode. Leave space for the * return address on stack. These are the kernel mode register values. 
*/ pcb2->pcb_r12 = (register_t)fork_return; /* fork_trampoline argument */ pcb2->pcb_rbp = 0; pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *); pcb2->pcb_rbx = (register_t)td2; /* fork_trampoline argument */ pcb2->pcb_rip = (register_t)fork_trampoline; /*- * pcb2->pcb_dr*: cloned above. * pcb2->pcb_savefpu: cloned above. * pcb2->pcb_flags: cloned above. * pcb2->pcb_onfault: cloned above (always NULL here?). * pcb2->pcb_[fg]sbase: cloned above */ pcb2->pcb_tssp = NULL; /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I; pmap_thread_init_invl_gen(td2); /* * Copy the trap frame for the return to user mode as if from a syscall. * This copies most of the user mode register values. Some of these * registers are rewritten by cpu_set_upcall() and linux_set_upcall(). */ if ((td1->td_proc->p_flag & P_KPROC) == 0) { bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe)); /* * If the current thread has the trap bit set (i.e. a debugger * had single-stepped the process to the system call), we need * to clear the trap flag from the new frame. Otherwise, the new * thread will receive a (likely unexpected) SIGTRAP when it * executes the first instruction after returning to userland. */ td2->td_frame->tf_rflags &= ~PSL_T; } } /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child is * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct proc *p1; struct pcb *pcb2; struct mdproc *mdp1, *mdp2; struct proc_ldt *pldt; p1 = td1->td_proc; if ((flags & RFPROC) == 0) { if ((flags & RFMEM) == 0) { /* unshare user LDT */ mdp1 = &p1->p_md; mtx_lock(&dt_lock); if ((pldt = mdp1->md_ldt) != NULL && pldt->ldt_refcnt > 1 && user_ldt_alloc(p1, 1) == NULL) panic("could not copy LDT"); mtx_unlock(&dt_lock); } return; } /* Point the stack and pcb to the actual location */ set_top_of_stack_td(td2); td2->td_pcb = pcb2 = get_pcb_td(td2); copy_thread(td1, td2); /* Reset debug registers in the new process */ x86_clear_dbregs(pcb2); /* Point mdproc and then copy over p1's contents */ mdp2 = &p2->p_md; bcopy(&p1->p_md, mdp2, sizeof(*mdp2)); /* Set child return values. */ p2->p_sysent->sv_set_fork_retval(td2); /* As on i386, do not copy io permission bitmap. */ pcb2->pcb_tssp = NULL; /* New segment registers. */ set_pcb_flags_raw(pcb2, PCB_FULL_IRET); /* Copy the LDT, if necessary. */ mdp1 = &td1->td_proc->p_md; mdp2 = &p2->p_md; if (mdp1->md_ldt == NULL) { mdp2->md_ldt = NULL; return; } mtx_lock(&dt_lock); if (mdp1->md_ldt != NULL) { if (flags & RFMEM) { mdp1->md_ldt->ldt_refcnt++; mdp2->md_ldt = mdp1->md_ldt; bcopy(&mdp1->md_ldt_sd, &mdp2->md_ldt_sd, sizeof(struct system_segment_descriptor)); } else { mdp2->md_ldt = NULL; mdp2->md_ldt = user_ldt_alloc(p2, 0); if (mdp2->md_ldt == NULL) panic("could not copy LDT"); amd64_set_ldt_data(td2, 0, max_ldt_segment, (struct user_segment_descriptor *) mdp1->md_ldt->ldt_base); } } else mdp2->md_ldt = NULL; mtx_unlock(&dt_lock); /* * Now, cpu_switch() can schedule the new process. * pcb_rsp is loaded pointing to the cpu_switch() stack frame * containing the return address when exiting cpu_switch. * This will normally be to fork_trampoline(), which will have * %rbx loaded with the new proc's pointer. fork_trampoline() * will set up a stack to call fork_return(p, frame); to complete * the return to user mode.
*/ } void x86_set_fork_retval(struct thread *td) { struct trapframe *frame = td->td_frame; frame->tf_rax = 0; /* Child returns zero */ frame->tf_rflags &= ~PSL_C; /* success */ frame->tf_rdx = 1; /* System V emulation */ } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { /* * Note that the trap frame follows the args, so the function * is really called like this: func(arg, frame); */ td->td_pcb->pcb_r12 = (long) func; /* function */ td->td_pcb->pcb_rbx = (long) arg; /* first arg */ } void cpu_exit(struct thread *td) { /* * If this process has a custom LDT, release it. */ if (td->td_proc->p_md.md_ldt != NULL) user_ldt_free(td); } void cpu_thread_exit(struct thread *td) { struct pcb *pcb; critical_enter(); if (td == PCPU_GET(fpcurthread)) fpudrop(); critical_exit(); pcb = td->td_pcb; /* Disable any hardware breakpoints. */ if (pcb->pcb_flags & PCB_DBREGS) { reset_dbregs(); clear_pcb_flags(pcb, PCB_DBREGS); } } void cpu_thread_clean(struct thread *td) { struct pcb *pcb; pcb = td->td_pcb; /* * Clean TSS/iomap */ if (pcb->pcb_tssp != NULL) { pmap_pti_remove_kva((vm_offset_t)pcb->pcb_tssp, (vm_offset_t)pcb->pcb_tssp + ctob(IOPAGES + 1)); kmem_free(pcb->pcb_tssp, ctob(IOPAGES + 1)); pcb->pcb_tssp = NULL; } } void cpu_thread_alloc(struct thread *td) { struct pcb *pcb; struct xstate_hdr *xhdr; set_top_of_stack_td(td); td->td_pcb = pcb = get_pcb_td(td); td->td_frame = (struct trapframe *)td->td_md.md_stack_base - 1; td->td_md.md_usr_fpu_save = fpu_save_area_alloc(); pcb->pcb_save = get_pcb_user_save_pcb(pcb); if (use_xsave) { xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1); bzero(xhdr, sizeof(*xhdr)); xhdr->xstate_bv = xsave_mask; } } void cpu_thread_free(struct thread *td) { cpu_thread_clean(td); fpu_save_area_free(td->td_md.md_usr_fpu_save); td->td_md.md_usr_fpu_save = NULL; } bool cpu_exec_vmspace_reuse(struct proc *p, vm_map_t map) { return (((curproc->p_md.md_flags & P_MD_KPTI) != 0) == (vm_map_pmap(map)->pm_ucr3 != PMAP_NO_CR3)); } static void cpu_procctl_kpti_ctl(struct proc *p, int val) { if (pti && val == PROC_KPTI_CTL_ENABLE_ON_EXEC) p->p_md.md_flags |= P_MD_KPTI; if (val == PROC_KPTI_CTL_DISABLE_ON_EXEC) p->p_md.md_flags &= ~P_MD_KPTI; } static void cpu_procctl_kpti_status(struct proc *p, int *val) { *val = (p->p_md.md_flags & P_MD_KPTI) != 0 ? 
PROC_KPTI_CTL_ENABLE_ON_EXEC: PROC_KPTI_CTL_DISABLE_ON_EXEC; if (vmspace_pmap(p->p_vmspace)->pm_ucr3 != PMAP_NO_CR3) *val |= PROC_KPTI_STATUS_ACTIVE; } static int cpu_procctl_la_ctl(struct proc *p, int val) { int error; error = 0; switch (val) { case PROC_LA_CTL_LA48_ON_EXEC: p->p_md.md_flags |= P_MD_LA48; p->p_md.md_flags &= ~P_MD_LA57; break; case PROC_LA_CTL_LA57_ON_EXEC: if (la57) { p->p_md.md_flags &= ~P_MD_LA48; p->p_md.md_flags |= P_MD_LA57; } else { error = ENOTSUP; } break; case PROC_LA_CTL_DEFAULT_ON_EXEC: p->p_md.md_flags &= ~(P_MD_LA48 | P_MD_LA57); break; } return (error); } static void cpu_procctl_la_status(struct proc *p, int *val) { int res; if ((p->p_md.md_flags & P_MD_LA48) != 0) res = PROC_LA_CTL_LA48_ON_EXEC; else if ((p->p_md.md_flags & P_MD_LA57) != 0) res = PROC_LA_CTL_LA57_ON_EXEC; else res = PROC_LA_CTL_DEFAULT_ON_EXEC; if (p->p_sysent->sv_maxuser == VM_MAXUSER_ADDRESS_LA48) res |= PROC_LA_STATUS_LA48; else res |= PROC_LA_STATUS_LA57; *val = res; } int cpu_procctl(struct thread *td, int idtype, id_t id, int com, void *data) { struct proc *p; int error, val; switch (com) { case PROC_KPTI_CTL: case PROC_KPTI_STATUS: case PROC_LA_CTL: case PROC_LA_STATUS: if (idtype != P_PID) { error = EINVAL; break; } if (com == PROC_KPTI_CTL) { /* sad but true and not a joke */ error = priv_check(td, PRIV_IO); if (error != 0) break; } if (com == PROC_KPTI_CTL || com == PROC_LA_CTL) { error = copyin(data, &val, sizeof(val)); if (error != 0) break; } if (com == PROC_KPTI_CTL && val != PROC_KPTI_CTL_ENABLE_ON_EXEC && val != PROC_KPTI_CTL_DISABLE_ON_EXEC) { error = EINVAL; break; } if (com == PROC_LA_CTL && val != PROC_LA_CTL_LA48_ON_EXEC && val != PROC_LA_CTL_LA57_ON_EXEC && val != PROC_LA_CTL_DEFAULT_ON_EXEC) { error = EINVAL; break; } error = pget(id, PGET_CANSEE | PGET_NOTWEXIT | PGET_NOTID, &p); if (error != 0) break; switch (com) { case PROC_KPTI_CTL: cpu_procctl_kpti_ctl(p, val); break; case PROC_KPTI_STATUS: cpu_procctl_kpti_status(p, &val); break; case PROC_LA_CTL: error = cpu_procctl_la_ctl(p, val); break; case PROC_LA_STATUS: cpu_procctl_la_status(p, &val); break; } PROC_UNLOCK(p); if (com == PROC_KPTI_STATUS || com == PROC_LA_STATUS) error = copyout(&val, data, sizeof(val)); break; default: error = EINVAL; break; } return (error); } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; frame = td->td_frame; if (__predict_true(error == 0)) { frame->tf_rax = td->td_retval[0]; frame->tf_rdx = td->td_retval[1]; frame->tf_rflags &= ~PSL_C; return; } switch (error) { case ERESTART: /* * Reconstruct pc, we know that 'syscall' is 2 bytes, * lcall $X,y is 7 bytes, int 0x80 is 2 bytes. * We saved this in tf_err. * %r10 (which was holding the value of %rcx) is restored * for the next iteration. * %r10 restore is only required for freebsd/amd64 processes, * but shall be innocent for any ia32 ABI. * * Require full context restore to get the arguments * in the registers reloaded at return to usermode. */ frame->tf_rip -= frame->tf_err; frame->tf_r10 = frame->tf_rcx; set_pcb_flags(td->td_pcb, PCB_FULL_IRET); break; case EJUSTRETURN: break; default: frame->tf_rax = error; frame->tf_rflags |= PSL_C; break; } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to the fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. 
*/ void cpu_copy_thread(struct thread *td, struct thread *td0) { copy_thread(td0, td); set_pcb_flags_raw(td->td_pcb, PCB_FULL_IRET); } /* * Set the machine state for performing an upcall that starts * the entry function with the given argument. */ int cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { /* * Set the trap frame to point at the beginning of the entry * function. */ td->td_frame->tf_rbp = 0; td->td_frame->tf_rsp = (((uintptr_t)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4; td->td_frame->tf_rip = (uintptr_t)entry; /* Return address sentinel value to stop stack unwinding. */ if (suword32((void *)td->td_frame->tf_rsp, 0) != 0) return (EFAULT); /* Pass the argument to the entry point. */ if (suword32( (void *)(td->td_frame->tf_rsp + sizeof(int32_t)), (uint32_t)(uintptr_t)arg) != 0) return (EFAULT); return (0); } #endif /* * Set the trap frame to point at the beginning of the uts * function. */ td->td_frame->tf_rbp = 0; td->td_frame->tf_rsp = ((register_t)stack->ss_sp + stack->ss_size) & ~0x0f; td->td_frame->tf_rsp -= 8; td->td_frame->tf_rip = (register_t)entry; td->td_frame->tf_ds = _udatasel; td->td_frame->tf_es = _udatasel; td->td_frame->tf_fs = _ufssel; td->td_frame->tf_gs = _ugssel; td->td_frame->tf_flags = TF_HASSEGS; /* Return address sentinel value to stop stack unwinding. */ if (suword((void *)td->td_frame->tf_rsp, 0) != 0) return (EFAULT); /* Pass the argument to the entry point. */ td->td_frame->tf_rdi = (register_t)arg; return (0); } int cpu_set_user_tls(struct thread *td, void *tls_base) { struct pcb *pcb; if ((u_int64_t)tls_base >= VM_MAXUSER_ADDRESS) return (EINVAL); pcb = td->td_pcb; set_pcb_flags(pcb, PCB_FULL_IRET); #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { pcb->pcb_gsbase = (register_t)tls_base; return (0); } #endif pcb->pcb_fsbase = (register_t)tls_base; return (0); } + +void +cpu_update_pcb(struct thread *td) +{ + MPASS(td == curthread); + update_pcb_bases(td->td_pcb); +} diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c index 4e0a6bb9cbc5..d31d36ba807d 100644 --- a/sys/arm/arm/vm_machdep.c +++ b/sys/arm/arm/vm_machdep.c @@ -1,304 +1,311 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4.
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * struct switchframe and trapframe must both be a multiple of 8 * for correct stack alignment. */ _Static_assert((sizeof(struct switchframe) % 8) == 0, "Bad alignment"); _Static_assert((sizeof(struct trapframe) % 8) == 0, "Bad alignment"); uint32_t initial_fpscr = VFPSCR_DN | VFPSCR_FZ; /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child is * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; struct trapframe *tf; struct mdproc *mdp2; if ((flags & RFPROC) == 0) return; /* Point the pcb to the top of the stack */ pcb2 = (struct pcb *) (td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; #ifdef VFP /* Store actual state of VFP */ if (curthread == td1) { if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0) vfp_save_state(td1, td1->td_pcb); } #endif td2->td_pcb = pcb2; /* Clone td1's pcb */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Point to mdproc and then copy over td1's contents */ mdp2 = &p2->p_md; bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2)); /* Point the frame to the stack in front of pcb and copy td1's frame */ td2->td_frame = (struct trapframe *)pcb2 - 1; *td2->td_frame = *td1->td_frame; /* * Create a new fresh stack for the new process. * Copy the trap frame for the return to user mode as if from a * syscall. This copies most of the user mode register values. */ pmap_set_pcb_pagedir(vmspace_pmap(p2->p_vmspace), pcb2); pcb2->pcb_regs.sf_r4 = (register_t)fork_return; pcb2->pcb_regs.sf_r5 = (register_t)td2; pcb2->pcb_regs.sf_lr = (register_t)fork_trampoline; pcb2->pcb_regs.sf_sp = STACKALIGN(td2->td_frame); pcb2->pcb_regs.sf_tpidrurw = (register_t)get_tls(); #ifdef VFP vfp_new_thread(td2, td1, true); #endif tf = td2->td_frame; tf->tf_spsr &= ~PSR_C; tf->tf_r0 = 0; tf->tf_r1 = 0; /* Setup to release spin count in fork_exit().
*/ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_cspr = PSR_SVC32_MODE; } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; frame = td->td_frame; switch (error) { case 0: frame->tf_r0 = td->td_retval[0]; frame->tf_r1 = td->td_retval[1]; frame->tf_spsr &= ~PSR_C; /* carry bit */ break; case ERESTART: /* * Reconstruct the pc to point at the swi. */ if ((frame->tf_spsr & PSR_T) != 0) frame->tf_pc -= THUMB_INSN_SIZE; else frame->tf_pc -= INSN_SIZE; break; case EJUSTRETURN: /* nothing to do */ break; default: frame->tf_r0 = error; frame->tf_spsr |= PSR_C; /* carry bit */ break; } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to the fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. */ void cpu_copy_thread(struct thread *td, struct thread *td0) { bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); td->td_pcb->pcb_regs.sf_r4 = (register_t)fork_return; td->td_pcb->pcb_regs.sf_r5 = (register_t)td; td->td_pcb->pcb_regs.sf_lr = (register_t)fork_trampoline; td->td_pcb->pcb_regs.sf_sp = STACKALIGN(td->td_frame); td->td_frame->tf_spsr &= ~PSR_C; td->td_frame->tf_r0 = 0; #ifdef VFP vfp_new_thread(td, td0, false); #endif /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = PSR_SVC32_MODE; } /* * Set the machine state for performing an upcall that starts * the entry function with the given argument. */ int cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf = td->td_frame; tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size); tf->tf_pc = (int)entry; tf->tf_r0 = (int)arg; tf->tf_spsr = PSR_USR32_MODE; if ((register_t)entry & 1) tf->tf_spsr |= PSR_T; return (0); } int cpu_set_user_tls(struct thread *td, void *tls_base) { td->td_pcb->pcb_regs.sf_tpidrurw = (register_t)tls_base; if (td == curthread) set_tls(tls_base); return (0); } void cpu_thread_exit(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; /* * Ensure td_frame is aligned to an 8-byte boundary as it will be * placed into the stack pointer which must be 8-byte aligned in * the ARM EABI. */ td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb) - 1; } void cpu_thread_free(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode.
*/ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { td->td_pcb->pcb_regs.sf_r4 = (register_t)func; /* function */ td->td_pcb->pcb_regs.sf_r5 = (register_t)arg; /* first arg */ } +void +cpu_update_pcb(struct thread *td) +{ + MPASS(td == curthread); + td->td_pcb->pcb_regs.sf_tpidrurw = (register_t)get_tls(); +} + void cpu_exit(struct thread *td) { } bool cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused) { return (true); } int cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused, int com __unused, void *data __unused) { return (EINVAL); } void cpu_sync_core(void) { } diff --git a/sys/arm64/arm64/vm_machdep.c b/sys/arm64/arm64/vm_machdep.c index 924628001103..9869ce6cae0b 100644 --- a/sys/arm64/arm64/vm_machdep.c +++ b/sys/arm64/arm64/vm_machdep.c @@ -1,322 +1,330 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #include /* * psci.c is "default" in ARM64 kernel config files * psci_reset will do nothing until/unless the psci device probes/attaches. * Therefore, it is safe to default the cpu_reset_hook to psci_reset. */ cpu_reset_hook_t cpu_reset_hook = psci_reset; /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child is * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; struct trapframe *tf; if ((flags & RFPROC) == 0) return; if (td1 == curthread) { /* * Save the tpidr_el0 and the VFP state; these are normally * saved in cpu_switch, but if userland changes them and then * forks, that may not have happened. */ td1->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0); td1->td_pcb->pcb_tpidrro_el0 = READ_SPECIALREG(tpidrro_el0); #ifdef VFP if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0) vfp_save_state(td1, td1->td_pcb); #endif } pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; td2->td_pcb = pcb2; bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Clear the debug register state.
*/ bzero(&pcb2->pcb_dbg_regs, sizeof(pcb2->pcb_dbg_regs)); ptrauth_fork(td2, td1); tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1); bcopy(td1->td_frame, tf, sizeof(*tf)); tf->tf_x[0] = 0; tf->tf_x[1] = 0; tf->tf_spsr = td1->td_frame->tf_spsr & (PSR_M_32 | PSR_DAIF); td2->td_frame = tf; /* Set the return value registers for fork() */ td2->td_pcb->pcb_x[PCB_X19] = (uintptr_t)fork_return; td2->td_pcb->pcb_x[PCB_X20] = (uintptr_t)td2; td2->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline; td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame; vfp_new_thread(td2, td1, true); /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_daif = PSR_DAIF_DEFAULT; #if defined(PERTHREAD_SSP) /* Set the new canary */ arc4random_buf(&td2->td_md.md_canary, sizeof(td2->td_md.md_canary)); #endif } void cpu_reset(void) { cpu_reset_hook(); printf("cpu_reset failed"); while(1) __asm volatile("wfi" ::: "memory"); } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; frame = td->td_frame; if (__predict_true(error == 0)) { frame->tf_x[0] = td->td_retval[0]; frame->tf_x[1] = td->td_retval[1]; frame->tf_spsr &= ~PSR_C; /* carry bit */ return; } switch (error) { case ERESTART: frame->tf_elr -= 4; break; case EJUSTRETURN: break; default: frame->tf_spsr |= PSR_C; /* carry bit */ frame->tf_x[0] = error; break; } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. */ void cpu_copy_thread(struct thread *td, struct thread *td0) { bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); td->td_pcb->pcb_x[PCB_X19] = (uintptr_t)fork_return; td->td_pcb->pcb_x[PCB_X20] = (uintptr_t)td; td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline; td->td_pcb->pcb_sp = (uintptr_t)td->td_frame; /* Update VFP state for the new thread */ vfp_new_thread(td, td0, false); /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_daif = PSR_DAIF_DEFAULT; #if defined(PERTHREAD_SSP) /* Set the new canary */ arc4random_buf(&td->td_md.md_canary, sizeof(td->td_md.md_canary)); #endif /* Generate new pointer authentication keys. */ ptrauth_copy_thread(td, td0); } /* * Set the machine state for performing an upcall that starts * the entry function with the given argument.
*/ int cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf = td->td_frame; /* 32bits processes use r13 for sp */ if (td->td_frame->tf_spsr & PSR_M_32) { tf->tf_x[13] = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size); if ((register_t)entry & 1) tf->tf_spsr |= PSR_T; } else tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size); tf->tf_elr = (register_t)entry; tf->tf_x[0] = (register_t)arg; tf->tf_x[29] = 0; tf->tf_lr = 0; return (0); } int cpu_set_user_tls(struct thread *td, void *tls_base) { struct pcb *pcb; if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS) return (EINVAL); pcb = td->td_pcb; if (td->td_frame->tf_spsr & PSR_M_32) { /* 32bits arm stores the user TLS into tpidrro */ pcb->pcb_tpidrro_el0 = (register_t)tls_base; pcb->pcb_tpidr_el0 = (register_t)tls_base; if (td == curthread) { WRITE_SPECIALREG(tpidrro_el0, tls_base); WRITE_SPECIALREG(tpidr_el0, tls_base); } } else { pcb->pcb_tpidr_el0 = (register_t)tls_base; if (td == curthread) WRITE_SPECIALREG(tpidr_el0, tls_base); } return (0); } void cpu_thread_exit(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; td->td_frame = (struct trapframe *)STACKALIGN( (struct trapframe *)td->td_pcb - 1); ptrauth_thread_alloc(td); } void cpu_thread_free(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { td->td_pcb->pcb_x[PCB_X19] = (uintptr_t)func; td->td_pcb->pcb_x[PCB_X20] = (uintptr_t)arg; } +void +cpu_update_pcb(struct thread *td) +{ + MPASS(td == curthread); + td->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0); + td->td_pcb->pcb_tpidrro_el0 = READ_SPECIALREG(tpidrro_el0); +} + void cpu_exit(struct thread *td) { } bool cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused) { return (true); } int cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused, int com __unused, void *data __unused) { return (EINVAL); } void cpu_sync_core(void) { /* * Do nothing. According to ARM ARMv8 D1.11 Exception return * If FEAT_ExS is not implemented, or if FEAT_ExS is * implemented and the SCTLR_ELx.EOS field is set, exception * return from ELx is a context synchronization event. */ } diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c index b0dd7534633b..c64f19a30cbd 100644 --- a/sys/i386/i386/vm_machdep.c +++ b/sys/i386/i386/vm_machdep.c @@ -1,631 +1,638 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ #include #include "opt_isa.h" #include "opt_npx.h" #include "opt_reset.h" #include "opt_cpu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include _Static_assert(__OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf), "__OFFSETOF_MONITORBUF does not correspond with offset of pc_monitorbuf."); union savefpu * get_pcb_user_save_td(struct thread *td) { vm_offset_t p; p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE - roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN); KASSERT((p % XSAVE_AREA_ALIGN) == 0, ("Unaligned pcb_user_save area")); return ((union savefpu *)p); } union savefpu * get_pcb_user_save_pcb(struct pcb *pcb) { vm_offset_t p; p = (vm_offset_t)(pcb + 1); return ((union savefpu *)p); } struct pcb * get_pcb_td(struct thread *td) { vm_offset_t p; p = td->td_kstack + td->td_kstack_pages * PAGE_SIZE - roundup2(cpu_max_ext_state_size, XSAVE_AREA_ALIGN) - sizeof(struct pcb); return ((struct pcb *)p); } void * alloc_fpusave(int flags) { void *res; struct savefpu_ymm *sf; res = malloc(cpu_max_ext_state_size, M_DEVBUF, flags); if (use_xsave) { sf = (struct savefpu_ymm *)res; bzero(&sf->sv_xstate.sx_hd, sizeof(sf->sv_xstate.sx_hd)); sf->sv_xstate.sx_hd.xstate_bv = xsave_mask; } return (res); } /* * Common code shared between cpu_fork() and cpu_copy_thread() for * initializing a thread. */ static void copy_thread(struct thread *td1, struct thread *td2) { struct pcb *pcb2; pcb2 = td2->td_pcb; /* Ensure that td1's pcb is up to date for user threads. 
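For orientation, a sketch (not taken from the source) of the per-thread kernel stack layout these helpers encode; the trapframe placement comes from cpu_thread_alloc() further down:

/*
 *	td_kstack + td_kstack_pages * PAGE_SIZE		(top of stack)
 *	[ union savefpu, roundup2(cpu_max_ext_state_size,
 *	  XSAVE_AREA_ALIGN) bytes ]	<- get_pcb_user_save_td()
 *	[ struct pcb ]			<- get_pcb_td()
 *	[ VM86_STACK_SPACE bytes of trapframe expansion room ]
 *	[ struct trapframe ]		<- td->td_frame
 *	[ ... kernel stack grows down ... ]
 */
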
*/ if ((td2->td_pflags & TDP_KTHREAD) == 0) { MPASS(td1 == curthread); td1->td_pcb->pcb_gs = rgs(); critical_enter(); if (PCPU_GET(fpcurthread) == td1) npxsave(td1->td_pcb->pcb_save); critical_exit(); } /* Copy td1's pcb */ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); /* Properly initialize pcb_save */ pcb2->pcb_save = get_pcb_user_save_pcb(pcb2); /* Kernel threads start with clean NPX and segment bases. */ if ((td2->td_pflags & TDP_KTHREAD) != 0) { pcb2->pcb_gs = _udatasel; set_fsbase(td2, 0); set_gsbase(td2, 0); pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE | PCB_KERNNPX | PCB_KERNNPX_THR); } else { MPASS((pcb2->pcb_flags & (PCB_KERNNPX | PCB_KERNNPX_THR)) == 0); bcopy(get_pcb_user_save_td(td1), get_pcb_user_save_pcb(pcb2), cpu_max_ext_state_size); } /* * Set registers for trampoline to user mode. Leave space for the * return address on stack. These are the kernel mode register values. */ pcb2->pcb_edi = 0; pcb2->pcb_esi = (int)fork_return; /* trampoline arg */ pcb2->pcb_ebp = 0; pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *); /* trampoline arg */ pcb2->pcb_ebx = (int)td2; /* trampoline arg */ pcb2->pcb_eip = (int)fork_trampoline + setidt_disp; /* * If we didn't copy the pcb, we'd need to set the following registers: * pcb2->pcb_cr3: cloned above. * pcb2->pcb_dr*: cloned above. * pcb2->pcb_savefpu: cloned above. * pcb2->pcb_flags: cloned above. * pcb2->pcb_onfault: cloned above (always NULL here?). * pcb2->pcb_gs: cloned above. * pcb2->pcb_ext: cleared below. */ pcb2->pcb_ext = NULL; /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I; } /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child is * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct proc *p1; struct pcb *pcb2; struct mdproc *mdp2; p1 = td1->td_proc; if ((flags & RFPROC) == 0) { if ((flags & RFMEM) == 0) { /* unshare user LDT */ struct mdproc *mdp1 = &p1->p_md; struct proc_ldt *pldt, *pldt1; mtx_lock_spin(&dt_lock); if ((pldt1 = mdp1->md_ldt) != NULL && pldt1->ldt_refcnt > 1) { pldt = user_ldt_alloc(mdp1, pldt1->ldt_len); if (pldt == NULL) panic("could not copy LDT"); mdp1->md_ldt = pldt; set_user_ldt(mdp1); user_ldt_deref(pldt1); } else mtx_unlock_spin(&dt_lock); } return; } /* Point the pcb to the top of the stack */ pcb2 = get_pcb_td(td2); td2->td_pcb = pcb2; copy_thread(td1, td2); /* Reset debug registers in the new process */ x86_clear_dbregs(pcb2); /* Point mdproc and then copy over td1's contents */ mdp2 = &p2->p_md; bcopy(&p1->p_md, mdp2, sizeof(*mdp2)); /* * Copy the trap frame for the return to user mode as if from a * syscall. This copies most of the user mode register values. * The -VM86_STACK_SPACE (-16) is so we can expand the trapframe * if we go to vm86. */ td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - VM86_STACK_SPACE) - 1; bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe)); /* Set child return values. */ p2->p_sysent->sv_set_fork_retval(td2); /* * If the parent process has the trap bit set (i.e. a debugger * had single stepped the process to the system call), we need * to clear the trap flag from the new frame. */ td2->td_frame->tf_eflags &= ~PSL_T; /* Set cr3 for the new process. */ pcb2->pcb_cr3 = pmap_get_cr3(vmspace_pmap(p2->p_vmspace)); /* * XXX don't copy the i/o pages. this should probably be fixed.
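Once cpu_switch() first selects the child, the kernel-mode values staged above come into play: pcb_eip sends control to fork_trampoline() and the remaining registers become its arguments. A hedged C rendering of that hand-off (the real trampoline is assembler; fork_exit() is the genuine MI entry point):

/*
 * Illustrative only: sketches what the assembler trampoline does with
 * the registers staged by copy_thread() or overridden later by
 * cpu_fork_kthread_handler().
 */
static void
fork_trampoline_sketch(struct thread *td)
{
	struct pcb *pcb = td->td_pcb;

	/* %esi = callout, %ebx = its argument, frame just above %esp. */
	fork_exit((void (*)(void *, struct trapframe *))pcb->pcb_esi,
	    (void *)pcb->pcb_ebx, td->td_frame);
	/* NOTREACHED: fork_exit() returns to user mode via the frame. */
}
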
*/ pcb2->pcb_ext = NULL; /* Copy the LDT, if necessary. */ mtx_lock_spin(&dt_lock); if (mdp2->md_ldt != NULL) { if (flags & RFMEM) { mdp2->md_ldt->ldt_refcnt++; } else { mdp2->md_ldt = user_ldt_alloc(mdp2, mdp2->md_ldt->ldt_len); if (mdp2->md_ldt == NULL) panic("could not copy LDT"); } } mtx_unlock_spin(&dt_lock); /* * Now, cpu_switch() can schedule the new process. * pcb_esp is loaded pointing to the cpu_switch() stack frame * containing the return address when exiting cpu_switch. * This will normally be to fork_trampoline(), which will have * %ebx loaded with the new proc's pointer. fork_trampoline() * will set up a stack to call fork_return(p, frame); to complete * the return to user-mode. */ } void x86_set_fork_retval(struct thread *td) { struct trapframe * frame = td->td_frame; frame->tf_eax = 0; /* Child returns zero */ frame->tf_eflags &= ~PSL_C; /* success */ frame->tf_edx = 1; /* System V emulation */ } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { /* * Note that the trap frame follows the args, so the function * is really called like this: func(arg, frame); */ td->td_pcb->pcb_esi = (int) func; /* function */ td->td_pcb->pcb_ebx = (int) arg; /* first arg */ } void cpu_exit(struct thread *td) { /* * If this process has a custom LDT, release it. Reset pc->pcb_gs * and %gs before we free it in case they refer to an LDT entry. */ mtx_lock_spin(&dt_lock); if (td->td_proc->p_md.md_ldt) { td->td_pcb->pcb_gs = _udatasel; load_gs(_udatasel); user_ldt_free(td); } else mtx_unlock_spin(&dt_lock); } void cpu_thread_exit(struct thread *td) { critical_enter(); if (td == PCPU_GET(fpcurthread)) npxdrop(); critical_exit(); /* Disable any hardware breakpoints. */ if (td->td_pcb->pcb_flags & PCB_DBREGS) { reset_dbregs(); td->td_pcb->pcb_flags &= ~PCB_DBREGS; } } void cpu_thread_clean(struct thread *td) { struct pcb *pcb; pcb = td->td_pcb; if (pcb->pcb_ext != NULL) { /* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */ /* * XXX do we need to move the TSS off the allocated pages * before freeing them? (not done here) */ pmap_trm_free(pcb->pcb_ext, ctob(IOPAGES + 1)); pcb->pcb_ext = NULL; } } void cpu_thread_alloc(struct thread *td) { struct pcb *pcb; struct xstate_hdr *xhdr; td->td_pcb = pcb = get_pcb_td(td); td->td_frame = (struct trapframe *)((caddr_t)pcb - VM86_STACK_SPACE) - 1; pcb->pcb_ext = NULL; pcb->pcb_save = get_pcb_user_save_pcb(pcb); if (use_xsave) { xhdr = (struct xstate_hdr *)(pcb->pcb_save + 1); bzero(xhdr, sizeof(*xhdr)); xhdr->xstate_bv = xsave_mask; } } void cpu_thread_free(struct thread *td) { cpu_thread_clean(td); } bool cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused) { return (true); } int cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused, int com __unused, void *data __unused) { return (EINVAL); } void cpu_set_syscall_retval(struct thread *td, int error) { switch (error) { case 0: td->td_frame->tf_eax = td->td_retval[0]; td->td_frame->tf_edx = td->td_retval[1]; td->td_frame->tf_eflags &= ~PSL_C; break; case ERESTART: /* * Reconstruct pc, assuming lcall $X,y is 7 bytes, int * 0x80 is 2 bytes. We saved this in tf_err. 
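A short worked example of the rewind performed below; the addresses are illustrative:

/*
 * An `int $0x80` at eip 0x08048100 traps with tf_err = 2 (the
 * instruction length, saved at syscall entry) and tf_eip =
 * 0x08048102 (the next instruction).  The ERESTART case computes
 * 0x08048102 - 2 = 0x08048100, so the same `int $0x80` is re-issued
 * on return to user mode.
 */
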
*/ td->td_frame->tf_eip -= td->td_frame->tf_err; break; case EJUSTRETURN: break; default: td->td_frame->tf_eax = error; td->td_frame->tf_eflags |= PSL_C; break; } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to the fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. */ void cpu_copy_thread(struct thread *td, struct thread *td0) { copy_thread(td0, td); /* * Copy user general-purpose registers. * * Some of these registers are rewritten by cpu_set_upcall() * and linux_set_upcall(). */ bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); /* If the current thread has the trap bit set (i.e. a debugger had * single stepped the process to the system call), we need to clear * the trap flag from the new frame. Otherwise, the new thread will * receive a (likely unexpected) SIGTRAP when it executes the first * instruction after returning to userland. */ td->td_frame->tf_eflags &= ~PSL_T; } /* * Set that machine state for performing an upcall that starts * the entry function with the given argument. */ int cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { /* * Set the trap frame to point at the beginning of the entry * function. */ td->td_frame->tf_ebp = 0; td->td_frame->tf_esp = (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4; td->td_frame->tf_eip = (int)entry; /* Return address sentinel value to stop stack unwinding. */ if (suword((void *)td->td_frame->tf_esp, 0) != 0) return (EFAULT); /* Pass the argument to the entry point. */ if (suword((void *)(td->td_frame->tf_esp + sizeof(void *)), (int)arg) != 0) return (EFAULT); return (0); } int cpu_set_user_tls(struct thread *td, void *tls_base) { struct segment_descriptor sd; uint32_t base; /* * Construct a descriptor and store it in the pcb for * the next context switch. Also store it in the gdt * so that the load of tf_fs into %fs will activate it * at return to userland. */ base = (uint32_t)tls_base; sd.sd_lobase = base & 0xffffff; sd.sd_hibase = (base >> 24) & 0xff; sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */ sd.sd_hilimit = 0xf; sd.sd_type = SDT_MEMRWA; sd.sd_dpl = SEL_UPL; sd.sd_p = 1; sd.sd_xx = 0; sd.sd_def32 = 1; sd.sd_gran = 1; critical_enter(); /* set %gs */ td->td_pcb->pcb_gsd = sd; if (td == curthread) { PCPU_GET(fsgs_gdt)[1] = sd; load_gs(GSEL(GUGS_SEL, SEL_UPL)); } critical_exit(); return (0); } +void +cpu_update_pcb(struct thread *td) +{ + MPASS(td == curthread); + td->td_pcb->pcb_gs = rgs(); +} + /* * Convert kernel VA to physical address */ vm_paddr_t kvtop(void *addr) { vm_paddr_t pa; pa = pmap_kextract((vm_offset_t)addr); if (pa == 0) panic("kvtop: zero page frame"); return (pa); } /* * Get an sf_buf from the freelist. May block if none are available. 
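A caller-side sketch of the sf_buf KPI whose MD half follows; the page and the copy are hypothetical, but sf_buf_alloc(), sf_buf_kva() and sf_buf_free() are the standard entry points that reach sf_buf_map()/sf_buf_unmap() here:

static void
copy_into_page(vm_page_t m, const void *src)
{
	struct sf_buf *sf;

	sf = sf_buf_alloc(m, 0);	/* may sleep on the freelist */
	bcopy(src, (void *)sf_buf_kva(sf), PAGE_SIZE);
	sf_buf_free(sf);		/* MD teardown via sf_buf_unmap() */
}
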
*/ void sf_buf_map(struct sf_buf *sf, int flags) { pmap_sf_buf_map(sf); #ifdef SMP sf_buf_shootdown(sf, flags); #endif } #ifdef SMP static void sf_buf_shootdown_curcpu_cb(pmap_t pmap __unused, vm_offset_t addr1 __unused, vm_offset_t addr2 __unused) { } void sf_buf_shootdown(struct sf_buf *sf, int flags) { cpuset_t other_cpus; u_int cpuid; sched_pin(); cpuid = PCPU_GET(cpuid); if (!CPU_ISSET(cpuid, &sf->cpumask)) { CPU_SET(cpuid, &sf->cpumask); invlpg(sf->kva); } if ((flags & SFB_CPUPRIVATE) == 0) { other_cpus = all_cpus; CPU_CLR(cpuid, &other_cpus); CPU_ANDNOT(&other_cpus, &other_cpus, &sf->cpumask); if (!CPU_EMPTY(&other_cpus)) { CPU_OR(&sf->cpumask, &sf->cpumask, &other_cpus); smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap, sf_buf_shootdown_curcpu_cb); } } sched_unpin(); } #endif /* * MD part of sf_buf_free(). */ int sf_buf_unmap(struct sf_buf *sf) { return (0); } static void sf_buf_invalidate(struct sf_buf *sf) { vm_page_t m = sf->m; /* * Use pmap_qenter to update the pte for * existing mapping, in particular, the PAT * settings are recalculated. */ pmap_qenter(sf->kva, &m, 1); pmap_invalidate_cache_range(sf->kva, sf->kva + PAGE_SIZE); } /* * Invalidate the cache lines that may belong to the page, if * (possibly old) mapping of the page by sf buffer exists. Returns * TRUE when mapping was found and cache invalidated. */ boolean_t sf_buf_invalidate_cache(vm_page_t m) { return (sf_buf_process_page(m, sf_buf_invalidate)); } diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index d94f9e1e5143..fc074ad74e6b 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -1,2929 +1,2932 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2017 Dell EMC * Copyright (c) 2000-2001, 2003 David O'Brien * Copyright (c) 1995-1996 Søren Schmidt * Copyright (c) 1996 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer * in this position and unchanged. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "opt_capsicum.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ELF_NOTE_ROUNDSIZE 4 #define OLD_EI_BRAND 8 static int __elfN(check_header)(const Elf_Ehdr *hdr); static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int32_t *osrel, uint32_t *fctl0); static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry); static int __elfN(load_section)(const struct image_params *imgp, vm_ooffset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot); static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel); static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel); static bool __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote, int32_t *osrel, bool *has_fctl0, uint32_t *fctl0); static vm_prot_t __elfN(trans_prot)(Elf_Word); static Elf_Word __elfN(untrans_prot)(vm_prot_t); static size_t __elfN(prepare_register_notes)(struct thread *td, struct note_info_list *list, struct thread *target_td); SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW | CTLFLAG_MPSAFE, 0, ""); int __elfN(fallback_brand) = -1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort"); static int elf_legacy_coredump = 0; SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, &elf_legacy_coredump, 0, "include all and only RW pages in core dumps"); int __elfN(nxstack) = #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \ defined(__arm__) || defined(__aarch64__) || \ defined(__riscv) 1; #else 0; #endif SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, nxstack, CTLFLAG_RW, &__elfN(nxstack), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": support PT_GNU_STACK for non-executable stack control"); #if defined(__amd64__) static int __elfN(vdso) = 1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, vdso, CTLFLAG_RWTUN, &__elfN(vdso), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable vdso preloading"); #else static int __elfN(vdso) = 0; #endif #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__)) int i386_read_exec = 0; SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0, "enable execution from readable segments"); #endif static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR; static int sysctl_pie_base(SYSCTL_HANDLER_ARGS) { u_long val; int error; val = __elfN(pie_base); error = sysctl_handle_long(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); if ((val & PAGE_MASK) != 0) return (EINVAL); __elfN(pie_base) = val; return (0); } SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base, CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_pie_base, "LU", "PIE load base without randomization"); SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, ""); #define ASLR_NODE_OID __CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr) /* * Enable ASLR by 
default for 64-bit non-PIE binaries. 32-bit architectures * have limited address space (which can cause issues for applications with * high memory use) so we leave it off there. */ static int __elfN(aslr_enabled) = __ELF_WORD_SIZE == 64; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN, &__elfN(aslr_enabled), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable address map randomization"); /* * Enable ASLR by default for 64-bit PIE binaries. */ static int __elfN(pie_aslr_enabled) = __ELF_WORD_SIZE == 64; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN, &__elfN(pie_aslr_enabled), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable address map randomization for PIE binaries"); /* * Sbrk is deprecated and it can be assumed that in most cases it will not be * used anyway. This setting is valid only with ASLR enabled, and allows ASLR * to use the bss grow region. */ static int __elfN(aslr_honor_sbrk) = 0; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW, &__elfN(aslr_honor_sbrk), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used"); static int __elfN(aslr_stack) = __ELF_WORD_SIZE == 64; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack, CTLFLAG_RWTUN, &__elfN(aslr_stack), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable stack address randomization"); static int __elfN(aslr_shared_page) = __ELF_WORD_SIZE == 64; SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, shared_page, CTLFLAG_RWTUN, &__elfN(aslr_shared_page), 0, __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable shared page address randomization"); static int __elfN(sigfastblock) = 1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock, CTLFLAG_RWTUN, &__elfN(sigfastblock), 0, "enable sigfastblock for new processes"); static bool __elfN(allow_wx) = true; SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx, CTLFLAG_RWTUN, &__elfN(allow_wx), 0, "Allow pages to be mapped simultaneously writable and executable"); static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; #define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a)) Elf_Brandnote __elfN(freebsd_brandnote) = { .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR), .hdr.n_descsz = sizeof(int32_t), .hdr.n_type = NT_FREEBSD_ABI_TAG, .vendor = FREEBSD_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = __elfN(freebsd_trans_osrel) }; static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel) { uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); *osrel = *(const int32_t *)(p); return (true); } static int GNU_KFREEBSD_ABI_DESC = 3; Elf_Brandnote __elfN(kfreebsd_brandnote) = { .hdr.n_namesz = sizeof(GNU_ABI_VENDOR), .hdr.n_descsz = 16, /* XXX at least 16 */ .hdr.n_type = 1, .vendor = GNU_ABI_VENDOR, .flags = BN_TRANSLATE_OSREL, .trans_osrel = kfreebsd_trans_osrel }; static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel) { const Elf32_Word *desc; uintptr_t p; p = (uintptr_t)(note + 1); p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE); desc = (const Elf32_Word *)p; if (desc[0] != GNU_KFREEBSD_ABI_DESC) return (false); /* * Debian GNU/kFreeBSD embed the earliest compatible kernel version * (__FreeBSD_version: Rxx) in the LSB way. 
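A worked instance of the LSB decode on the next line, with hypothetical note contents: a Debian GNU/kFreeBSD note advertising kernel 8.1.0 carries desc[] = { 3, 8, 1, 0 }, so the computation yields *osrel = 8 * 100000 + 1 * 1000 + 0 = 801000.
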
*/ *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3]; return (true); } int __elfN(insert_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == NULL) { elf_brand_list[i] = entry; break; } } if (i == MAX_BRANDS) { printf("WARNING: %s: could not insert brandinfo entry: %p\n", __func__, entry); return (-1); } return (0); } int __elfN(remove_brand_entry)(Elf_Brandinfo *entry) { int i; for (i = 0; i < MAX_BRANDS; i++) { if (elf_brand_list[i] == entry) { elf_brand_list[i] = NULL; break; } } if (i == MAX_BRANDS) return (-1); return (0); } bool __elfN(brand_inuse)(Elf_Brandinfo *entry) { struct proc *p; bool rval = false; sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { if (p->p_sysent == entry->sysvec) { rval = true; break; } } sx_sunlock(&allproc_lock); return (rval); } static Elf_Brandinfo * __elfN(get_brandinfo)(struct image_params *imgp, const char *interp, int32_t *osrel, uint32_t *fctl0) { const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; Elf_Brandinfo *bi, *bi_m; bool ret, has_fctl0; int i, interp_name_len; interp_name_len = interp != NULL ? strlen(interp) + 1 : 0; /* * We support four types of branding -- (1) the ELF EI_OSABI field * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string * branding w/in the ELF header, (3) path of the `interp_path' * field, and (4) the ".note.ABI-tag" ELF section. */ /* Look for an ".note.ABI-tag" ELF section */ bi_m = NULL; for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL) continue; if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0) continue; if (hdr->e_machine == bi->machine && (bi->flags & (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) { has_fctl0 = false; *fctl0 = 0; *osrel = 0; ret = __elfN(check_note)(imgp, bi->brand_note, osrel, &has_fctl0, fctl0); /* Give brand a chance to veto check_note's guess */ if (ret && bi->header_supported) { ret = bi->header_supported(imgp, osrel, has_fctl0 ? fctl0 : NULL); } /* * If note checker claimed the binary, but the * interpreter path in the image does not * match default one for the brand, try to * search for other brands with the same * interpreter. Either there is better brand * with the right interpreter, or, failing * this, we return first brand which accepted * our note and, optionally, header. */ if (ret && bi_m == NULL && interp != NULL && (bi->interp_path == NULL || (strlen(bi->interp_path) + 1 != interp_name_len || strncmp(interp, bi->interp_path, interp_name_len) != 0))) { bi_m = bi; ret = 0; } if (ret) return (bi); } } if (bi_m != NULL) return (bi_m); /* If the executable has a brand, search for it in the brand list. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 || (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)) continue; if (hdr->e_machine == bi->machine && (hdr->e_ident[EI_OSABI] == bi->brand || (bi->compat_3_brand != NULL && strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND], bi->compat_3_brand) == 0))) { /* Looks good, but give brand a chance to veto */ if (bi->header_supported == NULL || bi->header_supported(imgp, NULL, NULL)) { /* * Again, prefer strictly matching * interpreter path. 
*/ if (interp_name_len == 0 && bi->interp_path == NULL) return (bi); if (bi->interp_path != NULL && strlen(bi->interp_path) + 1 == interp_name_len && strncmp(interp, bi->interp_path, interp_name_len) == 0) return (bi); if (bi_m == NULL) bi_m = bi; } } } if (bi_m != NULL) return (bi_m); /* No known brand, see if the header is recognized by any brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY || bi->header_supported == NULL) continue; if (hdr->e_machine == bi->machine) { ret = bi->header_supported(imgp, NULL, NULL); if (ret) return (bi); } } /* Lacking a known brand, search for a recognized interpreter. */ if (interp != NULL) { for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || (bi->flags & (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC)) != 0) continue; if (hdr->e_machine == bi->machine && bi->interp_path != NULL && /* ELF image p_filesz includes terminating zero */ strlen(bi->interp_path) + 1 == interp_name_len && strncmp(interp, bi->interp_path, interp_name_len) == 0 && (bi->header_supported == NULL || bi->header_supported(imgp, NULL, NULL))) return (bi); } } /* Lacking a recognized interpreter, try the default brand */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 || (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)) continue; if (hdr->e_machine == bi->machine && __elfN(fallback_brand) == bi->brand && (bi->header_supported == NULL || bi->header_supported(imgp, NULL, NULL))) return (bi); } return (NULL); } static bool __elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr) { return (hdr->e_phoff <= PAGE_SIZE && (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff); } static int __elfN(check_header)(const Elf_Ehdr *hdr) { Elf_Brandinfo *bi; int i; if (!IS_ELF(*hdr) || hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || hdr->e_ident[EI_DATA] != ELF_TARG_DATA || hdr->e_ident[EI_VERSION] != EV_CURRENT || hdr->e_phentsize != sizeof(Elf_Phdr) || hdr->e_version != ELF_TARG_VER) return (ENOEXEC); /* * Make sure we have at least one brand for this machine. */ for (i = 0; i < MAX_BRANDS; i++) { bi = elf_brand_list[i]; if (bi != NULL && bi->machine == hdr->e_machine) break; } if (i == MAX_BRANDS) return (ENOEXEC); return (0); } static int __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot) { struct sf_buf *sf; int error; vm_offset_t off; /* * Create the page if it doesn't exist yet. Ignore errors. */ vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) - trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL); /* * Find the page from the underlying object. 
*/ if (object != NULL) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, end - start); vm_imgact_unmap_page(sf); if (error != 0) return (KERN_FAILURE); } return (KERN_SUCCESS); } static int __elfN(map_insert)(const struct image_params *imgp, vm_map_t map, vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow) { struct sf_buf *sf; vm_offset_t off; vm_size_t sz; int error, locked, rv; if (start != trunc_page(start)) { rv = __elfN(map_partial)(map, object, offset, start, round_page(start), prot); if (rv != KERN_SUCCESS) return (rv); offset += round_page(start) - start; start = round_page(start); } if (end != round_page(end)) { rv = __elfN(map_partial)(map, object, offset + trunc_page(end) - start, trunc_page(end), end, prot); if (rv != KERN_SUCCESS) return (rv); end = trunc_page(end); } if (start >= end) return (KERN_SUCCESS); if ((offset & PAGE_MASK) != 0) { /* * The mapping is not page aligned. This means that we have * to copy the data. */ rv = vm_map_fixed(map, NULL, 0, start, end - start, prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL); if (rv != KERN_SUCCESS) return (rv); if (object == NULL) return (KERN_SUCCESS); for (; start < end; start += sz) { sf = vm_imgact_map_page(object, offset); if (sf == NULL) return (KERN_FAILURE); off = offset - trunc_page(offset); sz = end - start; if (sz > PAGE_SIZE - off) sz = PAGE_SIZE - off; error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start, sz); vm_imgact_unmap_page(sf); if (error != 0) return (KERN_FAILURE); offset += sz; } } else { vm_object_reference(object); rv = vm_map_fixed(map, object, offset, start, end - start, prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL | (object != NULL ? MAP_VN_EXEC : 0)); if (rv != KERN_SUCCESS) { locked = VOP_ISLOCKED(imgp->vp); VOP_UNLOCK(imgp->vp); vm_object_deallocate(object); vn_lock(imgp->vp, locked | LK_RETRY); return (rv); } else if (object != NULL) { MPASS(imgp->vp->v_object == object); VOP_SET_TEXT_CHECKED(imgp->vp); } } return (KERN_SUCCESS); } static int __elfN(load_section)(const struct image_params *imgp, vm_ooffset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot) { struct sf_buf *sf; size_t map_len; vm_map_t map; vm_object_t object; vm_offset_t map_addr; int error, rv, cow; size_t copy_len; vm_ooffset_t file_addr; /* * It's necessary to fail if the filsz + offset taken from the * header is greater than the actual file pager object's size. * If we were to allow this, then the vm_map_find() below would * walk right off the end of the file object and into the ether. * * While I'm here, might as well check for something else that * is invalid: filsz cannot be greater than memsz. */ if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) || filsz > memsz) { uprintf("elf_load_section: truncated ELF file\n"); return (ENOEXEC); } object = imgp->object; map = &imgp->proc->p_vmspace->vm_map; map_addr = trunc_page((vm_offset_t)vmaddr); file_addr = trunc_page(offset); /* * We have two choices. We can either clear the data in the last page * of an oversized mapping, or we can start the anon mapping a page * early and copy the initialized data into that first page. We * choose the second. 
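A worked instance of the split implemented below (4 KiB pages; the numbers are hypothetical and the file offset is assumed page-aligned):

/*
 * vmaddr = 0x401000, filsz = 0x1800, memsz = 0x3000:
 *   file-backed mapping:  0x401000 .. 0x402000  (trunc_page of filsz)
 *   anonymous mapping:    0x402000 .. 0x404000  (round_page of memsz)
 *   copy_len = 0x800      file bytes 0x1000..0x1800 copied into the
 *                         first anonymous page at 0x402000
 */
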
*/ if (filsz == 0) map_len = 0; else if (memsz > filsz) map_len = trunc_page(offset + filsz) - file_addr; else map_len = round_page(offset + filsz) - file_addr; if (map_len != 0) { /* cow flags: don't dump readonly sections in core */ cow = MAP_COPY_ON_WRITE | MAP_PREFAULT | (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP); rv = __elfN(map_insert)(imgp, map, object, file_addr, map_addr, map_addr + map_len, prot, cow); if (rv != KERN_SUCCESS) return (EINVAL); /* we can stop now if we've covered it all */ if (memsz == filsz) return (0); } /* * We have to get the remaining bit of the file into the first part * of the oversized map segment. This is normally because the .data * segment in the file is extended to provide bss. It's a neat idea * to try and save a page, but it's a pain in the behind to implement. */ copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset + filsz); map_addr = trunc_page((vm_offset_t)vmaddr + filsz); map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr; /* This had damn well better be true! */ if (map_len != 0) { rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr, map_addr + map_len, prot, 0); if (rv != KERN_SUCCESS) return (EINVAL); } if (copy_len != 0) { sf = vm_imgact_map_page(object, offset + filsz); if (sf == NULL) return (EIO); /* send the page fragment to user space */ error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr, copy_len); vm_imgact_unmap_page(sf); if (error != 0) return (error); } /* * Remove write access to the page if it was only granted by map_insert * to allow copyout. */ if ((prot & VM_PROT_WRITE) == 0) vm_map_protect(map, trunc_page(map_addr), round_page(map_addr + map_len), prot, 0, VM_MAP_PROTECT_SET_PROT); return (0); } static int __elfN(load_sections)(const struct image_params *imgp, const Elf_Ehdr *hdr, const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp) { vm_prot_t prot; u_long base_addr; bool first; int error, i; ASSERT_VOP_LOCKED(imgp->vp, __func__); base_addr = 0; first = true; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0) continue; /* Loadable segment */ prot = __elfN(trans_prot)(phdr[i].p_flags); error = __elfN(load_section)(imgp, phdr[i].p_offset, (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase, phdr[i].p_memsz, phdr[i].p_filesz, prot); if (error != 0) return (error); /* * Establish the base address if this is the first segment. */ if (first) { base_addr = trunc_page(phdr[i].p_vaddr + rbase); first = false; } } if (base_addrp != NULL) *base_addrp = base_addr; return (0); } /* * Load the file "file" into memory. It may be either a shared object * or an executable. * * The "addr" reference parameter is in/out. On entry, it specifies * the address where a shared object should be loaded. If the file is * an executable, this value is ignored. On exit, "addr" specifies * where the file was actually loaded. * * The "entry" reference parameter is out only. On exit, it specifies * the entry point for the loaded file. */ static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry) { struct { struct nameidata nd; struct vattr attr; struct image_params image_params; } *tempdata; const Elf_Ehdr *hdr = NULL; const Elf_Phdr *phdr = NULL; struct nameidata *nd; struct vattr *attr; struct image_params *imgp; u_long rbase; u_long base_addr = 0; int error; #ifdef CAPABILITY_MODE /* * XXXJA: This check can go away once we are sufficiently confident * that the checks in namei() are correct. 
*/ if (IN_CAPABILITY_MODE(curthread)) return (ECAPMODE); #endif tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO); nd = &tempdata->nd; attr = &tempdata->attr; imgp = &tempdata->image_params; /* * Initialize part of the common data */ imgp->proc = p; imgp->attr = attr; NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE, file); if ((error = namei(nd)) != 0) { nd->ni_vp = NULL; goto fail; } NDFREE_PNBUF(nd); imgp->vp = nd->ni_vp; /* * Check permissions, modes, uid, etc on the file, and "open" it. */ error = exec_check_permissions(imgp); if (error) goto fail; error = exec_map_first_page(imgp); if (error) goto fail; imgp->object = nd->ni_vp->v_object; hdr = (const Elf_Ehdr *)imgp->image_header; if ((error = __elfN(check_header)(hdr)) != 0) goto fail; if (hdr->e_type == ET_DYN) rbase = *addr; else if (hdr->e_type == ET_EXEC) rbase = 0; else { error = ENOEXEC; goto fail; } /* Only support headers that fit within first page for now */ if (!__elfN(phdr_in_zero_page)(hdr)) { error = ENOEXEC; goto fail; } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { error = ENOEXEC; goto fail; } error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr); if (error != 0) goto fail; if (p->p_sysent->sv_protect != NULL) p->p_sysent->sv_protect(imgp, SVP_INTERP); *addr = base_addr; *entry = (unsigned long)hdr->e_entry + rbase; fail: if (imgp->firstpage) exec_unmap_first_page(imgp); if (nd->ni_vp) { if (imgp->textset) VOP_UNSET_TEXT_CHECKED(nd->ni_vp); vput(nd->ni_vp); } free(tempdata, M_TEMP); return (error); } /* * Select randomized valid address in the map map, between minv and * maxv, with specified alignment. The [minv, maxv) range must belong * to the map. Note that function only allocates the address, it is * up to caller to clamp maxv in a way that the final allocation * length fit into the map. * * Result is returned in *resp, error code indicates that arguments * did not pass sanity checks for overflow and range correctness. */ static int __CONCAT(rnd_, __elfN(base))(vm_map_t map, u_long minv, u_long maxv, u_int align, u_long *resp) { u_long rbase, res; MPASS(vm_map_min(map) <= minv); if (minv >= maxv || minv + align >= maxv || maxv > vm_map_max(map)) { uprintf("Invalid ELF segments layout\n"); return (ENOEXEC); } arc4rand(&rbase, sizeof(rbase), 0); res = roundup(minv, (u_long)align) + rbase % (maxv - minv); res &= ~((u_long)align - 1); if (res >= maxv) res -= align; KASSERT(res >= minv, ("res %#lx < minv %#lx, maxv %#lx rbase %#lx", res, minv, maxv, rbase)); KASSERT(res < maxv, ("res %#lx > maxv %#lx, minv %#lx rbase %#lx", res, maxv, minv, rbase)); *resp = res; return (0); } static int __elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr, const Elf_Phdr *phdr) { struct vmspace *vmspace; const char *err_str; u_long text_size, data_size, total_size, text_addr, data_addr; u_long seg_size, seg_addr; int i; err_str = NULL; text_size = data_size = total_size = text_addr = data_addr = 0; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0) continue; seg_addr = trunc_page(phdr[i].p_vaddr + imgp->et_dyn_addr); seg_size = round_page(phdr[i].p_memsz + phdr[i].p_vaddr + imgp->et_dyn_addr - seg_addr); /* * Make the largest executable segment the official * text segment and all others data. * * Note that obreak() assumes that data_addr + data_size == end * of data load area, and the ELF file format expects segments * to be sorted by address. 
If multiple data segments exist, * the last one will be used. */ if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) { text_size = seg_size; text_addr = seg_addr; } else { data_size = seg_size; data_addr = seg_addr; } total_size += seg_size; } if (data_addr == 0 && data_size == 0) { data_addr = text_addr; data_size = text_size; } /* * Check limits. It should be safe to check the * limits after loading the segments since we do * not actually fault in all the segments' pages. */ PROC_LOCK(imgp->proc); if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA)) err_str = "Data segment size exceeds process limit"; else if (text_size > maxtsiz) err_str = "Text segment size exceeds system limit"; else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM)) err_str = "Total segment size exceeds process limit"; else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0) err_str = "Data segment size exceeds resource limit"; else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0) err_str = "Total segment size exceeds resource limit"; PROC_UNLOCK(imgp->proc); if (err_str != NULL) { uprintf("%s\n", err_str); return (ENOMEM); } vmspace = imgp->proc->p_vmspace; vmspace->vm_tsize = text_size >> PAGE_SHIFT; vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; vmspace->vm_dsize = data_size >> PAGE_SHIFT; vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr; return (0); } static int __elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr, char **interpp, bool *free_interpp) { struct thread *td; char *interp; int error, interp_name_len; KASSERT(phdr->p_type == PT_INTERP, ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type)); ASSERT_VOP_LOCKED(imgp->vp, __func__); td = curthread; /* Path to interpreter */ if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) { uprintf("Invalid PT_INTERP\n"); return (ENOEXEC); } interp_name_len = phdr->p_filesz; if (phdr->p_offset > PAGE_SIZE || interp_name_len > PAGE_SIZE - phdr->p_offset) { /* * The vnode lock might be needed by the pagedaemon to * clean pages owned by the vnode. Do not allow sleep * waiting for memory with the vnode locked, instead * try non-sleepable allocation first, and if it * fails, go to the slow path where we drop the lock * and do M_WAITOK. A text reference prevents * modifications to the vnode content.
*/ interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT); if (interp == NULL) { VOP_UNLOCK(imgp->vp); interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); } error = vn_rdwr(UIO_READ, imgp->vp, interp, interp_name_len, phdr->p_offset, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, NULL, td); if (error != 0) { free(interp, M_TEMP); uprintf("i/o error PT_INTERP %d\n", error); return (error); } interp[interp_name_len] = '\0'; *interpp = interp; *free_interpp = true; return (0); } interp = __DECONST(char *, imgp->image_header) + phdr->p_offset; if (interp[interp_name_len - 1] != '\0') { uprintf("Invalid PT_INTERP\n"); return (ENOEXEC); } *interpp = interp; *free_interpp = false; return (0); } static int __elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info, const char *interp, u_long *addr, u_long *entry) { int error; if (brand_info->interp_newpath != NULL && (brand_info->interp_path == NULL || strcmp(interp, brand_info->interp_path) == 0)) { error = __elfN(load_file)(imgp->proc, brand_info->interp_newpath, addr, entry); if (error == 0) return (0); } error = __elfN(load_file)(imgp->proc, interp, addr, entry); if (error == 0) return (0); uprintf("ELF interpreter %s not found, error %d\n", interp, error); return (error); } /* * Impossible et_dyn_addr initial value indicating that the real base * must be calculated later with some randomization applied. */ #define ET_DYN_ADDR_RAND 1 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) { struct thread *td; const Elf_Ehdr *hdr; const Elf_Phdr *phdr; Elf_Auxargs *elf_auxargs; struct vmspace *vmspace; vm_map_t map; char *interp; Elf_Brandinfo *brand_info; struct sysentvec *sv; u_long addr, baddr, entry, proghdr; u_long maxalign, maxsalign, mapsz, maxv, maxv1, anon_loc; uint32_t fctl0; int32_t osrel; bool free_interp; int error, i, n; hdr = (const Elf_Ehdr *)imgp->image_header; /* * Do we have a valid ELF header ? * * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later * if particular brand doesn't support it. */ if (__elfN(check_header)(hdr) != 0 || (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)) return (-1); /* * From here on down, we return an errno, not -1, as we've * detected an ELF file. */ if (!__elfN(phdr_in_zero_page)(hdr)) { uprintf("Program headers not in the first page\n"); return (ENOEXEC); } phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); if (!aligned(phdr, Elf_Addr)) { uprintf("Unaligned program headers\n"); return (ENOEXEC); } n = error = 0; baddr = 0; osrel = 0; fctl0 = 0; entry = proghdr = 0; interp = NULL; free_interp = false; td = curthread; /* * Somewhat arbitrary, limit accepted max alignment for the * loadable segment to the max supported superpage size. Too * large alignment requests are not useful and are indicators * of corrupted or outright malicious binary. 
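A worked instance of the alignment cap computed just below, under amd64-flavored assumptions (pagesizes[] = { 4K, 2M, 1G }): maxsalign starts at PAGE_SIZE * 1024 = 4M, the scan from the largest supported superpage finds 1G > 4M and stops there, so a PT_LOAD segment may request p_align up to 1G. A p_align of 2M is accepted (and raises maxalign), while a bogus p_align of 2G fails the check with ENOEXEC.
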
*/ maxalign = PAGE_SIZE; maxsalign = PAGE_SIZE * 1024; for (i = MAXPAGESIZES - 1; i > 0; i--) { if (pagesizes[i] > maxsalign) { maxsalign = pagesizes[i]; break; } } mapsz = 0; for (i = 0; i < hdr->e_phnum; i++) { switch (phdr[i].p_type) { case PT_LOAD: if (n == 0) baddr = phdr[i].p_vaddr; if (!powerof2(phdr[i].p_align) || phdr[i].p_align > maxsalign) { uprintf("Invalid segment alignment\n"); error = ENOEXEC; goto ret; } if (phdr[i].p_align > maxalign) maxalign = phdr[i].p_align; if (mapsz + phdr[i].p_memsz < mapsz) { uprintf("Mapsize overflow\n"); error = ENOEXEC; goto ret; } mapsz += phdr[i].p_memsz; n++; /* * If this segment contains the program headers, * remember their virtual address for the AT_PHDR * aux entry. Static binaries don't usually include * a PT_PHDR entry. */ if (phdr[i].p_offset == 0 && hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize <= phdr[i].p_filesz) proghdr = phdr[i].p_vaddr + hdr->e_phoff; break; case PT_INTERP: /* Path to interpreter */ if (interp != NULL) { uprintf("Multiple PT_INTERP headers\n"); error = ENOEXEC; goto ret; } error = __elfN(get_interp)(imgp, &phdr[i], &interp, &free_interp); if (error != 0) goto ret; break; case PT_GNU_STACK: if (__elfN(nxstack)) { imgp->stack_prot = __elfN(trans_prot)(phdr[i].p_flags); if ((imgp->stack_prot & VM_PROT_RW) != VM_PROT_RW) { uprintf("Invalid PT_GNU_STACK\n"); error = ENOEXEC; goto ret; } } imgp->stack_sz = phdr[i].p_memsz; break; case PT_PHDR: /* Program header table info */ proghdr = phdr[i].p_vaddr; break; } } brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0); if (brand_info == NULL) { uprintf("ELF binary type \"%u\" not known.\n", hdr->e_ident[EI_OSABI]); error = ENOEXEC; goto ret; } sv = brand_info->sysvec; if (hdr->e_type == ET_DYN) { if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) { uprintf("Cannot execute shared object\n"); error = ENOEXEC; goto ret; } /* * Honour the base load address from the dso if it is * non-zero for some reason. */ if (baddr == 0) { if ((sv->sv_flags & SV_ASLR) == 0 || (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) imgp->et_dyn_addr = __elfN(pie_base); else if ((__elfN(pie_aslr_enabled) && (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) || (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0) imgp->et_dyn_addr = ET_DYN_ADDR_RAND; else imgp->et_dyn_addr = __elfN(pie_base); } } /* * Avoid a possible deadlock if the current address space is destroyed * and that address space maps the locked vnode. In the common case, * the locked vnode's v_usecount is decremented but remains greater * than zero. Consequently, the vnode lock is not needed by vrele(). * However, in cases where the vnode lock is external, such as nullfs, * v_usecount may become zero. * * The VV_TEXT flag prevents modifications to the executable while * the vnode is unlocked. */ VOP_UNLOCK(imgp->vp); /* * Decide whether to enable randomization of user mappings. * First, reset user preferences for the setid binaries. * Then, account for the support of the randomization by the * ABI, by user preferences, and make special treatment for * PIE binaries. 
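The ET_DYN branch above reduces to a small decision table once baddr == 0; a hedged distillation (not the kernel's literal control flow) of how et_dyn_addr is primed before the randomization machinery below resolves ET_DYN_ADDR_RAND:

static u_long
pie_base_sketch(const struct sysentvec *sv, uint32_t fctl0, u_int p_flag2)
{
	/* ABI lacks ASLR, or the binary's feature-control note vetoes it. */
	if ((sv->sv_flags & SV_ASLR) == 0 ||
	    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
		return (__elfN(pie_base));
	/* Sysctl default or per-process procctl(2) request: randomize. */
	if ((__elfN(pie_aslr_enabled) && (p_flag2 & P2_ASLR_DISABLE) == 0) ||
	    (p_flag2 & P2_ASLR_ENABLE) != 0)
		return (ET_DYN_ADDR_RAND);	/* resolved by rnd_base later */
	return (__elfN(pie_base));
}
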
if (imgp->credential_setid) { PROC_LOCK(imgp->proc); imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE | P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC); PROC_UNLOCK(imgp->proc); } if ((sv->sv_flags & SV_ASLR) == 0 || (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 || (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) { KASSERT(imgp->et_dyn_addr != ET_DYN_ADDR_RAND, ("imgp->et_dyn_addr == RAND and !ASLR")); } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 || (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) || imgp->et_dyn_addr == ET_DYN_ADDR_RAND) { imgp->map_flags |= MAP_ASLR; /* * If the user does not care about sbrk, utilize the bss * grow region for mappings as well. We can select * the base for the image anywhere and still not suffer * from fragmentation. */ if (!__elfN(aslr_honor_sbrk) || (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0) imgp->map_flags |= MAP_ASLR_IGNSTART; if (__elfN(aslr_stack)) imgp->map_flags |= MAP_ASLR_STACK; if (__elfN(aslr_shared_page)) imgp->imgp_flags |= IMGP_ASLR_SHARED_PAGE; } if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 && (imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) || (imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0) imgp->map_flags |= MAP_WXORX; error = exec_new_vmspace(imgp, sv); imgp->proc->p_sysent = sv; imgp->proc->p_elf_brandinfo = brand_info; vmspace = imgp->proc->p_vmspace; map = &vmspace->vm_map; maxv = sv->sv_usrstack; if ((imgp->map_flags & MAP_ASLR_STACK) == 0) maxv -= lim_max(td, RLIMIT_STACK); if (error == 0 && mapsz >= maxv - vm_map_min(map)) { uprintf("Excessive mapping size\n"); error = ENOEXEC; } if (error == 0 && imgp->et_dyn_addr == ET_DYN_ADDR_RAND) { KASSERT((map->flags & MAP_ASLR) != 0, ("ET_DYN_ADDR_RAND but !MAP_ASLR")); error = __CONCAT(rnd_, __elfN(base))(map, vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA), /* reserve half of the address space for the interpreter */ maxv / 2, maxalign, &imgp->et_dyn_addr); } vn_lock(imgp->vp, LK_SHARED | LK_RETRY); if (error != 0) goto ret; error = __elfN(load_sections)(imgp, hdr, phdr, imgp->et_dyn_addr, NULL); if (error != 0) goto ret; error = __elfN(enforce_limits)(imgp, hdr, phdr); if (error != 0) goto ret; /* * We load the dynamic linker where a userland call * to mmap(0, ...) would put it. The rationale behind this * calculation is that it leaves room for the heap to grow to * its maximum allowed size. */ addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td, RLIMIT_DATA)); if ((map->flags & MAP_ASLR) != 0) { maxv1 = maxv / 2 + addr / 2; error = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, #if VM_NRESERVLEVEL > 0 pagesizes[VM_NRESERVLEVEL] != 0 ? /* Align anon_loc to the largest superpage size.
*/ pagesizes[VM_NRESERVLEVEL] : #endif pagesizes[0], &anon_loc); if (error != 0) goto ret; map->anon_loc = anon_loc; } else { map->anon_loc = addr; } entry = (u_long)hdr->e_entry + imgp->et_dyn_addr; imgp->entry_addr = entry; if (sv->sv_protect != NULL) sv->sv_protect(imgp, SVP_IMAGE); if (interp != NULL) { VOP_UNLOCK(imgp->vp); if ((map->flags & MAP_ASLR) != 0) { /* Assume that interpreter fits into 1/4 of AS */ maxv1 = maxv / 2 + addr / 2; error = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1, PAGE_SIZE, &addr); } if (error == 0) { error = __elfN(load_interp)(imgp, brand_info, interp, &addr, &imgp->entry_addr); } vn_lock(imgp->vp, LK_SHARED | LK_RETRY); if (error != 0) goto ret; } else addr = imgp->et_dyn_addr; error = exec_map_stack(imgp); if (error != 0) goto ret; /* * Construct auxargs table (used by the copyout_auxargs routine) */ elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT); if (elf_auxargs == NULL) { VOP_UNLOCK(imgp->vp); elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); vn_lock(imgp->vp, LK_SHARED | LK_RETRY); } elf_auxargs->execfd = -1; elf_auxargs->phdr = proghdr + imgp->et_dyn_addr; elf_auxargs->phent = hdr->e_phentsize; elf_auxargs->phnum = hdr->e_phnum; elf_auxargs->pagesz = PAGE_SIZE; elf_auxargs->base = addr; elf_auxargs->flags = 0; elf_auxargs->entry = entry; elf_auxargs->hdr_eflags = hdr->e_flags; imgp->auxargs = elf_auxargs; imgp->interpreted = 0; imgp->reloc_base = addr; imgp->proc->p_osrel = osrel; imgp->proc->p_fctl0 = fctl0; imgp->proc->p_elf_flags = hdr->e_flags; ret: ASSERT_VOP_LOCKED(imgp->vp, "skipped relock"); if (free_interp) free(interp, M_TEMP); return (error); } #define elf_suword __CONCAT(suword, __ELF_WORD_SIZE) int __elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base) { Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs; Elf_Auxinfo *argarray, *pos; struct vmspace *vmspace; rlim_t stacksz; int error, oc; uint32_t bsdflags; argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP, M_WAITOK | M_ZERO); vmspace = imgp->proc->p_vmspace; if (args->execfd != -1) AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd); AUXARGS_ENTRY(pos, AT_PHDR, args->phdr); AUXARGS_ENTRY(pos, AT_PHENT, args->phent); AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum); AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz); AUXARGS_ENTRY(pos, AT_FLAGS, args->flags); AUXARGS_ENTRY(pos, AT_ENTRY, args->entry); AUXARGS_ENTRY(pos, AT_BASE, args->base); AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags); if (imgp->execpathp != 0) AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp); AUXARGS_ENTRY(pos, AT_OSRELDATE, imgp->proc->p_ucred->cr_prison->pr_osreldate); if (imgp->canary != 0) { AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary); AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen); } AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus); if (imgp->pagesizes != 0) { AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes); AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen); } if ((imgp->sysent->sv_flags & SV_TIMEKEEP) != 0) { AUXARGS_ENTRY(pos, AT_TIMEKEEP, vmspace->vm_shp_base + imgp->sysent->sv_timekeep_offset); } AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ? imgp->stack_prot : imgp->sysent->sv_stackprot); if (imgp->sysent->sv_hwcap != NULL) AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap); if (imgp->sysent->sv_hwcap2 != NULL) AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2); bsdflags = 0; bsdflags |= __elfN(sigfastblock) ? 
ELF_BSDF_SIGFASTBLK : 0; oc = atomic_load_int(&vm_overcommit); bsdflags |= (oc & (SWAP_RESERVE_FORCE_ON | SWAP_RESERVE_RLIMIT_ON)) != 0 ? ELF_BSDF_VMNOOVERCOMMIT : 0; AUXARGS_ENTRY(pos, AT_BSDFLAGS, bsdflags); AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc); AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv); AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc); AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv); AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings); #ifdef RANDOM_FENESTRASX if ((imgp->sysent->sv_flags & SV_RNG_SEED_VER) != 0) { AUXARGS_ENTRY(pos, AT_FXRNG, vmspace->vm_shp_base + imgp->sysent->sv_fxrng_gen_offset); } #endif if ((imgp->sysent->sv_flags & SV_DSO_SIG) != 0 && __elfN(vdso) != 0) { AUXARGS_ENTRY(pos, AT_KPRELOAD, vmspace->vm_shp_base + imgp->sysent->sv_vdso_offset); } AUXARGS_ENTRY(pos, AT_USRSTACKBASE, round_page(vmspace->vm_stacktop)); stacksz = imgp->proc->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur; AUXARGS_ENTRY(pos, AT_USRSTACKLIM, stacksz); AUXARGS_ENTRY(pos, AT_NULL, 0); free(imgp->auxargs, M_TEMP); imgp->auxargs = NULL; KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs")); error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT); free(argarray, M_TEMP); return (error); } int __elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp) { Elf_Addr *base; base = (Elf_Addr *)*stack_base; base--; if (elf_suword(base, imgp->args->argc) == -1) return (EFAULT); *stack_base = (uintptr_t)base; return (0); } /* * Code for generating ELF core dumps. */ typedef void (*segment_callback)(vm_map_entry_t, void *); /* Closure for cb_put_phdr(). */ struct phdr_closure { Elf_Phdr *phdr; /* Program header to fill in */ Elf_Off offset; /* Offset of segment in core file */ }; struct note_info { int type; /* Note type. */ struct regset *regset; /* Register set. */ outfunc_t outfunc; /* Output function. */ void *outarg; /* Argument for the output function. */ size_t outsize; /* Output size. */ TAILQ_ENTRY(note_info) link; /* Link to the next note info. 
 */
};

TAILQ_HEAD(note_info_list, note_info);

extern int compress_user_cores;
extern int compress_user_cores_level;

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_dumpable_segment(struct thread *, segment_callback, void *,
    int);
static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t,
    struct note_info_list *, size_t, int);
static void __elfN(putnote)(struct thread *td, struct note_info *,
    struct sbuf *);
static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
static void note_procstat_files(void *, struct sbuf *, size_t *);
static void note_procstat_groups(void *, struct sbuf *, size_t *);
static void note_procstat_osrel(void *, struct sbuf *, size_t *);
static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
static void note_procstat_umask(void *, struct sbuf *, size_t *);
static void note_procstat_vmmap(void *, struct sbuf *, size_t *);

static int
core_compressed_write(void *base, size_t len, off_t offset, void *arg)
{

	return (core_write((struct coredump_params *)arg, base, len, offset,
	    UIO_SYSSPACE, NULL));
}

int
__elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
{
	struct ucred *cred = td->td_ucred;
	int compm, error = 0;
	struct sseg_closure seginfo;
	struct note_info_list notelst;
	struct coredump_params params;
	struct note_info *ninfo;
	void *hdr, *tmpbuf;
	size_t hdrsize, notesz, coresize;

	hdr = NULL;
	tmpbuf = NULL;
	TAILQ_INIT(&notelst);

	/* Size the program segments. */
	__elfN(size_segments)(td, &seginfo, flags);

	/*
	 * Collect info about the core file header area.
	 */
	hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
	if (seginfo.count + 1 >= PN_XNUM)
		hdrsize += sizeof(Elf_Shdr);
	td->td_proc->p_sysent->sv_elf_core_prepare_notes(td, &notelst, &notesz);
	coresize = round_page(hdrsize + notesz) + seginfo.size;

	/* Set up core dump parameters. */
	params.offset = 0;
	params.active_cred = cred;
	params.file_cred = NOCRED;
	params.td = td;
	params.vp = vp;
	params.comp = NULL;

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_add(td->td_proc, RACCT_CORE, coresize);
		PROC_UNLOCK(td->td_proc);
		if (error != 0) {
			error = EFAULT;
			goto done;
		}
	}
#endif
	if (coresize >= limit) {
		error = EFAULT;
		goto done;
	}

	/* Create a compression stream if necessary. */
	compm = compress_user_cores;
	if ((flags & (SVC_PT_COREDUMP | SVC_NOCOMPRESS)) == SVC_PT_COREDUMP &&
	    compm == 0)
		compm = COMPRESS_GZIP;
	if (compm != 0) {
		params.comp = compressor_init(core_compressed_write,
		    compm, CORE_BUF_SIZE, compress_user_cores_level, &params);
		if (params.comp == NULL) {
			error = EFAULT;
			goto done;
		}
		tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
	}

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out following the notes.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	error = __elfN(corehdr)(&params, seginfo.count, hdr, hdrsize, &notelst,
	    notesz, flags);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = round_page(hdrsize + notesz);
		for (i = 0; i < seginfo.count; i++) {
			error = core_output((char *)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, &params, tmpbuf);
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
		if (error == 0 && params.comp != NULL)
			error = compressor_flush(params.comp);
	}
	if (error) {
		log(LOG_WARNING,
		    "Failed to write core file for process %s (error %d)\n",
		    curproc->p_comm, error);
	}

done:
	free(tmpbuf, M_TEMP);
	if (params.comp != NULL)
		compressor_fini(params.comp);
	while ((ninfo = TAILQ_FIRST(&notelst)) != NULL) {
		TAILQ_REMOVE(&notelst, ninfo, link);
		free(ninfo, M_TEMP);
	}
	if (hdr != NULL)
		free(hdr, M_TEMP);

	return (error);
}

/*
 * A callback for each_dumpable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(vm_map_entry_t entry, void *closure)
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = __elfN(untrans_prot)(entry->protection);

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_dumpable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(vm_map_entry_t entry, void *closure)
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

void
__elfN(size_segments)(struct thread *td, struct sseg_closure *seginfo,
    int flags)
{
	seginfo->count = 0;
	seginfo->size = 0;

	each_dumpable_segment(td, cb_size_segment, seginfo, flags);
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_dumpable_segment(struct thread *td, segment_callback func, void *closure,
    int flags)
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	vm_object_t backing_object, object;
	bool ignore_entry;

	vm_map_lock_read(map);
	VM_MAP_ENTRY_FOREACH(entry, map) {
		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if ((flags & SVC_ALL) == 0) {
			if (elf_legacy_coredump) {
				if ((entry->protection & VM_PROT_RW) !=
				    VM_PROT_RW)
					continue;
			} else {
				if ((entry->protection & VM_PROT_ALL) == 0)
					continue;
			}
		}

		/*
		 * Don't include a memory segment in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
			continue;
		if ((entry->eflags & MAP_ENTRY_NOCOREDUMP) != 0 &&
		    (flags & SVC_ALL) == 0)
			continue;
		if ((object = entry->object.vm_object) == NULL)
			continue;

		/* Ignore memory-mapped devices and such things.
*/ VM_OBJECT_RLOCK(object); while ((backing_object = object->backing_object) != NULL) { VM_OBJECT_RLOCK(backing_object); VM_OBJECT_RUNLOCK(object); object = backing_object; } ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0; VM_OBJECT_RUNLOCK(object); if (ignore_entry) continue; (*func)(entry, closure); } vm_map_unlock_read(map); } /* * Write the core file header to the file, including padding up to * the page boundary. */ static int __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr, size_t hdrsize, struct note_info_list *notelst, size_t notesz, int flags) { struct note_info *ninfo; struct sbuf *sb; int error; /* Fill in the header. */ bzero(hdr, hdrsize); __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz, flags); sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_drain_core_output, p); sbuf_start_section(sb, NULL); sbuf_bcat(sb, hdr, hdrsize); TAILQ_FOREACH(ninfo, notelst, link) __elfN(putnote)(p->td, ninfo, sb); /* Align up to a page boundary for the program segments. */ sbuf_end_section(sb, -1, PAGE_SIZE, 0); error = sbuf_finish(sb); sbuf_delete(sb); return (error); } void __elfN(prepare_notes)(struct thread *td, struct note_info_list *list, size_t *sizep) { struct proc *p; struct thread *thr; size_t size; p = td->td_proc; size = 0; size += __elfN(register_note)(td, list, NT_PRPSINFO, __elfN(note_prpsinfo), p); /* * To have the debugger select the right thread (LWP) as the initial * thread, we dump the state of the thread passed to us in td first. * This is the thread that causes the core dump and thus likely to * be the right thread one wants to have selected in the debugger. */ thr = td; while (thr != NULL) { size += __elfN(prepare_register_notes)(td, list, thr); size += __elfN(register_note)(td, list, -1, __elfN(note_threadmd), thr); thr = thr == td ? 
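		/*
		 * The update here visits td first and then every other
		 * thread exactly once: after td, iteration restarts from
		 * the head of p_threads, and td is skipped when it comes
		 * around again.  An equivalent straight-line sketch
		 * (emit_thread() is a placeholder, not a real function):
		 *
		 *	emit_thread(td);
		 *	TAILQ_FOREACH(thr, &p->p_threads, td_plist) {
		 *		if (thr != td)
		 *			emit_thread(thr);
		 *	}
		 */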
		    TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	size += __elfN(register_note)(td, list, NT_PROCSTAT_PROC,
	    __elfN(note_procstat_proc), p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_FILES,
	    note_procstat_files, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_VMMAP,
	    note_procstat_vmmap, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_GROUPS,
	    note_procstat_groups, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_UMASK,
	    note_procstat_umask, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_RLIMIT,
	    note_procstat_rlimit, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_OSREL,
	    note_procstat_osrel, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_PSSTRINGS,
	    __elfN(note_procstat_psstrings), p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_AUXV,
	    __elfN(note_procstat_auxv), p);

	*sizep = size;
}

void
__elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
    size_t notesz, int flags)
{
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr;
	Elf_Shdr *shdr;
	struct phdr_closure phc;
	Elf_Brandinfo *bi;

	ehdr = (Elf_Ehdr *)hdr;
	bi = td->td_proc->p_elf_brandinfo;

	ehdr->e_ident[EI_MAG0] = ELFMAG0;
	ehdr->e_ident[EI_MAG1] = ELFMAG1;
	ehdr->e_ident[EI_MAG2] = ELFMAG2;
	ehdr->e_ident[EI_MAG3] = ELFMAG3;
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = td->td_proc->p_sysent->sv_elf_core_osabi;
	ehdr->e_ident[EI_ABIVERSION] = 0;
	ehdr->e_ident[EI_PAD] = 0;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = bi->machine;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_entry = 0;
	ehdr->e_phoff = sizeof(Elf_Ehdr);
	ehdr->e_flags = td->td_proc->p_elf_flags;
	ehdr->e_ehsize = sizeof(Elf_Ehdr);
	ehdr->e_phentsize = sizeof(Elf_Phdr);
	ehdr->e_shentsize = sizeof(Elf_Shdr);
	ehdr->e_shstrndx = SHN_UNDEF;
	if (numsegs + 1 < PN_XNUM) {
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shnum = 0;
	} else {
		ehdr->e_phnum = PN_XNUM;
		ehdr->e_shnum = 1;
		ehdr->e_shoff = ehdr->e_phoff +
		    (numsegs + 1) * ehdr->e_phentsize;
		KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr),
		    ("e_shoff: %zu, hdrsize - shdr: %zu",
		    (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr)));

		shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
		memset(shdr, 0, sizeof(*shdr));
		/*
		 * A special first section is used to hold large segment and
		 * section counts.  This was proposed by Sun Microsystems in
		 * Solaris and has been adopted by Linux; the standard ELF
		 * tools are already familiar with the technique.
		 *
		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
		 * (or 12-7 depending on the version of the document) for more
		 * details.
		 */
		shdr->sh_type = SHT_NULL;
		shdr->sh_size = ehdr->e_shnum;
		shdr->sh_link = ehdr->e_shstrndx;
		shdr->sh_info = numsegs + 1;
	}

	/*
	 * Fill in the program header entries.
	 */
	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);

	/* The note segment. */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = hdrsize;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = notesz;
	phdr->p_memsz = 0;
	phdr->p_flags = PF_R;
	phdr->p_align = ELF_NOTE_ROUNDSIZE;
	phdr++;

	/* All the writable segments from the program. */
	phc.phdr = phdr;
	phc.offset = round_page(hdrsize + notesz);
	each_dumpable_segment(td, cb_put_phdr, &phc, flags);
}

static size_t
__elfN(register_regset_note)(struct thread *td, struct note_info_list *list,
    struct regset *regset, struct thread *target_td)
{
	const struct sysentvec *sv;
	struct note_info *ninfo;
	size_t size, notesize;

	size = 0;
	if (!regset->get(regset, target_td, NULL, &size) || size == 0)
		return (0);

	ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
	ninfo->type = regset->note;
	ninfo->regset = regset;
	ninfo->outarg = target_td;
	ninfo->outsize = size;
	TAILQ_INSERT_TAIL(list, ninfo, link);

	sv = td->td_proc->p_sysent;
	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1,
		ELF_NOTE_ROUNDSIZE) +		/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}

size_t
__elfN(register_note)(struct thread *td, struct note_info_list *list,
    int type, outfunc_t out, void *arg)
{
	const struct sysentvec *sv;
	struct note_info *ninfo;
	size_t size, notesize;

	sv = td->td_proc->p_sysent;
	size = 0;
	out(arg, NULL, &size);
	ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
	ninfo->type = type;
	ninfo->outfunc = out;
	ninfo->outarg = arg;
	ninfo->outsize = size;
	TAILQ_INSERT_TAIL(list, ninfo, link);

	if (type == -1)
		return (size);

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1,
		ELF_NOTE_ROUNDSIZE) +		/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}

static size_t
append_note_data(const void *src, void *dst, size_t len)
{
	size_t padded_len;

	padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE);
	if (dst != NULL) {
		bcopy(src, dst, len);
		bzero((char *)dst + len, padded_len - len);
	}
	return (padded_len);
}

size_t
__elfN(populate_note)(int type, void *src, void *dst, size_t size,
    void **descp)
{
	Elf_Note *note;
	char *buf;
	size_t notesize;

	buf = dst;
	if (buf != NULL) {
		note = (Elf_Note *)buf;
		note->n_namesz = sizeof(FREEBSD_ABI_VENDOR);
		note->n_descsz = size;
		note->n_type = type;
		buf += sizeof(*note);
		buf += append_note_data(FREEBSD_ABI_VENDOR, buf,
		    sizeof(FREEBSD_ABI_VENDOR));
		append_note_data(src, buf, size);
		if (descp != NULL)
			*descp = buf;
	}

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
						/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}

static void
__elfN(putnote)(struct thread *td, struct note_info *ninfo, struct sbuf *sb)
{
	Elf_Note note;
	const struct sysentvec *sv;
	ssize_t old_len, sect_len;
	size_t new_len, descsz, i;

	if (ninfo->type == -1) {
		ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
		return;
	}

	sv = td->td_proc->p_sysent;

	note.n_namesz = strlen(sv->sv_elf_core_abi_vendor) + 1;
	note.n_descsz = ninfo->outsize;
	note.n_type = ninfo->type;

	sbuf_bcat(sb, &note, sizeof(note));
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, sv->sv_elf_core_abi_vendor,
	    strlen(sv->sv_elf_core_abi_vendor) + 1);
	sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (note.n_descsz == 0)
		return;
	sbuf_start_section(sb, &old_len);
	if (ninfo->regset != NULL) {
		struct regset *regset = ninfo->regset;
		void *buf;

		buf = malloc(ninfo->outsize, M_TEMP, M_ZERO | M_WAITOK);
		(void)regset->get(regset, ninfo->outarg, buf, &ninfo->outsize);
		sbuf_bcat(sb, buf, ninfo->outsize);
		free(buf, M_TEMP);
	} else
		ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
	sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (sect_len < 0)
		return;

	new_len =
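	/*
	 * sbuf sections bracket the emitter's output so its actual length
	 * can be compared against the size predicted at registration time;
	 * the pattern used above is, in outline:
	 *
	 *	ssize_t old_len, sect_len;
	 *
	 *	sbuf_start_section(sb, &old_len);
	 *	(emit the note payload into sb)
	 *	sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	 *
	 * sect_len is the payload length padded to ELF_NOTE_ROUNDSIZE, or
	 * -1 if the sbuf had already failed.
	 */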
(size_t)sect_len; descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE); if (new_len < descsz) { /* * It is expected that individual note emitters will correctly * predict their expected output size and fill up to that size * themselves, padding in a format-specific way if needed. * However, in case they don't, just do it here with zeros. */ for (i = 0; i < descsz - new_len; i++) sbuf_putc(sb, 0); } else if (new_len > descsz) { /* * We can't always truncate sb -- we may have drained some * of it already. */ KASSERT(new_len == descsz, ("%s: Note type %u changed as we " "read it (%zu > %zu). Since it is longer than " "expected, this coredump's notes are corrupt. THIS " "IS A BUG in the note_procstat routine for type %u.\n", __func__, (unsigned)note.n_type, new_len, descsz, (unsigned)note.n_type)); } } /* * Miscellaneous note out functions. */ #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 #include #include typedef struct prstatus32 elf_prstatus_t; typedef struct prpsinfo32 elf_prpsinfo_t; typedef struct fpreg32 elf_prfpregset_t; typedef struct fpreg32 elf_fpregset_t; typedef struct reg32 elf_gregset_t; typedef struct thrmisc32 elf_thrmisc_t; typedef struct ptrace_lwpinfo32 elf_lwpinfo_t; #define ELF_KERN_PROC_MASK KERN_PROC_MASK32 typedef struct kinfo_proc32 elf_kinfo_proc_t; typedef uint32_t elf_ps_strings_t; #else typedef prstatus_t elf_prstatus_t; typedef prpsinfo_t elf_prpsinfo_t; typedef prfpregset_t elf_prfpregset_t; typedef prfpregset_t elf_fpregset_t; typedef gregset_t elf_gregset_t; typedef thrmisc_t elf_thrmisc_t; typedef struct ptrace_lwpinfo elf_lwpinfo_t; #define ELF_KERN_PROC_MASK 0 typedef struct kinfo_proc elf_kinfo_proc_t; typedef vm_offset_t elf_ps_strings_t; #endif static void __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep) { struct sbuf sbarg; size_t len; char *cp, *end; struct proc *p; elf_prpsinfo_t *psinfo; int error; p = arg; if (sb != NULL) { KASSERT(*sizep == sizeof(*psinfo), ("invalid size")); psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK); psinfo->pr_version = PRPSINFO_VERSION; psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t); strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname)); PROC_LOCK(p); if (p->p_args != NULL) { len = sizeof(psinfo->pr_psargs) - 1; if (len > p->p_args->ar_length) len = p->p_args->ar_length; memcpy(psinfo->pr_psargs, p->p_args->ar_args, len); PROC_UNLOCK(p); error = 0; } else { _PHOLD(p); PROC_UNLOCK(p); sbuf_new(&sbarg, psinfo->pr_psargs, sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN); error = proc_getargv(curthread, p, &sbarg); PRELE(p); if (sbuf_finish(&sbarg) == 0) { len = sbuf_len(&sbarg); if (len > 0) len--; } else { len = sizeof(psinfo->pr_psargs) - 1; } sbuf_delete(&sbarg); } if (error != 0 || len == 0 || (ssize_t)len == -1) strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs)); else { KASSERT(len < sizeof(psinfo->pr_psargs), ("len is too long: %zu vs %zu", len, sizeof(psinfo->pr_psargs))); cp = psinfo->pr_psargs; end = cp + len - 1; for (;;) { cp = memchr(cp, '\0', end - cp); if (cp == NULL) break; *cp = ' '; } } psinfo->pr_pid = p->p_pid; sbuf_bcat(sb, psinfo, sizeof(*psinfo)); free(psinfo, M_TEMP); } *sizep = sizeof(*psinfo); } static bool __elfN(get_prstatus)(struct regset *rs, struct thread *td, void *buf, size_t *sizep) { elf_prstatus_t *status; if (buf != NULL) { KASSERT(*sizep == sizeof(*status), ("%s: invalid size", __func__)); status = buf; memset(status, 0, *sizep); status->pr_version = PRSTATUS_VERSION; status->pr_statussz = sizeof(elf_prstatus_t); status->pr_gregsetsz = 
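		/*
		 * Regset ->get methods, like the note out-functions above,
		 * follow a two-pass protocol: called with buf == NULL they
		 * only report *sizep; on the second pass they fill exactly
		 * that many bytes.  Shape of a minimal ->get method (struct
		 * foo and pcb_foo are hypothetical names, not a real regset):
		 *
		 *	static bool
		 *	foo_get(struct regset *rs, struct thread *td,
		 *	    void *buf, size_t *sizep)
		 *	{
		 *		if (buf != NULL) {
		 *			KASSERT(*sizep == sizeof(struct foo),
		 *			    ("invalid size"));
		 *			memcpy(buf, &td->td_pcb->pcb_foo,
		 *			    sizeof(struct foo));
		 *		}
		 *		*sizep = sizeof(struct foo);
		 *		return (true);
		 *	}
		 */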
sizeof(elf_gregset_t); status->pr_fpregsetsz = sizeof(elf_fpregset_t); status->pr_osreldate = osreldate; status->pr_cursig = td->td_proc->p_sig; status->pr_pid = td->td_tid; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_regs32(td, &status->pr_reg); #else fill_regs(td, &status->pr_reg); #endif } *sizep = sizeof(*status); return (true); } static bool __elfN(set_prstatus)(struct regset *rs, struct thread *td, void *buf, size_t size) { elf_prstatus_t *status; KASSERT(size == sizeof(*status), ("%s: invalid size", __func__)); status = buf; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 set_regs32(td, &status->pr_reg); #else set_regs(td, &status->pr_reg); #endif return (true); } static struct regset __elfN(regset_prstatus) = { .note = NT_PRSTATUS, .size = sizeof(elf_prstatus_t), .get = __elfN(get_prstatus), .set = __elfN(set_prstatus), }; ELF_REGSET(__elfN(regset_prstatus)); static bool __elfN(get_fpregset)(struct regset *rs, struct thread *td, void *buf, size_t *sizep) { elf_prfpregset_t *fpregset; if (buf != NULL) { KASSERT(*sizep == sizeof(*fpregset), ("%s: invalid size", __func__)); fpregset = buf; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 fill_fpregs32(td, fpregset); #else fill_fpregs(td, fpregset); #endif } *sizep = sizeof(*fpregset); return (true); } static bool __elfN(set_fpregset)(struct regset *rs, struct thread *td, void *buf, size_t size) { elf_prfpregset_t *fpregset; fpregset = buf; KASSERT(size == sizeof(*fpregset), ("%s: invalid size", __func__)); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 set_fpregs32(td, fpregset); #else set_fpregs(td, fpregset); #endif return (true); } static struct regset __elfN(regset_fpregset) = { .note = NT_FPREGSET, .size = sizeof(elf_prfpregset_t), .get = __elfN(get_fpregset), .set = __elfN(set_fpregset), }; ELF_REGSET(__elfN(regset_fpregset)); static bool __elfN(get_thrmisc)(struct regset *rs, struct thread *td, void *buf, size_t *sizep) { elf_thrmisc_t *thrmisc; if (buf != NULL) { KASSERT(*sizep == sizeof(*thrmisc), ("%s: invalid size", __func__)); thrmisc = buf; bzero(thrmisc, sizeof(*thrmisc)); strcpy(thrmisc->pr_tname, td->td_name); } *sizep = sizeof(*thrmisc); return (true); } static struct regset __elfN(regset_thrmisc) = { .note = NT_THRMISC, .size = sizeof(elf_thrmisc_t), .get = __elfN(get_thrmisc), }; ELF_REGSET(__elfN(regset_thrmisc)); static bool __elfN(get_lwpinfo)(struct regset *rs, struct thread *td, void *buf, size_t *sizep) { elf_lwpinfo_t pl; size_t size; int structsize; size = sizeof(structsize) + sizeof(pl); if (buf != NULL) { KASSERT(*sizep == size, ("%s: invalid size", __func__)); structsize = sizeof(pl); memcpy(buf, &structsize, sizeof(structsize)); bzero(&pl, sizeof(pl)); pl.pl_lwpid = td->td_tid; pl.pl_event = PL_EVENT_NONE; pl.pl_sigmask = td->td_sigmask; pl.pl_siglist = td->td_siglist; if (td->td_si.si_signo != 0) { pl.pl_event = PL_EVENT_SIGNAL; pl.pl_flags |= PL_FLAG_SI; #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo); #else pl.pl_siginfo = td->td_si; #endif } strcpy(pl.pl_tdname, td->td_name); /* XXX TODO: supply more information in struct ptrace_lwpinfo*/ memcpy((int *)buf + 1, &pl, sizeof(pl)); } *sizep = size; return (true); } static struct regset __elfN(regset_lwpinfo) = { .note = NT_PTLWPINFO, .size = sizeof(int) + sizeof(elf_lwpinfo_t), .get = __elfN(get_lwpinfo), }; ELF_REGSET(__elfN(regset_lwpinfo)); static size_t __elfN(prepare_register_notes)(struct thread *td, struct note_info_list *list, struct thread *target_td) { 
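	/*
	 * target_td == td is the thread that is writing the dump and is
	 * still running, so its PCB may not reflect the live register
	 * state; all other threads are stopped, with their registers
	 * saved at the last context switch.  Hence the cpu_update_pcb()
	 * call below, which snapshots the current thread's state and
	 * supersedes the ad hoc save_*_nodrop() calls the MD
	 * dump_thread() hooks used to make.
	 */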
struct sysentvec *sv = td->td_proc->p_sysent; struct regset **regsetp, **regset_end, *regset; size_t size; size = 0; + if (target_td == td) + cpu_update_pcb(target_td); + /* NT_PRSTATUS must be the first register set note. */ size += __elfN(register_regset_note)(td, list, &__elfN(regset_prstatus), target_td); regsetp = sv->sv_regset_begin; if (regsetp == NULL) { /* XXX: This shouldn't be true for any FreeBSD ABIs. */ size += __elfN(register_regset_note)(td, list, &__elfN(regset_fpregset), target_td); return (size); } regset_end = sv->sv_regset_end; MPASS(regset_end != NULL); for (; regsetp < regset_end; regsetp++) { regset = *regsetp; if (regset->note == NT_PRSTATUS) continue; size += __elfN(register_regset_note)(td, list, regset, target_td); } return (size); } /* * Allow for MD specific notes, as well as any MD * specific preparations for writing MI notes. */ static void __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep) { struct thread *td; void *buf; size_t size; td = (struct thread *)arg; size = *sizep; if (size != 0 && sb != NULL) buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK); else buf = NULL; size = 0; __elfN(dump_thread)(td, buf, &size); KASSERT(sb == NULL || *sizep == size, ("invalid size")); if (size != 0 && sb != NULL) sbuf_bcat(sb, buf, size); free(buf, M_TEMP); *sizep = size; } #ifdef KINFO_PROC_SIZE CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); #endif static void __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; size = sizeof(structsize) + p->p_numthreads * sizeof(elf_kinfo_proc_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(elf_kinfo_proc_t); sbuf_bcat(sb, &structsize, sizeof(structsize)); sx_slock(&proctree_lock); PROC_LOCK(p); kern_proc_out(p, sb, ELF_KERN_PROC_MASK); sx_sunlock(&proctree_lock); } *sizep = size; } #ifdef KINFO_FILE_SIZE CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); #endif static void note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size, sect_sz, i; ssize_t start_len, sect_len; int structsize, filedesc_flags; if (coredump_pack_fileinfo) filedesc_flags = KERN_FILEDESC_PACK_KINFO; else filedesc_flags = 0; p = arg; structsize = sizeof(struct kinfo_file); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_count_drain, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, -1, filedesc_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { sbuf_start_section(sb, &start_len); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize), filedesc_flags); sect_len = sbuf_end_section(sb, start_len, 0, 0); if (sect_len < 0) return; sect_sz = sect_len; KASSERT(sect_sz <= *sizep, ("kern_proc_filedesc_out did not respect maxlen; " "requested %zu, got %zu", *sizep - sizeof(structsize), sect_sz - sizeof(structsize))); for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++) sbuf_putc(sb, 0); } } #ifdef KINFO_VMENTRY_SIZE CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE); #endif static void note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize, vmmap_flags; if (coredump_pack_vmmapinfo) vmmap_flags = KERN_VMMAP_PACK_KINFO; else vmmap_flags = 0; p = arg; structsize = sizeof(struct kinfo_vmentry); if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, 128, 
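		/*
		 * Sizing pass: the sbuf is given a counting drain, so
		 * nothing is stored; bytes are discarded as they drain and
		 * only their total is recorded.  sbuf_count_drain() is, in
		 * effect:
		 *
		 *	static int
		 *	sbuf_count_drain(void *arg, const char *data, int len)
		 *	{
		 *		size_t *sizep = arg;
		 *
		 *		*sizep += len;
		 *		return (len);
		 *	}
		 */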
SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_count_drain, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_vmmap_out(p, sb, -1, vmmap_flags); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize), vmmap_flags); } } static void note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(gid_t); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups * sizeof(gid_t)); } *sizep = size; } static void note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_pd->pd_cmask); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask)); } *sizep = size; } static void note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; struct rlimit rlim[RLIM_NLIMITS]; size_t size; int structsize, i; p = arg; size = sizeof(structsize) + sizeof(rlim); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(rlim); sbuf_bcat(sb, &structsize, sizeof(structsize)); PROC_LOCK(p); for (i = 0; i < RLIM_NLIMITS; i++) lim_rlimit_proc(p, i, &rlim[i]); PROC_UNLOCK(p); sbuf_bcat(sb, rlim, sizeof(rlim)); } *sizep = size; } static void note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; size = sizeof(structsize) + sizeof(p->p_osrel); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(p->p_osrel); sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel)); } *sizep = size; } static void __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; elf_ps_strings_t ps_strings; size_t size; int structsize; p = arg; size = sizeof(structsize) + sizeof(ps_strings); if (sb != NULL) { KASSERT(*sizep == size, ("invalid size")); structsize = sizeof(ps_strings); #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32 ps_strings = PTROUT(PROC_PS_STRINGS(p)); #else ps_strings = PROC_PS_STRINGS(p); #endif sbuf_bcat(sb, &structsize, sizeof(structsize)); sbuf_bcat(sb, &ps_strings, sizeof(ps_strings)); } *sizep = size; } static void __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep) { struct proc *p; size_t size; int structsize; p = arg; if (sb == NULL) { size = 0; sb = sbuf_new(NULL, NULL, AT_COUNT * sizeof(Elf_Auxinfo), SBUF_FIXEDLEN); sbuf_set_drain(sb, sbuf_count_drain, &size); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); sbuf_finish(sb); sbuf_delete(sb); *sizep = size; } else { structsize = sizeof(Elf_Auxinfo); sbuf_bcat(sb, &structsize, sizeof(structsize)); PHOLD(p); proc_getauxv(curthread, p, sb); PRELE(p); } } #define MAX_NOTES_LOOP 4096 bool __elfN(parse_notes)(const struct image_params *imgp, const Elf_Note *checknote, const char *note_vendor, const Elf_Phdr *pnote, bool (*cb)(const Elf_Note *, void *, bool *), void *cb_arg) { const Elf_Note *note, *note0, *note_end; const char *note_name; char *buf; int 
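	/*
	 * The records scanned here follow the standard ELF note layout:
	 * an Elf_Note header, then the name and desc blobs, each padded
	 * to ELF_NOTE_ROUNDSIZE.  E.g. the NT_FREEBSD_FEATURE_CTL note
	 * matched via fctl_note below occupies, schematically:
	 *
	 *	Elf_Note hdr;      n_namesz = sizeof(FREEBSD_ABI_VENDOR)
	 *	char name[8];      "FreeBSD" + NUL, already 4-byte aligned
	 *	uint32_t desc;     NT_FREEBSD_FCTL_* flag word
	 */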
	    i, error;
	bool res;

	/* We need some limit, might as well use PAGE_SIZE. */
	if (pnote == NULL || pnote->p_filesz > PAGE_SIZE)
		return (false);
	ASSERT_VOP_LOCKED(imgp->vp, "parse_notes");
	if (pnote->p_offset > PAGE_SIZE ||
	    pnote->p_filesz > PAGE_SIZE - pnote->p_offset) {
		buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT);
		if (buf == NULL) {
			VOP_UNLOCK(imgp->vp);
			buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
		error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz,
		    pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED,
		    curthread->td_ucred, NOCRED, NULL, curthread);
		if (error != 0) {
			uprintf("i/o error PT_NOTE\n");
			goto retf;
		}
		note = note0 = (const Elf_Note *)buf;
		note_end = (const Elf_Note *)(buf + pnote->p_filesz);
	} else {
		note = note0 = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset);
		note_end = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset + pnote->p_filesz);
		buf = NULL;
	}
	for (i = 0; i < MAX_NOTES_LOOP && note >= note0 && note < note_end;
	    i++) {
		if (!aligned(note, Elf32_Addr)) {
			uprintf("Unaligned ELF note\n");
			goto retf;
		}
		if ((const char *)note_end - (const char *)note <
		    sizeof(Elf_Note)) {
			uprintf("ELF note too short\n");
			goto retf;
		}
		if (note->n_namesz != checknote->n_namesz ||
		    note->n_descsz != checknote->n_descsz ||
		    note->n_type != checknote->n_type)
			goto nextnote;
		note_name = (const char *)(note + 1);
		if (note_name + checknote->n_namesz >=
		    (const char *)note_end || strncmp(note_vendor,
		    note_name, checknote->n_namesz) != 0)
			goto nextnote;

		if (cb(note, cb_arg, &res))
			goto ret;
nextnote:
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
		    roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
	}
	if (i >= MAX_NOTES_LOOP)
		uprintf("ELF note parser reached %d notes\n", i);
retf:
	res = false;
ret:
	free(buf, M_TEMP);
	return (res);
}

struct brandnote_cb_arg {
	Elf_Brandnote *brandnote;
	int32_t *osrel;
};

static bool
brandnote_cb(const Elf_Note *note, void *arg0, bool *res)
{
	struct brandnote_cb_arg *arg;

	arg = arg0;

	/*
	 * Fetch the osreldate for the binary from the ELF OSABI-note if
	 * necessary.
	 */
	*res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 &&
	    arg->brandnote->trans_osrel != NULL ?
	    arg->brandnote->trans_osrel(note, arg->osrel) : true;

	return (true);
}

static Elf_Note fctl_note = {
	.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
	.n_descsz = sizeof(uint32_t),
	.n_type = NT_FREEBSD_FEATURE_CTL,
};

struct fctl_cb_arg {
	bool *has_fctl0;
	uint32_t *fctl0;
};

static bool
note_fctl_cb(const Elf_Note *note, void *arg0, bool *res)
{
	struct fctl_cb_arg *arg;
	const Elf32_Word *desc;
	uintptr_t p;

	arg = arg0;
	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	desc = (const Elf32_Word *)p;
	*arg->has_fctl0 = true;
	*arg->fctl0 = desc[0];
	*res = true;
	return (true);
}

/*
 * Try to find the appropriate ABI-note section for checknote, fetch
 * the osreldate and feature control flags for the binary from the ELF
 * OSABI-note.  Only the first page of the image is searched, the same
 * as for headers.
*/ static bool __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote, int32_t *osrel, bool *has_fctl0, uint32_t *fctl0) { const Elf_Phdr *phdr; const Elf_Ehdr *hdr; struct brandnote_cb_arg b_arg; struct fctl_cb_arg f_arg; int i, j; hdr = (const Elf_Ehdr *)imgp->image_header; phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); b_arg.brandnote = brandnote; b_arg.osrel = osrel; f_arg.has_fctl0 = has_fctl0; f_arg.fctl0 = fctl0; for (i = 0; i < hdr->e_phnum; i++) { if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp, &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb, &b_arg)) { for (j = 0; j < hdr->e_phnum; j++) { if (phdr[j].p_type == PT_NOTE && __elfN(parse_notes)(imgp, &fctl_note, FREEBSD_ABI_VENDOR, &phdr[j], note_fctl_cb, &f_arg)) break; } return (true); } } return (false); } /* * Tell kern_execve.c about it, with a little help from the linker. */ static struct execsw __elfN(execsw) = { .ex_imgact = __CONCAT(exec_, __elfN(imgact)), .ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) }; EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw)); static vm_prot_t __elfN(trans_prot)(Elf_Word flags) { vm_prot_t prot; prot = 0; if (flags & PF_X) prot |= VM_PROT_EXECUTE; if (flags & PF_W) prot |= VM_PROT_WRITE; if (flags & PF_R) prot |= VM_PROT_READ; #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__)) if (i386_read_exec && (flags & PF_R)) prot |= VM_PROT_EXECUTE; #endif return (prot); } static Elf_Word __elfN(untrans_prot)(vm_prot_t prot) { Elf_Word flags; flags = 0; if (prot & VM_PROT_EXECUTE) flags |= PF_X; if (prot & VM_PROT_READ) flags |= PF_R; if (prot & VM_PROT_WRITE) flags |= PF_W; return (flags); } diff --git a/sys/powerpc/include/reg.h b/sys/powerpc/include/reg.h index 1fe67ac3967d..781ee3b02289 100644 --- a/sys/powerpc/include/reg.h +++ b/sys/powerpc/include/reg.h @@ -1,92 +1,87 @@ /* $NetBSD: reg.h,v 1.4 2000/06/04 09:30:44 tsubai Exp $ */ #ifndef _POWERPC_REG_H_ #define _POWERPC_REG_H_ #include /* Must match struct trapframe */ struct reg { __register_t fixreg[32]; __register_t lr; __register_t cr; __register_t xer; __register_t ctr; __register_t pc; }; struct fpreg { double fpreg[32]; double fpscr; }; /* Must match pcb.pcb_vec */ struct vmxreg { __uint32_t vr[32][4]; __uint32_t pad[2]; __uint32_t vrsave; __uint32_t vscr; }; struct dbreg { unsigned int junk; }; #ifdef __LP64__ /* Must match struct trapframe */ struct reg32 { __int32_t fixreg[32]; __int32_t lr; __int32_t cr; __int32_t xer; __int32_t ctr; __int32_t pc; }; struct fpreg32 { struct fpreg data; }; struct vmxreg32 { struct vmxreg data; }; struct dbreg32 { struct dbreg data; }; #define __HAVE_REG32 #endif #ifdef _KERNEL /* * XXX these interfaces are MI, so they should be declared in a MI place. */ int fill_regs(struct thread *, struct reg *); int set_regs(struct thread *, struct reg *); int fill_fpregs(struct thread *, struct fpreg *); int set_fpregs(struct thread *, struct fpreg *); int fill_dbregs(struct thread *, struct dbreg *); int set_dbregs(struct thread *, struct dbreg *); -/* - * MD interfaces. 
- */ -void cpu_save_thread_regs(struct thread *); - #ifdef COMPAT_FREEBSD32 struct image_params; int fill_regs32(struct thread *, struct reg32 *); int set_regs32(struct thread *, struct reg32 *); void ppc32_setregs(struct thread *, struct image_params *, uintptr_t); #define fill_fpregs32(td, reg) fill_fpregs(td,(struct fpreg *)reg) #define set_fpregs32(td, reg) set_fpregs(td,(struct fpreg *)reg) #define fill_dbregs32(td, reg) fill_dbregs(td,(struct dbreg *)reg) #define set_dbregs32(td, reg) set_dbregs(td,(struct dbreg *)reg) #endif #endif #endif /* _POWERPC_REG_H_ */ diff --git a/sys/powerpc/powerpc/elf32_machdep.c b/sys/powerpc/powerpc/elf32_machdep.c index fcdc1aa1e8a5..af01043878db 100644 --- a/sys/powerpc/powerpc/elf32_machdep.c +++ b/sys/powerpc/powerpc/elf32_machdep.c @@ -1,469 +1,467 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 1996-1998 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #define __ELF_WORD_SIZE 32 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __powerpc64__ #include #include extern const char *freebsd32_syscallnames[]; static void ppc32_fixlimit(struct rlimit *rl, int which); static SYSCTL_NODE(_compat, OID_AUTO, ppc32, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "32-bit mode"); #define PPC32_MAXDSIZ (1024*1024*1024) static u_long ppc32_maxdsiz = PPC32_MAXDSIZ; SYSCTL_ULONG(_compat_ppc32, OID_AUTO, maxdsiz, CTLFLAG_RWTUN, &ppc32_maxdsiz, 0, ""); #define PPC32_MAXSSIZ (64*1024*1024) u_long ppc32_maxssiz = PPC32_MAXSSIZ; SYSCTL_ULONG(_compat_ppc32, OID_AUTO, maxssiz, CTLFLAG_RWTUN, &ppc32_maxssiz, 0, ""); #else static void ppc32_runtime_resolve(void); #endif struct sysentvec elf32_freebsd_sysvec = { .sv_size = SYS_MAXSYSCALL, #ifdef __powerpc64__ .sv_table = freebsd32_sysent, #else .sv_table = sysent, #endif .sv_fixup = __elfN(freebsd_fixup), .sv_copyout_auxargs = __elfN(powerpc_copyout_auxargs), .sv_sendsig = sendsig, .sv_sigcode = sigcode32, .sv_szsigcode = &szsigcode32, .sv_name = "FreeBSD ELF32", .sv_coredump = __elfN(coredump), .sv_elf_core_osabi = ELFOSABI_FREEBSD, .sv_elf_core_abi_vendor = FREEBSD_ABI_VENDOR, .sv_elf_core_prepare_notes = __elfN(prepare_notes), .sv_minsigstksz = MINSIGSTKSZ, .sv_minuser = VM_MIN_ADDRESS, .sv_stackprot = VM_PROT_ALL, #ifdef __powerpc64__ .sv_maxuser = VM_MAXUSER_ADDRESS32, .sv_usrstack = FREEBSD32_USRSTACK, .sv_psstrings = FREEBSD32_PS_STRINGS, .sv_psstringssz = sizeof(struct freebsd32_ps_strings), .sv_copyout_strings = freebsd32_copyout_strings, .sv_setregs = ppc32_setregs, .sv_syscallnames = freebsd32_syscallnames, .sv_fixlimit = ppc32_fixlimit, #else .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_psstrings = PS_STRINGS, .sv_psstringssz = sizeof(struct ps_strings), .sv_copyout_strings = exec_copyout_strings, .sv_setregs = exec_setregs, .sv_syscallnames = syscallnames, .sv_fixlimit = NULL, #endif .sv_maxssiz = NULL, .sv_flags = SV_ABI_FREEBSD | SV_ILP32 | SV_SHP | SV_ASLR | SV_TIMEKEEP | SV_RNG_SEED_VER | SV_SIGSYS, .sv_set_syscall_retval = cpu_set_syscall_retval, .sv_fetch_syscall_args = cpu_fetch_syscall_args, .sv_shared_page_base = FREEBSD32_SHAREDPAGE, .sv_shared_page_len = PAGE_SIZE, .sv_schedtail = NULL, .sv_thread_detach = NULL, .sv_trap = NULL, .sv_hwcap = &cpu_features, .sv_hwcap2 = &cpu_features2, .sv_onexec_old = exec_onexec_old, .sv_onexit = exit_onexit, .sv_regset_begin = SET_BEGIN(__elfN(regset)), .sv_regset_end = SET_LIMIT(__elfN(regset)), }; INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec); static Elf32_Brandinfo freebsd_brand_info = { .brand = ELFOSABI_FREEBSD, .machine = EM_PPC, .compat_3_brand = "FreeBSD", .interp_path = "/libexec/ld-elf.so.1", .sysvec = &elf32_freebsd_sysvec, #ifdef __powerpc64__ .interp_newpath = "/libexec/ld-elf32.so.1", #else .interp_newpath = NULL, #endif .brand_note = &elf32_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE }; SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t) elf32_insert_brand_entry, &freebsd_brand_info); static Elf32_Brandinfo freebsd_brand_oinfo = { .brand = ELFOSABI_FREEBSD, .machine = EM_PPC, .compat_3_brand = "FreeBSD", .interp_path = "/usr/libexec/ld-elf.so.1", .sysvec = &elf32_freebsd_sysvec, .interp_newpath = NULL, .brand_note = &elf32_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE }; 
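/*
 * Each brand pairs an ELF ABI signature with the sysentvec that should
 * service matching binaries; registration happens at SI_SUB_EXEC.  An
 * out-of-tree ABI would follow the same pattern (hypothetical sketch;
 * foo_brand_info and ld-foo.so.1 are invented names):
 *
 *	static Elf32_Brandinfo foo_brand_info = {
 *		.brand = ELFOSABI_FREEBSD,
 *		.machine = EM_PPC,
 *		.interp_path = "/libexec/ld-foo.so.1",
 *		.sysvec = &elf32_freebsd_sysvec,
 *		.flags = BI_CAN_EXEC_DYN,
 *	};
 *	SYSINIT(fooelf32, SI_SUB_EXEC, SI_ORDER_ANY,
 *	    (sysinit_cfunc_t) elf32_insert_brand_entry, &foo_brand_info);
 */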
SYSINIT(oelf32, SI_SUB_EXEC, SI_ORDER_ANY,
    (sysinit_cfunc_t) elf32_insert_brand_entry, &freebsd_brand_oinfo);

void elf_reloc_self(Elf_Dyn *dynp, Elf_Addr relocbase);

void
elf32_dump_thread(struct thread *td, void *dst, size_t *off)
{
	size_t len;
	struct pcb *pcb;
	uint64_t vshr[32];
	uint64_t *vsr_dw1;
	int vsr_idx;

	len = 0;
	pcb = td->td_pcb;

	if (pcb->pcb_flags & PCB_VEC) {
-		save_vec_nodrop(td);
		if (dst != NULL) {
			len += elf32_populate_note(NT_PPC_VMX,
			    &pcb->pcb_vec, (char *)dst + len,
			    sizeof(pcb->pcb_vec), NULL);
		} else
			len += elf32_populate_note(NT_PPC_VMX, NULL, NULL,
			    sizeof(pcb->pcb_vec), NULL);
	}

	if (pcb->pcb_flags & PCB_VSX) {
-		save_fpu_nodrop(td);
		if (dst != NULL) {
			/*
			 * Doubleword 0 of VSR0-VSR31 overlaps with FPR0-FPR31
			 * and VSR32-VSR63 overlap with VR0-VR31, so we only
			 * copy the non-overlapping data, which is doubleword 1
			 * of VSR0-VSR31.
			 */
			for (vsr_idx = 0; vsr_idx < nitems(vshr); vsr_idx++) {
				vsr_dw1 = (uint64_t *)
				    &pcb->pcb_fpu.fpr[vsr_idx].vsr[2];
				vshr[vsr_idx] = *vsr_dw1;
			}
			len += elf32_populate_note(NT_PPC_VSX, vshr,
			    (char *)dst + len, sizeof(vshr), NULL);
		} else
			len += elf32_populate_note(NT_PPC_VSX, NULL, NULL,
			    sizeof(vshr), NULL);
	}

	*off = len;
}

#ifndef __powerpc64__
bool
elf_is_ifunc_reloc(Elf_Size r_info)
{

	return (ELF_R_TYPE(r_info) == R_PPC_IRELATIVE);
}

/* Process one elf relocation with addend. */
static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, int local, elf_lookup_fn lookup)
{
	Elf_Addr *where;
	Elf_Half *hwhere;
	Elf_Addr addr;
	Elf_Addr addend, val;
	Elf_Word rtype, symidx;
	const Elf_Rela *rela;
	int error;

	switch (type) {
	case ELF_RELOC_REL:
		panic("PPC only supports RELA relocations");
		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)data;
		where = (Elf_Addr *) ((uintptr_t)relocbase + rela->r_offset);
		hwhere = (Elf_Half *) ((uintptr_t)relocbase + rela->r_offset);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		panic("elf_reloc: unknown relocation mode %d\n", type);
	}

	switch (rtype) {
	case R_PPC_NONE:
		break;

	case R_PPC_ADDR32: /* word32 S + A */
		error = lookup(lf, symidx, 1, &addr);
		if (error != 0)
			return (-1);
		*where = elf_relocaddr(lf, addr + addend);
		break;

	case R_PPC_ADDR16_LO: /* #lo(S) */
		error = lookup(lf, symidx, 1, &addr);
		if (error != 0)
			return (-1);
		/*
		 * addend values are sometimes relative to sections
		 * (e.g. .rodata) in rela, where in reality they
		 * are relative to relocbase.  Detect this condition.
		 */
		if (addr > relocbase && addr <= (relocbase + addend))
			addr = relocbase;
		addr = elf_relocaddr(lf, addr + addend);
		*hwhere = addr & 0xffff;
		break;

	case R_PPC_ADDR16_HA: /* #ha(S) */
		error = lookup(lf, symidx, 1, &addr);
		if (error != 0)
			return (-1);
		/*
		 * addend values are sometimes relative to sections
		 * (e.g. .rodata) in rela, where in reality they
		 * are relative to relocbase.  Detect this condition.
		 */
		if (addr > relocbase && addr <= (relocbase + addend))
			addr = relocbase;
		addr = elf_relocaddr(lf, addr + addend);
		*hwhere = ((addr >> 16) + ((addr & 0x8000) ? 1 : 0)) & 0xffff;
		break;

	case R_PPC_RELATIVE: /* word32 B + A */
		*where = elf_relocaddr(lf, relocbase + addend);
		break;

	case R_PPC_JMP_SLOT: /* PLT jump slot entry */
		/*
		 * We currently only support Secure-PLT jump slots.
		 * Given that we reject BSS-PLT modules during load, we
		 * don't need to check again.
		 * The method we are using here is equivalent to
		 * LD_BIND_NOW.
*/ error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); *where = elf_relocaddr(lf, addr + addend); break; case R_PPC_IRELATIVE: addr = relocbase + addend; val = ((Elf32_Addr (*)(void))addr)(); if (*where != val) *where = val; break; default: printf("kldload: unexpected relocation type %d, " "symbol index %d\n", (int)rtype, symidx); return (-1); } return (0); } void elf_reloc_self(Elf_Dyn *dynp, Elf_Addr relocbase) { Elf_Rela *rela = NULL, *relalim; Elf_Addr relasz = 0; Elf_Addr *where; /* * Extract the rela/relasz values from the dynamic section */ for (; dynp->d_tag != DT_NULL; dynp++) { switch (dynp->d_tag) { case DT_RELA: rela = (Elf_Rela *)(relocbase+dynp->d_un.d_ptr); break; case DT_RELASZ: relasz = dynp->d_un.d_val; break; } } /* * Relocate these values */ relalim = (Elf_Rela *)((caddr_t)rela + relasz); for (; rela < relalim; rela++) { if (ELF_R_TYPE(rela->r_info) != R_PPC_RELATIVE) continue; where = (Elf_Addr *)(relocbase + rela->r_offset); *where = (Elf_Addr)(relocbase + rela->r_addend); } } int elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup)); } int elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup)); } int elf_cpu_load_file(linker_file_t lf) { /* Only sync the cache for non-kernel modules */ if (lf->id != 1) __syncicache(lf->address, lf->size); return (0); } int elf_cpu_unload_file(linker_file_t lf __unused) { return (0); } static void ppc32_runtime_resolve(void) { /* * Since we don't support lazy binding, panic immediately if anyone * manages to call the runtime resolver. */ panic("kldload: Runtime resolver was called unexpectedly!"); } int elf_cpu_parse_dynamic(caddr_t loadbase, Elf_Dyn *dynamic) { Elf_Dyn *dp; bool has_plt = false; bool secure_plt = false; Elf_Addr *got; for (dp = dynamic; dp->d_tag != DT_NULL; dp++) { switch (dp->d_tag) { case DT_PPC_GOT: secure_plt = true; got = (Elf_Addr *)(loadbase + dp->d_un.d_ptr); /* Install runtime resolver canary. */ got[1] = (Elf_Addr)ppc32_runtime_resolve; got[2] = (Elf_Addr)0; break; case DT_PLTGOT: has_plt = true; break; } } if (has_plt && !secure_plt) { printf("kldload: BSS-PLT modules are not supported.\n"); return (-1); } return (0); } #endif #ifdef __powerpc64__ static void ppc32_fixlimit(struct rlimit *rl, int which) { switch (which) { case RLIMIT_DATA: if (ppc32_maxdsiz != 0) { if (rl->rlim_cur > ppc32_maxdsiz) rl->rlim_cur = ppc32_maxdsiz; if (rl->rlim_max > ppc32_maxdsiz) rl->rlim_max = ppc32_maxdsiz; } break; case RLIMIT_STACK: if (ppc32_maxssiz != 0) { if (rl->rlim_cur > ppc32_maxssiz) rl->rlim_cur = ppc32_maxssiz; if (rl->rlim_max > ppc32_maxssiz) rl->rlim_max = ppc32_maxssiz; } break; } } #endif diff --git a/sys/powerpc/powerpc/elf64_machdep.c b/sys/powerpc/powerpc/elf64_machdep.c index b780a2ed82fc..1035e35d286e 100644 --- a/sys/powerpc/powerpc/elf64_machdep.c +++ b/sys/powerpc/powerpc/elf64_machdep.c @@ -1,460 +1,458 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright 1996-1998 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void exec_setregs_funcdesc(struct thread *td, struct image_params *imgp, uintptr_t stack); struct sysentvec elf64_freebsd_sysvec_v1 = { .sv_size = SYS_MAXSYSCALL, .sv_table = sysent, .sv_fixup = __elfN(freebsd_fixup), .sv_sendsig = sendsig, .sv_sigcode = sigcode64, .sv_szsigcode = &szsigcode64, .sv_name = "FreeBSD ELF64", .sv_coredump = __elfN(coredump), .sv_elf_core_osabi = ELFOSABI_FREEBSD, .sv_elf_core_abi_vendor = FREEBSD_ABI_VENDOR, .sv_elf_core_prepare_notes = __elfN(prepare_notes), .sv_minsigstksz = MINSIGSTKSZ, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_psstrings = PS_STRINGS, .sv_psstringssz = sizeof(struct ps_strings), .sv_stackprot = VM_PROT_ALL, .sv_copyout_auxargs = __elfN(powerpc_copyout_auxargs), .sv_copyout_strings = exec_copyout_strings, .sv_setregs = exec_setregs_funcdesc, .sv_fixlimit = NULL, .sv_maxssiz = NULL, .sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP | SV_ASLR | SV_TIMEKEEP | SV_RNG_SEED_VER | SV_SIGSYS, .sv_set_syscall_retval = cpu_set_syscall_retval, .sv_fetch_syscall_args = cpu_fetch_syscall_args, .sv_syscallnames = syscallnames, .sv_shared_page_base = SHAREDPAGE, .sv_shared_page_len = PAGE_SIZE, .sv_schedtail = NULL, .sv_thread_detach = NULL, .sv_trap = NULL, .sv_hwcap = &cpu_features, .sv_hwcap2 = &cpu_features2, .sv_onexec_old = exec_onexec_old, .sv_onexit = exit_onexit, .sv_regset_begin = SET_BEGIN(__elfN(regset)), .sv_regset_end = SET_LIMIT(__elfN(regset)), }; struct sysentvec elf64_freebsd_sysvec_v2 = { .sv_size = SYS_MAXSYSCALL, .sv_table = sysent, .sv_fixup = __elfN(freebsd_fixup), .sv_sendsig = sendsig, .sv_sigcode = sigcode64, /* Fixed up in ppc64_init_sysvecs(). 
*/ .sv_szsigcode = &szsigcode64, .sv_name = "FreeBSD ELF64 V2", .sv_coredump = __elfN(coredump), .sv_elf_core_osabi = ELFOSABI_FREEBSD, .sv_elf_core_abi_vendor = FREEBSD_ABI_VENDOR, .sv_elf_core_prepare_notes = __elfN(prepare_notes), .sv_minsigstksz = MINSIGSTKSZ, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_psstrings = PS_STRINGS, .sv_psstringssz = sizeof(struct ps_strings), .sv_stackprot = VM_PROT_ALL, .sv_copyout_auxargs = __elfN(powerpc_copyout_auxargs), .sv_copyout_strings = exec_copyout_strings, .sv_setregs = exec_setregs, .sv_fixlimit = NULL, .sv_maxssiz = NULL, .sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP | SV_TIMEKEEP | SV_RNG_SEED_VER | SV_SIGSYS, .sv_set_syscall_retval = cpu_set_syscall_retval, .sv_fetch_syscall_args = cpu_fetch_syscall_args, .sv_syscallnames = syscallnames, .sv_shared_page_base = SHAREDPAGE, .sv_shared_page_len = PAGE_SIZE, .sv_schedtail = NULL, .sv_thread_detach = NULL, .sv_trap = NULL, .sv_hwcap = &cpu_features, .sv_hwcap2 = &cpu_features2, .sv_onexec_old = exec_onexec_old, .sv_onexit = exit_onexit, .sv_regset_begin = SET_BEGIN(__elfN(regset)), .sv_regset_end = SET_LIMIT(__elfN(regset)), }; static bool ppc64_elfv1_header_match(const struct image_params *params, const int32_t *, const uint32_t *); static bool ppc64_elfv2_header_match(const struct image_params *params, const int32_t *, const uint32_t *); static Elf64_Brandinfo freebsd_brand_info_elfv1 = { .brand = ELFOSABI_FREEBSD, .machine = EM_PPC64, .compat_3_brand = "FreeBSD", .interp_path = "/libexec/ld-elf.so.1", .sysvec = &elf64_freebsd_sysvec_v1, .interp_newpath = NULL, .brand_note = &elf64_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE, .header_supported = &ppc64_elfv1_header_match }; SYSINIT(elf64v1, SI_SUB_EXEC, SI_ORDER_ANY, (sysinit_cfunc_t) elf64_insert_brand_entry, &freebsd_brand_info_elfv1); static Elf64_Brandinfo freebsd_brand_info_elfv2 = { .brand = ELFOSABI_FREEBSD, .machine = EM_PPC64, .compat_3_brand = "FreeBSD", .interp_path = "/libexec/ld-elf.so.1", .sysvec = &elf64_freebsd_sysvec_v2, .interp_newpath = NULL, .brand_note = &elf64_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE, .header_supported = &ppc64_elfv2_header_match }; SYSINIT(elf64v2, SI_SUB_EXEC, SI_ORDER_ANY, (sysinit_cfunc_t) elf64_insert_brand_entry, &freebsd_brand_info_elfv2); static Elf64_Brandinfo freebsd_brand_oinfo = { .brand = ELFOSABI_FREEBSD, .machine = EM_PPC64, .compat_3_brand = "FreeBSD", .interp_path = "/usr/libexec/ld-elf.so.1", .sysvec = &elf64_freebsd_sysvec_v1, .interp_newpath = NULL, .brand_note = &elf64_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE, .header_supported = &ppc64_elfv1_header_match }; SYSINIT(oelf64, SI_SUB_EXEC, SI_ORDER_ANY, (sysinit_cfunc_t) elf64_insert_brand_entry, &freebsd_brand_oinfo); void elf_reloc_self(Elf_Dyn *dynp, Elf_Addr relocbase); static void ppc64_init_sysvecs(void *arg) { exec_sysvec_init(&elf64_freebsd_sysvec_v2); exec_sysvec_init_secondary(&elf64_freebsd_sysvec_v2, &elf64_freebsd_sysvec_v1); /* * Adjust elfv2 sigcode after elfv1 sysvec is initialized. * exec_sysvec_init_secondary() assumes secondary sysvecs use * identical signal code, and skips allocating a second copy. * Since the ELFv2 trampoline is a strict subset of the ELFv1 code, * we can work around this by adjusting the offset. This also * avoids two copies of the trampoline code being allocated! 
*/ elf64_freebsd_sysvec_v2.sv_sigcode_offset += (uintptr_t)sigcode64_elfv2 - (uintptr_t)&sigcode64; elf64_freebsd_sysvec_v2.sv_szsigcode = &szsigcode64_elfv2; } SYSINIT(elf64_sysvec, SI_SUB_EXEC, SI_ORDER_ANY, ppc64_init_sysvecs, NULL); static bool ppc64_elfv1_header_match(const struct image_params *params, const int32_t *osrel __unused, const uint32_t *fctl0 __unused) { const Elf64_Ehdr *hdr = (const Elf64_Ehdr *)params->image_header; int abi = (hdr->e_flags & 3); return (abi == 0 || abi == 1); } static bool ppc64_elfv2_header_match(const struct image_params *params, const int32_t *osrel __unused, const uint32_t *fctl0 __unused) { const Elf64_Ehdr *hdr = (const Elf64_Ehdr *)params->image_header; int abi = (hdr->e_flags & 3); return (abi == 2); } static void exec_setregs_funcdesc(struct thread *td, struct image_params *imgp, uintptr_t stack) { struct trapframe *tf; register_t entry_desc[3]; tf = trapframe(td); exec_setregs(td, imgp, stack); /* * For 64-bit ELFv1, we need to disentangle the function * descriptor * * 0. entry point * 1. TOC value (r2) * 2. Environment pointer (r11) */ (void)copyin((void *)imgp->entry_addr, entry_desc, sizeof(entry_desc)); tf->srr0 = entry_desc[0] + imgp->reloc_base; tf->fixreg[2] = entry_desc[1] + imgp->reloc_base; tf->fixreg[11] = entry_desc[2] + imgp->reloc_base; } void elf64_dump_thread(struct thread *td, void *dst, size_t *off) { size_t len; struct pcb *pcb; uint64_t vshr[32]; uint64_t *vsr_dw1; int vsr_idx; len = 0; pcb = td->td_pcb; if (pcb->pcb_flags & PCB_VEC) { - save_vec_nodrop(td); if (dst != NULL) { len += elf64_populate_note(NT_PPC_VMX, &pcb->pcb_vec, (char *)dst + len, sizeof(pcb->pcb_vec), NULL); } else len += elf64_populate_note(NT_PPC_VMX, NULL, NULL, sizeof(pcb->pcb_vec), NULL); } if (pcb->pcb_flags & PCB_VSX) { - save_fpu_nodrop(td); if (dst != NULL) { /* * Doubleword 0 of VSR0-VSR31 overlap with FPR0-FPR31 and * VSR32-VSR63 overlap with VR0-VR31, so we only copy * the non-overlapping data, which is doubleword 1 of VSR0-VSR31. */ for (vsr_idx = 0; vsr_idx < nitems(vshr); vsr_idx++) { vsr_dw1 = (uint64_t *)&pcb->pcb_fpu.fpr[vsr_idx].vsr[2]; vshr[vsr_idx] = *vsr_dw1; } len += elf64_populate_note(NT_PPC_VSX, vshr, (char *)dst + len, sizeof(vshr), NULL); } else len += elf64_populate_note(NT_PPC_VSX, NULL, NULL, sizeof(vshr), NULL); } *off = len; } bool elf_is_ifunc_reloc(Elf_Size r_info) { return (ELF_R_TYPE(r_info) == R_PPC_IRELATIVE); } /* Process one elf relocation with addend. 
*/ static int elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, int local, elf_lookup_fn lookup) { Elf_Addr *where; Elf_Addr addr; Elf_Addr addend, val; Elf_Word rtype, symidx; const Elf_Rela *rela; int error; switch (type) { case ELF_RELOC_REL: panic("PPC only supports RELA relocations"); break; case ELF_RELOC_RELA: rela = (const Elf_Rela *)data; where = (Elf_Addr *) (relocbase + rela->r_offset); addend = rela->r_addend; rtype = ELF_R_TYPE(rela->r_info); symidx = ELF_R_SYM(rela->r_info); break; default: panic("elf_reloc: unknown relocation mode %d\n", type); } switch (rtype) { case R_PPC_NONE: break; case R_PPC64_ADDR64: /* doubleword64 S + A */ error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); addr += addend; *where = addr; break; case R_PPC_RELATIVE: /* doubleword64 B + A */ *where = elf_relocaddr(lf, relocbase + addend); break; case R_PPC_JMP_SLOT: /* function descriptor copy */ lookup(lf, symidx, 1, &addr); #if !defined(_CALL_ELF) || _CALL_ELF == 1 memcpy(where, (Elf_Addr *)addr, 3*sizeof(Elf_Addr)); #else *where = addr; #endif __asm __volatile("dcbst 0,%0; sync" :: "r"(where) : "memory"); break; case R_PPC_IRELATIVE: addr = relocbase + addend; val = ((Elf64_Addr (*)(void))addr)(); if (*where != val) *where = val; break; default: printf("kldload: unexpected relocation type %d, " "symbol index %d\n", (int)rtype, symidx); return (-1); } return (0); } void elf_reloc_self(Elf_Dyn *dynp, Elf_Addr relocbase) { Elf_Rela *rela = NULL, *relalim; Elf_Addr relasz = 0; Elf_Addr *where; /* * Extract the rela/relasz values from the dynamic section */ for (; dynp->d_tag != DT_NULL; dynp++) { switch (dynp->d_tag) { case DT_RELA: rela = (Elf_Rela *)(relocbase+dynp->d_un.d_ptr); break; case DT_RELASZ: relasz = dynp->d_un.d_val; break; } } /* * Relocate these values */ relalim = (Elf_Rela *)((caddr_t)rela + relasz); for (; rela < relalim; rela++) { if (ELF_R_TYPE(rela->r_info) != R_PPC_RELATIVE) continue; where = (Elf_Addr *)(relocbase + rela->r_offset); *where = (Elf_Addr)(relocbase + rela->r_addend); } } int elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup)); } int elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, elf_lookup_fn lookup) { return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup)); } int elf_cpu_load_file(linker_file_t lf) { /* Only sync the cache for non-kernel modules */ if (lf->id != 1) __syncicache(lf->address, lf->size); return (0); } int elf_cpu_unload_file(linker_file_t lf __unused) { return (0); } int elf_cpu_parse_dynamic(caddr_t loadbase __unused, Elf_Dyn *dynamic __unused) { return (0); } diff --git a/sys/powerpc/powerpc/exec_machdep.c b/sys/powerpc/powerpc/exec_machdep.c index 9ab323a0e300..3586c01d6652 100644 --- a/sys/powerpc/powerpc/exec_machdep.c +++ b/sys/powerpc/powerpc/exec_machdep.c @@ -1,1302 +1,1302 @@ /*- * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause * * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ */ #include #include "opt_fpu_emu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FPU_EMU #include #endif #ifdef COMPAT_FREEBSD32 #include #include #include typedef struct __ucontext32 { sigset_t uc_sigmask; mcontext32_t uc_mcontext; uint32_t uc_link; struct sigaltstack32 uc_stack; uint32_t uc_flags; uint32_t __spare__[4]; } ucontext32_t; struct sigframe32 { ucontext32_t sf_uc; struct __siginfo32 sf_si; }; static int grab_mcontext32(struct thread *td, mcontext32_t *, int flags); #endif static int grab_mcontext(struct thread *, mcontext_t *, int); static void cleanup_power_extras(struct thread *); #ifdef __powerpc64__ extern struct sysentvec elf64_freebsd_sysvec_v2; #endif #ifdef __powerpc64__ _Static_assert(sizeof(mcontext_t) == 1392, "mcontext_t size incorrect"); _Static_assert(sizeof(ucontext_t) == 1472, "ucontext_t size incorrect"); _Static_assert(sizeof(siginfo_t) == 80, "siginfo_t size incorrect"); #ifdef COMPAT_FREEBSD32 _Static_assert(sizeof(mcontext32_t) == 1224, "mcontext32_t size incorrect"); _Static_assert(sizeof(ucontext32_t) == 1280, "ucontext32_t size incorrect"); _Static_assert(sizeof(struct __siginfo32) == 64, "struct __siginfo32 size incorrect"); #endif /* COMPAT_FREEBSD32 */ #else /* powerpc */ _Static_assert(sizeof(mcontext_t) == 1224, "mcontext_t size incorrect"); _Static_assert(sizeof(ucontext_t) == 1280, "ucontext_t size incorrect"); _Static_assert(sizeof(siginfo_t) == 64, "siginfo_t size incorrect"); #endif void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct trapframe *tf; struct sigacts *psp; struct sigframe sf; struct thread *td; struct proc *p; #ifdef COMPAT_FREEBSD32 struct __siginfo32 siginfo32; struct sigframe32 sf32; #endif size_t sfpsize; caddr_t sfp, usfp; register_t sp; int oonstack, rndfsize; int sig; int code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; /* * Fill siginfo structure. */ ksi->ksi_info.si_signo = ksi->ksi_signo; ksi->ksi_info.si_addr = (void *)((tf->exc == EXC_DSI || tf->exc == EXC_DSE) ? tf->dar : tf->srr0); #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32)) { siginfo_to_siginfo32(&ksi->ksi_info, &siginfo32); sig = siginfo32.si_signo; code = siginfo32.si_code; sfp = (caddr_t)&sf32; sfpsize = sizeof(sf32); rndfsize = roundup(sizeof(sf32), 16); sp = (uint32_t)tf->fixreg[1]; oonstack = sigonstack(sp); /* * Save user context */ memset(&sf32, 0, sizeof(sf32)); grab_mcontext32(td, &sf32.sf_uc.uc_mcontext, 0); sf32.sf_uc.uc_sigmask = *mask; sf32.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp; sf32.sf_uc.uc_stack.ss_size = (uint32_t)td->td_sigstk.ss_size; sf32.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf32.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; } else { #endif sig = ksi->ksi_signo; code = ksi->ksi_code; sfp = (caddr_t)&sf; sfpsize = sizeof(sf); #ifdef __powerpc64__ /* * 64-bit PPC defines a 288 byte scratch region * below the stack. 
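 * (This is the ABI-mandated red zone that leaf functions may use
 * without allocating a stack frame, so the signal frame is carved
 * out below it instead of directly under the current SP.)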
*/ rndfsize = 288 + roundup(sizeof(sf), 48); #else rndfsize = roundup(sizeof(sf), 16); #endif sp = tf->fixreg[1]; oonstack = sigonstack(sp); /* * Save user context */ memset(&sf, 0, sizeof(sf)); grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; #ifdef COMPAT_FREEBSD32 } #endif CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* * Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { usfp = (void *)(((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size - rndfsize) & ~0xFul); } else { usfp = (void *)((sp - rndfsize) & ~0xFul); } /* * Set Floating Point facility to "Ignore Exceptions Mode" so signal * handler can run. */ if (td->td_pcb->pcb_flags & PCB_FPU) tf->srr1 = tf->srr1 & ~(PSL_FE0 | PSL_FE1); /* * Set up the registers to return to sigcode. * * r1/sp - sigframe ptr * lr - sig function, dispatched to by blrl in trampoline * r3 - sig number * r4 - SIGINFO ? &siginfo : exception code * r5 - user context * srr0 - trampoline function addr */ tf->lr = (register_t)catcher; tf->fixreg[1] = (register_t)usfp; tf->fixreg[FIRSTARG] = sig; #ifdef COMPAT_FREEBSD32 tf->fixreg[FIRSTARG+2] = (register_t)usfp + ((SV_PROC_FLAG(p, SV_ILP32)) ? offsetof(struct sigframe32, sf_uc) : offsetof(struct sigframe, sf_uc)); #else tf->fixreg[FIRSTARG+2] = (register_t)usfp + offsetof(struct sigframe, sf_uc); #endif if (SIGISMEMBER(psp->ps_siginfo, sig)) { /* * Signal handler installed with SA_SIGINFO. */ #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(p, SV_ILP32)) { sf32.sf_si = siginfo32; tf->fixreg[FIRSTARG+1] = (register_t)usfp + offsetof(struct sigframe32, sf_si); } else { #endif tf->fixreg[FIRSTARG+1] = (register_t)usfp + offsetof(struct sigframe, sf_si); sf.sf_si = ksi->ksi_info; #ifdef COMPAT_FREEBSD32 } #endif } else { /* Old FreeBSD-style arguments. */ tf->fixreg[FIRSTARG+1] = code; tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ? tf->dar : tf->srr0; } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); tf->srr0 = (register_t)PROC_SIGCODE(p); /* * Copy the frame out to userland. */ if (copyout(sfp, usfp, sfpsize) != 0) { /* * Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp); PROC_LOCK(p); sigexit(td, SIGILL); } CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->srr0, tf->fixreg[1]); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { ucontext_t uc; int error; CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp); if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) { CTR1(KTR_SIG, "sigreturn: efault td=%p", td); return (EFAULT); } error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); /* * Save FPU state if needed. User may have changed it on * signal handler */ if (uc.uc_mcontext.mc_srr1 & PSL_FP) save_fpu(td); kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x", td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]); return (EJUSTRETURN); } #ifdef COMPAT_FREEBSD4 int freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) { return sys_sigreturn(td, (struct sigreturn_args *)uap); } #endif /* * Construct a PCB from a trapframe.
This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. */ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_lr = tf->srr0; pcb->pcb_sp = tf->fixreg[1]; } /* * get_mcontext/sendsig helper routine that doesn't touch the * proc lock */ static int grab_mcontext(struct thread *td, mcontext_t *mcp, int flags) { struct pcb *pcb; int i; pcb = td->td_pcb; memset(mcp, 0, sizeof(mcontext_t)); mcp->mc_vers = _MC_VERSION; mcp->mc_flags = 0; memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe)); if (flags & GET_MC_CLEAR_RET) { mcp->mc_gpr[3] = 0; mcp->mc_gpr[4] = 0; } /* * This assumes that floating-point context is *not* lazy, * so if the thread has used FP there would have been a * FP-unavailable exception that would have set things up * correctly. */ if (pcb->pcb_flags & PCB_FPREGS) { if (pcb->pcb_flags & PCB_FPU) { KASSERT(td == curthread, ("get_mcontext: fp save not curthread")); critical_enter(); save_fpu(td); critical_exit(); } mcp->mc_flags |= _MC_FP_VALID; memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double)); for (i = 0; i < 32; i++) memcpy(&mcp->mc_fpreg[i], &pcb->pcb_fpu.fpr[i].fpr, sizeof(double)); } if (pcb->pcb_flags & PCB_VSX) { for (i = 0; i < 32; i++) memcpy(&mcp->mc_vsxfpreg[i], &pcb->pcb_fpu.fpr[i].vsr[2], sizeof(double)); } /* * Repeat for Altivec context */ if (pcb->pcb_flags & PCB_VECREGS) { if (pcb->pcb_flags & PCB_VEC) { KASSERT(td == curthread, ("get_mcontext: altivec save not curthread")); critical_enter(); save_vec(td); critical_exit(); } mcp->mc_flags |= _MC_AV_VALID; mcp->mc_vscr = pcb->pcb_vec.vscr; mcp->mc_vrsave = pcb->pcb_vec.vrsave; memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec)); } mcp->mc_len = sizeof(*mcp); return (0); } int get_mcontext(struct thread *td, mcontext_t *mcp, int flags) { int error; error = grab_mcontext(td, mcp, flags); if (error == 0) { PROC_LOCK(curthread->td_proc); mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]); PROC_UNLOCK(curthread->td_proc); } return (error); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct pcb *pcb; struct trapframe *tf; register_t tls; int i; pcb = td->td_pcb; tf = td->td_frame; if (mcp->mc_vers != _MC_VERSION || mcp->mc_len != sizeof(*mcp)) return (EINVAL); /* * Don't let the user change privileged MSR bits. * * psl_userstatic is used here to mask off any bits that can * legitimately vary between user contexts (Floating point * exception control and any facilities that we are using the * "enable on first use" pattern with.) * * All other bits are required to match psl_userset(32). * * Remember to update the platform cpu_init code when implementing * support for a new conditional facility! */ if ((mcp->mc_srr1 & psl_userstatic) != (tf->srr1 & psl_userstatic)) { return (EINVAL); } /* Copy trapframe, preserving TLS pointer across context change */ if (SV_PROC_FLAG(td->td_proc, SV_LP64)) tls = tf->fixreg[13]; else tls = tf->fixreg[2]; memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame)); if (SV_PROC_FLAG(td->td_proc, SV_LP64)) tf->fixreg[13] = tls; else tf->fixreg[2] = tls; /* * Force the FPU back off to ensure the new context will not bypass * the enable_fpu() setup code accidentally. * * This prevents an issue where a process that uses floating point * inside a signal handler could end up in a state where the MSR * did not match pcb_flags. 
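 * For example, if a context captured while FP was live (PSL_FP set in
 * the saved srr1) is restored after the facility state has changed,
 * blindly keeping PSL_FP would let the thread use the FPU without
 * PCB_FPU being set, bypassing the save/restore bookkeeping. Clearing
 * the bits instead forces a fresh facility-unavailable trap on next use.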
* * Additionally, ensure VSX is disabled as well, as it is illegal * to leave it turned on when FP or VEC is off. */ tf->srr1 &= ~(PSL_FP | PSL_VSX | PSL_VEC); pcb->pcb_flags &= ~(PCB_FPU | PCB_VSX | PCB_VEC); if (mcp->mc_flags & _MC_FP_VALID) { /* enable_fpu() will happen lazily on a fault */ pcb->pcb_flags |= PCB_FPREGS; memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double)); bzero(pcb->pcb_fpu.fpr, sizeof(pcb->pcb_fpu.fpr)); for (i = 0; i < 32; i++) { memcpy(&pcb->pcb_fpu.fpr[i].fpr, &mcp->mc_fpreg[i], sizeof(double)); memcpy(&pcb->pcb_fpu.fpr[i].vsr[2], &mcp->mc_vsxfpreg[i], sizeof(double)); } } if (mcp->mc_flags & _MC_AV_VALID) { /* enable_vec() will happen lazily on a fault */ pcb->pcb_flags |= PCB_VECREGS; pcb->pcb_vec.vscr = mcp->mc_vscr; pcb->pcb_vec.vrsave = mcp->mc_vrsave; memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec)); } return (0); } /* * Clean up extra POWER state. Some per-process registers and states are not * managed by the MSR, so must be cleaned up explicitly on thread exit. * * Currently this includes: * DSCR -- Data stream control register (PowerISA 2.06+) * FSCR -- Facility Status and Control Register (PowerISA 2.07+) */ static void cleanup_power_extras(struct thread *td) { uint32_t pcb_flags; if (td != curthread) return; pcb_flags = td->td_pcb->pcb_flags; /* Clean up registers not managed by MSR. */ if (pcb_flags & PCB_CFSCR) mtspr(SPR_FSCR, 0); if (pcb_flags & PCB_CDSCR) mtspr(SPR_DSCRP, 0); if (pcb_flags & PCB_FPU) cleanup_fpscr(); } /* * Ensure the PCB has been updated in preparation for copying a thread. * * This is needed because normally this only happens during switching tasks, * but when we are cloning a thread, we need the updated state before doing * the actual copy, so the new thread inherits the current state instead of * the state at the last task switch. * * Keep this in sync with the assembly code in cpu_switch()! */ void -cpu_save_thread_regs(struct thread *td) +cpu_update_pcb(struct thread *td) { uint32_t pcb_flags; struct pcb *pcb; KASSERT(td == curthread, - ("cpu_save_thread_regs: td is not curthread")); + ("cpu_update_pcb: td is not curthread")); pcb = td->td_pcb; pcb_flags = pcb->pcb_flags; #if defined(__powerpc64__) /* Are *any* FSCR flags in use? */ if (pcb_flags & PCB_CFSCR) { pcb->pcb_fscr = mfspr(SPR_FSCR); if (pcb->pcb_fscr & FSCR_EBB) { pcb->pcb_ebb.ebbhr = mfspr(SPR_EBBHR); pcb->pcb_ebb.ebbrr = mfspr(SPR_EBBRR); pcb->pcb_ebb.bescr = mfspr(SPR_BESCR); } if (pcb->pcb_fscr & FSCR_LM) { pcb->pcb_lm.lmrr = mfspr(SPR_LMRR); pcb->pcb_lm.lmser = mfspr(SPR_LMSER); } if (pcb->pcb_fscr & FSCR_TAR) pcb->pcb_tar = mfspr(SPR_TAR); } /* * This is outside of the PCB_CFSCR check because it can be set * independently when running on POWER7/POWER8. */ if (pcb_flags & PCB_CDSCR) pcb->pcb_dscr = mfspr(SPR_DSCRP); #endif #if defined(__SPE__) /* * On E500v2, single-precision scalar instructions and access to * SPEFSCR may be used without PSL_VEC turned on, as long as they * limit themselves to the low word of the registers. * * As such, we need to unconditionally save SPEFSCR, even though * it is also updated in save_vec_nodrop(). */ pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR); #endif if (pcb_flags & PCB_FPU) save_fpu_nodrop(td); if (pcb_flags & PCB_VEC) save_vec_nodrop(td); } /* * Set up registers on exec.
*/ void exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack) { struct trapframe *tf; register_t argc; tf = trapframe(td); bzero(tf, sizeof *tf); #ifdef __powerpc64__ tf->fixreg[1] = -roundup(-stack + 48, 16); #else tf->fixreg[1] = -roundup(-stack + 8, 16); #endif /* * Set up arguments for _start(): * _start(argc, argv, envp, obj, cleanup, ps_strings); * * Notes: * - obj and cleanup are the auxiliary and termination * vectors. They are fixed up by ld.elf_so. * - ps_strings is a NetBSD extension, and will be * ignored by executables which are strictly * compliant with the SVR4 ABI. */ /* Collect argc from the user stack */ argc = fuword((void *)stack); tf->fixreg[3] = argc; tf->fixreg[4] = stack + sizeof(register_t); tf->fixreg[5] = stack + (2 + argc)*sizeof(register_t); tf->fixreg[6] = 0; /* auxiliary vector */ tf->fixreg[7] = 0; /* termination vector */ tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */ tf->srr0 = imgp->entry_addr; #ifdef __powerpc64__ tf->fixreg[12] = imgp->entry_addr; #endif tf->srr1 = psl_userset | PSL_FE_DFLT; cleanup_power_extras(td); td->td_pcb->pcb_flags = 0; } #ifdef COMPAT_FREEBSD32 void ppc32_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack) { struct trapframe *tf; uint32_t argc; tf = trapframe(td); bzero(tf, sizeof *tf); tf->fixreg[1] = -roundup(-stack + 8, 16); argc = fuword32((void *)stack); tf->fixreg[3] = argc; tf->fixreg[4] = stack + sizeof(uint32_t); tf->fixreg[5] = stack + (2 + argc)*sizeof(uint32_t); tf->fixreg[6] = 0; /* auxiliary vector */ tf->fixreg[7] = 0; /* termination vector */ tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */ tf->srr0 = imgp->entry_addr; tf->srr1 = psl_userset32 | PSL_FE_DFLT; cleanup_power_extras(td); td->td_pcb->pcb_flags = 0; } #endif int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tf; tf = td->td_frame; memcpy(regs, tf, sizeof(struct reg)); return (0); } int fill_dbregs(struct thread *td, struct dbreg *dbregs) { /* No debug registers on PowerPC */ return (ENOSYS); } int fill_fpregs(struct thread *td, struct fpreg *fpregs) { struct pcb *pcb; int i; pcb = td->td_pcb; if ((pcb->pcb_flags & PCB_FPREGS) == 0) memset(fpregs, 0, sizeof(struct fpreg)); else { memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double)); for (i = 0; i < 32; i++) memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr, sizeof(double)); } return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tf; tf = td->td_frame; memcpy(tf, regs, sizeof(struct reg)); return (0); } int set_dbregs(struct thread *td, struct dbreg *dbregs) { /* No debug registers on PowerPC */ return (ENOSYS); } int set_fpregs(struct thread *td, struct fpreg *fpregs) { struct pcb *pcb; int i; pcb = td->td_pcb; pcb->pcb_flags |= PCB_FPREGS; memcpy(&pcb->pcb_fpu.fpscr, &fpregs->fpscr, sizeof(double)); for (i = 0; i < 32; i++) { memcpy(&pcb->pcb_fpu.fpr[i].fpr, &fpregs->fpreg[i], sizeof(double)); } return (0); } #ifdef COMPAT_FREEBSD32 int set_regs32(struct thread *td, struct reg32 *regs) { struct trapframe *tf; int i; tf = td->td_frame; for (i = 0; i < 32; i++) tf->fixreg[i] = regs->fixreg[i]; tf->lr = regs->lr; tf->cr = regs->cr; tf->xer = regs->xer; tf->ctr = regs->ctr; tf->srr0 = regs->pc; return (0); } int fill_regs32(struct thread *td, struct reg32 *regs) { struct trapframe *tf; int i; tf = td->td_frame; for (i = 0; i < 32; i++) regs->fixreg[i] = tf->fixreg[i]; regs->lr = tf->lr; regs->cr = tf->cr; regs->xer = tf->xer; regs->ctr = tf->ctr; regs->pc = tf->srr0;
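	/*
	 * Note: the copies above narrow each 64-bit register value to its
	 * low 32 bits; this is the ILP32 view a 32-bit debugger or
	 * core-dump consumer expects.
	 */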
return (0); } static int grab_mcontext32(struct thread *td, mcontext32_t *mcp, int flags) { mcontext_t mcp64; int i, error; error = grab_mcontext(td, &mcp64, flags); if (error != 0) return (error); mcp->mc_vers = mcp64.mc_vers; mcp->mc_flags = mcp64.mc_flags; mcp->mc_onstack = mcp64.mc_onstack; mcp->mc_len = mcp64.mc_len; memcpy(mcp->mc_avec,mcp64.mc_avec,sizeof(mcp64.mc_avec)); memcpy(mcp->mc_av,mcp64.mc_av,sizeof(mcp64.mc_av)); for (i = 0; i < 42; i++) mcp->mc_frame[i] = mcp64.mc_frame[i]; memcpy(mcp->mc_fpreg,mcp64.mc_fpreg,sizeof(mcp64.mc_fpreg)); memcpy(mcp->mc_vsxfpreg,mcp64.mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg)); return (0); } static int get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags) { int error; error = grab_mcontext32(td, mcp, flags); if (error == 0) { PROC_LOCK(curthread->td_proc); mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]); PROC_UNLOCK(curthread->td_proc); } return (error); } static int set_mcontext32(struct thread *td, mcontext32_t *mcp) { mcontext_t mcp64; int i, error; mcp64.mc_vers = mcp->mc_vers; mcp64.mc_flags = mcp->mc_flags; mcp64.mc_onstack = mcp->mc_onstack; mcp64.mc_len = mcp->mc_len; memcpy(mcp64.mc_avec,mcp->mc_avec,sizeof(mcp64.mc_avec)); memcpy(mcp64.mc_av,mcp->mc_av,sizeof(mcp64.mc_av)); for (i = 0; i < 42; i++) mcp64.mc_frame[i] = mcp->mc_frame[i]; mcp64.mc_srr1 |= (td->td_frame->srr1 & 0xFFFFFFFF00000000ULL); memcpy(mcp64.mc_fpreg,mcp->mc_fpreg,sizeof(mcp64.mc_fpreg)); memcpy(mcp64.mc_vsxfpreg,mcp->mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg)); error = set_mcontext(td, &mcp64); return (error); } #endif #ifdef COMPAT_FREEBSD32 int freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap) { ucontext32_t uc; int error; CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp); if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) { CTR1(KTR_SIG, "sigreturn: efault td=%p", td); return (EFAULT); } error = set_mcontext32(td, &uc.uc_mcontext); if (error != 0) return (error); /* * Save FPU state if needed. User may have changed it on * signal handler */ if (uc.uc_mcontext.mc_srr1 & PSL_FP) save_fpu(td); kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x", td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]); return (EJUSTRETURN); } /* * The first two fields of a ucontext_t are the signal mask and the machine * context. The next field is uc_link; we want to avoid destroying the link * when copying out contexts. */ #define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link) int freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { bzero(&uc, sizeof(uc)); get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE); } return (ret); } int freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap) { ucontext32_t uc; int ret; if (uap->ucp == NULL) ret = EINVAL; else { ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); if (ret == 0) { kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } } return (ret == 0 ? 
EJUSTRETURN : ret); } int freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap) { ucontext32_t uc; int ret; if (uap->oucp == NULL || uap->ucp == NULL) ret = EINVAL; else { bzero(&uc, sizeof(uc)); get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET); PROC_LOCK(td->td_proc); uc.uc_sigmask = td->td_sigmask; PROC_UNLOCK(td->td_proc); ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE); if (ret == 0) { ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE); if (ret == 0) { ret = set_mcontext32(td, &uc.uc_mcontext); if (ret == 0) { kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); } } } } return (ret == 0 ? EJUSTRETURN : ret); } #endif void cpu_set_syscall_retval(struct thread *td, int error) { struct proc *p; struct trapframe *tf; int fixup; if (error == EJUSTRETURN) return; p = td->td_proc; tf = td->td_frame; if (tf->fixreg[0] == SYS___syscall && (SV_PROC_FLAG(p, SV_ILP32))) { int code = tf->fixreg[FIRSTARG + 1]; fixup = ( #if defined(COMPAT_FREEBSD6) && defined(SYS_freebsd6_lseek) code != SYS_freebsd6_lseek && #endif code != SYS_lseek) ? 1 : 0; } else fixup = 0; switch (error) { case 0: if (fixup) { /* * 64-bit return, 32-bit syscall. Fixup byte order */ tf->fixreg[FIRSTARG] = 0; tf->fixreg[FIRSTARG + 1] = td->td_retval[0]; } else { tf->fixreg[FIRSTARG] = td->td_retval[0]; tf->fixreg[FIRSTARG + 1] = td->td_retval[1]; } tf->cr &= ~0x10000000; /* Unset summary overflow */ break; case ERESTART: /* * Set user's pc back to redo the system call. */ tf->srr0 -= 4; break; default: tf->fixreg[FIRSTARG] = error; tf->cr |= 0x10000000; /* Set summary overflow */ break; } } /* * Threading functions */ void cpu_thread_exit(struct thread *td) { cleanup_power_extras(td); } void cpu_thread_clean(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { struct pcb *pcb; pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fUL); td->td_pcb = pcb; td->td_frame = (struct trapframe *)pcb - 1; } void cpu_thread_free(struct thread *td) { } int cpu_set_user_tls(struct thread *td, void *tls_base) { if (SV_PROC_FLAG(td->td_proc, SV_LP64)) td->td_frame->fixreg[13] = (register_t)tls_base + 0x7010; else td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008; return (0); } void cpu_copy_thread(struct thread *td, struct thread *td0) { struct pcb *pcb2; struct trapframe *tf; struct callframe *cf; /* Ensure td0 pcb is up to date. */ if (td0 == curthread) - cpu_save_thread_regs(td0); + cpu_update_pcb(td0); pcb2 = td->td_pcb; /* Copy the upcall pcb */ bcopy(td0->td_pcb, pcb2, sizeof(*pcb2)); /* Create a stack for the new thread */ tf = td->td_frame; bcopy(td0->td_frame, tf, sizeof(struct trapframe)); tf->fixreg[FIRSTARG] = 0; tf->fixreg[FIRSTARG + 1] = 0; tf->cr &= ~0x10000000; /* Set registers for trampoline to user mode. */ cf = (struct callframe *)tf - 1; memset(cf, 0, sizeof(struct callframe)); cf->cf_func = (register_t)fork_return; cf->cf_arg0 = (register_t)td; cf->cf_arg1 = (register_t)tf; pcb2->pcb_sp = (register_t)cf; #if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1) pcb2->pcb_lr = ((register_t *)fork_trampoline)[0]; pcb2->pcb_toc = ((register_t *)fork_trampoline)[1]; #else pcb2->pcb_lr = (register_t)fork_trampoline; pcb2->pcb_context[0] = pcb2->pcb_lr; #endif pcb2->pcb_cpu.aim.usr_vsid = 0; #ifdef __SPE__ pcb2->pcb_vec.vscr = SPEFSCR_DFLT; #endif /* Setup to release spin count in fork_exit(). 
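 * The child "inherits" one spinlock section from cpu_switch(), so the
 * count is primed to 1 and md_saved_msr supplies the MSR to restore
 * when fork_exit() exits that section and re-enables interrupts.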
*/ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_msr = psl_kernset; } int cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf; uintptr_t sp; #ifdef __powerpc64__ int error; #endif tf = td->td_frame; /* align stack and alloc space for frame ptr and saved LR */ #ifdef __powerpc64__ sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 48) & ~0x1f; #else sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 8) & ~0x1f; #endif bzero(tf, sizeof(struct trapframe)); tf->fixreg[1] = (register_t)sp; tf->fixreg[3] = (register_t)arg; if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { tf->srr0 = (register_t)entry; #ifdef __powerpc64__ tf->srr1 = psl_userset32 | PSL_FE_DFLT; #else tf->srr1 = psl_userset | PSL_FE_DFLT; #endif } else { #ifdef __powerpc64__ if (td->td_proc->p_sysent == &elf64_freebsd_sysvec_v2) { tf->srr0 = (register_t)entry; /* ELFv2 ABI requires that the global entry point be in r12. */ tf->fixreg[12] = (register_t)entry; } else { register_t entry_desc[3]; error = copyin((void *)entry, entry_desc, sizeof(entry_desc)); if (error != 0) return (error); tf->srr0 = entry_desc[0]; tf->fixreg[2] = entry_desc[1]; tf->fixreg[11] = entry_desc[2]; } tf->srr1 = psl_userset | PSL_FE_DFLT; #endif } td->td_pcb->pcb_flags = 0; #ifdef __SPE__ td->td_pcb->pcb_vec.vscr = SPEFSCR_DFLT; #endif td->td_retval[0] = (register_t)entry; td->td_retval[1] = 0; return (0); } static int emulate_mfspr(int spr, int reg, struct trapframe *frame){ struct thread *td; td = curthread; if (spr == SPR_DSCR || spr == SPR_DSCRP) { if (!(cpu_features2 & PPC_FEATURE2_DSCR)) return (SIGILL); // If DSCR was never set, get the default DSCR if ((td->td_pcb->pcb_flags & PCB_CDSCR) == 0) td->td_pcb->pcb_dscr = mfspr(SPR_DSCRP); frame->fixreg[reg] = td->td_pcb->pcb_dscr; frame->srr0 += 4; return (0); } else return (SIGILL); } static int emulate_mtspr(int spr, int reg, struct trapframe *frame){ struct thread *td; td = curthread; if (spr == SPR_DSCR || spr == SPR_DSCRP) { if (!(cpu_features2 & PPC_FEATURE2_DSCR)) return (SIGILL); td->td_pcb->pcb_flags |= PCB_CDSCR; td->td_pcb->pcb_dscr = frame->fixreg[reg]; mtspr(SPR_DSCRP, frame->fixreg[reg]); frame->srr0 += 4; return (0); } else return (SIGILL); } #define XFX 0xFC0007FF int ppc_instr_emulate(struct trapframe *frame, struct thread *td) { struct pcb *pcb; uint32_t instr; int reg, sig; int rs, spr; instr = fuword32((void *)frame->srr0); sig = SIGILL; if ((instr & 0xfc1fffff) == 0x7c1f42a6) { /* mfpvr */ reg = (instr & ~0xfc1fffff) >> 21; frame->fixreg[reg] = mfpvr(); frame->srr0 += 4; return (0); } else if ((instr & XFX) == 0x7c0002a6) { /* mfspr */ rs = (instr & 0x3e00000) >> 21; spr = (instr & 0x1ff800) >> 16; return emulate_mfspr(spr, rs, frame); } else if ((instr & XFX) == 0x7c0003a6) { /* mtspr */ rs = (instr & 0x3e00000) >> 21; spr = (instr & 0x1ff800) >> 16; return emulate_mtspr(spr, rs, frame); } else if ((instr & 0xfc000ffe) == 0x7c0004ac) { /* various sync */ powerpc_sync(); /* Do a heavy-weight sync */ frame->srr0 += 4; return (0); } pcb = td->td_pcb; #ifdef FPU_EMU if (!(pcb->pcb_flags & PCB_FPREGS)) { bzero(&pcb->pcb_fpu, sizeof(pcb->pcb_fpu)); pcb->pcb_flags |= PCB_FPREGS; } else if (pcb->pcb_flags & PCB_FPU) save_fpu(td); sig = fpu_emulate(frame, &pcb->pcb_fpu); if ((sig == 0 || sig == SIGFPE) && pcb->pcb_flags & PCB_FPU) enable_fpu(td); #endif if (sig == SIGILL) { if (pcb->pcb_lastill != frame->srr0) { /* Allow a second chance, in case of cache sync issues. 
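 * For example, instructions freshly written through the data cache (by
 * a JIT, say) may not be visible to instruction fetch yet; sync the
 * icache for the faulting word and retry once, with pcb_lastill
 * preventing an endless retry loop on a genuinely illegal instruction.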
*/ sig = 0; pmap_sync_icache(PCPU_GET(curpmap), frame->srr0, 4); pcb->pcb_lastill = frame->srr0; } } return (sig); } diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c index 12c64f9e38bf..d47beedb595e 100644 --- a/sys/powerpc/powerpc/vm_machdep.c +++ b/sys/powerpc/powerpc/vm_machdep.c @@ -1,237 +1,237 @@ /*- * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU) * * Copyright (c) 1982, 1986 The Regents of the University of California. * Copyright (c) 1989, 1990 William Jolitz * Copyright (c) 1994 John Dyson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department, and William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ */ /*- * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University. * All rights reserved. * * Author: Chris G. Demetriou * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child is * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct trapframe *tf; struct callframe *cf; struct pcb *pcb; KASSERT(td1 == curthread || td1 == &thread0, ("cpu_fork: p1 not curproc and not proc0")); CTR3(KTR_PROC, "cpu_fork: called td1=%p p2=%p flags=%x", td1, p2, flags); if ((flags & RFPROC) == 0) return; /* Ensure td1 is up to date before copy. */ if (td1 == curthread) - cpu_save_thread_regs(td1); + cpu_update_pcb(td1); pcb = (struct pcb *)((td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fUL); td2->td_pcb = pcb; /* Copy the pcb */ bcopy(td1->td_pcb, pcb, sizeof(struct pcb)); /* * Create a fresh stack for the new process. * Copy the trap frame for the return to user mode as if from a * syscall. This copies most of the user mode register values. */ tf = (struct trapframe *)pcb - 1; bcopy(td1->td_frame, tf, sizeof(*tf)); /* Set up trap frame. */ tf->fixreg[FIRSTARG] = 0; tf->fixreg[FIRSTARG + 1] = 0; tf->cr &= ~0x10000000; td2->td_frame = tf; cf = (struct callframe *)tf - 1; memset(cf, 0, sizeof(struct callframe)); #if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1) cf->cf_toc = ((register_t *)fork_return)[1]; #endif cf->cf_func = (register_t)fork_return; cf->cf_arg0 = (register_t)td2; cf->cf_arg1 = (register_t)tf; pcb->pcb_sp = (register_t)cf; KASSERT(pcb->pcb_sp % 16 == 0, ("stack misaligned")); #if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1) pcb->pcb_lr = ((register_t *)fork_trampoline)[0]; pcb->pcb_toc = ((register_t *)fork_trampoline)[1]; #else pcb->pcb_lr = (register_t)fork_trampoline; pcb->pcb_context[0] = pcb->pcb_lr; #endif #ifdef AIM pcb->pcb_cpu.aim.usr_vsid = 0; #endif /* Setup to release spin count in fork_exit(). */ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_msr = psl_kernset; /* * Now cpu_switch() can schedule the new process. */ } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode. */ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { struct callframe *cf; CTR4(KTR_PROC, "%s called with td=%p func=%p arg=%p", __func__, td, func, arg); cf = (struct callframe *)td->td_pcb->pcb_sp; #if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1) cf->cf_toc = ((register_t *)func)[1]; #endif cf->cf_func = (register_t)func; cf->cf_arg0 = (register_t)arg; } void cpu_exit(struct thread *td) { } /* * CPU threading functions related to the VM layer. These could be used * to map the SLB bits required for the kernel stack instead of forcing a * fixed-size KVA. */ bool cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused) { return (true); } int cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused, int com __unused, void *data __unused) { return (EINVAL); } void cpu_sync_core(void) { /* * Linux performs "rfi" there. Our rendezvous IPI handler on * the target cpu does "rfi" before and lwsync/sync after the * action, which is stronger than required.
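 * Hence the empty body: the IPI round trip itself already provides the
 * context synchronization that membarrier()-style callers rely on.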
*/ } diff --git a/sys/riscv/riscv/vm_machdep.c b/sys/riscv/riscv/vm_machdep.c index 726f95213c91..bd510080e02c 100644 --- a/sys/riscv/riscv/vm_machdep.c +++ b/sys/riscv/riscv/vm_machdep.c @@ -1,266 +1,271 @@ /*- * Copyright (c) 2015-2018 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __riscv_xlen == 64 #define TP_OFFSET 16 /* sizeof(struct tcb) */ #endif /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb, set up the stack so that the child is * ready to run and return to user mode. */ void cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) { struct pcb *pcb2; struct trapframe *tf; if ((flags & RFPROC) == 0) return; /* RISCVTODO: save the FPU state here */ pcb2 = (struct pcb *)(td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1; td2->td_pcb = pcb2; bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1); bcopy(td1->td_frame, tf, sizeof(*tf)); /* Clear syscall error flag */ tf->tf_t[0] = 0; /* Arguments for child */ tf->tf_a[0] = 0; tf->tf_a[1] = 0; tf->tf_sstatus |= (SSTATUS_SPIE); /* Enable interrupts. */ tf->tf_sstatus &= ~(SSTATUS_SPP); /* User mode. */ td2->td_frame = tf; /* Set the return value registers for fork() */ td2->td_pcb->pcb_s[0] = (uintptr_t)fork_return; td2->td_pcb->pcb_s[1] = (uintptr_t)td2; td2->td_pcb->pcb_ra = (uintptr_t)fork_trampoline; td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame; /* Setup to release spin count in fork_exit().
*/ td2->td_md.md_spinlock_count = 1; td2->td_md.md_saved_sstatus_ie = (SSTATUS_SIE); } void cpu_reset(void) { sbi_system_reset(SBI_SRST_TYPE_COLD_REBOOT, SBI_SRST_REASON_NONE); while(1); } void cpu_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame; frame = td->td_frame; if (__predict_true(error == 0)) { frame->tf_a[0] = td->td_retval[0]; frame->tf_a[1] = td->td_retval[1]; frame->tf_t[0] = 0; /* syscall succeeded */ return; } switch (error) { case ERESTART: frame->tf_sepc -= 4; /* prev instruction */ break; case EJUSTRETURN: break; default: frame->tf_a[0] = error; frame->tf_t[0] = 1; /* syscall error */ break; } } /* * Initialize machine state, mostly pcb and trap frame for a new * thread, about to return to userspace. Put enough state in the new * thread's PCB to get it to go back to the fork_return(), which * finalizes the thread state and handles peculiarities of the first * return to userspace for the new thread. */ void cpu_copy_thread(struct thread *td, struct thread *td0) { bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); td->td_pcb->pcb_s[0] = (uintptr_t)fork_return; td->td_pcb->pcb_s[1] = (uintptr_t)td; td->td_pcb->pcb_ra = (uintptr_t)fork_trampoline; td->td_pcb->pcb_sp = (uintptr_t)td->td_frame; /* Setup to release spin count in fork_exit(). */ td->td_md.md_spinlock_count = 1; td->td_md.md_saved_sstatus_ie = (SSTATUS_SIE); } /* * Set the machine state for performing an upcall that starts * the entry function with the given argument. */ int cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, stack_t *stack) { struct trapframe *tf; tf = td->td_frame; tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size); tf->tf_sepc = (register_t)entry; tf->tf_a[0] = (register_t)arg; return (0); } int cpu_set_user_tls(struct thread *td, void *tls_base) { if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS) return (EINVAL); /* * The user TLS is set by modifying the trapframe's tp value, which * will be restored when returning to userspace. */ td->td_frame->tf_tp = (register_t)tls_base + TP_OFFSET; return (0); } void cpu_thread_exit(struct thread *td) { } void cpu_thread_alloc(struct thread *td) { td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages * PAGE_SIZE) - 1; td->td_frame = (struct trapframe *)STACKALIGN( (caddr_t)td->td_pcb - 8 - sizeof(struct trapframe)); } void cpu_thread_free(struct thread *td) { } void cpu_thread_clean(struct thread *td) { } /* * Intercept the return address from a freshly forked process that has NOT * been scheduled yet. * * This is needed to make kernel threads stay in kernel mode.
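 * In effect the new thread still passes through fork_trampoline, but
 * ends up calling func(arg) in the kernel rather than returning to
 * user mode via fork_return(); an illustrative (hypothetical) call:
 *
 *	cpu_fork_kthread_handler(td, kthread_entry, arg);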
*/ void cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg) { td->td_pcb->pcb_s[0] = (uintptr_t)func; td->td_pcb->pcb_s[1] = (uintptr_t)arg; td->td_pcb->pcb_ra = (uintptr_t)fork_trampoline; td->td_pcb->pcb_sp = (uintptr_t)td->td_frame; } +void +cpu_update_pcb(struct thread *td) +{ +} + void cpu_exit(struct thread *td) { } bool cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused) { return (true); } int cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused, int com __unused, void *data __unused) { return (EINVAL); } void cpu_sync_core(void) { fence_i(); } diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 786cc447dc2c..46482f26e0ef 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -1,1346 +1,1347 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1986, 1989, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SYS_PROC_H_ #define _SYS_PROC_H_ #include /* For struct callout. */ #include /* For struct klist. */ #ifdef _KERNEL #include #endif #include #ifndef _KERNEL #include #endif #include #include #include #include #include #include #include /* XXX. */ #include #include #include #include #include #ifndef _KERNEL #include /* For structs itimerval, timeval. */ #else #include #include #endif #include #include #include #include #include /* Machine-dependent proc substruct. */ #ifdef _KERNEL #include #endif /* * One structure allocated per session. * * List of locks * (m) locked by s_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct session { u_int s_count; /* Ref cnt; pgrps in session - atomic. */ struct proc *s_leader; /* (m + e) Session leader. */ struct vnode *s_ttyvp; /* (m) Vnode of controlling tty. 
*/ struct cdev_priv *s_ttydp; /* (m) Device of controlling tty. */ struct tty *s_ttyp; /* (e) Controlling tty. */ pid_t s_sid; /* (c) Session ID. */ /* (m) Setlogin() name: */ char s_login[roundup(MAXLOGNAME, sizeof(long))]; struct mtx s_mtx; /* Mutex to protect members. */ }; /* * One structure allocated per process group. * * List of locks * (m) locked by pg_mtx mtx * (e) locked by proctree_lock sx * (c) const until freeing */ struct pgrp { LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */ LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */ struct session *pg_session; /* (c) Pointer to session. */ struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */ pid_t pg_id; /* (c) Process group id. */ struct mtx pg_mtx; /* Mutex to protect members */ int pg_flags; /* (m) PGRP_ flags */ struct sx pg_killsx; /* Mutual exclusion between group member * fork() and killpg() */ }; #define PGRP_ORPHANED 0x00000001 /* Group is orphaned */ /* * pargs, used to hold a copy of the command line, if it had a sane length. */ struct pargs { u_int ar_ref; /* Reference count. */ u_int ar_length; /* Length. */ u_char ar_args[1]; /* Arguments. */ }; /*- * Description of a process. * * This structure contains the information needed to manage a thread of * control, known in UN*X as a process; it has references to substructures * containing descriptions of things that the process uses, but may share * with related processes. The process structure and the substructures * are always addressable except for those marked "(CPU)" below, * which might be addressable only on a processor on which the process * is running. * * Below is a key of locks used to protect each member of struct proc. The * lock is indicated by a reference to a specific character in parens in the * associated comment. * * - not yet protected * a - only touched by curproc or parent during fork/wait * b - created at fork, never changes * (exception aiods switch vmspaces, but they are also * marked 'P_SYSTEM' so hopefully it will be left alone) * c - locked by proc mtx * d - locked by allproc_lock lock * e - locked by proctree_lock lock * f - session mtx * g - process group mtx * h - callout_lock mtx * i - by curproc or the master session mtx * j - locked by proc slock * k - only accessed by curthread * k*- only accessed by curthread and from an interrupt * kx- only accessed by curthread and by debugger * l - the attaching proc or attaching proc parent * n - not locked, lazy * o - ktrace lock * q - td_contested lock * r - p_peers lock * s - see sleepq_switch(), sleeping_on_old_rtc(), and sleep(9) * t - thread lock * u - process stat lock * w - process timer lock * x - created at fork, only changes during single threading in exec * y - created at first aio, doesn't change until exit or exec at which * point we are single-threaded and only curthread changes it * * If the locking key specifies two identifiers (for example, p_pptr) then * either lock is sufficient for read access, but both locks must be held * for write access. 
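 * For example, p_pptr may be read under either the proc mutex (c) or
 * proctree_lock (e), but reparenting a process must hold both before
 * writing it.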
*/ struct cpuset; struct filecaps; struct filemon; struct kaioinfo; struct kaudit_record; struct kcov_info; struct kdtrace_proc; struct kdtrace_thread; struct kmsan_td; struct kq_timer_cb_data; struct mqueue_notifier; struct p_sched; struct proc; struct procdesc; struct racct; struct sbuf; struct sleepqueue; struct socket; struct td_sched; struct thread; struct trapframe; struct turnstile; struct vm_map; struct vm_map_entry; struct epoch_tracker; struct syscall_args { u_int code; u_int original_code; struct sysent *callp; register_t args[8]; }; /* * XXX: Does this belong in resource.h or resourcevar.h instead? * Resource usage extension. The times in rusage structs in the kernel are * never up to date. The actual times are kept as runtimes and tick counts * (with control info in the "previous" times), and are converted when * userland asks for rusage info. Backwards compatibility prevents putting * this directly in the user-visible rusage struct. * * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux. * Locking for td_rux: (t) for all fields. */ struct rusage_ext { uint64_t rux_runtime; /* (cu) Real time. */ uint64_t rux_uticks; /* (cu) Statclock hits in user mode. */ uint64_t rux_sticks; /* (cu) Statclock hits in sys mode. */ uint64_t rux_iticks; /* (cu) Statclock hits in intr mode. */ uint64_t rux_uu; /* (c) Previous user time in usec. */ uint64_t rux_su; /* (c) Previous sys time in usec. */ uint64_t rux_tu; /* (c) Previous total time in usec. */ }; /* * Kernel runnable context (thread). * This is what is put to sleep and reactivated. * Thread context. Processes may have multiple threads. */ struct thread { struct mtx *volatile td_lock; /* replaces sched lock */ struct proc *td_proc; /* (*) Associated process. */ TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */ TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */ union { TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */ struct thread *td_zombie; /* Zombie list linkage */ }; TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */ LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */ struct cpuset *td_cpuset; /* (t) CPU affinity mask. */ struct domainset_ref td_domain; /* (a) NUMA policy */ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */ struct turnstile *td_turnstile; /* (k) Associated turnstile. */ void *td_pad1; /* Available */ struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */ lwpid_t td_tid; /* (b) Thread ID. */ sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */ #define td_siglist td_sigqueue.sq_signals u_char td_lend_user_pri; /* (t) Lend user pri. */ u_char td_allocdomain; /* (b) NUMA domain backing this struct thread. */ u_char td_base_ithread_pri; /* (t) Base ithread pri */ struct kmsan_td *td_kmsan; /* (k) KMSAN state */ /* Cleared during fork1(), thread_create(), or kthread_add(). */ #define td_startzero td_flags int td_flags; /* (t) TDF_* flags. */ int td_ast; /* (t) TDA_* indicators */ int td_inhibitors; /* (t) Why can not run. */ int td_pflags; /* (k) Private thread (TDP_*) flags. */ int td_pflags2; /* (k) Private thread (TDP2_*) flags. */ int td_dupfd; /* (k) Ret value from fdopen. XXX */ int td_sqqueue; /* (t) Sleepqueue queue blocked on. */ const void *td_wchan; /* (t) Sleep address. */ const char *td_wmesg; /* (t) Reason for sleep. */ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */ u_char td_tsqueue; /* (t) Turnstile queue blocked on. */ u_char _td_pad0[2]; /* Available. 
*/ int td_locks; /* (k) Debug: count of non-spin locks */ int td_rw_rlocks; /* (k) Count of rwlock read locks. */ int td_sx_slocks; /* (k) Count of sx shared locks. */ int td_lk_slocks; /* (k) Count of lockmgr shared locks. */ struct lock_object *td_wantedlock; /* (k) Lock we are contending on */ struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */ const char *td_lockname; /* (t) Name of lock blocked on. */ LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */ struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */ int td_intr_nesting_level; /* (k) Interrupt recursion. */ int td_pinned; /* (k) Temporary cpu pin count. */ struct ucred *td_realucred; /* (k) Reference to credentials. */ struct ucred *td_ucred; /* (k) Used credentials, temporarily switchable. */ struct plimit *td_limit; /* (k) Resource limits. */ int td_slptick; /* (t) Time at sleep. */ int td_blktick; /* (t) Time spent blocked. */ int td_swvoltick; /* (t) Time at last SW_VOL switch. */ int td_swinvoltick; /* (t) Time at last SW_INVOL switch. */ u_int td_cow; /* (*) Number of copy-on-write faults */ struct rusage td_ru; /* (t) rusage information. */ struct rusage_ext td_rux; /* (t) Internal rusage information. */ uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */ uint64_t td_runtime; /* (t) How many cpu ticks we've run. */ u_int td_pticks; /* (t) Statclock hits for profiling */ u_int td_sticks; /* (t) Statclock hits in system mode. */ u_int td_iticks; /* (t) Statclock hits in intr mode. */ u_int td_uticks; /* (t) Statclock hits in user mode. */ int td_intrval; /* (t) Return value for sleepq. */ sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */ volatile u_int td_generation; /* (k) For detection of preemption */ stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */ int td_xsig; /* (c) Signal for ptrace */ u_long td_profil_addr; /* (k) Temporary addr until AST. */ u_int td_profil_ticks; /* (k) Temporary ticks until AST. */ char td_name[MAXCOMLEN + 1]; /* (*) Thread name. */ struct file *td_fpop; /* (k) file referencing cdev under op */ int td_dbgflags; /* (c) Userland debugger flags */ siginfo_t td_si; /* (c) For debugger or core file */ int td_ng_outbound; /* (k) Thread entered ng from above. */ struct osd td_osd; /* (k) Object specific data. */ struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */ pid_t td_dbg_forked; /* (c) Child pid for debugger. */ u_int td_no_sleeping; /* (k) Sleeping disabled count. */ struct vnode *td_vp_reserved;/* (k) Preallocated vnode. */ void *td_su; /* (k) FFS SU private */ sbintime_t td_sleeptimo; /* (t) Sleep timeout. */ int td_rtcgen; /* (s) rtc_generation of abs. sleep */ int td_errno; /* (k) Error from last syscall. */ size_t td_vslock_sz; /* (k) amount of vslock-ed space */ struct kcov_info *td_kcov_info; /* (*) Kernel code coverage data */ long td_ucredref; /* (k) references on td_realucred */ #define td_endzero td_sigmask /* Copied during fork1(), thread_create(), or kthread_add(). */ #define td_startcopy td_endzero sigset_t td_sigmask; /* (c) Current signal mask. */ u_char td_rqindex; /* (t) Run queue index. */ u_char td_base_pri; /* (t) Thread base kernel priority. */ u_char td_priority; /* (t) Thread active priority. */ u_char td_pri_class; /* (t) Scheduling class. */ u_char td_user_pri; /* (t) User pri from estcpu and nice. */ u_char td_base_user_pri; /* (t) Base user pri */ uintptr_t td_rb_list; /* (k) Robust list head. */ uintptr_t td_rbp_list; /* (k) Robust priv list head. 
	uintptr_t td_rb_inact;		/* (k) Current in-action mutex loc. */
	struct syscall_args td_sa;	/* (kx) Syscall parameters. Copied on
					   fork for child tracing. */
	void *td_sigblock_ptr;		/* (k) uptr for fast sigblock. */
	uint32_t td_sigblock_val;	/* (k) fast sigblock value read at
					   td_sigblock_ptr on kern entry */
#define	td_endcopy td_pcb

/*
 * Fields that must be manually set in fork1(), thread_create(), kthread_add(),
 * or already have been set in the allocator, constructor, etc.
 */
	struct pcb *td_pcb;		/* (k) Kernel VA of pcb and kstack. */
	enum td_states {
		TDS_INACTIVE = 0x0,
		TDS_INHIBITED,
		TDS_CAN_RUN,
		TDS_RUNQ,
		TDS_RUNNING
	} td_state;			/* (t) thread state */
	/* Note: td_state must be accessed using TD_{GET,SET}_STATE(). */
	union {
		syscallarg_t tdu_retval[2];
		off_t tdu_off;
	} td_uretoff;			/* (k) Syscall aux returns. */
#define	td_retval	td_uretoff.tdu_retval
	u_int td_cowgen;		/* (k) Generation of COW pointers. */
	/* LP64 hole */
	struct callout td_slpcallout;	/* (h) Callout for sleep. */
	struct trapframe *td_frame;	/* (k) */
	vm_offset_t td_kstack;		/* (a) Kernel VA of kstack. */
	u_short td_kstack_pages;	/* (a) Size of the kstack. */
	u_short td_kstack_domain;	/* (a) Domain backing kstack KVA. */
	volatile u_int td_critnest;	/* (k*) Critical section nest level. */
	struct mdthread td_md;		/* (k) Any machine-dependent fields. */
	struct kaudit_record *td_ar;	/* (k) Active audit record, if any. */
	struct lpohead td_lprof[2];	/* (a) lock profiling objects. */
	struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */
	struct vnet *td_vnet;		/* (k) Effective vnet. */
	const char *td_vnet_lpush;	/* (k) Debugging vnet push / pop. */
	struct trapframe *td_intr_frame; /* (k) Frame of the current irq */
	struct proc *td_rfppwait_p;	/* (k) The vforked child */
	struct vm_page **td_ma;		/* (k) uio pages held */
	int td_ma_cnt;			/* (k) size of *td_ma */
	/* LP64 hole */
	void *td_emuldata;		/* Emulator state data */
	int td_lastcpu;			/* (t) Last cpu we were on. */
	int td_oncpu;			/* (t) Which cpu we are on. */
	void *td_lkpi_task;		/* LinuxKPI task struct pointer */
	int td_pmcpend;
	void *td_remotereq;		/* (c) dbg remote request. */
	off_t td_ktr_io_lim;		/* (k) limit for ktrace file size */
#ifdef EPOCH_TRACE
	SLIST_HEAD(, epoch_tracker) td_epochs;
#endif
};
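/*
 * Illustrative sketch (not part of proc.h): a syscall implementation hands
 * its primary result back to userland through td_retval[] (an alias for
 * td_uretoff.tdu_retval above), while the function's return value is the
 * errno-style status.  sys_example() and its argument are hypothetical.
 */
#ifdef EXAMPLE_TD_RETVAL_SKETCH
static int
sys_example(struct thread *td, void *uap)
{
	/* Value delivered to userland in the return register. */
	td->td_retval[0] = 42;
	return (0);		/* 0 means success */
}
#endif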
struct thread0_storage {
	struct thread t0st_thread;
	uint64_t t0st_sched[10];
};

struct mtx *thread_lock_block(struct thread *);
void thread_lock_block_wait(struct thread *);
void thread_lock_set(struct thread *, struct mtx *);
void thread_lock_unblock(struct thread *, struct mtx *);

#define	THREAD_LOCK_ASSERT(td, type)					\
	mtx_assert((td)->td_lock, (type))

#define	THREAD_LOCK_BLOCKED_ASSERT(td, type)				\
do {									\
	struct mtx *__m = (td)->td_lock;				\
	if (__m != &blocked_lock)					\
		mtx_assert(__m, (type));				\
} while (0)

#ifdef INVARIANTS
#define	THREAD_LOCKPTR_ASSERT(td, lock)					\
do {									\
	struct mtx *__m;						\
	__m = (td)->td_lock;						\
	KASSERT(__m == (lock),						\
	    ("Thread %p lock %p does not match %p", td, __m, (lock)));	\
} while (0)

#define	THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock)				\
do {									\
	struct mtx *__m;						\
	__m = (td)->td_lock;						\
	KASSERT(__m == (lock) || __m == &blocked_lock,			\
	    ("Thread %p lock %p does not match %p", td, __m, (lock)));	\
} while (0)

#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td) do {						\
	KASSERT(SCHEDULER_STOPPED() || (td)->td_locks > 0,		\
	    ("Thread %p owns no locks", (td)));				\
	(td)->td_locks--;						\
} while (0)
#else
#define	THREAD_LOCKPTR_ASSERT(td, lock)
#define	THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#endif
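/*
 * Illustrative sketch (not part of proc.h): a helper that requires the
 * thread lock documents that requirement with THREAD_LOCK_ASSERT(), here
 * before reading td_priority, which is protected by the thread lock (t).
 * example_read_priority() is hypothetical.
 */
#ifdef EXAMPLE_THREAD_LOCK_SKETCH
static u_char
example_read_priority(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);	/* td_priority is (t). */
	return (td->td_priority);
}
#endif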
/*
 * Flags kept in td_flags:
 * To change these you MUST have the scheduler lock.
 */
#define	TDF_BORROWING	0x00000001 /* Thread is borrowing pri from another. */
#define	TDF_INPANIC	0x00000002 /* Caused a panic, let it drive crashdump. */
#define	TDF_INMEM	0x00000004 /* Thread's stack is in memory. */
#define	TDF_SINTR	0x00000008 /* Sleep is interruptible. */
#define	TDF_TIMEOUT	0x00000010 /* Timing out during sleep. */
#define	TDF_IDLETD	0x00000020 /* This is a per-CPU idle thread. */
#define	TDF_UNUSED11	0x00000040 /* Available */
#define	TDF_SIGWAIT	0x00000080 /* Ignore ignored signals */
#define	TDF_KTH_SUSP	0x00000100 /* kthread is suspended */
#define	TDF_ALLPROCSUSP	0x00000200 /* suspended by SINGLE_ALLPROC */
#define	TDF_BOUNDARY	0x00000400 /* Thread suspended at user boundary */
#define	TDF_UNUSED1	0x00000800 /* Available */
#define	TDF_UNUSED2	0x00001000 /* Available */
#define	TDF_SBDRY	0x00002000 /* Stop only on usermode boundary. */
#define	TDF_UPIBLOCKED	0x00004000 /* Thread blocked on user PI mutex. */
#define	TDF_UNUSED3	0x00008000 /* Available */
#define	TDF_UNUSED4	0x00010000 /* Available */
#define	TDF_UNUSED5	0x00020000 /* Available */
#define	TDF_NOLOAD	0x00040000 /* Ignore during load avg calculations. */
#define	TDF_SERESTART	0x00080000 /* ERESTART on stop attempts. */
#define	TDF_THRWAKEUP	0x00100000 /* Libthr thread must not suspend itself. */
#define	TDF_SEINTR	0x00200000 /* EINTR on stop attempts. */
#define	TDF_UNUSED12	0x00400000 /* Available */
#define	TDF_UNUSED6	0x00800000 /* Available */
#define	TDF_SCHED0	0x01000000 /* Reserved for scheduler private use */
#define	TDF_SCHED1	0x02000000 /* Reserved for scheduler private use */
#define	TDF_SCHED2	0x04000000 /* Reserved for scheduler private use */
#define	TDF_SCHED3	0x08000000 /* Reserved for scheduler private use */
#define	TDF_UNUSED7	0x10000000 /* Available */
#define	TDF_UNUSED8	0x20000000 /* Available */
#define	TDF_UNUSED9	0x40000000 /* Available */
#define	TDF_UNUSED10	0x80000000 /* Available */

enum {
	TDA_AST = 0,		/* Special: call all non-flagged AST handlers */
	TDA_OWEUPC,
	TDA_HWPMC,
	TDA_VFORK,
	TDA_ALRM,
	TDA_PROF,
	TDA_MAC,
	TDA_SCHED,
	TDA_UFS,
	TDA_GEOM,
	TDA_KQUEUE,
	TDA_RACCT,
	TDA_MOD1,		/* For third party use, before signals are */
	TDA_MOD2,		/* processed .. */
	TDA_PSELECT,		/* For discarding temporary signal mask */
	TDA_SIG,
	TDA_KTRACE,
	TDA_SUSPEND,
	TDA_SIGSUSPEND,
	TDA_MOD3,		/* .. and after */
	TDA_MOD4,
	TDA_MAX,
};
#define	TDAI(tda)		(1U << (tda))
#define	td_ast_pending(td, tda)	(((td)->td_ast & TDAI(tda)) != 0)
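/*
 * Illustrative sketch (not part of proc.h): a module can claim one of the
 * TDA_MOD* slots, register a handler with ast_register() (declared further
 * below), and request a call on the next return to user mode with
 * ast_sched().  example_ast() and example_init() are hypothetical.
 */
#ifdef EXAMPLE_AST_SKETCH
static void
example_ast(struct thread *td, int tda)
{
	/* Runs from the AST loop when the TDA_MOD1 indicator is set. */
}

static void
example_init(void)
{
	ast_register(TDA_MOD1, ASTR_ASTF_REQUIRED, 0, example_ast);
	ast_sched(curthread, TDA_MOD1);
}
#endif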
/* Userland debug flags */
#define	TDB_SUSPEND	0x00000001 /* Thread is suspended by debugger */
#define	TDB_XSIG	0x00000002 /* Thread is exchanging signal under trace */
#define	TDB_USERWR	0x00000004 /* Debugger modified memory or registers */
#define	TDB_SCE		0x00000008 /* Thread performs syscall enter */
#define	TDB_SCX		0x00000010 /* Thread performs syscall exit */
#define	TDB_EXEC	0x00000020 /* TDB_SCX from exec(2) family */
#define	TDB_FORK	0x00000040 /* TDB_SCX from fork(2) that created new
				      process */
#define	TDB_STOPATFORK	0x00000080 /* Stop at the return from fork (child
				      only) */
#define	TDB_CHILD	0x00000100 /* New child indicator for ptrace() */
#define	TDB_BORN	0x00000200 /* New LWP indicator for ptrace() */
#define	TDB_EXIT	0x00000400 /* Exiting LWP indicator for ptrace() */
#define	TDB_VFORK	0x00000800 /* vfork indicator for ptrace() */
#define	TDB_FSTP	0x00001000 /* The thread is PT_ATTACH leader */
#define	TDB_STEP	0x00002000 /* (x86) PSL_T set for PT_STEP */
#define	TDB_SSWITCH	0x00004000 /* Suspended in ptracestop */
#define	TDB_BOUNDARY	0x00008000 /* ptracestop() at boundary */
#define	TDB_COREDUMPREQ	0x00010000 /* Coredump request */
#define	TDB_SCREMOTEREQ	0x00020000 /* Remote syscall request */

/*
 * "Private" flags kept in td_pflags:
 * These are only written by curthread and thus need no locking.
 */
#define	TDP_OLDMASK	0x00000001 /* Need to restore mask after suspend. */
#define	TDP_INKTR	0x00000002 /* Thread is currently in KTR code. */
#define	TDP_INKTRACE	0x00000004 /* Thread is currently in KTRACE code. */
#define	TDP_BUFNEED	0x00000008 /* Do not recurse into the buf flush */
#define	TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
#define	TDP_ALTSTACK	0x00000020 /* Have alternate signal stack. */
#define	TDP_DEADLKTREAT	0x00000040 /* Lock acquisition - deadlock treatment. */
#define	TDP_NOFAULTING	0x00000080 /* Do not handle page faults. */
#define	TDP_SIGFASTBLOCK 0x00000100 /* Fast sigblock active */
#define	TDP_OWEUPC	0x00000200 /* Call addupc() at next AST. */
#define	TDP_ITHREAD	0x00000400 /* Thread is an interrupt thread. */
#define	TDP_SYNCIO	0x00000800 /* Local override, disable async i/o. */
#define	TDP_SCHED1	0x00001000 /* Reserved for scheduler private use */
#define	TDP_SCHED2	0x00002000 /* Reserved for scheduler private use */
#define	TDP_SCHED3	0x00004000 /* Reserved for scheduler private use */
#define	TDP_SCHED4	0x00008000 /* Reserved for scheduler private use */
#define	TDP_GEOM	0x00010000 /* Settle GEOM before finishing syscall */
#define	TDP_SOFTDEP	0x00020000 /* Stuck processing softdep worklist */
#define	TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */
#define	TDP_WAKEUP	0x00080000 /* Don't sleep in umtx cond_wait */
#define	TDP_INBDFLUSH	0x00100000 /* Already in BO_BDFLUSH, do not recurse */
#define	TDP_KTHREAD	0x00200000 /* This is an official kernel thread */
#define	TDP_CALLCHAIN	0x00400000 /* Capture thread's callchain */
#define	TDP_IGNSUSP	0x00800000 /* Permission to ignore the MNTK_SUSPEND* */
#define	TDP_AUDITREC	0x01000000 /* Audit record pending on thread */
#define	TDP_RFPPWAIT	0x02000000 /* Handle RFPPWAIT on syscall exit */
#define	TDP_RESETSPUR	0x04000000 /* Reset spurious page fault history. */
#define	TDP_NERRNO	0x08000000 /* Last errno is already in td_errno */
#define	TDP_UIOHELD	0x10000000 /* Current uio has pages held in td_ma */
#define	TDP_EFIRT	0x20000000 /* In firmware (EFI RT) call */
#define	TDP_EXECVMSPC	0x40000000 /* Execve destroyed old vmspace */
#define	TDP_SIGFASTPENDING 0x80000000 /* Pending signal due to sigfastblock */

#define	TDP2_SBPAGES	0x00000001 /* Owns sbusy on some pages */
#define	TDP2_COMPAT32RB	0x00000002 /* compat32 ABI for robust lists */
#define	TDP2_ACCT	0x00000004 /* Doing accounting */
#define	TDP2_SAN_QUIET	0x00000008 /* Disable warnings from K(A|M)SAN */

/*
 * Reasons that the current thread can not be run yet.
 * More than one may apply.
 */
#define	TDI_SUSPENDED	0x0001	/* On suspension queue. */
#define	TDI_SLEEPING	0x0002	/* Actually asleep! (tricky). */
#define	TDI_LOCK	0x0008	/* Stopped on a lock. */
#define	TDI_IWAIT	0x0010	/* Awaiting interrupt. */

#define	TD_IS_SLEEPING(td)	((td)->td_inhibitors & TDI_SLEEPING)
#define	TD_ON_SLEEPQ(td)	((td)->td_wchan != NULL)
#define	TD_IS_SUSPENDED(td)	((td)->td_inhibitors & TDI_SUSPENDED)
#define	TD_ON_LOCK(td)		((td)->td_inhibitors & TDI_LOCK)
#define	TD_AWAITING_INTR(td)	((td)->td_inhibitors & TDI_IWAIT)
#ifdef _KERNEL
#define	TD_GET_STATE(td)	atomic_load_int(&(td)->td_state)
#else
#define	TD_GET_STATE(td)	((td)->td_state)
#endif
#define	TD_IS_RUNNING(td)	(TD_GET_STATE(td) == TDS_RUNNING)
#define	TD_ON_RUNQ(td)		(TD_GET_STATE(td) == TDS_RUNQ)
#define	TD_CAN_RUN(td)		(TD_GET_STATE(td) == TDS_CAN_RUN)
#define	TD_IS_INHIBITED(td)	(TD_GET_STATE(td) == TDS_INHIBITED)
#define	TD_ON_UPILOCK(td)	((td)->td_flags & TDF_UPIBLOCKED)
#define	TD_IS_IDLETHREAD(td)	((td)->td_flags & TDF_IDLETD)

#define	TD_CAN_ABORT(td)	(TD_ON_SLEEPQ((td)) &&			\
				    ((td)->td_flags & TDF_SINTR) != 0)

#define	KTDSTATE(td)							\
	(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" :		\
	((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" :	\
	((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" :		\
	((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")
"iwait" : "yielding") #define TD_SET_INHIB(td, inhib) do { \ TD_SET_STATE(td, TDS_INHIBITED); \ (td)->td_inhibitors |= (inhib); \ } while (0) #define TD_CLR_INHIB(td, inhib) do { \ if (((td)->td_inhibitors & (inhib)) && \ (((td)->td_inhibitors &= ~(inhib)) == 0)) \ TD_SET_STATE(td, TDS_CAN_RUN); \ } while (0) #define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING) #define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK) #define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED) #define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT) #define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING) #define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING) #define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK) #define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED) #define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT) #ifdef _KERNEL #define TD_SET_STATE(td, state) atomic_store_int(&(td)->td_state, state) #else #define TD_SET_STATE(td, state) (td)->td_state = state #endif #define TD_SET_RUNNING(td) TD_SET_STATE(td, TDS_RUNNING) #define TD_SET_RUNQ(td) TD_SET_STATE(td, TDS_RUNQ) #define TD_SET_CAN_RUN(td) TD_SET_STATE(td, TDS_CAN_RUN) #define TD_SBDRY_INTR(td) \ (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0) #define TD_SBDRY_ERRNO(td) \ (((td)->td_flags & TDF_SEINTR) != 0 ? EINTR : ERESTART) /* * Process structure. */ struct proc { LIST_ENTRY(proc) p_list; /* (d) List of all processes. */ TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */ struct mtx p_slock; /* process spin lock */ struct ucred *p_ucred; /* (c) Process owner's identity. */ struct filedesc *p_fd; /* (b) Open files. */ struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */ struct pwddesc *p_pd; /* (b) Cwd, chroot, jail, umask */ struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */ struct plimit *p_limit; /* (c) Resource limits. */ struct callout p_limco; /* (c) Limit callout handle */ struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */ int p_flag; /* (c) P_* flags. */ int p_flag2; /* (c) P2_* flags. */ enum p_states { PRS_NEW = 0, /* In creation */ PRS_NORMAL, /* threads can be run. */ PRS_ZOMBIE } p_state; /* (j/c) Process status. */ pid_t p_pid; /* (b) Process identifier. */ LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */ LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */ struct proc *p_pptr; /* (c + e) Pointer to parent process. */ LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */ LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */ struct proc *p_reaper; /* (e) My reaper. */ LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants (if I am reaper). */ LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of the same reaper. */ struct mtx p_mtx; /* (n) Lock for this struct. */ struct mtx p_statmtx; /* Lock for the stats */ struct mtx p_itimmtx; /* Lock for the virt/prof timers */ struct mtx p_profmtx; /* Lock for the profiling */ struct ksiginfo *p_ksi; /* Locked by parent proc lock */ sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */ #define p_siglist p_sigqueue.sq_signals pid_t p_oppid; /* (c + e) Real parent pid. */ /* The following fields are all zeroed upon creation in fork. */ #define p_startzero p_vmspace struct vmspace *p_vmspace; /* (b) Address space. */ u_int p_swtick; /* (c) Tick when swapped in or out. */ u_int p_cowgen; /* (c) Generation of COW pointers. */ struct itimerval p_realtimer; /* (c) Alarm timer. */ struct rusage p_ru; /* (a) Exit information. 
/*
 * Process structure.
 */
struct proc {
	LIST_ENTRY(proc) p_list;	/* (d) List of all processes. */
	TAILQ_HEAD(, thread) p_threads;	/* (c) all threads. */
	struct mtx p_slock;		/* process spin lock */
	struct ucred *p_ucred;		/* (c) Process owner's identity. */
	struct filedesc *p_fd;		/* (b) Open files. */
	struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
	struct pwddesc *p_pd;		/* (b) Cwd, chroot, jail, umask */
	struct pstats *p_stats;		/* (b) Accounting/statistics (CPU). */
	struct plimit *p_limit;		/* (c) Resource limits. */
	struct callout p_limco;		/* (c) Limit callout handle */
	struct sigacts *p_sigacts;	/* (x) Signal actions, state (CPU). */
	int p_flag;			/* (c) P_* flags. */
	int p_flag2;			/* (c) P2_* flags. */
	enum p_states {
		PRS_NEW = 0,		/* In creation */
		PRS_NORMAL,		/* threads can be run. */
		PRS_ZOMBIE
	} p_state;			/* (j/c) Process status. */
	pid_t p_pid;			/* (b) Process identifier. */
	LIST_ENTRY(proc) p_hash;	/* (d) Hash chain. */
	LIST_ENTRY(proc) p_pglist;	/* (g + e) List of processes in pgrp. */
	struct proc *p_pptr;		/* (c + e) Pointer to parent process. */
	LIST_ENTRY(proc) p_sibling;	/* (e) List of sibling processes. */
	LIST_HEAD(, proc) p_children;	/* (e) Pointer to list of children. */
	struct proc *p_reaper;		/* (e) My reaper. */
	LIST_HEAD(, proc) p_reaplist;	/* (e) List of my descendants
					   (if I am reaper). */
	LIST_ENTRY(proc) p_reapsibling;	/* (e) List of siblings - descendants
					   of the same reaper. */
	struct mtx p_mtx;		/* (n) Lock for this struct. */
	struct mtx p_statmtx;		/* Lock for the stats */
	struct mtx p_itimmtx;		/* Lock for the virt/prof timers */
	struct mtx p_profmtx;		/* Lock for the profiling */
	struct ksiginfo *p_ksi;		/* Locked by parent proc lock */
	sigqueue_t p_sigqueue;		/* (c) Sigs not delivered to a td. */
#define	p_siglist	p_sigqueue.sq_signals
	pid_t p_oppid;			/* (c + e) Real parent pid. */

/* The following fields are all zeroed upon creation in fork. */
#define	p_startzero p_vmspace
	struct vmspace *p_vmspace;	/* (b) Address space. */
	u_int p_swtick;			/* (c) Tick when swapped in or out. */
	u_int p_cowgen;			/* (c) Generation of COW pointers. */
	struct itimerval p_realtimer;	/* (c) Alarm timer. */
	struct rusage p_ru;		/* (a) Exit information. */
	struct rusage_ext p_rux;	/* (cu) Internal resource usage. */
	struct rusage_ext p_crux;	/* (c) Internal child resource usage. */
	int p_profthreads;		/* (c) Num threads in addupc_task. */
	volatile int p_exitthreads;	/* (j) Number of threads exiting */
	int p_traceflag;		/* (o) Kernel trace points. */
	struct ktr_io_params *p_ktrioparms; /* (c + o) Params for ktrace. */
	struct vnode *p_textvp;		/* (b) Vnode of executable. */
	struct vnode *p_textdvp;	/* (b) Dir containing textvp. */
	char *p_binname;		/* (b) Binary hardlink name. */
	u_int p_lock;			/* (c) Prevent exit. */
	struct sigiolst p_sigiolst;	/* (c) List of sigio sources. */
	int p_sigparent;		/* (c) Signal to parent on exit. */
	int p_sig;			/* (n) For core dump/debugger XXX. */
	u_int p_ptevents;		/* (c + e) ptrace() event mask. */
	struct kaioinfo *p_aioinfo;	/* (y) ASYNC I/O info. */
	struct thread *p_singlethread;	/* (c + j) If single threading this is it */
	int p_suspcount;		/* (j) Num threads in suspended mode. */
	struct thread *p_xthread;	/* (c) Trap thread */
	int p_boundary_count;		/* (j) Num threads at user boundary */
	int p_pendingcnt;		/* (c) how many signals are pending */
	struct itimers *p_itimers;	/* (c) POSIX interval timers. */
	struct procdesc *p_procdesc;	/* (e) Process descriptor, if any. */
	u_int p_treeflag;		/* (e) P_TREE flags */
	int p_pendingexits;		/* (c) Count of pending thread exits. */
	struct filemon *p_filemon;	/* (c) filemon-specific data. */
	int p_pdeathsig;		/* (c) Signal from parent on exit. */
/* End area that is zeroed on creation. */
#define	p_endzero p_magic

/* The following fields are all copied upon creation in fork. */
#define	p_startcopy p_endzero
	u_int p_magic;			/* (b) Magic number. */
	int p_osrel;			/* (x) osreldate for the binary
					   (from ELF note, if any) */
	uint32_t p_fctl0;		/* (x) ABI feature control, ELF note */
	char p_comm[MAXCOMLEN + 1];	/* (x) Process name. */
	struct sysentvec *p_sysent;	/* (b) Syscall dispatch info. */
	struct pargs *p_args;		/* (c) Process arguments. */
	rlim_t p_cpulimit;		/* (c) Current CPU limit in seconds. */
	signed char p_nice;		/* (c) Process "nice" value. */
	int p_fibnum;			/* in this routing domain XXX MRT */
	pid_t p_reapsubtree;		/* (e) Pid of the direct child of the
					   reaper which spawned our subtree. */
	uint64_t p_elf_flags;		/* (x) ELF flags */
	void *p_elf_brandinfo;		/* (x) Elf_Brandinfo, NULL for
					   non ELF binaries. */
	sbintime_t p_umtx_min_timeout;
/* End area that is copied on creation. */
#define	p_endcopy p_xexit
	u_int p_xexit;			/* (c) Exit code. */
	u_int p_xsig;			/* (c) Stop/kill sig. */
	struct pgrp *p_pgrp;		/* (c + e) Pointer to process group. */
	struct knlist *p_klist;		/* (c) Knotes attached to this proc. */
	int p_numthreads;		/* (c) Number of threads. */
	struct mdproc p_md;		/* Any machine-dependent fields. */
	struct callout p_itcallout;	/* (h + c) Interval timer callout. */
	u_short p_acflag;		/* (c) Accounting flags. */
	struct proc *p_peers;		/* (r) */
	struct proc *p_leader;		/* (b) */
	void *p_emuldata;		/* (c) Emulator state data. */
	struct label *p_label;		/* (*) Proc (not subject) MAC label. */
	STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */
	LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers. */
	struct kdtrace_proc *p_dtrace;	/* (*) DTrace-specific data. */
	struct cv p_pwait;		/* (*) wait cv for exit/exec. */
	uint64_t p_prev_runtime;	/* (c) Resource usage accounting. */
	struct racct *p_racct;		/* (b) Resource accounting. */
	int p_throttled;		/* (c) Flag for racct pcpu throttling */
	/*
	 * An orphan is a child that has been re-parented to the
	 * debugger as a result of attaching to it.  We need to keep
	 * track of orphans so the parent can still collect the exit
	 * status of what used to be its children.
	 */
	LIST_ENTRY(proc) p_orphan;	/* (e) List of orphan processes. */
	LIST_HEAD(, proc) p_orphans;	/* (e) Pointer to list of orphans. */

	TAILQ_HEAD(, kq_timer_cb_data) p_kqtim_stop; /* (c) */
	LIST_ENTRY(proc) p_jaillist;	/* (d) Jail process linkage. */
};

#define	p_session	p_pgrp->pg_session
#define	p_pgid		p_pgrp->pg_id

#define	NOCPU		(-1)	/* For when we aren't on a CPU. */
#define	NOCPU_OLD	(255)
#define	MAXCPU_OLD	(254)

#define	PROC_SLOCK(p)	mtx_lock_spin(&(p)->p_slock)
#define	PROC_SUNLOCK(p)	mtx_unlock_spin(&(p)->p_slock)
#define	PROC_SLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_slock, (type))

#define	PROC_STATLOCK(p)	mtx_lock_spin(&(p)->p_statmtx)
#define	PROC_STATUNLOCK(p)	mtx_unlock_spin(&(p)->p_statmtx)
#define	PROC_STATLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_statmtx, (type))

#define	PROC_ITIMLOCK(p)	mtx_lock_spin(&(p)->p_itimmtx)
#define	PROC_ITIMUNLOCK(p)	mtx_unlock_spin(&(p)->p_itimmtx)
#define	PROC_ITIMLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_itimmtx, (type))

#define	PROC_PROFLOCK(p)	mtx_lock_spin(&(p)->p_profmtx)
#define	PROC_PROFUNLOCK(p)	mtx_unlock_spin(&(p)->p_profmtx)
#define	PROC_PROFLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_profmtx, (type))
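/*
 * Illustrative sketch (not part of proc.h): fields marked (j) are guarded
 * by the process spin lock, taken through the PROC_SLOCK()/PROC_SUNLOCK()
 * wrappers above.  example_suspcount() is hypothetical.
 */
#ifdef EXAMPLE_PROC_SLOCK_SKETCH
static int
example_suspcount(struct proc *p)
{
	int n;

	PROC_SLOCK(p);
	n = p->p_suspcount;	/* (j) field, needs the proc spin lock */
	PROC_SUNLOCK(p);
	return (n);
}
#endif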
/* These flags are kept in p_flag. */
#define	P_ADVLOCK	0x00000001 /* Process may hold a POSIX advisory lock. */
#define	P_CONTROLT	0x00000002 /* Has a controlling terminal. */
#define	P_KPROC		0x00000004 /* Kernel process. */
#define	P_UNUSED3	0x00000008 /* --available-- */
#define	P_PPWAIT	0x00000010 /* Parent is waiting for child to exec/exit. */
#define	P_PROFIL	0x00000020 /* Has started profiling. */
#define	P_STOPPROF	0x00000040 /* Has thread requesting to stop profiling. */
#define	P_HADTHREADS	0x00000080 /* Has had threads (no cleanup shortcuts) */
#define	P_SUGID		0x00000100 /* Had set id privileges since last exec. */
#define	P_SYSTEM	0x00000200 /* System proc: no sigs or stats. */
#define	P_SINGLE_EXIT	0x00000400 /* Threads suspending should exit, not wait. */
#define	P_TRACED	0x00000800 /* Debugged process being traced. */
#define	P_WAITED	0x00001000 /* Someone is waiting for us. */
#define	P_WEXIT		0x00002000 /* Working on exiting. */
#define	P_EXEC		0x00004000 /* Process called exec. */
#define	P_WKILLED	0x00008000 /* Killed, go to kernel/user boundary ASAP. */
#define	P_CONTINUED	0x00010000 /* Proc has continued from a stopped state. */
#define	P_STOPPED_SIG	0x00020000 /* Stopped due to SIGSTOP/SIGTSTP. */
#define	P_STOPPED_TRACE	0x00040000 /* Stopped because of tracing. */
#define	P_STOPPED_SINGLE 0x00080000 /* Only 1 thread can continue (not to
				       user). */
#define	P_PROTECTED	0x00100000 /* Do not kill on memory overcommit. */
#define	P_SIGEVENT	0x00200000 /* Process pending signals changed. */
#define	P_SINGLE_BOUNDARY 0x00400000 /* Threads should suspend at user
					boundary. */
#define	P_HWPMC		0x00800000 /* Process is using HWPMCs */
#define	P_JAILED	0x01000000 /* Process is in jail. */
#define	P_TOTAL_STOP	0x02000000 /* Stopped in stop_all_proc. */
#define	P_INEXEC	0x04000000 /* Process is in execve(). */
#define	P_STATCHILD	0x08000000 /* Child process stopped or exited. */
#define	P_INMEM		0x10000000 /* Loaded into memory, always set. */
#define	P_UNUSED1	0x20000000 /* --available-- */
#define	P_UNUSED2	0x40000000 /* --available-- */
#define	P_PPTRACE	0x80000000 /* PT_TRACEME by vforked child. */

#define	P_STOPPED	(P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
#define	P_SHOULDSTOP(p)	((p)->p_flag & P_STOPPED)
#define	P_KILLED(p)	((p)->p_flag & P_WKILLED)

/* These flags are kept in p_flag2. */
#define	P2_INHERIT_PROTECTED	0x00000001 /* New children get P_PROTECTED. */
#define	P2_NOTRACE		0x00000002 /* No ptrace(2) attach or
					      coredumps. */
#define	P2_NOTRACE_EXEC		0x00000004 /* Keep P2_NOTRACE on exec(2). */
#define	P2_AST_SU		0x00000008 /* Handles SU ast for kthreads. */
#define	P2_PTRACE_FSTP		0x00000010 /* SIGSTOP from PT_ATTACH not yet
					      handled. */
#define	P2_TRAPCAP		0x00000020 /* SIGTRAP on ENOTCAPABLE */
#define	P2_ASLR_ENABLE		0x00000040 /* Force enable ASLR. */
#define	P2_ASLR_DISABLE		0x00000080 /* Force disable ASLR. */
#define	P2_ASLR_IGNSTART	0x00000100 /* Enable ASLR to consume sbrk
					      area. */
#define	P2_PROTMAX_ENABLE	0x00000200 /* Force enable implied PROT_MAX. */
#define	P2_PROTMAX_DISABLE	0x00000400 /* Force disable implied
					      PROT_MAX. */
#define	P2_STKGAP_DISABLE	0x00000800 /* Disable stack gap for
					      MAP_STACK */
#define	P2_STKGAP_DISABLE_EXEC	0x00001000 /* Stack gap disabled after exec */
#define	P2_ITSTOPPED		0x00002000 /* itimers stopped */
#define	P2_PTRACEREQ		0x00004000 /* Active ptrace req */
#define	P2_NO_NEW_PRIVS		0x00008000 /* Ignore setuid */
#define	P2_WXORX_DISABLE	0x00010000 /* WX mappings enabled */
#define	P2_WXORX_ENABLE_EXEC	0x00020000 /* WXORX enabled after exec */
#define	P2_WEXIT		0x00040000 /* exit just started, no external
					      thread_single() is permitted */
#define	P2_REAPKILLED		0x00080000 /* REAP_KILL pass touched me */
#define	P2_MEMBAR_PRIVE		0x00100000 /* membar private expedited
					      registered */
#define	P2_MEMBAR_PRIVE_SYNCORE	0x00200000 /* membar private expedited sync
					      core registered */
#define	P2_MEMBAR_GLOBE		0x00400000 /* membar global expedited
					      registered */
#define	P2_LOGSIGEXIT_ENABLE	0x00800000 /* Enable logging on sigexit */
#define	P2_LOGSIGEXIT_CTL	0x01000000 /* Override kern.logsigexit */

/* Flags protected by proctree_lock, kept in p_treeflags. */
#define	P_TREE_ORPHANED		0x00000001 /* Reparented, on orphan list */
#define	P_TREE_FIRST_ORPHAN	0x00000002 /* First element of orphan list */
#define	P_TREE_REAPER		0x00000004 /* Reaper of subtree */
#define	P_TREE_GRPEXITED	0x00000008 /* exit1() done with job ctl */

/*
 * These were process status values (p_stat), now they are only used in
 * legacy conversion code.
 */
#define	SIDL	1		/* Process being created by fork. */
#define	SRUN	2		/* Currently runnable. */
#define	SSLEEP	3		/* Sleeping on an address. */
#define	SSTOP	4		/* Process debugging or suspension. */
#define	SZOMB	5		/* Awaiting collection by parent. */
#define	SWAIT	6		/* Waiting for interrupt. */
#define	SLOCK	7		/* Blocked on a lock. */

#define	P_MAGIC		0xbeefface
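/*
 * Illustrative sketch (not part of proc.h): p_flag is a (c) field, so
 * predicates such as P_SHOULDSTOP() and P_KILLED() are evaluated with the
 * proc lock held (PROC_LOCK_ASSERT() is defined further below).
 * example_is_stopped() is hypothetical.
 */
#ifdef EXAMPLE_P_FLAG_SKETCH
static bool
example_is_stopped(struct proc *p)
{
	PROC_LOCK_ASSERT(p, MA_OWNED);
	return (P_SHOULDSTOP(p) != 0);
}
#endif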
#ifdef _KERNEL

/* Types and flags for mi_switch(9). */
#define	SW_TYPE_MASK		0xff	/* First 8 bits are switch type */
#define	SWT_OWEPREEMPT		1	/* Switching due to owepreempt. */
#define	SWT_TURNSTILE		2	/* Turnstile contention. */
#define	SWT_SLEEPQ		3	/* Sleepq wait. */
#define	SWT_RELINQUISH		4	/* yield call. */
#define	SWT_NEEDRESCHED		5	/* NEEDRESCHED was set. */
#define	SWT_IDLE		6	/* Switching from the idle thread. */
#define	SWT_IWAIT		7	/* Waiting for interrupts. */
#define	SWT_SUSPEND		8	/* Thread suspended. */
#define	SWT_REMOTEPREEMPT	9	/* Remote processor preempted. */
#define	SWT_REMOTEWAKEIDLE	10	/* Remote processor preempted idle. */
#define	SWT_BIND		11	/* Thread bound to a new CPU. */
#define	SWT_COUNT		12	/* Number of switch types. */
/* Flags */
#define	SW_VOL		0x0100	/* Voluntary switch. */
#define	SW_INVOL	0x0200	/* Involuntary switch. */
#define	SW_PREEMPT	0x0400	/* The invol switch is a preemption */

/* How values for thread_single(). */
#define	SINGLE_NO_EXIT	0
#define	SINGLE_EXIT	1
#define	SINGLE_BOUNDARY	2
#define	SINGLE_ALLPROC	3

#define	FOREACH_PROC_IN_SYSTEM(p)					\
	LIST_FOREACH((p), &allproc, p_list)
#define	FOREACH_THREAD_IN_PROC(p, td)					\
	TAILQ_FOREACH((td), &(p)->p_threads, td_plist)

#define	FIRST_THREAD_IN_PROC(p)	TAILQ_FIRST(&(p)->p_threads)

/*
 * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit
 * in a pid_t, as it is used to represent "no process group".
 */
#define	PID_MAX		99999
#define	NO_PID		(PID_MAX + 1)
#define	THREAD0_TID	NO_PID
extern pid_t pid_max;

#define	SESS_LEADER(p)	((p)->p_session->s_leader == (p))

/* Lock and unlock a process. */
#define	PROC_LOCK(p)	mtx_lock(&(p)->p_mtx)
#define	PROC_TRYLOCK(p)	mtx_trylock(&(p)->p_mtx)
#define	PROC_UNLOCK(p)	mtx_unlock(&(p)->p_mtx)
#define	PROC_LOCKED(p)	mtx_owned(&(p)->p_mtx)
#define	PROC_WAIT_UNLOCKED(p)	mtx_wait_unlocked(&(p)->p_mtx)
#define	PROC_LOCK_ASSERT(p, type)	mtx_assert(&(p)->p_mtx, (type))

/* Lock and unlock a process group. */
#define	PGRP_LOCK(pg)	mtx_lock(&(pg)->pg_mtx)
#define	PGRP_UNLOCK(pg)	mtx_unlock(&(pg)->pg_mtx)
#define	PGRP_LOCKED(pg)	mtx_owned(&(pg)->pg_mtx)
#define	PGRP_LOCK_ASSERT(pg, type)	mtx_assert(&(pg)->pg_mtx, (type))

#define	PGRP_LOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_LOCK(pg);						\
} while (0)
#define	PGRP_UNLOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_UNLOCK(pg);					\
} while (0)

/* Lock and unlock a session. */
#define	SESS_LOCK(s)	mtx_lock(&(s)->s_mtx)
#define	SESS_UNLOCK(s)	mtx_unlock(&(s)->s_mtx)
#define	SESS_LOCKED(s)	mtx_owned(&(s)->s_mtx)
#define	SESS_LOCK_ASSERT(s, type)	mtx_assert(&(s)->s_mtx, (type))

/*
 * A non-zero p_lock prevents the process from exiting; an exiting process
 * sleeps in exit1() until the count reaches zero.
 *
 * PHOLD() asserts that the process (except the current process) is not
 * exiting and increments p_lock.
 * _PHOLD() is the same as PHOLD(), but the caller must already hold the
 * process lock.
 */
#define	PHOLD(p) do {							\
	PROC_LOCK(p);							\
	_PHOLD(p);							\
	PROC_UNLOCK(p);							\
} while (0)
#define	_PHOLD(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc,		\
	    ("PHOLD of exiting process %p", p));			\
	(p)->p_lock++;							\
} while (0)
#define	PROC_ASSERT_HELD(p) do {					\
	KASSERT((p)->p_lock > 0, ("process %p not held", p));		\
} while (0)

#define	PRELE(p) do {							\
	PROC_LOCK((p));							\
	_PRELE((p));							\
	PROC_UNLOCK((p));						\
} while (0)
#define	_PRELE(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	PROC_ASSERT_HELD(p);						\
	(--(p)->p_lock);						\
	if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0)		\
		wakeup(&(p)->p_lock);					\
} while (0)
#define	PROC_ASSERT_NOT_HELD(p) do {					\
	KASSERT((p)->p_lock == 0, ("process %p held", p));		\
} while (0)

#define	PROC_UPDATE_COW(p) do {						\
	struct proc *_p = (p);						\
	PROC_LOCK_ASSERT((_p), MA_OWNED);				\
	atomic_store_int(&_p->p_cowgen, _p->p_cowgen + 1);		\
} while (0)

#define	PROC_COW_CHANGECOUNT(td, p) ({					\
	struct thread *_td = (td);					\
	struct proc *_p = (p);						\
	MPASS(_td == curthread);					\
	PROC_LOCK_ASSERT(_p, MA_OWNED);					\
	_p->p_cowgen - _td->td_cowgen;					\
})
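/*
 * Illustrative sketch (not part of proc.h): PHOLD()/PRELE() bracket a span
 * during which the process must not be allowed to finish exiting, e.g.
 * while its data is inspected without the proc lock held throughout.
 * example_inspect() is hypothetical.
 */
#ifdef EXAMPLE_PHOLD_SKETCH
static void
example_inspect(struct proc *p)
{
	PHOLD(p);	/* locks p, bumps p_lock, unlocks p */
	/* ... work that must not race with exit1() completing ... */
	PRELE(p);	/* drops the hold, wakes exit1() if it is waiting */
}
#endif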
/* Control whether or not it is safe for curthread to sleep. */
#define	THREAD_NO_SLEEPING() do {					\
	curthread->td_no_sleeping++;					\
	MPASS(curthread->td_no_sleeping > 0);				\
} while (0)

#define	THREAD_SLEEPING_OK() do {					\
	MPASS(curthread->td_no_sleeping > 0);				\
	curthread->td_no_sleeping--;					\
} while (0)

#define	THREAD_CAN_SLEEP()	((curthread)->td_no_sleeping == 0)

#define	THREAD_CONTENDS_ON_LOCK(lo) do {				\
	MPASS(curthread->td_wantedlock == NULL);			\
	curthread->td_wantedlock = lo;					\
} while (0)

#define	THREAD_CONTENTION_DONE(lo) do {					\
	MPASS(curthread->td_wantedlock == lo);				\
	curthread->td_wantedlock = NULL;				\
} while (0)

#define	PIDHASH(pid)	(&pidhashtbl[(pid) & pidhash])
#define	PIDHASHLOCK(pid) (&pidhashtbl_lock[((pid) & pidhashlock)])
extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
extern struct sx *pidhashtbl_lock;
extern u_long pidhash;
extern u_long pidhashlock;

#define	PGRPHASH(pgid)	(&pgrphashtbl[(pgid) & pgrphash])
extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;

extern struct sx allproc_lock;
extern int allproc_gen;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
extern struct mtx procid_lock;
extern struct proc proc0;	/* Process slot for swapper. */
extern struct thread0_storage thread0_st; /* Primary thread in proc0. */
#define	thread0 (thread0_st.t0st_thread)
extern struct vmspace vmspace0;	/* VM space for proc0. */
extern int hogticks;		/* Limit on kernel cpu hogs. */
extern int lastpid;
extern int nprocs, maxproc;	/* Current and max number of procs. */
extern int maxprocperuid;	/* Max procs per uid. */
extern u_long ps_arg_cache_limit;

LIST_HEAD(proclist, proc);
TAILQ_HEAD(procqueue, proc);
TAILQ_HEAD(threadqueue, thread);
extern struct proclist allproc;	/* List of all processes. */
extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */

extern struct uma_zone *proc_zone;
extern struct uma_zone *pgrp_zone;

struct proc *pfind(pid_t);	/* Find process by id. */
struct proc *pfind_any(pid_t);	/* Find (zombie) process by id. */
struct proc *pfind_any_locked(pid_t pid); /* Find process by id, locked. */
struct pgrp *pgfind(pid_t);	/* Find process group by id. */
void pidhash_slockall(void);	/* Shared lock all pid hash lists. */
void pidhash_sunlockall(void);	/* Shared unlock all pid hash lists. */

struct fork_req {
	int		fr_flags;
	int		fr_pages;
	int		*fr_pidp;
	struct proc	**fr_procp;
	int		*fr_pd_fd;
	int		fr_pd_flags;
	struct filecaps	*fr_pd_fcaps;
	int		fr_flags2;
#define	FR2_DROPSIG_CAUGHT	0x00000001 /* Drop caught non-DFL signals */
#define	FR2_SHARE_PATHS		0x00000002 /* Invert sense of RFFDG for paths */
#define	FR2_KPROC		0x00000004 /* Create a kernel process */
};
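/*
 * Illustrative sketch (not part of proc.h): callers of fork1() (declared
 * below) describe the new process in a zeroed struct fork_req.  The flag
 * choice here (RFFDG | RFPROC, from <sys/unistd.h>) mirrors a plain
 * fork(); example_fork() and its error handling are hypothetical.
 */
#ifdef EXAMPLE_FORK1_SKETCH
static int
example_fork(struct thread *td, struct proc **p2)
{
	struct fork_req fr;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC;
	fr.fr_procp = p2;	/* receives the new process, if wanted */
	return (fork1(td, &fr));
}
#endif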
/*
 * pget() flags.
 */
#define	PGET_HOLD	0x00001	/* Hold the process. */
#define	PGET_CANSEE	0x00002	/* Check against p_cansee(). */
#define	PGET_CANDEBUG	0x00004	/* Check against p_candebug(). */
#define	PGET_ISCURRENT	0x00008	/* Check that the found process is current. */
#define	PGET_NOTWEXIT	0x00010	/* Check that the process is not in
				   P_WEXIT. */
#define	PGET_NOTINEXEC	0x00020	/* Check that the process is not in
				   P_INEXEC. */
#define	PGET_NOTID	0x00040	/* Do not assume tid if pid > PID_MAX. */

#define	PGET_WANTREAD	(PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT)

int pget(pid_t pid, int flags, struct proc **pp);

/* ast_register() flags */
#define	ASTR_ASTF_REQUIRED	0x0001	/* td_ast TDAI(TDA_X) flag set is
					   required for call */
#define	ASTR_TDP		0x0002	/* td_pflags flag set is required */
#define	ASTR_KCLEAR		0x0004	/* call me on ast_kclear() */
#define	ASTR_UNCOND		0x0008	/* call me always */

void ast(struct trapframe *framep);
void ast_kclear(struct thread *td);
void ast_register(int ast, int ast_flags, int tdp,
    void (*f)(struct thread *td, int asts));
void ast_deregister(int tda);
void ast_sched_locked(struct thread *td, int tda);
void ast_sched_mask(struct thread *td, int ast);
void ast_sched(struct thread *td, int tda);
void ast_unsched_locked(struct thread *td, int tda);

struct thread *choosethread(void);
int cr_bsd_visible(struct ucred *u1, struct ucred *u2);
int cr_cansee(struct ucred *u1, struct ucred *u2);
int cr_canseesocket(struct ucred *cred, struct socket *so);
int cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
    struct session *sess);
int enterthispgrp(struct proc *p, struct pgrp *pgrp);
int fork1(struct thread *, struct fork_req *);
void fork_exit(void (*)(void *, struct trapframe *), void *,
    struct trapframe *);
void fork_return(struct thread *, struct trapframe *);
int inferior(struct proc *p);
void itimer_proc_continue(struct proc *p);
void kqtimer_proc_continue(struct proc *p);
void kern_proc_vmmap_resident(struct vm_map *map,
    struct vm_map_entry *entry, int *resident_count, bool *super);
void kern_yield(int);
void killjobc(void);
int leavepgrp(struct proc *p);
int maybe_preempt(struct thread *td);
void maybe_yield(void);
void mi_switch(int flags);
int p_candebug(struct thread *td, struct proc *p);
int p_cansee(struct thread *td, struct proc *p);
int p_cansched(struct thread *td, struct proc *p);
int p_cansignal(struct thread *td, struct proc *p, int signum);
int p_canwait(struct thread *td, struct proc *p);
struct pargs *pargs_alloc(int len);
void pargs_drop(struct pargs *pa);
void pargs_hold(struct pargs *pa);
int pgrp_calc_jobc(struct pgrp *pgrp);
void proc_add_orphan(struct proc *child, struct proc *parent);
int proc_get_binpath(struct proc *p, char *binname, char **fullpath,
    char **freepath);
int proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb);
int proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb);
int proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb);
void procinit(void);
int proc_iterate(int (*cb)(struct proc *, void *), void *cbarg);
void proc_linkup0(struct proc *p, struct thread *td);
void proc_linkup(struct proc *p, struct thread *td);
struct proc *proc_realparent(struct proc *child);
void proc_reap(struct thread *td, struct proc *p, int *status, int options);
void proc_reparent(struct proc *child, struct proc *newparent,
    bool set_oppid);
void proc_set_p2_wexit(struct proc *p);
void proc_set_traced(struct proc *p, bool stop);
void proc_wkilled(struct proc *p);
struct pstats *pstats_alloc(void);
void pstats_fork(struct pstats *src, struct pstats *dst);
void pstats_free(struct pstats *ps);
void proc_clear_orphan(struct proc *p);
void reaper_abandon_children(struct proc *p, bool exiting);
int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);
void sess_hold(struct session *);
void sess_release(struct session *);
void setrunnable(struct thread *, int);
void setsugid(struct proc *p);
bool should_yield(void);
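/*
 * Illustrative sketch (not part of proc.h): pget() looks a process up by
 * pid and applies the requested PGET_* checks; this sketch assumes that
 * with PGET_HOLD the process is returned held and unlocked, so the caller
 * pairs it with PRELE().  example_with_proc() is hypothetical.
 */
#ifdef EXAMPLE_PGET_SKETCH
static int
example_with_proc(pid_t pid)
{
	struct proc *p;
	int error;

	error = pget(pid, PGET_CANDEBUG | PGET_NOTWEXIT | PGET_HOLD, &p);
	if (error != 0)
		return (error);
	/* ... use p without it being able to finish exiting ... */
	PRELE(p);
	return (0);
}
#endif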
int sigonstack(size_t sp);
void stopevent(struct proc *, u_int, u_int);
struct thread *tdfind(lwpid_t, pid_t);
void threadinit(void);
void tidhash_add(struct thread *);
void tidhash_remove(struct thread *);
void cpu_idle(int);
int cpu_idle_wakeup(int);
extern void (*cpu_idle_hook)(sbintime_t); /* Hook to machdep CPU idler. */
void cpu_switch(struct thread *, struct thread *, struct mtx *);
void cpu_sync_core(void);
void cpu_throw(struct thread *, struct thread *) __dead2;
+void cpu_update_pcb(struct thread *);
bool curproc_sigkilled(void);
void userret(struct thread *, struct trapframe *);

void cpu_exit(struct thread *);
void exit1(struct thread *, int, int) __dead2;
void cpu_copy_thread(struct thread *td, struct thread *td0);
bool cpu_exec_vmspace_reuse(struct proc *p, struct vm_map *map);
int cpu_fetch_syscall_args(struct thread *td);
void cpu_fork(struct thread *, struct proc *, struct thread *, int);
void cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);
int cpu_procctl(struct thread *td, int idtype, id_t id, int com,
    void *data);
void cpu_set_syscall_retval(struct thread *, int);
int cpu_set_upcall(struct thread *, void (*)(void *), void *, stack_t *);
int cpu_set_user_tls(struct thread *, void *tls_base);
void cpu_thread_alloc(struct thread *);
void cpu_thread_clean(struct thread *);
void cpu_thread_exit(struct thread *);
void cpu_thread_free(struct thread *);

struct thread *thread_alloc(int pages);
int thread_check_susp(struct thread *td, bool sleep);
void thread_cow_get_proc(struct thread *newtd, struct proc *p);
void thread_cow_get(struct thread *newtd, struct thread *td);
void thread_cow_free(struct thread *td);
void thread_cow_update(struct thread *td);
void thread_cow_synced(struct thread *td);
int thread_create(struct thread *td, struct rtprio *rtp,
    int (*initialize_thread)(struct thread *, void *), void *thunk);
void thread_exit(void) __dead2;
void thread_free(struct thread *td);
void thread_link(struct thread *td, struct proc *p);
void thread_reap_barrier(void);
int thread_recycle(struct thread *, int pages);
int thread_single(struct proc *p, int how);
void thread_single_end(struct proc *p, int how);
void thread_stash(struct thread *td);
void thread_stopped(struct proc *p);
void childproc_stopped(struct proc *child, int reason);
void childproc_continued(struct proc *child);
void childproc_exited(struct proc *child);
void thread_run_flash(struct thread *td);
int thread_suspend_check(int how);
bool thread_suspend_check_needed(void);
void thread_suspend_switch(struct thread *, struct proc *p);
void thread_suspend_one(struct thread *td);
void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
void thread_wait(struct proc *p);

bool stop_all_proc_block(void);
void stop_all_proc_unblock(void);
void stop_all_proc(void);
void resume_all_proc(void);

static __inline int
curthread_pflags_set(int flags)
{
	struct thread *td;
	int save;

	td = curthread;
	save = ~flags | (td->td_pflags & flags);
	td->td_pflags |= flags;
	return (save);
}

static __inline void
curthread_pflags_restore(int save)
{
	curthread->td_pflags &= save;
}

static __inline int
curthread_pflags2_set(int flags)
{
	struct thread *td;
	int save;

	td = curthread;
	save = ~flags | (td->td_pflags2 & flags);
	td->td_pflags2 |= flags;
	return (save);
}

static __inline void
curthread_pflags2_restore(int save)
{
	curthread->td_pflags2 &= save;
}

static __inline __pure2 struct td_sched *
td_get_sched(struct thread *td)
{

	return ((struct td_sched *)&td[1]);
}
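/*
 * Illustrative sketch (not part of proc.h): the save/restore pattern for
 * td_pflags nests correctly because curthread_pflags_set() returns a mask
 * that restores only the bits this caller actually set.
 * example_nofaulting() is hypothetical.
 */
#ifdef EXAMPLE_PFLAGS_SKETCH
static void
example_nofaulting(void)
{
	int save;

	save = curthread_pflags_set(TDP_NOFAULTING);
	/* ... code that must not handle page faults ... */
	curthread_pflags_restore(save);
}
#endif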
#define	PROC_ID_PID	0
#define	PROC_ID_GROUP	1
#define	PROC_ID_SESSION	2
#define	PROC_ID_REAP	3
void proc_id_set(int type, pid_t id);
void proc_id_set_cond(int type, pid_t id);
void proc_id_clear(int type, pid_t id);

EVENTHANDLER_LIST_DECLARE(process_ctor);
EVENTHANDLER_LIST_DECLARE(process_dtor);
EVENTHANDLER_LIST_DECLARE(process_init);
EVENTHANDLER_LIST_DECLARE(process_fini);
EVENTHANDLER_LIST_DECLARE(process_exit);
EVENTHANDLER_LIST_DECLARE(process_fork);
EVENTHANDLER_LIST_DECLARE(process_exec);

EVENTHANDLER_LIST_DECLARE(thread_ctor);
EVENTHANDLER_LIST_DECLARE(thread_dtor);
EVENTHANDLER_LIST_DECLARE(thread_init);

#endif /* _KERNEL */

#endif /* !_SYS_PROC_H_ */