Index: head/share/man/man9/Makefile =================================================================== --- head/share/man/man9/Makefile (revision 212111) +++ head/share/man/man9/Makefile (revision 212112) @@ -1,1376 +1,1377 @@ # $FreeBSD$ MAN= accept_filter.9 \ accf_data.9 \ accf_dns.9 \ accf_http.9 \ acl.9 \ alloc_unr.9 \ alq.9 \ altq.9 \ atomic.9 \ bios.9 \ boot.9 \ bpf.9 \ buf.9 \ BUF_ISLOCKED.9 \ BUF_LOCK.9 \ BUF_LOCKFREE.9 \ BUF_LOCKINIT.9 \ BUF_RECURSED.9 \ BUF_TIMELOCK.9 \ BUF_UNLOCK.9 \ bus_activate_resource.9 \ BUS_ADD_CHILD.9 \ bus_alloc_resource.9 \ BUS_BIND_INTR.9 \ bus_child_present.9 \ BUS_CONFIG_INTR.9 \ BUS_DESCRIBE_INTR.9 \ bus_dma.9 \ bus_generic_attach.9 \ bus_generic_detach.9 \ bus_generic_new_pass.9 \ bus_generic_print_child.9 \ bus_generic_read_ivar.9 \ bus_generic_shutdown.9 \ BUS_NEW_PASS.9 \ BUS_PRINT_CHILD.9 \ BUS_READ_IVAR.9 \ bus_release_resource.9 \ bus_set_pass.9 \ bus_set_resource.9 \ BUS_SETUP_INTR.9 \ bus_space.9 \ byteorder.9 \ cd.9 \ condvar.9 \ config_intrhook.9 \ contigmalloc.9 \ copy.9 \ cr_cansee.9 \ critical_enter.9 \ cr_seeothergids.9 \ cr_seeotheruids.9 \ crypto.9 \ CTASSERT.9 \ DB_COMMAND.9 \ DECLARE_GEOM_CLASS.9 \ DECLARE_MODULE.9 \ DELAY.9 \ devclass.9 \ devclass_find.9 \ devclass_get_device.9 \ devclass_get_devices.9 \ devclass_get_drivers.9 \ devclass_get_maxunit.9 \ devclass_get_name.9 \ devclass_get_softc.9 \ dev_clone.9 \ devfs_set_cdevpriv.9 \ device.9 \ device_add_child.9 \ DEVICE_ATTACH.9 \ device_delete_child.9 \ DEVICE_DETACH.9 \ device_enable.9 \ device_find_child.9 \ device_get_children.9 \ device_get_devclass.9 \ device_get_driver.9 \ device_get_ivars.9 \ device_get_name.9 \ device_get_parent.9 \ device_get_softc.9 \ device_get_state.9 \ device_get_sysctl.9 \ device_get_unit.9 \ DEVICE_IDENTIFY.9 \ device_printf.9 \ DEVICE_PROBE.9 \ device_probe_and_attach.9 \ device_quiet.9 \ device_set_desc.9 \ device_set_driver.9 \ device_set_flags.9 \ DEVICE_SHUTDOWN.9 \ DEV_MODULE.9 \ devstat.9 \ devtoname.9 \ disk.9 \ domain.9 \ driver.9 \ DRIVER_MODULE.9 \ EVENTHANDLER.9 \ extattr.9 \ fail.9 \ fetch.9 \ firmware.9 \ g_access.9 \ g_attach.9 \ g_bio.9 \ g_consumer.9 \ g_data.9 \ get_cyclecount.9 \ getnewvnode.9 \ g_event.9 \ g_geom.9 \ g_provider.9 \ g_provider_by_name.9 \ groupmember.9 \ g_wither_geom.9 \ hash.9 \ hashinit.9 \ hexdump.9 \ ieee80211.9 \ ieee80211_amrr.9 \ ieee80211_beacon.9 \ ieee80211_bmiss.9 \ ieee80211_crypto.9 \ ieee80211_ddb.9 \ ieee80211_input.9 \ ieee80211_node.9 \ ieee80211_output.9 \ ieee80211_proto.9 \ ieee80211_radiotap.9 \ ieee80211_regdomain.9 \ ieee80211_scan.9 \ ieee80211_vap.9 \ ifnet.9 \ inittodr.9 \ insmntque.9 \ intro.9 \ ithread.9 \ KASSERT.9 \ kernacc.9 \ kernel_mount.9 \ kobj.9 \ kproc.9 \ kqueue.9 \ kthread.9 \ ktr.9 \ lock.9 \ locking.9 \ LOCK_PROFILING.9 \ mac.9 \ make_dev.9 \ malloc.9 \ mbchain.9 \ mbpool.9 \ mbuf.9 \ mbuf_tags.9 \ MD5.9 \ mdchain.9 \ memguard.9 \ microseq.9 \ microtime.9 \ microuptime.9 \ mi_switch.9 \ module.9 \ MODULE_DEPEND.9 \ MODULE_VERSION.9 \ mtx_pool.9 \ mutex.9 \ namei.9 \ netisr.9 \ panic.9 \ pbuf.9 \ p_candebug.9 \ p_cansee.9 \ pci.9 \ pfil.9 \ pfind.9 \ pgfind.9 \ physio.9 \ pmap.9 \ pmap_activate.9 \ pmap_change_wiring.9 \ pmap_clear_modify.9 \ pmap_copy.9 \ pmap_enter.9 \ pmap_extract.9 \ pmap_growkernel.9 \ pmap_init.9 \ pmap_is_modified.9 \ pmap_is_prefaultable.9 \ pmap_map.9 \ pmap_mincore.9 \ pmap_object_init_pt.9 \ pmap_page_exists_quick.9 \ pmap_page_init.9 \ pmap_page_protect.9 \ pmap_pinit.9 \ pmap_qenter.9 \ pmap_release.9 \ pmap_remove.9 \ 
pmap_resident_count.9 \ pmap_zero_page.9 \ printf.9 \ prison_check.9 \ priv.9 \ pseudofs.9 \ psignal.9 \ random.9 \ random_harvest.9 \ redzone.9 \ refcount.9 \ resettodr.9 \ resource_int_value.9 \ rijndael.9 \ rman.9 \ rmlock.9 \ rtalloc.9 \ rtentry.9 \ runqueue.9 \ rwlock.9 \ sbuf.9 \ scheduler.9 \ securelevel_gt.9 \ selrecord.9 \ sema.9 \ sf_buf.9 \ sglist.9 \ signal.9 \ sleep.9 \ sleepqueue.9 \ socket.9 \ spl.9 \ stack.9 \ store.9 \ style.9 \ swi.9 \ sx.9 \ SYSCALL_MODULE.9 \ sysctl.9 \ sysctl_add_oid.9 \ sysctl_ctx_init.9 \ taskqueue.9 \ thread_exit.9 \ time.9 \ timeout.9 \ tvtohz.9 \ ucred.9 \ uidinfo.9 \ uio.9 \ usbdi.9 \ utopia.9 \ vaccess.9 \ vaccess_acl_nfs4.9 \ vaccess_acl_posix1e.9 \ vcount.9 \ vflush.9 \ VFS.9 \ vfs_busy.9 \ VFS_CHECKEXP.9 \ vfsconf.9 \ VFS_FHTOVP.9 \ vfs_getnewfsid.9 \ vfs_getopt.9 \ vfs_getvfs.9 \ VFS_LOCK_GIANT.9 \ VFS_MOUNT.9 \ vfs_mount.9 \ vfs_mountedfrom.9 \ VFS_QUOTACTL.9 \ VFS_ROOT.9 \ vfs_rootmountalloc.9 \ VFS_SET.9 \ VFS_STATFS.9 \ vfs_suser.9 \ VFS_SYNC.9 \ vfs_timestamp.9 \ vfs_unbusy.9 \ VFS_UNMOUNT.9 \ vfs_unmountall.9 \ VFS_VGET.9 \ vget.9 \ vgone.9 \ vhold.9 \ vinvalbuf.9 \ vm_fault_prefault.9 \ vm_map.9 \ vm_map_check_protection.9 \ vm_map_clean.9 \ vm_map_create.9 \ vm_map_delete.9 \ vm_map_entry_resize_free.9 \ vm_map_find.9 \ vm_map_findspace.9 \ vm_map_inherit.9 \ vm_map_init.9 \ vm_map_insert.9 \ vm_map_lock.9 \ vm_map_lookup.9 \ vm_map_madvise.9 \ vm_map_max.9 \ vm_map_protect.9 \ vm_map_remove.9 \ vm_map_simplify_entry.9 \ vm_map_stack.9 \ vm_map_submap.9 \ vm_map_wire.9 \ vm_page_alloc.9 \ vm_page_bits.9 \ vm_page_cache.9 \ vm_page_copy.9 \ vm_page_deactivate.9 \ vm_page_dontneed.9 \ vm_page_flag.9 \ vm_page_free.9 \ vm_page_grab.9 \ vm_page_hold.9 \ vm_page_insert.9 \ vm_page_io.9 \ vm_page_lookup.9 \ vm_page_protect.9 \ vm_page_rename.9 \ vm_page_sleep_busy.9 \ vm_page_wakeup.9 \ vm_page_wire.9 \ vm_page_zero_fill.9 \ vm_set_page_size.9 \ vn_fullpath.9 \ vn_isdisk.9 \ vnode.9 \ VOP_ACCESS.9 \ VOP_ACLCHECK.9 \ VOP_ADVLOCK.9 \ VOP_ATTRIB.9 \ VOP_BWRITE.9 \ VOP_CREATE.9 \ VOP_FSYNC.9 \ VOP_GETACL.9 \ VOP_GETEXTATTR.9 \ VOP_GETPAGES.9 \ VOP_GETVOBJECT.9 \ VOP_INACTIVE.9 \ VOP_IOCTL.9 \ VOP_LINK.9 \ VOP_LISTEXTATTR.9 \ VOP_LOCK.9 \ VOP_LOOKUP.9 \ VOP_OPENCLOSE.9 \ VOP_PATHCONF.9 \ VOP_PRINT.9 \ VOP_RDWR.9 \ VOP_READDIR.9 \ VOP_READLINK.9 \ VOP_REALLOCBLKS.9 \ VOP_REMOVE.9 \ VOP_RENAME.9 \ VOP_REVOKE.9 \ VOP_SETACL.9 \ VOP_SETEXTATTR.9 \ VOP_STRATEGY.9 \ VOP_VPTOCNP.9 \ VOP_VPTOFH.9 \ vput.9 \ vref.9 \ vrefcnt.9 \ vrele.9 \ vslock.9 \ watchdog.9 \ zero_copy.9 \ zone.9 MLINKS= alloc_unr.9 alloc_unrl.9 \ alloc_unr.9 alloc_unr_specific.9 \ alloc_unr.9 delete_unrhdr.9 \ alloc_unr.9 free_unr.9 \ alloc_unr.9 new_unrhdr.9 MLINKS+=alq.9 ALQ.9 \ alq.9 alq_close.9 \ alq.9 alq_flush.9 \ alq.9 alq_get.9 \ alq.9 alq_open.9 \ alq.9 alq_post.9 \ alq.9 alq_write.9 MLINKS+=altq.9 ALTQ.9 MLINKS+=atomic.9 atomic_add.9 \ atomic.9 atomic_clear.9 \ atomic.9 atomic_cmpset.9 \ atomic.9 atomic_fetchadd.9 \ atomic.9 atomic_load.9 \ atomic.9 atomic_readandclear.9 \ atomic.9 atomic_set.9 \ atomic.9 atomic_store.9 \ atomic.9 atomic_subtract.9 MLINKS+=bpf.9 bpfattach.9 \ bpf.9 bpfattach2.9 \ bpf.9 bpfdetach.9 \ bpf.9 bpf_filter.9 \ bpf.9 bpf_mtap.9 \ bpf.9 bpf_mtap2.9 \ bpf.9 bpf_tap.9 \ bpf.9 bpf_validate.9 MLINKS+=buf.9 bp.9 MLINKS+=bus_activate_resource.9 bus_deactivate_resource.9 MLINKS+=bus_alloc_resource.9 bus_alloc_resource_any.9 MLINKS+=BUS_BIND_INTR.9 bus_bind_intr.9 MLINKS+=BUS_DESCRIBE_INTR.9 bus_describe_intr.9 MLINKS+=bus_dma.9 busdma.9 \ bus_dma.9 
bus_dmamap_create.9 \ bus_dma.9 bus_dmamap_destroy.9 \ bus_dma.9 bus_dmamap_load.9 \ bus_dma.9 bus_dmamap_load_mbuf.9 \ bus_dma.9 bus_dmamap_load_mbuf_sg.9 \ bus_dma.9 bus_dmamap_load_uio.9 \ bus_dma.9 bus_dmamap_sync.9 \ bus_dma.9 bus_dmamap_unload.9 \ bus_dma.9 bus_dmamem_alloc.9 \ bus_dma.9 bus_dmamem_free.9 \ bus_dma.9 bus_dma_tag_create.9 \ bus_dma.9 bus_dma_tag_destroy.9 MLINKS+=bus_generic_read_ivar.9 bus_generic_write_ivar.9 MLINKS+=BUS_READ_IVAR.9 BUS_WRITE_IVAR.9 MLINKS+=BUS_SETUP_INTR.9 bus_setup_intr.9 \ BUS_SETUP_INTR.9 BUS_TEARDOWN_INTR.9 \ BUS_SETUP_INTR.9 bus_teardown_intr.9 MLINKS+=bus_space.9 bus_space_barrier.9 \ bus_space.9 bus_space_copy_region_1.9 \ bus_space.9 bus_space_copy_region_2.9 \ bus_space.9 bus_space_copy_region_4.9 \ bus_space.9 bus_space_copy_region_8.9 \ bus_space.9 bus_space_copy_region_stream_1.9 \ bus_space.9 bus_space_copy_region_stream_2.9 \ bus_space.9 bus_space_copy_region_stream_4.9 \ bus_space.9 bus_space_copy_region_stream_8.9 \ bus_space.9 bus_space_free.9 \ bus_space.9 bus_space_map.9 \ bus_space.9 bus_space_read_1.9 \ bus_space.9 bus_space_read_2.9 \ bus_space.9 bus_space_read_4.9 \ bus_space.9 bus_space_read_8.9 \ bus_space.9 bus_space_read_multi_1.9 \ bus_space.9 bus_space_read_multi_2.9 \ bus_space.9 bus_space_read_multi_4.9 \ bus_space.9 bus_space_read_multi_8.9 \ bus_space.9 bus_space_read_multi_stream_1.9 \ bus_space.9 bus_space_read_multi_stream_2.9 \ bus_space.9 bus_space_read_multi_stream_4.9 \ bus_space.9 bus_space_read_multi_stream_8.9 \ bus_space.9 bus_space_read_region_1.9 \ bus_space.9 bus_space_read_region_2.9 \ bus_space.9 bus_space_read_region_4.9 \ bus_space.9 bus_space_read_region_8.9 \ bus_space.9 bus_space_read_region_stream_1.9 \ bus_space.9 bus_space_read_region_stream_2.9 \ bus_space.9 bus_space_read_region_stream_4.9 \ bus_space.9 bus_space_read_region_stream_8.9 \ bus_space.9 bus_space_read_stream_1.9 \ bus_space.9 bus_space_read_stream_2.9 \ bus_space.9 bus_space_read_stream_4.9 \ bus_space.9 bus_space_read_stream_8.9 \ bus_space.9 bus_space_set_multi_1.9 \ bus_space.9 bus_space_set_multi_2.9 \ bus_space.9 bus_space_set_multi_4.9 \ bus_space.9 bus_space_set_multi_8.9 \ bus_space.9 bus_space_set_multi_stream_1.9 \ bus_space.9 bus_space_set_multi_stream_2.9 \ bus_space.9 bus_space_set_multi_stream_4.9 \ bus_space.9 bus_space_set_multi_stream_8.9 \ bus_space.9 bus_space_set_region_1.9 \ bus_space.9 bus_space_set_region_2.9 \ bus_space.9 bus_space_set_region_4.9 \ bus_space.9 bus_space_set_region_8.9 \ bus_space.9 bus_space_set_region_stream_1.9 \ bus_space.9 bus_space_set_region_stream_2.9 \ bus_space.9 bus_space_set_region_stream_4.9 \ bus_space.9 bus_space_set_region_stream_8.9 \ bus_space.9 bus_space_subregion.9 \ bus_space.9 bus_space_unmap.9 \ bus_space.9 bus_space_write_1.9 \ bus_space.9 bus_space_write_2.9 \ bus_space.9 bus_space_write_4.9 \ bus_space.9 bus_space_write_8.9 \ bus_space.9 bus_space_write_multi_1.9 \ bus_space.9 bus_space_write_multi_2.9 \ bus_space.9 bus_space_write_multi_4.9 \ bus_space.9 bus_space_write_multi_8.9 \ bus_space.9 bus_space_write_multi_stream_1.9 \ bus_space.9 bus_space_write_multi_stream_2.9 \ bus_space.9 bus_space_write_multi_stream_4.9 \ bus_space.9 bus_space_write_multi_stream_8.9 \ bus_space.9 bus_space_write_region_1.9 \ bus_space.9 bus_space_write_region_2.9 \ bus_space.9 bus_space_write_region_4.9 \ bus_space.9 bus_space_write_region_8.9 \ bus_space.9 bus_space_write_region_stream_1.9 \ bus_space.9 bus_space_write_region_stream_2.9 \ bus_space.9 
bus_space_write_region_stream_4.9 \ bus_space.9 bus_space_write_region_stream_8.9 \ bus_space.9 bus_space_write_stream_1.9 \ bus_space.9 bus_space_write_stream_2.9 \ bus_space.9 bus_space_write_stream_4.9 \ bus_space.9 bus_space_write_stream_8.9 MLINKS+=byteorder.9 be16dec.9 \ byteorder.9 be16enc.9 \ byteorder.9 be16toh.9 \ byteorder.9 be32dec.9 \ byteorder.9 be32enc.9 \ byteorder.9 be32toh.9 \ byteorder.9 be64dec.9 \ byteorder.9 be64enc.9 \ byteorder.9 be64toh.9 \ byteorder.9 bswap16.9 \ byteorder.9 bswap32.9 \ byteorder.9 bswap64.9 \ byteorder.9 htobe16.9 \ byteorder.9 htobe32.9 \ byteorder.9 htobe64.9 \ byteorder.9 htole16.9 \ byteorder.9 htole32.9 \ byteorder.9 htole64.9 \ byteorder.9 le16dec.9 \ byteorder.9 le16enc.9 \ byteorder.9 le16toh.9 \ byteorder.9 le32dec.9 \ byteorder.9 le32enc.9 \ byteorder.9 le32toh.9 \ byteorder.9 le64dec.9 \ byteorder.9 le64enc.9 \ byteorder.9 le64toh.9 MLINKS+=condvar.9 cv_broadcast.9 \ condvar.9 cv_broadcastpri.9 \ condvar.9 cv_destroy.9 \ condvar.9 cv_init.9 \ condvar.9 cv_signal.9 \ condvar.9 cv_timedwait.9 \ condvar.9 cv_timedwait_sig.9 \ condvar.9 cv_wait.9 \ condvar.9 cv_wait_sig.9 \ condvar.9 cv_wait_unlock.9 \ condvar.9 cv_wmesg.9 MLINKS+=config_intrhook.9 config_intrhook_disestablish.9 \ config_intrhook.9 config_intrhook_establish.9 MLINKS+=contigmalloc.9 contigfree.9 MLINKS+=copy.9 copyin.9 \ copy.9 copyinstr.9 \ copy.9 copyout.9 \ copy.9 copystr.9 MLINKS+=critical_enter.9 critical.9 \ critical_enter.9 critical_exit.9 MLINKS+=crypto.9 crypto_dispatch.9 \ crypto.9 crypto_done.9 \ crypto.9 crypto_freereq.9 \ crypto.9 crypto_freesession.9 \ crypto.9 crypto_get_driverid.9 \ crypto.9 crypto_getreq.9 \ crypto.9 crypto_kdispatch.9 \ crypto.9 crypto_kdone.9 \ crypto.9 crypto_kregister.9 \ crypto.9 crypto_newsession.9 \ crypto.9 crypto_register.9 \ crypto.9 crypto_unblock.9 \ crypto.9 crypto_unregister.9 \ crypto.9 crypto_unregister_all.9 MLINKS+=DB_COMMAND.9 DB_SHOW_ALL_COMMAND.9 \ DB_COMMAND.9 DB_SHOW_COMMAND.9 MLINKS+=dev_clone.9 drain_dev_clone_events.9 MLINKS+=devfs_set_cdevpriv.9 devfs_clear_cdevpriv.9 \ devfs_set_cdevpriv.9 devfs_get_cdevpriv.9 MLINKS+=device_add_child.9 device_add_child_ordered.9 MLINKS+=device_enable.9 device_disable.9 \ device_enable.9 device_is_enabled.9 MLINKS+=device_get_ivars.9 device_set_ivars.9 MLINKS+=device_get_name.9 device_get_nameunit.9 MLINKS+=device_get_state.9 device_busy.9 \ device_get_state.9 device_is_alive.9 \ device_get_state.9 device_is_attached.9 \ device_get_state.9 device_unbusy.9 MLINKS+=device_get_sysctl.9 device_get_sysctl_ctx.9 \ device_get_sysctl.9 device_get_sysctl_tree.9 MLINKS+=device_quiet.9 device_is_quiet.9 \ device_quiet.9 device_verbose.9 MLINKS+=device_set_desc.9 device_get_desc.9 \ device_set_desc.9 device_set_desc_copy.9 MLINKS+=device_set_flags.9 device_get_flags.9 MLINKS+=devstat.9 devicestat.9 \ devstat.9 devstat_add_entry.9 \ devstat.9 devstat_end_transaction.9 \ devstat.9 devstat_remove_entry.9 \ devstat.9 devstat_start_transaction.9 MLINKS+=disk.9 disk_alloc.9 \ disk.9 disk_create.9 \ disk.9 disk_destroy.9 \ disk.9 disk_gone.9 MLINKS+=domain.9 DOMAIN_SET.9 \ domain.9 net_add_domain.9 \ domain.9 pfctlinput.9 \ domain.9 pfctlinput2.9 \ domain.9 pffindproto.9 \ domain.9 pffindtype.9 MLINKS+=DRIVER_MODULE.9 MULTI_DRIVER_MODULE.9 MLINKS+=EVENTHANDLER.9 EVENTHANDLER_DECLARE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEREGISTER.9 \ EVENTHANDLER.9 eventhandler_deregister.9 \ EVENTHANDLER.9 eventhandler_find_list.9 \ EVENTHANDLER.9 EVENTHANDLER_INVOKE.9 \ EVENTHANDLER.9 eventhandler_prune_list.9 \ 
EVENTHANDLER.9 EVENTHANDLER_REGISTER.9 \ EVENTHANDLER.9 eventhandler_register.9 MLINKS+=fetch.9 fubyte.9 \ fetch.9 fuswintr.9 \ fetch.9 fuword.9 \ fetch.9 fuword16.9 \ fetch.9 fuword32.9 \ fetch.9 fuword64.9 MLINKS+=g_attach.9 g_detach.9 MLINKS+=g_bio.9 g_clone_bio.9 \ g_bio.9 g_destroy_bio.9 \ g_bio.9 g_new_bio.9 \ g_bio.9 g_print_bio.9 MLINKS+=g_consumer.9 g_destroy_consumer.9 \ g_consumer.9 g_new_consumer.9 MLINKS+=g_data.9 g_read_data.9 \ g_data.9 g_write_data.9 MLINKS+=g_event.9 g_cancel_event.9 \ g_event.9 g_post_event.9 \ g_event.9 g_waitfor_event.9 MLINKS+=g_geom.9 g_destroy_geom.9 \ g_geom.9 g_new_geomf.9 MLINKS+=g_provider.9 g_destroy_provider.9 \ g_provider.9 g_error_provider.9 \ g_provider.9 g_new_providerf.9 MLINKS+=hash.9 hash32.9 \ hash.9 hash32_buf.9 \ hash.9 hash32_str.9 \ hash.9 hash32_stre.9 \ hash.9 hash32_strn.9 \ hash.9 hash32_strne.9 MLINKS+=hashinit.9 hashdestroy.9 \ hashinit.9 hashinit_flags.9 \ hashinit.9 phashinit.9 MLINKS+=ieee80211.9 ieee80211_ifattach.9 \ ieee80211.9 ieee80211_ifdetach.9 MLINKS+=ieee80211_amrr.9 ieee80211_amrr_cleanup.9 \ ieee80211_amrr.9 ieee80211_amrr_init.9 \ ieee80211_amrr.9 ieee80211_amrr_node_init.9 \ ieee80211_amrr.9 ieee80211_amrr_setinterval.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_complete.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_update.9 MLINKS+=ieee80211_beacon.9 ieee80211_beacon_alloc.9 \ ieee80211_beacon.9 ieee80211_beacon_notify.9 \ ieee80211_beacon.9 ieee80211_beacon_update.9 MLINKS+=ieee80211_bmiss.9 ieee80211_beacon_miss.9 MLINKS+=ieee80211_crypto.9 ieee80211_crypto_available.9 \ ieee80211_crypto.9 ieee80211_crypto_decap.9 \ ieee80211_crypto.9 ieee80211_crypto_delglobalkeys.9 \ ieee80211_crypto.9 ieee80211_crypto_demic.9 \ ieee80211_crypto.9 ieee80211_crypto_encap.9 \ ieee80211_crypto.9 ieee80211_crypto_enmic.9 \ ieee80211_crypto.9 ieee80211_crypto_newkey.9 \ ieee80211_crypto.9 ieee80211_crypto_register.9 \ ieee80211_crypto.9 ieee80211_crypto_reload_keys.9 \ ieee80211_crypto.9 ieee80211_crypto_setkey.9 \ ieee80211_crypto.9 ieee80211_crypto_unregister.9 \ ieee80211_crypto.9 ieee80211_key_update_begin.9 \ ieee80211_crypto.9 ieee80211_key_update_end.9 \ ieee80211_crypto.9 ieee80211_notify_michael_failure.9 \ ieee80211_crypto.9 ieee80211_notify_replay_failure.9 MLINKS+=ieee80211_input.9 ieee80211_input_all.9 MLINKS+=ieee80211_node.9 ieee80211_dump_node.9 \ ieee80211_node.9 ieee80211_dump_nodes.9 \ ieee80211_node.9 ieee80211_find_rxnode.9 \ ieee80211_node.9 ieee80211_find_rxnode_withkey.9 \ ieee80211_node.9 ieee80211_free_node.9 \ ieee80211_node.9 ieee80211_iterate_nodes.9 \ ieee80211_node.9 ieee80211_ref_node.9 \ ieee80211_node.9 ieee80211_unref_node.9 MLINKS+=ieee80211_output.9 ieee80211_process_callback.9 \ ieee80211_output.9 M_SEQNO_GET.9 \ ieee80211_output.9 M_WME_GETAC.9 MLINKS+=ieee80211_proto.9 ieee80211_new_state.9 \ ieee80211_proto.9 ieee80211_resume_all.9 \ ieee80211_proto.9 ieee80211_start_all.9 \ ieee80211_proto.9 ieee80211_stop_all.9 \ ieee80211_proto.9 ieee80211_suspend_all.9 \ ieee80211_proto.9 ieee80211_waitfor_parent.9 MLINKS+=ieee80211_radiotap.9 ieee80211_radiotap_active.9 \ ieee80211_radiotap.9 ieee80211_radiotap_active_vap.9 \ ieee80211_radiotap.9 ieee80211_radiotap_attach.9 \ ieee80211_radiotap.9 ieee80211_radiotap_tx.9 \ ieee80211_radiotap.9 radiotap.9 MLINKS+=ieee80211_regdomain.9 ieee80211_alloc_countryie.9 \ ieee80211_regdomain.9 ieee80211_init_channels.9 \ ieee80211_regdomain.9 ieee80211_sort_channels.9 MLINKS+=ieee80211_vap.9 ieee80211_vap_attach.9 \ ieee80211_vap.9 ieee80211_vap_detach.9 \ ieee80211_vap.9 
ieee80211_vap_setup.9 MLINKS+=ifnet.9 ifaddr.9 \ ifnet.9 if_data.9 \ ifnet.9 ifqueue.9 MLINKS+=insmntque.9 insmntque1.9 MLINKS+=ithread.9 ithread_add_handler.9 \ ithread.9 ithread_create.9 \ ithread.9 ithread_destroy.9 \ ithread.9 ithread_priority.9 \ ithread.9 ithread_remove_handler.9 \ ithread.9 ithread_schedule.9 MLINKS+=kernacc.9 useracc.9 MLINKS+=kernel_mount.9 free_mntarg.9 \ kernel_mount.9 kernel_vmount.9 \ kernel_mount.9 mount_arg.9 \ kernel_mount.9 mount_argb.9 \ kernel_mount.9 mount_argf.9 \ kernel_mount.9 mount_argsu.9 MLINKS+=kobj.9 DEFINE_CLASS.9 \ kobj.9 kobj_class_compile.9 \ kobj.9 kobj_class_compile_static.9 \ kobj.9 kobj_class_free.9 \ kobj.9 kobj_create.9 \ kobj.9 kobj_delete.9 \ kobj.9 kobj_init.9 MLINKS+=kproc.9 kproc_create.9 \ kproc.9 kproc_exit.9 \ kproc.9 kproc_resume.9 \ kproc.9 kproc_shutdown.9 \ kproc.9 kproc_start.9 \ kproc.9 kproc_suspend.9 \ kproc.9 kproc_suspend_check.9 \ kproc.9 kthread_create.9 MLINKS+=kqueue.9 knlist_add.9 \ kqueue.9 knlist_clear.9 \ kqueue.9 knlist_delete.9 \ kqueue.9 knlist_destroy.9 \ kqueue.9 knlist_empty.9 \ kqueue.9 knlist_init.9 \ kqueue.9 knlist_remove.9 \ kqueue.9 knlist_remove_inevent.9 \ kqueue.9 knote_fdclose.9 \ kqueue.9 KNOTE_LOCKED.9 \ kqueue.9 KNOTE_UNLOCKED.9 \ kqueue.9 kqfd_register.9 \ kqueue.9 kqueue_add_filteropts.9 \ kqueue.9 kqueue_del_filteropts.9 MLINKS+=kthread.9 kthread_add.9 \ kthread.9 kthread_exit.9 \ kthread.9 kthread_resume.9 \ kthread.9 kthread_shutdown.9 \ kthread.9 kthread_start.9 \ kthread.9 kthread_suspend.9 \ kthread.9 kthread_suspend_check.9 MLINKS+=ktr.9 CTR0.9 \ ktr.9 CTR1.9 \ ktr.9 CTR2.9 \ ktr.9 CTR3.9 \ ktr.9 CTR4.9 \ ktr.9 CTR5.9 MLINKS+=lock.9 lockdestroy.9 \ lock.9 lockinit.9 \ lock.9 lockmgr.9 \ lock.9 lockmgr_args.9 \ lock.9 lockmgr_args_rw.9 \ lock.9 lockmgr_assert.9 \ lock.9 lockmgr_disown.9 \ lock.9 lockmgr_printinfo.9 \ lock.9 lockmgr_recursed.9 \ lock.9 lockmgr_rw.9 \ lock.9 lockmgr_waiters.9 \ lock.9 lockstatus.9 MLINKS+=LOCK_PROFILING.9 MUTEX_PROFILING.9 MLINKS+=make_dev.9 destroy_dev.9 \ make_dev.9 destroy_dev_drain.9 \ make_dev.9 destroy_dev_sched.9 \ make_dev.9 destroy_dev_sched_cb.9 \ make_dev.9 dev_depends.9 \ make_dev.9 make_dev_alias.9 \ make_dev.9 make_dev_cred.9 \ make_dev.9 make_dev_credf.9 MLINKS+=malloc.9 free.9 \ malloc.9 MALLOC_DECLARE.9 \ malloc.9 MALLOC_DEFINE.9 \ malloc.9 realloc.9 \ malloc.9 reallocf.9 MLINKS+=mbchain.9 mb_detach.9 \ mbchain.9 mb_done.9 \ mbchain.9 mb_fixhdr.9 \ mbchain.9 mb_init.9 \ mbchain.9 mb_initm.9 \ mbchain.9 mb_put_int64be.9 \ mbchain.9 mb_put_int64le.9 \ mbchain.9 mb_put_mbuf.9 \ mbchain.9 mb_put_mem.9 \ mbchain.9 mb_put_uint16be.9 \ mbchain.9 mb_put_uint16le.9 \ mbchain.9 mb_put_uint32be.9 \ mbchain.9 mb_put_uint32le.9 \ mbchain.9 mb_put_uint8.9 \ mbchain.9 mb_put_uio.9 \ mbchain.9 mb_reserve.9 MLINKS+=mbpool.9 mbp_alloc.9 \ mbpool.9 mbp_card_free.9 \ mbpool.9 mbp_count.9 \ mbpool.9 mbp_create.9 \ mbpool.9 mbp_destroy.9 \ mbpool.9 mbp_ext_free.9 \ mbpool.9 mbp_free.9 \ mbpool.9 mbp_get.9 \ mbpool.9 mbp_get_keep.9 \ mbpool.9 mbp_sync.9 MLINKS+=\ mbuf.9 m_adj.9 \ mbuf.9 M_ALIGN.9 \ mbuf.9 m_apply.9 \ mbuf.9 m_cat.9 \ mbuf.9 MCHTYPE.9 \ mbuf.9 MCLGET.9 \ mbuf.9 m_copyback.9 \ mbuf.9 m_copydata.9 \ mbuf.9 m_copym.9 \ mbuf.9 m_copypacket.9 \ mbuf.9 m_defrag.9 \ mbuf.9 m_devget.9 \ mbuf.9 m_dup.9 \ mbuf.9 m_dup_pkthdr.9 \ mbuf.9 MEXTADD.9 \ mbuf.9 MEXT_ADD_REF.9 \ mbuf.9 MEXTFREE.9 \ mbuf.9 MEXT_IS_REF.9 \ mbuf.9 MEXT_REM_REF.9 \ mbuf.9 m_fixhdr.9 \ mbuf.9 MFREE.9 \ mbuf.9 m_free.9 \ mbuf.9 m_freem.9 \ mbuf.9 MGET.9 \ mbuf.9 m_get.9 \ mbuf.9 
m_getcl.9 \ mbuf.9 m_getclr.9 \ mbuf.9 MGETHDR.9 \ mbuf.9 m_gethdr.9 \ mbuf.9 m_getm.9 \ mbuf.9 m_getptr.9 \ mbuf.9 MH_ALIGN.9 \ mbuf.9 M_LEADINGSPACE.9 \ mbuf.9 m_length.9 \ mbuf.9 M_MOVE_PKTHDR.9 \ mbuf.9 m_move_pkthdr.9 \ mbuf.9 M_PREPEND.9 \ mbuf.9 m_prepend.9 \ mbuf.9 m_pullup.9 \ mbuf.9 m_split.9 \ mbuf.9 mtod.9 \ mbuf.9 M_TRAILINGSPACE.9 \ mbuf.9 M_WRITABLE.9 MLINKS+=MD5.9 MD5Init.9 \ MD5.9 MD5Transform.9 MLINKS+=mdchain.9 md_append_record.9 \ mdchain.9 md_done.9 \ mdchain.9 md_get_int64.9 \ mdchain.9 md_get_int64be.9 \ mdchain.9 md_get_int64le.9 \ mdchain.9 md_get_mbuf.9 \ mdchain.9 md_get_mem.9 \ mdchain.9 md_get_uint16.9 \ mdchain.9 md_get_uint16be.9 \ mdchain.9 md_get_uint16le.9 \ mdchain.9 md_get_uint32.9 \ mdchain.9 md_get_uint32be.9 \ mdchain.9 md_get_uint32le.9 \ mdchain.9 md_get_uint8.9 \ mdchain.9 md_get_uio.9 \ mdchain.9 md_initm.9 \ mdchain.9 md_next_record.9 MLINKS+=microtime.9 bintime.9 \ microtime.9 getbintime.9 \ microtime.9 getmicrotime.9 \ microtime.9 getnanotime.9 \ microtime.9 nanotime.9 MLINKS+=microuptime.9 binuptime.9 \ microuptime.9 getbinuptime.9 \ microuptime.9 getmicrouptime.9 \ microuptime.9 getnanouptime.9 \ microuptime.9 nanouptime.9 MLINKS+=mi_switch.9 cpu_switch.9 \ mi_switch.9 cpu_throw.9 MLINKS+=mtx_pool.9 mtx_pool_alloc.9 \ mtx_pool.9 mtx_pool_create.9 \ mtx_pool.9 mtx_pool_destroy.9 \ mtx_pool.9 mtx_pool_find.9 \ mtx_pool.9 mtx_pool_lock.9 \ mtx_pool.9 mtx_pool_lock_spin.9 \ mtx_pool.9 mtx_pool_unlock.9 \ mtx_pool.9 mtx_pool_unlock_spin.9 MLINKS+=mutex.9 mtx_assert.9 \ mutex.9 mtx_destroy.9 \ mutex.9 mtx_init.9 \ mutex.9 mtx_initialized.9 \ mutex.9 mtx_lock.9 \ mutex.9 mtx_lock_flags.9 \ mutex.9 mtx_lock_spin.9 \ mutex.9 mtx_lock_spin_flags.9 \ mutex.9 mtx_owned.9 \ mutex.9 mtx_recursed.9 \ mutex.9 mtx_sleep.9 \ mutex.9 MTX_SYSINIT.9 \ mutex.9 mtx_trylock.9 \ mutex.9 mtx_trylock_flags.9 \ mutex.9 mtx_unlock.9 \ mutex.9 mtx_unlock_flags.9 \ mutex.9 mtx_unlock_spin.9 \ mutex.9 mtx_unlock_spin_flags.9 MLINKS+=namei.9 NDFREE.9 \ namei.9 NDHASGIANT.9 \ namei.9 NDINIT.9 MLINKS+=pbuf.9 getpbuf.9 \ pbuf.9 relpbuf.9 \ pbuf.9 trypbuf.9 MLINKS+=pci.9 pci_disable_busmaster.9 \ pci.9 pci_disable_io.9 \ pci.9 pci_enable_busmaster.9 \ pci.9 pci_enable_io.9 \ pci.9 pci_find_bsf.9 \ pci.9 pci_find_dbsf.9 \ pci.9 pci_find_device.9 \ pci.9 pci_get_powerstate.9 \ pci.9 pci_read_config.9 \ pci.9 pci_set_powerstate.9 \ pci.9 pci_write_config.9 MLINKS+=pfil.9 pfil_add_hook.9 \ pfil.9 pfil_hook_get.9 \ pfil.9 pfil_remove_hook.9 MLINKS+=pfind.9 zpfind.9 MLINKS+=pmap_clear_modify.9 pmap_clear_reference.9 MLINKS+=pmap_copy.9 pmap_copy_page.9 MLINKS+=pmap_extract.9 pmap_extract_and_hold.9 MLINKS+=pmap_init.9 pmap_init2.9 MLINKS+=pmap_is_modified.9 pmap_ts_modified.9 MLINKS+=pmap_page_protect.9 pmap_protect.9 MLINKS+=pmap_pinit.9 pmap_pinit0.9 \ pmap_pinit.9 pmap_pinit2.9 MLINKS+=pmap_qenter.9 pmap_qremove.9 MLINKS+=pmap_remove.9 pmap_remove_all.9 \ pmap_remove.9 pmap_remove_pages.9 MLINKS+=pmap_resident_count.9 pmap_wired_count.9 MLINKS+=pmap_zero_page.9 pmap_zero_area.9 \ pmap_zero_page.9 pmap_zero_idle.9 MLINKS+=printf.9 log.9 \ printf.9 tprintf.9 \ printf.9 uprintf.9 MLINKS+=priv.9 priv_check.9 \ priv.9 priv_check_cred.9 MLINKS+=psignal.9 gsignal.9 \ psignal.9 pgsignal.9 MLINKS+=random.9 arc4rand.9 \ random.9 arc4random.9 \ random.9 read_random.9 \ random.9 srandom.9 MLINKS+=refcount.9 refcount_acquire.9 \ refcount.9 refcount_init.9 \ refcount.9 refcount_release.9 MLINKS+=resource_int_value.9 resource_long_value.9 \ resource_int_value.9 resource_string_value.9 
MLINKS+=rman.9 rman_activate_resource.9 \ rman.9 rman_await_resource.9 \ rman.9 rman_deactivate_resource.9 \ rman.9 rman_fini.9 \ rman.9 rman_get_bushandle.9 \ rman.9 rman_get_bustag.9 \ rman.9 rman_get_device.9 \ rman.9 rman_get_end.9 \ rman.9 rman_get_flags.9 \ rman.9 rman_get_rid.9 \ rman.9 rman_get_size.9 \ rman.9 rman_get_start.9 \ rman.9 rman_get_virtual.9 \ rman.9 rman_init.9 \ rman.9 rman_make_alignment_flags.9 \ rman.9 rman_manage_region.9 \ rman.9 rman_release_resource.9 \ rman.9 rman_reserve_resource.9 \ rman.9 rman_reserve_resource_bound.9 \ rman.9 rman_set_bushandle.9 \ rman.9 rman_set_bustag.9 \ rman.9 rman_set_rid.9 \ rman.9 rman_set_virtual.9 MLINKS+=rmlock.9 rm_destroy.9 \ rmlock.9 rm_init.9 \ rmlock.9 rm_rlock.9 \ + rmlock.9 rm_try_rlock.9 \ rmlock.9 rm_runlock.9 \ rmlock.9 RM_SYSINIT.9 \ rmlock.9 rm_wlock.9 \ rmlock.9 rm_wowned.9 \ rmlock.9 rm_wunlock.9 MLINKS+=rtalloc.9 rtalloc1.9 \ rtalloc.9 rtalloc_ign.9 \ rtalloc.9 RTFREE.9 \ rtalloc.9 rtfree.9 MLINKS+=runqueue.9 choosethread.9 \ runqueue.9 procrunnable.9 \ runqueue.9 remrunqueue.9 \ runqueue.9 setrunqueue.9 MLINKS+=rwlock.9 rw_assert.9 \ rwlock.9 rw_destroy.9 \ rwlock.9 rw_downgrade.9 \ rwlock.9 rw_init.9 \ rwlock.9 rw_initialized.9 \ rwlock.9 rw_rlock.9 \ rwlock.9 rw_runlock.9 \ rwlock.9 rw_sleep.9 \ rwlock.9 RW_SYSINIT.9 \ rwlock.9 rw_try_rlock.9 \ rwlock.9 rw_try_upgrade.9 \ rwlock.9 rw_try_wlock.9 \ rwlock.9 rw_wlock.9 \ rwlock.9 rw_wowned.9 \ rwlock.9 rw_wunlock.9 MLINKS+=sbuf.9 sbuf_bcat.9 \ sbuf.9 sbuf_bcopyin.9 \ sbuf.9 sbuf_bcpy.9 \ sbuf.9 sbuf_cat.9 \ sbuf.9 sbuf_clear.9 \ sbuf.9 sbuf_copyin.9 \ sbuf.9 sbuf_cpy.9 \ sbuf.9 sbuf_data.9 \ sbuf.9 sbuf_delete.9 \ sbuf.9 sbuf_done.9 \ sbuf.9 sbuf_finish.9 \ sbuf.9 sbuf_len.9 \ sbuf.9 sbuf_new.9 \ sbuf.9 sbuf_overflowed.9 \ sbuf.9 sbuf_printf.9 \ sbuf.9 sbuf_putc.9 \ sbuf.9 sbuf_setpos.9 \ sbuf.9 sbuf_trim.9 \ sbuf.9 sbuf_vprintf.9 MLINKS+=scheduler.9 curpriority_cmp.9 \ scheduler.9 maybe_resched.9 \ scheduler.9 resetpriority.9 \ scheduler.9 roundrobin.9 \ scheduler.9 roundrobin_interval.9 \ scheduler.9 schedclock.9 \ scheduler.9 schedcpu.9 \ scheduler.9 sched_setup.9 \ scheduler.9 setrunnable.9 \ scheduler.9 updatepri.9 MLINKS+=securelevel_gt.9 securelevel_ge.9 MLINKS+=selrecord.9 selwakeup.9 MLINKS+=sema.9 sema_destroy.9 \ sema.9 sema_init.9 \ sema.9 sema_post.9 \ sema.9 sema_timedwait.9 \ sema.9 sema_trywait.9 \ sema.9 sema_value.9 \ sema.9 sema_wait.9 MLINKS+=sf_buf.9 sf_buf_alloc.9 \ sf_buf.9 sf_buf_free.9 \ sf_buf.9 sf_buf_kva.9 \ sf_buf.9 sf_buf_page.9 MLINKS+=sglist.9 sglist_alloc.9 \ sglist.9 sglist_append.9 \ sglist.9 sglist_append_mbuf.9 \ sglist.9 sglist_append_phys.9 \ sglist.9 sglist_append_uio.9 \ sglist.9 sglist_append_user.9 \ sglist.9 sglist_build.9 \ sglist.9 sglist_clone.9 \ sglist.9 sglist_consume_uio.9 \ sglist.9 sglist_count.9 \ sglist.9 sglist_free.9 \ sglist.9 sglist_hold.9 \ sglist.9 sglist_init.9 \ sglist.9 sglist_join.9 \ sglist.9 sglist_length.9 \ sglist.9 sglist_reset.9 \ sglist.9 sglist_slice.9 \ sglist.9 sglist_split.9 MLINKS+=signal.9 cursig.9 \ signal.9 execsigs.9 \ signal.9 issignal.9 \ signal.9 killproc.9 \ signal.9 pgsigio.9 \ signal.9 postsig.9 \ signal.9 SETSETNEQ.9 \ signal.9 SETSETOR.9 \ signal.9 SIGADDSET.9 \ signal.9 SIG_CONTSIGMASK.9 \ signal.9 SIGDELSET.9 \ signal.9 SIGEMPTYSET.9 \ signal.9 sigexit.9 \ signal.9 SIGFILLSET.9 \ signal.9 siginit.9 \ signal.9 SIGISEMPTY.9 \ signal.9 SIGISMEMBER.9 \ signal.9 SIGNOTEMPTY.9 \ signal.9 signotify.9 \ signal.9 SIGPENDING.9 \ signal.9 SIGSETAND.9 \ signal.9 SIGSETCANTMASK.9 \ 
signal.9 SIGSETEQ.9 \ signal.9 SIGSETNAND.9 \ signal.9 SIG_STOPSIGMASK.9 \ signal.9 trapsignal.9 MLINKS+=sleep.9 msleep.9 \ sleep.9 msleep_spin.9 \ sleep.9 pause.9 \ sleep.9 tsleep.9 \ sleep.9 wakeup.9 \ sleep.9 wakeup_one.9 MLINKS+=sleepqueue.9 init_sleepqueues.9 \ sleepqueue.9 sleepq_abort.9 \ sleepqueue.9 sleepq_add.9 \ sleepqueue.9 sleepq_alloc.9 \ sleepqueue.9 sleepq_broadcast.9 \ sleepqueue.9 sleepq_calc_signal_retval.9 \ sleepqueue.9 sleepq_catch_signals.9 \ sleepqueue.9 sleepq_free.9 \ sleepqueue.9 sleepq_lookup.9 \ sleepqueue.9 sleepq_release.9 \ sleepqueue.9 sleepq_remove.9 \ sleepqueue.9 sleepq_set_timeout.9 \ sleepqueue.9 sleepq_signal.9 \ sleepqueue.9 sleepq_timedwait.9 \ sleepqueue.9 sleepq_timedwait_sig.9 \ sleepqueue.9 sleepq_wait.9 \ sleepqueue.9 sleepq_wait_sig.9 MLINKS+=socket.9 sobind.9 \ socket.9 soclose.9 \ socket.9 soconnect.9 \ socket.9 socreate.9 \ socket.9 sogetopt.9 \ socket.9 soreceive.9 \ socket.9 sosend.9 \ socket.9 sosetopt.9 \ socket.9 soshutdown.9 MLINKS+=spl.9 spl0.9 \ spl.9 splbio.9 \ spl.9 splclock.9 \ spl.9 splhigh.9 \ spl.9 splimp.9 \ spl.9 splnet.9 \ spl.9 splsoftclock.9 \ spl.9 splsofttty.9 \ spl.9 splstatclock.9 \ spl.9 spltty.9 \ spl.9 splvm.9 \ spl.9 splx.9 MLINKS+=stack.9 stack_copy.9 \ stack.9 stack_create.9 \ stack.9 stack_destroy.9 \ stack.9 stack_print.9 \ stack.9 stack_print_ddb.9 \ stack.9 stack_put.9 \ stack.9 stack_save.9 \ stack.9 stack_sbuf_print.9 \ stack.9 stack_sbuf_print_ddb.9 \ stack.9 stack_zero.9 MLINKS+=store.9 subyte.9 \ store.9 suswintr.9 \ store.9 suword.9 \ store.9 suword16.9 \ store.9 suword32.9 \ store.9 suword64.9 MLINKS+=swi.9 swi_add.9 \ swi.9 swi_sched.9 MLINKS+=sx.9 sx_assert.9 \ sx.9 sx_destroy.9 \ sx.9 sx_downgrade.9 \ sx.9 sx_init.9 \ sx.9 sx_init_flags.9 \ sx.9 sx_sleep.9 \ sx.9 sx_slock.9 \ sx.9 sx_sunlock.9 \ sx.9 SX_SYSINIT.9 \ sx.9 sx_try_slock.9 \ sx.9 sx_try_upgrade.9 \ sx.9 sx_try_xlock.9 \ sx.9 sx_unlock.9 \ sx.9 sx_xholder.9 \ sx.9 sx_xlock.9 \ sx.9 sx_xlocked.9 \ sx.9 sx_xunlock.9 MLINKS+=sysctl.9 SYSCTL_DECL.9 \ sysctl.9 SYSCTL_INT.9 \ sysctl.9 SYSCTL_LONG.9 \ sysctl.9 SYSCTL_NODE.9 \ sysctl.9 SYSCTL_OPAQUE.9 \ sysctl.9 SYSCTL_PROC.9 \ sysctl.9 SYSCTL_STRING.9 \ sysctl.9 SYSCTL_STRUCT.9 \ sysctl.9 SYSCTL_UINT.9 \ sysctl.9 SYSCTL_ULONG.9 \ sysctl.9 SYSCTL_XINT.9 \ sysctl.9 SYSCTL_XLONG.9 MLINKS+=sysctl_add_oid.9 SYSCTL_ADD_INT.9 \ sysctl_add_oid.9 SYSCTL_ADD_LONG.9 \ sysctl_add_oid.9 SYSCTL_ADD_NODE.9 \ sysctl_add_oid.9 SYSCTL_ADD_OID.9 \ sysctl_add_oid.9 SYSCTL_ADD_OPAQUE.9 \ sysctl_add_oid.9 SYSCTL_ADD_PROC.9 \ sysctl_add_oid.9 SYSCTL_ADD_STRING.9 \ sysctl_add_oid.9 SYSCTL_ADD_STRUCT.9 \ sysctl_add_oid.9 SYSCTL_ADD_UINT.9 \ sysctl_add_oid.9 SYSCTL_ADD_ULONG.9 \ sysctl_add_oid.9 SYSCTL_CHILDREN.9 \ sysctl_add_oid.9 sysctl_move_oid.9 \ sysctl_add_oid.9 sysctl_remove_oid.9 \ sysctl_add_oid.9 SYSCTL_STATIC_CHILDREN.9 MLINKS+=sysctl_ctx_init.9 sysctl_ctx_entry_add.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_del.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_find.9 \ sysctl_ctx_init.9 sysctl_ctx_free.9 MLINKS+=taskqueue.9 TASK_INIT.9 \ taskqueue.9 taskqueue_create.9 \ taskqueue.9 TASKQUEUE_DECLARE.9 \ taskqueue.9 TASKQUEUE_DEFINE.9 \ taskqueue.9 taskqueue_enqueue.9 \ taskqueue.9 taskqueue_find.9 \ taskqueue.9 taskqueue_free.9 \ taskqueue.9 taskqueue_run.9 MLINKS+=time.9 boottime.9 \ time.9 time_second.9 \ time.9 time_uptime.9 MLINKS+=timeout.9 callout.9 \ timeout.9 callout_active.9 \ timeout.9 callout_deactivate.9 \ timeout.9 callout_drain.9 \ timeout.9 callout_handle_init.9 \ timeout.9 callout_init.9 \ timeout.9 
callout_init_mtx.9 \ timeout.9 callout_init_rw.9 \ timeout.9 callout_pending.9 \ timeout.9 callout_reset.9 \ timeout.9 callout_schedule.9 \ timeout.9 callout_stop.9 \ timeout.9 untimeout.9 MLINKS+=ucred.9 crcopy.9 \ ucred.9 crdup.9 \ ucred.9 crfree.9 \ ucred.9 crget.9 \ ucred.9 crhold.9 \ ucred.9 crshared.9 \ ucred.9 cru2x.9 MLINKS+=uidinfo.9 uifind.9 \ uidinfo.9 uifree.9 \ uidinfo.9 uihashinit.9 \ uidinfo.9 uihold.9 MLINKS+=uio.9 uiomove.9 MLINKS+=usbdi.9 usbd_do_request.9 \ usbdi.9 usbd_do_request_flags.9 \ usbdi.9 usbd_errstr.9 \ usbdi.9 usbd_lookup_id_by_info.9 \ usbdi.9 usbd_lookup_id_by_uaa.9 \ usbdi.9 usbd_transfer_clear_stall.9 \ usbdi.9 usbd_transfer_drain.9 \ usbdi.9 usbd_transfer_pending.9 \ usbdi.9 usbd_transfer_poll.9 \ usbdi.9 usbd_transfer_setup.9 \ usbdi.9 usbd_transfer_start.9 \ usbdi.9 usbd_transfer_stop.9 \ usbdi.9 usbd_transfer_submit.9 \ usbdi.9 usbd_transfer_unsetup.9 \ usbdi.9 usbd_xfer_clr_flag.9 \ usbdi.9 usbd_xfer_frame_data.9 \ usbdi.9 usbd_xfer_frame_len.9 \ usbdi.9 usbd_xfer_get_frame.9 \ usbdi.9 usbd_xfer_get_priv.9 \ usbdi.9 usbd_xfer_is_stalled.9 \ usbdi.9 usbd_xfer_max_framelen.9 \ usbdi.9 usbd_xfer_max_frames.9 \ usbdi.9 usbd_xfer_max_len.9 \ usbdi.9 usbd_xfer_set_flag.9 \ usbdi.9 usbd_xfer_set_frame_data.9 \ usbdi.9 usbd_xfer_set_frame_len.9 \ usbdi.9 usbd_xfer_set_frame_offset.9 \ usbdi.9 usbd_xfer_set_frames.9 \ usbdi.9 usbd_xfer_set_interval.9 \ usbdi.9 usbd_xfer_set_priv.9 \ usbdi.9 usbd_xfer_set_stall.9 \ usbdi.9 usbd_xfer_set_timeout.9 \ usbdi.9 usbd_xfer_softc.9 \ usbdi.9 usbd_xfer_state.9 \ usbdi.9 usbd_xfer_status.9 \ usbdi.9 usb_fifo_alloc_buffer.9 \ usbdi.9 usb_fifo_attach.9 \ usbdi.9 usb_fifo_detach.9 \ usbdi.9 usb_fifo_free_buffer.9 \ usbdi.9 usb_fifo_get_data.9 \ usbdi.9 usb_fifo_get_data_buffer.9 \ usbdi.9 usb_fifo_get_data_error.9 \ usbdi.9 usb_fifo_get_data_linear.9 \ usbdi.9 usb_fifo_put_bytes_max.9 \ usbdi.9 usb_fifo_put_data.9 \ usbdi.9 usb_fifo_put_data_buffer.9 \ usbdi.9 usb_fifo_put_data_error.9 \ usbdi.9 usb_fifo_put_data_linear.9 \ usbdi.9 usb_fifo_reset.9 \ usbdi.9 usb_fifo_softc.9 \ usbdi.9 usb_fifo_wakeup.9 MLINKS+=vcount.9 count_dev.9 MLINKS+=vfsconf.9 vfs_modevent.9 \ vfsconf.9 vfs_register.9 \ vfsconf.9 vfs_unregister.9 MLINKS+=vfs_getopt.9 vfs_copyopt.9 \ vfs_getopt.9 vfs_filteropt.9 \ vfs_getopt.9 vfs_flagopt.9 \ vfs_getopt.9 vfs_getopts.9 \ vfs_getopt.9 vfs_scanopt.9 \ vfs_getopt.9 vfs_setopt.9 \ vfs_getopt.9 vfs_setopt_part.9 \ vfs_getopt.9 vfs_setopts.9 MLINKS+=VFS_LOCK_GIANT.9 VFS_UNLOCK_GIANT.9 MLINKS+=vgone.9 vgonel.9 MLINKS+=vhold.9 vdrop.9 \ vhold.9 vdropl.9 \ vhold.9 vholdl.9 MLINKS+=vm_map_lock.9 vm_map_lock_downgrade.9 \ vm_map_lock.9 vm_map_lock_read.9 \ vm_map_lock.9 vm_map_lock_upgrade.9 \ vm_map_lock.9 vm_map_trylock.9 \ vm_map_lock.9 vm_map_trylock_read.9 \ vm_map_lock.9 vm_map_unlock.9 \ vm_map_lock.9 vm_map_unlock_read.9 MLINKS+=vm_map_lookup.9 vm_map_lookup_done.9 MLINKS+=vm_map_max.9 vm_map_min.9 \ vm_map_max.9 vm_map_pmap.9 MLINKS+=vm_map_stack.9 vm_map_growstack.9 MLINKS+=vm_map_wire.9 vm_map_unwire.9 MLINKS+=vm_page_bits.9 vm_page_clear_dirty.9 \ vm_page_bits.9 vm_page_dirty.9 \ vm_page_bits.9 vm_page_is_valid.9 \ vm_page_bits.9 vm_page_set_invalid.9 \ vm_page_bits.9 vm_page_set_validclean.9 \ vm_page_bits.9 vm_page_test_dirty.9 \ vm_page_bits.9 vm_page_undirty.9 \ vm_page_bits.9 vm_page_zero_invalid.9 MLINKS+=vm_page_flag.9 vm_page_flag_clear.9 \ vm_page_flag.9 vm_page_flag_set.9 MLINKS+=vm_page_free.9 vm_page_free_toq.9 \ vm_page_free.9 vm_page_free_zero.9 \ vm_page_free.9 vm_page_try_to_free.9 
MLINKS+=vm_page_hold.9 vm_page_unhold.9 MLINKS+=vm_page_insert.9 vm_page_remove.9 MLINKS+=vm_page_io.9 vm_page_io_finish.9 \ vm_page_io.9 vm_page_io_start.9 MLINKS+=vm_page_wakeup.9 vm_page_busy.9 \ vm_page_wakeup.9 vm_page_flash.9 MLINKS+=vm_page_wire.9 vm_page_unwire.9 MLINKS+=VOP_ACCESS.9 VOP_ACCESSX.9 MLINKS+=VOP_ATTRIB.9 VOP_GETATTR.9 \ VOP_ATTRIB.9 VOP_SETATTR.9 MLINKS+=VOP_CREATE.9 VOP_MKDIR.9 \ VOP_CREATE.9 VOP_MKNOD.9 \ VOP_CREATE.9 VOP_SYMLINK.9 MLINKS+=VOP_GETPAGES.9 VOP_PUTPAGES.9 MLINKS+=VOP_GETVOBJECT.9 VOP_CREATEVOBJECT.9 \ VOP_GETVOBJECT.9 VOP_DESTROYVOBJECT.9 MLINKS+=VOP_INACTIVE.9 VOP_RECLAIM.9 MLINKS+=VOP_LOCK.9 vn_lock.9 \ VOP_LOCK.9 VOP_ISLOCKED.9 \ VOP_LOCK.9 VOP_UNLOCK.9 MLINKS+=VOP_OPENCLOSE.9 VOP_CLOSE.9 \ VOP_OPENCLOSE.9 VOP_OPEN.9 MLINKS+=VOP_RDWR.9 VOP_READ.9 \ VOP_RDWR.9 VOP_WRITE.9 MLINKS+=VOP_REMOVE.9 VOP_RMDIR.9 MLINKS+=vref.9 VREF.9 MLINKS+=vslock.9 vsunlock.9 MLINKS+=zero_copy.9 zero_copy_sockets.9 MLINKS+=zone.9 uma.9 \ zone.9 uma_zalloc.9 \ zone.9 uma_zcreate.9 \ zone.9 uma_zdestroy.9 \ zone.9 uma_zfree.9 \ zone.9 uma_zone_set_max.9 .include Index: head/share/man/man9/locking.9 =================================================================== --- head/share/man/man9/locking.9 (revision 212111) +++ head/share/man/man9/locking.9 (revision 212112) @@ -1,360 +1,367 @@ .\" Copyright (c) 2007 Julian Elischer (julian - freebsd org ) .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd August 24, 2010 .Dt LOCKING 9 .Os .Sh NAME .Nm locking .Nd kernel synchronization primitives .Sh DESCRIPTION The .Em FreeBSD kernel is written to run across multiple CPUs and as such requires several different synchronization primitives to allow the developers to safely access and manipulate the many data types required. .Ss Mutexes Mutexes (also called "sleep mutexes") are the most commonly used synchronization primitive in the kernel. Thread acquires (locks) a mutex before accessing data shared with other threads (including interrupt threads), and releases (unlocks) it afterwards. If the mutex cannot be acquired, the thread requesting it will sleep. Mutexes fully support priority propagation. .Pp See .Xr mutex 9 for details. 
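As a minimal sketch of the sleep-mutex pattern described above (not taken from this revision; the foo_softc structure and the foo_* functions are hypothetical names used only for illustration), a driver might embed a mutex in its softc, initialize it once, and use it to serialize access to the data it protects:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct foo_softc {
        struct mtx      sc_mtx;         /* protects sc_count */
        int             sc_count;
};

static void
foo_attach(struct foo_softc *sc)
{
        /* MTX_DEF requests an ordinary (sleep) mutex. */
        mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
}

static void
foo_bump(struct foo_softc *sc)
{
        mtx_lock(&sc->sc_mtx);          /* sleeps if the mutex is contended */
        sc->sc_count++;
        mtx_unlock(&sc->sc_mtx);
}

static void
foo_detach(struct foo_softc *sc)
{
        mtx_destroy(&sc->sc_mtx);
}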
.Ss Spin mutexes Spin mutexes are a variation of basic mutexes; the main difference between the two is that spin mutexes never sleep - instead, they spin, waiting for the thread holding the lock, which runs on another CPU, to release it. Unlike ordinary mutexes, spin mutexes disable interrupts when acquired. Since disabling interrupts is expensive, they are also generally slower. Spin mutexes should be used only when necessary, e.g. to protect data shared with interrupt filter code (see .Xr bus_setup_intr 9 for details). .Ss Pool mutexes With most synchronization primitives, such as mutexes, the programmer must provide a piece of allocated memory to hold the primitive. For example, a mutex may be embedded inside the structure it protects. A pool mutex is a variant of the mutex without this requirement - to lock or unlock a pool mutex, one uses the address of the structure being protected by it, not the mutex itself. Pool mutexes are seldom used. .Pp See .Xr mtx_pool 9 for details. .Ss Reader/writer locks Reader/writer locks allow shared access to protected data by multiple threads, or exclusive access by a single thread. The threads with shared access are known as .Em readers since they should only read the protected data. A thread with exclusive access is known as a .Em writer since it may modify protected data. .Pp Reader/writer locks can be treated as mutexes (see above and .Xr mutex 9 ) with shared/exclusive semantics. More specifically, regular mutexes can be considered to be equivalent to a write-lock on an .Em rw_lock. The .Em rw_lock locks have priority propagation like mutexes, but priority can be propagated only to an exclusive holder. This limitation comes from the fact that shared owners are anonymous. Another important property is that shared holders of .Em rw_lock can recurse, but exclusive locks are not allowed to recurse. This ability should not be used lightly and .Em may go away. .Pp See .Xr rwlock 9 for details. .Ss Read-mostly locks Mostly reader locks are similar to .Em reader/writer locks but optimized for very infrequent write locking. .Em Read-mostly locks implement full priority propagation by tracking shared owners using a caller-supplied .Em tracker data structure. .Pp See .Xr rmlock 9 for details. .Ss Shared/exclusive locks Shared/exclusive locks are similar to reader/writer locks; the main difference between them is that shared/exclusive locks may be held during unbounded sleep (and may thus perform an unbounded sleep). They are inherently less efficient than mutexes, reader/writer locks and read-mostly locks. They do not support priority propagation. They should be considered to be closely related to .Xr sleep 9 . In fact, in some cases they can be considered a form of conditional sleep. .Pp See .Xr sx 9 for details. .Ss Counting semaphores Counting semaphores provide a mechanism for synchronizing access to a pool of resources. Unlike mutexes, semaphores do not have the concept of an owner, so they can be useful in situations where one thread needs to acquire a resource, and another thread needs to release it. They are largely deprecated. .Pp See .Xr sema 9 for details. .Ss Condition variables Condition variables are used in conjunction with mutexes to wait for conditions to occur. A thread must hold the mutex before calling the .Fn cv_wait* functions. When a thread waits on a condition, the mutex is atomically released before the thread is blocked, then reacquired before the function call returns. .Pp See .Xr condvar 9 for details.
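As a minimal sketch of the condition-variable pattern just described (not taken from this revision; the example_* names are hypothetical), the mutex protecting the condition is passed to cv_wait(), which releases it while the thread sleeps and reacquires it before returning, so the condition is re-tested in a loop:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

static struct mtx       example_mtx;
static struct cv        example_cv;
static int              example_ready;

static void
example_init(void)
{
        mtx_init(&example_mtx, "example", NULL, MTX_DEF);
        cv_init(&example_cv, "example cv");
}

static void
example_wait(void)
{
        mtx_lock(&example_mtx);
        while (example_ready == 0) {
                /* Drops example_mtx while asleep, reacquires it on return. */
                cv_wait(&example_cv, &example_mtx);
        }
        example_ready = 0;
        mtx_unlock(&example_mtx);
}

static void
example_post(void)
{
        mtx_lock(&example_mtx);
        example_ready = 1;
        cv_signal(&example_cv);
        mtx_unlock(&example_mtx);
}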
.Ss Giant Giant is an instance of a mutex, with some special characteristics: .Bl -enum .It It is recursive. .It Drivers and filesystems can request that Giant be locked around them by not marking themselves MPSAFE. Note that the infrastructure to do this is slowly going away as non-MPSAFE drivers either become properly locked or disappear. .It Giant must be locked before any other locks. .It It is OK to hold Giant while performing unbounded sleep; in such a case, Giant will be dropped before sleeping and picked up after wakeup. .It There are places in the kernel that drop Giant and pick it back up again. Sleep locks will do this before sleeping. Parts of the network or VM code may do this as well, depending on the setting of a sysctl. This means that you cannot count on Giant keeping other code from running if your code sleeps, even if you want it to. .El .Ss Sleep/wakeup The functions .Fn tsleep , .Fn msleep , .Fn msleep_spin , .Fn pause , .Fn wakeup , and .Fn wakeup_one handle event-based thread blocking. If a thread must wait for an external event, it is put to sleep by .Fn tsleep , .Fn msleep , .Fn msleep_spin , or .Fn pause . Threads may also wait using one of the locking primitive sleep routines .Xr mtx_sleep 9 , .Xr rw_sleep 9 , or .Xr sx_sleep 9 . .Pp The parameter .Fa chan is an arbitrary address that uniquely identifies the event on which the thread is being put to sleep. All threads sleeping on a single .Fa chan are woken up later by .Fn wakeup , often called from inside an interrupt routine, to indicate that the resource the thread was blocking on is now available. .Pp Several of the sleep functions, including .Fn msleep , .Fn msleep_spin , and the locking primitive sleep routines, specify an additional lock parameter. The lock will be released before sleeping and reacquired before the sleep routine returns. If .Fa priority includes the .Dv PDROP flag, then the lock will not be reacquired before returning. The lock is used to ensure that a condition can be checked atomically, and that the current thread can be suspended without missing a change to the condition, or an associated wakeup. In addition, all of the sleep routines will fully drop the .Va Giant mutex (even if recursed) while the thread is suspended and will reacquire the .Va Giant mutex before the function returns. .Pp See .Xr sleep 9 for details. .Pp .Ss Lockmanager locks Shared/exclusive locks, used mostly in .Xr VFS 9 , in particular as a .Xr vnode 9 lock. They have features other lock types do not have, such as sleep timeout, writer starvation avoidance, draining, and an interlock mutex, but this makes them complicated to implement; for this reason, they are deprecated. .Pp See .Xr lock 9 for details. .Sh INTERACTIONS The primitives interact and have a number of rules regarding how they can and cannot be combined. Many of these rules are checked using the .Xr witness 4 code. .Ss Bounded vs. unbounded sleep The following primitives perform bounded sleep: mutexes, pool mutexes, reader/writer locks and read-mostly locks. .Pp The following primitives block (perform unbounded sleep): shared/exclusive locks, counting semaphores, condition variables, sleep/wakeup and lockmanager locks. .Pp It is an error to do any operation that could result in any kind of sleep while holding a spin mutex. .Pp As a general rule, it is an error to do any operation that could result in unbounded sleep while holding any primitive from the 'bounded sleep' group.
For example, it is an error to try to acquire a shared/exclusive lock while holding a mutex, or to try to allocate memory with M_WAITOK while holding a reader/writer lock. .Pp As a special case, it is possible to call .Fn sleep or .Fn mtx_sleep while holding a single mutex. It will atomically drop that mutex and reacquire it as part of waking up. This is often a bad idea because it generally relies on the programmer having good knowledge of all of the call graph above the place where .Fn mtx_sleep is being called and of the assumptions the calling code has made. Because the lock gets dropped during sleep, one must re-test all the assumptions that were made before, all the way up the call graph to the place where the lock was acquired. .Pp It is an error to do any operation that could result in any kind of sleep when running inside an interrupt filter. .Pp It is an error to do any operation that could result in unbounded sleep when running inside an interrupt thread. .Ss Interaction table The following table shows what you can and cannot do while holding one of the synchronization primitives discussed: .Bl -column ".Ic xxxxxxxxxxxxxxxxxxx" ".Xr XXXXXXXXX" ".Xr XXXXXXX" ".Xr XXXXXXX" ".Xr XXXXXXX" ".Xr XXXXXX" -offset indent .It Xo .Em "You have: You want:" Ta spin mtx Ta mutex Ta sx Ta rwlock Ta rmlock Ta sleep .Xc .It spin mtx Ta \&ok-1 Ta \&no Ta \&no Ta \&no Ta \&no Ta \&no-3 .It mutex Ta \&ok Ta \&ok-1 Ta \&no Ta \&ok Ta \&ok Ta \&no-3 .It sx Ta \&ok Ta \&ok Ta \&ok-2 Ta \&ok Ta \&ok Ta \&ok-4 .It rwlock Ta \&ok Ta \&ok Ta \&no Ta \&ok-2 Ta \&ok Ta \&no-3 -.It rmlock Ta \&ok Ta \&ok Ta \&no Ta \&ok Ta \&ok-2 Ta \&no +.It rmlock Ta \&ok Ta \&ok Ta \&ok-5 Ta \&ok Ta \&ok-2 Ta \&ok-5 .El .Pp .Em *1 Recursion is defined per lock. Lock order is important. .Pp .Em *2 Readers can recurse, though writers cannot. Lock order is important. .Pp .Em *3 There are calls that atomically release this primitive when going to sleep and reacquire it on wakeup (e.g. .Fn mtx_sleep , .Fn rw_sleep and .Fn msleep_spin ). .Pp .Em *4 Though one can sleep holding an sx lock, one can also use .Fn sx_sleep which will atomically release this primitive when going to sleep and reacquire it on wakeup. +.Pp +.Em *5 +.Em Read-mostly +locks can be initialized to support sleeping while holding a write lock. +See +.Xr rmlock 9 +for details. .Ss Context mode table The next table shows what can be used in different contexts. At this time this is a rather easy table to remember. .Bl -column ".Ic Xxxxxxxxxxxxxxxxxxx" ".Xr XXXXXXXXX" ".Xr XXXXXXX" ".Xr XXXXXXX" ".Xr XXXXXXX" ".Xr XXXXXX" -offset indent .It Xo .Em "Context:" Ta spin mtx Ta mutex Ta sx Ta rwlock Ta rmlock Ta sleep .Xc .It interrupt filter: Ta \&ok Ta \&no Ta \&no Ta \&no Ta \&no Ta \&no .It interrupt thread: Ta \&ok Ta \&ok Ta \&no Ta \&ok Ta \&ok Ta \&no .It callout: Ta \&ok Ta \&ok Ta \&no Ta \&ok Ta \&no Ta \&no .It syscall: Ta \&ok Ta \&ok Ta \&ok Ta \&ok Ta \&ok Ta \&ok .El .Sh SEE ALSO .Xr witness 4 , .Xr condvar 9 , .Xr lock 9 , .Xr mtx_pool 9 , .Xr mutex 9 , .Xr rmlock 9 , .Xr rwlock 9 , .Xr sema 9 , .Xr sleep 9 , .Xr sx 9 , .Xr LOCK_PROFILING 9 .Sh HISTORY These functions appeared in .Bsx 4.1 through .Fx 7.0 .Sh BUGS There are too many locking primitives to choose from.
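As a sketch of the read-mostly lock usage implied by the rmlock column above and by the rmlock(9) changes that follow (hypothetical example_* names; the function signatures are the ones given in the rmlock.9 SYNOPSIS in this revision), readers pass a stack-allocated rm_priotracker and may use the new rm_try_rlock() to avoid blocking:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock    example_rm;
static int              example_value;

static void
example_init(void)
{
        rm_init(&example_rm, "example rmlock");
}

static int
example_read(void)
{
        struct rm_priotracker tracker;  /* may live on the caller's stack */
        int v;

        rm_rlock(&example_rm, &tracker);
        v = example_value;
        rm_runlock(&example_rm, &tracker);
        return (v);
}

static int
example_read_try(int *vp)
{
        struct rm_priotracker tracker;

        /* rm_try_rlock() (added in this revision) returns 0 on failure. */
        if (rm_try_rlock(&example_rm, &tracker) == 0)
                return (0);
        *vp = example_value;
        rm_runlock(&example_rm, &tracker);
        return (1);
}

static void
example_write(int v)
{
        rm_wlock(&example_rm);
        example_value = v;
        rm_wunlock(&example_rm);
}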
Index: head/share/man/man9/rmlock.9 =================================================================== --- head/share/man/man9/rmlock.9 (revision 212111) +++ head/share/man/man9/rmlock.9 (revision 212112) @@ -1,230 +1,256 @@ .\" Copyright (c) 2007 Stephan Uphoff .\" Copyright (c) 2006 Gleb Smirnoff .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .\" Based on rwlock.9 man page .Dd November 10, 2007 .Dt RMLOCK 9 .Os .Sh NAME .Nm rmlock , .Nm rm_init , .Nm rm_init_flags , .Nm rm_destroy , .Nm rm_rlock , +.Nm rm_try_rlock , .Nm rm_wlock , .Nm rm_runlock , .Nm rm_wunlock , .Nm rm_wowned , .Nm RM_SYSINIT .Nd kernel reader/writer lock optimized for mostly read access patterns .Sh SYNOPSIS .In sys/param.h .In sys/lock.h .In sys/rmlock.h .Ft void .Fn rm_init "struct rmlock *rm" "const char *name" .Ft void .Fn rm_init_flags "struct rmlock *rm" "const char *name" "int opts" .Ft void .Fn rm_destroy "struct rmlock *rm" .Ft void .Fn rm_rlock "struct rmlock *rm" "struct rm_priotracker* tracker" +.Ft int +.Fn rm_try_rlock "struct rmlock *rm" "struct rm_priotracker* tracker" .Ft void .Fn rm_wlock "struct rmlock *rm" .Ft void .Fn rm_runlock "struct rmlock *rm" "struct rm_priotracker* tracker" .Ft void .Fn rm_wunlock "struct rmlock *rm" .Ft int .Fn rm_wowned "struct rmlock *rm" .In sys/kernel.h .Fn RM_SYSINIT "name" "struct rmlock *rm" "const char *desc" "int opts" .Sh DESCRIPTION Mostly reader locks allow shared access to protected data by multiple threads, or exclusive access by a single thread. The threads with shared access are known as .Em readers since they only read the protected data. A thread with exclusive access is known as a .Em writer since it can modify protected data. .Pp Read mostly locks are designed to be efficient for locks almost exclusively used as reader locks and as such should be used for protecting data that rarely changes. Acquiring an exclusive lock after the lock had been locked for shared access is an expensive operation. .Pp Although reader/writer locks look very similar to .Xr sx 9 locks, their usage pattern is different. Reader/writer locks can be treated as mutexes (see .Xr mutex 9 ) -with shared/exclusive semantics. +with shared/exclusive semantics unless initialized with +.Dv RM_SLEEPABLE . 
Unlike .Xr sx 9 , an .Nm can be locked while holding a non-spin mutex, and an .Nm -cannot be held while sleeping. +cannot be held while sleeping, again unless initialized with +.Dv RM_SLEEPABLE . The .Nm locks have full priority propagation like mutexes. The .Va rm_priotracker structure argument supplied in .Fn rm_rlock and .Fn rm_runlock is used to keep track of the read owner(s). Another important property is that shared holders of .Nm can recurse if the lock has been initialized with the .Dv LO_RECURSABLE option; however, exclusive locks are not allowed to recurse. .Ss Macros and Functions .Bl -tag -width indent .It Fn rm_init "struct rmlock *rm" "const char *name" Initialize the structure located at .Fa rm as a mostly reader lock, described by .Fa name . The name description is used solely for debugging purposes. This function must be called before any other operations on the lock. .It Fn rm_init_flags "struct rmlock *rm" "const char *name" "int opts" Initialize the rm lock just like the .Fn rm_init function, but specifying a set of optional flags to alter the behaviour of .Fa rm , through the .Fa opts argument. It contains one or more of the following flags: .Bl -tag -width ".Dv RM_NOWITNESS" .It Dv RM_NOWITNESS Instruct .Xr witness 4 to ignore this lock. .It Dv RM_RECURSE Allow threads to recursively acquire exclusive locks for .Fa rm . +.It Dv RM_SLEEPABLE +Allow writers to sleep while holding the lock. +Readers must not sleep while holding the lock and can avoid sleeping when +taking the lock by using +.Fn rm_try_rlock +instead of +.Fn rm_rlock . .El .It Fn rm_rlock "struct rmlock *rm" "struct rm_priotracker* tracker" Lock .Fa rm as a reader. The .Fa tracker argument is used to track read owners of the lock for priority propagation. This data structure is only used internally by .Nm and must persist until .Fn rm_runlock has been called. This data structure can be allocated on the stack since rmlocks cannot be held while sleeping. If any thread holds this lock exclusively, the current thread blocks, and its priority is propagated to the exclusive holder. If the lock was initialized with the .Dv LO_RECURSABLE option, the .Fn rm_rlock function can be called when the thread has already acquired reader access on .Fa rm . This is called .Dq "recursing on a lock" . +.It Fn rm_try_rlock "struct rmlock *rm" "struct rm_priotracker* tracker" +Try to lock +.Fa rm +as a reader. +.Fn rm_try_rlock +will return 0 if the lock cannot be acquired immediately; +otherwise the lock will be acquired and a non-zero value will be returned. +Note that +.Fn rm_try_rlock +may fail even when the lock is not currently held by a writer. .It Fn rm_wlock "struct rmlock *rm" Lock .Fa rm as a writer. If there are any shared owners of the lock, the current thread blocks. The .Fn rm_wlock function cannot be called recursively. .It Fn rm_runlock "struct rmlock *rm" "struct rm_priotracker* tracker" This function releases a shared lock previously acquired by .Fn rm_rlock . The .Fa tracker argument must match the .Fa tracker argument used for acquiring the shared lock. .It Fn rm_wunlock "struct rmlock *rm" This function releases an exclusive lock previously acquired by .Fn rm_wlock . .It Fn rm_destroy "struct rmlock *rm" This function destroys a lock previously initialized with .Fn rm_init . The .Fa rm lock must be unlocked. .It Fn rm_wowned "struct rmlock *rm" This function returns a non-zero value if the current thread owns an exclusive lock on .Fa rm .
.El .Sh SEE ALSO .Xr locking 9 , .Xr mutex 9 , .Xr panic 9 , .Xr rwlock 9 , .Xr sema 9 , .Xr sx 9 .Sh HISTORY These functions appeared in .Fx 7.0 . .Sh AUTHORS .An -nosplit The .Nm facility was written by .An "Stephan Uphoff" . This manual page was written by .An "Gleb Smirnoff" for rwlock and modified to reflect rmlock by .An "Stephan Uphoff" . .Sh BUGS The .Nm implementation is currently not optimized for single processor systems. +.Pp +.Fn rm_try_rlock +can fail transiently even when there is no writer, while another reader +updates the state on the local CPU. .Pp The .Nm implementation uses a single per CPU list shared by all rmlocks in the system. If rmlocks become popular, hashing to multiple per CPU queues may be needed to speed up the writer lock process. .Pp The .Nm can currently not be used as a lock argument for condition variable wait functions. Index: head/sys/kern/kern_rmlock.c =================================================================== --- head/sys/kern/kern_rmlock.c (revision 212111) +++ head/sys/kern/kern_rmlock.c (revision 212112) @@ -1,536 +1,589 @@ /*- * Copyright (c) 2007 Stephan Uphoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Machine independent bits of reader/writer lock implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_kdtrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #define RMPF_ONQUEUE 1 #define RMPF_SIGNAL 2 /* * To support usage of rmlock in CVs and msleep yet another list for the * priority tracker would be needed. 
Using this lock for cv and msleep also * does not seem very useful */ static __inline void compiler_memory_barrier(void) { __asm __volatile("":::"memory"); } static void assert_rm(struct lock_object *lock, int what); static void lock_rm(struct lock_object *lock, int how); #ifdef KDTRACE_HOOKS static int owner_rm(struct lock_object *lock, struct thread **owner); #endif static int unlock_rm(struct lock_object *lock); struct lock_class lock_class_rm = { .lc_name = "rm", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, .lc_assert = assert_rm, #if 0 #ifdef DDB .lc_ddb_show = db_show_rwlock, #endif #endif .lc_lock = lock_rm, .lc_unlock = unlock_rm, #ifdef KDTRACE_HOOKS .lc_owner = owner_rm, #endif }; static void assert_rm(struct lock_object *lock, int what) { panic("assert_rm called"); } static void lock_rm(struct lock_object *lock, int how) { panic("lock_rm called"); } static int unlock_rm(struct lock_object *lock) { panic("unlock_rm called"); } #ifdef KDTRACE_HOOKS static int owner_rm(struct lock_object *lock, struct thread **owner) { panic("owner_rm called"); } #endif static struct mtx rm_spinlock; MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN); /* * Add or remove tracker from per-cpu list. * * The per-cpu list can be traversed at any time in forward direction from an * interrupt on the *local* cpu. */ static void inline rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker) { struct rm_queue *next; /* Initialize all tracker pointers */ tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue; next = pc->pc_rm_queue.rmq_next; tracker->rmp_cpuQueue.rmq_next = next; /* rmq_prev is not used during froward traversal. */ next->rmq_prev = &tracker->rmp_cpuQueue; /* Update pointer to first element. */ pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue; } static void inline rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker) { struct rm_queue *next, *prev; next = tracker->rmp_cpuQueue.rmq_next; prev = tracker->rmp_cpuQueue.rmq_prev; /* Not used during forward traversal. */ next->rmq_prev = prev; /* Remove from list. 
*/ prev->rmq_next = next; } static void rm_cleanIPI(void *arg) { struct pcpu *pc; struct rmlock *rm = arg; struct rm_priotracker *tracker; struct rm_queue *queue; pc = pcpu_find(curcpu); for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) { tracker->rmp_flags = RMPF_ONQUEUE; mtx_lock_spin(&rm_spinlock); LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, rmp_qentry); mtx_unlock_spin(&rm_spinlock); } } } +CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE); + void rm_init_flags(struct rmlock *rm, const char *name, int opts) { int liflags; liflags = 0; if (!(opts & RM_NOWITNESS)) liflags |= LO_WITNESS; if (opts & RM_RECURSE) liflags |= LO_RECURSABLE; - rm->rm_noreadtoken = 1; + rm->rm_writecpus = all_cpus; LIST_INIT(&rm->rm_activeReaders); - mtx_init(&rm->rm_lock, name, "rmlock_mtx", MTX_NOWITNESS); + if (opts & RM_SLEEPABLE) { + liflags |= RM_SLEEPABLE; + sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE); + } else + mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS); lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags); } void rm_init(struct rmlock *rm, const char *name) { rm_init_flags(rm, name, 0); } void rm_destroy(struct rmlock *rm) { - mtx_destroy(&rm->rm_lock); + if (rm->lock_object.lo_flags & RM_SLEEPABLE) + sx_destroy(&rm->rm_lock_sx); + else + mtx_destroy(&rm->rm_lock_mtx); lock_destroy(&rm->lock_object); } int rm_wowned(struct rmlock *rm) { - return (mtx_owned(&rm->rm_lock)); + if (rm->lock_object.lo_flags & RM_SLEEPABLE) + return (sx_xlocked(&rm->rm_lock_sx)); + else + return (mtx_owned(&rm->rm_lock_mtx)); } void rm_sysinit(void *arg) { struct rm_args *args = arg; rm_init(args->ra_rm, args->ra_desc); } void rm_sysinit_flags(void *arg) { struct rm_args_flags *args = arg; rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts); } -static void -_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker) +static int +_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) { struct pcpu *pc; struct rm_queue *queue; struct rm_priotracker *atracker; critical_enter(); pc = pcpu_find(curcpu); /* Check if we just need to do a proper critical_exit. */ - if (0 == rm->rm_noreadtoken) { + if (!(pc->pc_cpumask & rm->rm_writecpus)) { critical_exit(); - return; + return (1); } /* Remove our tracker from the per-cpu list. */ rm_tracker_remove(pc, tracker); /* Check to see if the IPI granted us the lock after all. */ if (tracker->rmp_flags) { /* Just add back tracker - we hold the lock. */ rm_tracker_add(pc, tracker); critical_exit(); - return; + return (1); } /* * We allow readers to aquire a lock even if a writer is blocked if * the lock is recursive and the reader already holds the lock. */ if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) { /* * Just grant the lock if this thread already has a tracker * for this lock on the per-cpu queue. 
*/ for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { atracker = (struct rm_priotracker *)queue; if ((atracker->rmp_rmlock == rm) && (atracker->rmp_thread == tracker->rmp_thread)) { mtx_lock_spin(&rm_spinlock); LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, rmp_qentry); tracker->rmp_flags = RMPF_ONQUEUE; mtx_unlock_spin(&rm_spinlock); rm_tracker_add(pc, tracker); critical_exit(); - return; + return (1); } } } sched_unpin(); critical_exit(); - mtx_lock(&rm->rm_lock); - rm->rm_noreadtoken = 0; - critical_enter(); + if (trylock) { + if (rm->lock_object.lo_flags & RM_SLEEPABLE) { + if (!sx_try_xlock(&rm->rm_lock_sx)) + return (0); + } else { + if (!mtx_trylock(&rm->rm_lock_mtx)) + return (0); + } + } else { + if (rm->lock_object.lo_flags & RM_SLEEPABLE) + sx_xlock(&rm->rm_lock_sx); + else + mtx_lock(&rm->rm_lock_mtx); + } + critical_enter(); pc = pcpu_find(curcpu); + rm->rm_writecpus &= ~pc->pc_cpumask; rm_tracker_add(pc, tracker); sched_pin(); critical_exit(); - mtx_unlock(&rm->rm_lock); + if (rm->lock_object.lo_flags & RM_SLEEPABLE) + sx_xunlock(&rm->rm_lock_sx); + else + mtx_unlock(&rm->rm_lock_mtx); + + return (1); } -void -_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker) +int +_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) { struct thread *td = curthread; struct pcpu *pc; tracker->rmp_flags = 0; tracker->rmp_thread = td; tracker->rmp_rmlock = rm; td->td_critnest++; /* critical_enter(); */ compiler_memory_barrier(); pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ rm_tracker_add(pc, tracker); sched_pin(); compiler_memory_barrier(); td->td_critnest--; /* * Fast path to combine two common conditions into a single * conditional jump. */ - if (0 == (td->td_owepreempt | rm->rm_noreadtoken)) - return; + if (0 == (td->td_owepreempt | (rm->rm_writecpus & pc->pc_cpumask))) + return (1); /* We do not have a read token and need to acquire one. 
*/ - _rm_rlock_hard(rm, tracker); + return _rm_rlock_hard(rm, tracker, trylock); } static void _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker) { if (td->td_owepreempt) { td->td_critnest++; critical_exit(); } if (!tracker->rmp_flags) return; mtx_lock_spin(&rm_spinlock); LIST_REMOVE(tracker, rmp_qentry); if (tracker->rmp_flags & RMPF_SIGNAL) { struct rmlock *rm; struct turnstile *ts; rm = tracker->rmp_rmlock; turnstile_chain_lock(&rm->lock_object); mtx_unlock_spin(&rm_spinlock); ts = turnstile_lookup(&rm->lock_object); turnstile_signal(ts, TS_EXCLUSIVE_QUEUE); turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); turnstile_chain_unlock(&rm->lock_object); } else mtx_unlock_spin(&rm_spinlock); } void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker) { struct pcpu *pc; struct thread *td = tracker->rmp_thread; td->td_critnest++; /* critical_enter(); */ pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ rm_tracker_remove(pc, tracker); td->td_critnest--; sched_unpin(); if (0 == (td->td_owepreempt | tracker->rmp_flags)) return; _rm_unlock_hard(td, tracker); } void _rm_wlock(struct rmlock *rm) { struct rm_priotracker *prio; struct turnstile *ts; + cpumask_t readcpus; - mtx_lock(&rm->rm_lock); + if (rm->lock_object.lo_flags & RM_SLEEPABLE) + sx_xlock(&rm->rm_lock_sx); + else + mtx_lock(&rm->rm_lock_mtx); - if (rm->rm_noreadtoken == 0) { + if (rm->rm_writecpus != all_cpus) { /* Get all read tokens back */ - rm->rm_noreadtoken = 1; + readcpus = all_cpus & (all_cpus & ~rm->rm_writecpus); + rm->rm_writecpus = all_cpus; /* - * Assumes rm->rm_noreadtoken update is visible on other CPUs + * Assumes rm->rm_writecpus update is visible on other CPUs * before rm_cleanIPI is called. */ #ifdef SMP - smp_rendezvous(smp_no_rendevous_barrier, + smp_rendezvous_cpus(readcpus, + smp_no_rendevous_barrier, rm_cleanIPI, smp_no_rendevous_barrier, rm); #else rm_cleanIPI(rm); #endif mtx_lock_spin(&rm_spinlock); while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) { ts = turnstile_trywait(&rm->lock_object); prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL; mtx_unlock_spin(&rm_spinlock); turnstile_wait(ts, prio->rmp_thread, TS_EXCLUSIVE_QUEUE); mtx_lock_spin(&rm_spinlock); } mtx_unlock_spin(&rm_spinlock); } } void _rm_wunlock(struct rmlock *rm) { - mtx_unlock(&rm->rm_lock); + if (rm->lock_object.lo_flags & RM_SLEEPABLE) + sx_xunlock(&rm->rm_lock_sx); + else + mtx_unlock(&rm->rm_lock_mtx); } #ifdef LOCK_DEBUG void _rm_wlock_debug(struct rmlock *rm, const char *file, int line) { WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); _rm_wlock(rm); LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line); - WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); + if (rm->lock_object.lo_flags & RM_SLEEPABLE) + WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE, + file, line); + else + WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); curthread->td_locks++; } void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) { curthread->td_locks--; - WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); + if (rm->lock_object.lo_flags & RM_SLEEPABLE) + WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE, + file, line); + else + WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line); _rm_wunlock(rm); } -void +int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, - const char *file, int line) + int trylock, const char *file, int line) { - + if 
(!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE)) + WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER, + file, line, NULL); WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL); - _rm_rlock(rm, tracker); + if (_rm_rlock(rm, tracker, trylock)) { + LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line); - LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line); + WITNESS_LOCK(&rm->lock_object, 0, file, line); - WITNESS_LOCK(&rm->lock_object, 0, file, line); + curthread->td_locks++; - curthread->td_locks++; + return (1); + } + + return (0); } void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line) { curthread->td_locks--; WITNESS_UNLOCK(&rm->lock_object, 0, file, line); LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line); _rm_runlock(rm, tracker); } #else /* * Just strip out file and line arguments if no lock debugging is enabled in * the kernel - we are called from a kernel module. */ void _rm_wlock_debug(struct rmlock *rm, const char *file, int line) { _rm_wlock(rm); } void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) { _rm_wunlock(rm); } -void +int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, - const char *file, int line) + int trylock, const char *file, int line) { - _rm_rlock(rm, tracker); + return _rm_rlock(rm, tracker, trylock); } void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line) { _rm_runlock(rm, tracker); } #endif Index: head/sys/sys/_rmlock.h =================================================================== --- head/sys/sys/_rmlock.h (revision 212111) +++ head/sys/sys/_rmlock.h (revision 212112) @@ -1,62 +1,66 @@ /*- * Copyright (c) 2007 Stephan Uphoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS__RMLOCK_H_ #define _SYS__RMLOCK_H_ /* * XXXUPS remove as soon as we have per cpu variable * linker sets and can define rm_queue in _rm_lock.h */ #include /* * Mostly reader/occasional writer lock. 
*/ LIST_HEAD(rmpriolist,rm_priotracker); struct rmlock { struct lock_object lock_object; - volatile int rm_noreadtoken; + volatile cpumask_t rm_writecpus; LIST_HEAD(,rm_priotracker) rm_activeReaders; - struct mtx rm_lock; - + union { + struct mtx _rm_lock_mtx; + struct sx _rm_lock_sx; + } _rm_lock; }; +#define rm_lock_mtx _rm_lock._rm_lock_mtx +#define rm_lock_sx _rm_lock._rm_lock_sx struct rm_priotracker { struct rm_queue rmp_cpuQueue; /* Must be first */ struct rmlock *rmp_rmlock; struct thread *rmp_thread; int rmp_flags; LIST_ENTRY(rm_priotracker) rmp_qentry; }; #endif /* !_SYS__RMLOCK_H_ */ Index: head/sys/sys/rmlock.h =================================================================== --- head/sys/sys/rmlock.h (revision 212111) +++ head/sys/sys/rmlock.h (revision 212112) @@ -1,121 +1,127 @@ /*- * Copyright (c) 2007 Stephan Uphoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_RMLOCK_H_ #define _SYS_RMLOCK_H_ #include +#include #include #include #ifdef _KERNEL /* * Flags passed to rm_init(9). 
*/ #define RM_NOWITNESS 0x00000001 #define RM_RECURSE 0x00000002 +#define RM_SLEEPABLE 0x00000004 void rm_init(struct rmlock *rm, const char *name); void rm_init_flags(struct rmlock *rm, const char *name, int opts); void rm_destroy(struct rmlock *rm); int rm_wowned(struct rmlock *rm); void rm_sysinit(void *arg); void rm_sysinit_flags(void *arg); void _rm_wlock_debug(struct rmlock *rm, const char *file, int line); void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line); -void _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, - const char *file, int line); +int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, + int trylock, const char *file, int line); void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line); void _rm_wlock(struct rmlock *rm); void _rm_wunlock(struct rmlock *rm); -void _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker); +int _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, + int trylock); void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker); /* * Public interface for lock operations. */ #ifndef LOCK_DEBUG #error LOCK_DEBUG not defined, include before #endif #if LOCK_DEBUG > 0 #define rm_wlock(rm) _rm_wlock_debug((rm), LOCK_FILE, LOCK_LINE) #define rm_wunlock(rm) _rm_wunlock_debug((rm), LOCK_FILE, LOCK_LINE) #define rm_rlock(rm,tracker) \ - _rm_rlock_debug((rm),(tracker), LOCK_FILE, LOCK_LINE ) + ((void)_rm_rlock_debug((rm),(tracker), 0, LOCK_FILE, LOCK_LINE )) +#define rm_try_rlock(rm,tracker) \ + _rm_rlock_debug((rm),(tracker), 1, LOCK_FILE, LOCK_LINE ) #define rm_runlock(rm,tracker) \ _rm_runlock_debug((rm), (tracker), LOCK_FILE, LOCK_LINE ) #else -#define rm_wlock(rm) _rm_wlock((rm)) -#define rm_wunlock(rm) _rm_wunlock((rm)) -#define rm_rlock(rm,tracker) _rm_rlock((rm),(tracker)) -#define rm_runlock(rm,tracker) _rm_runlock((rm), (tracker)) +#define rm_wlock(rm) _rm_wlock((rm)) +#define rm_wunlock(rm) _rm_wunlock((rm)) +#define rm_rlock(rm,tracker) ((void)_rm_rlock((rm),(tracker), 0)) +#define rm_try_rlock(rm,tracker) _rm_rlock((rm),(tracker), 1) +#define rm_runlock(rm,tracker) _rm_runlock((rm), (tracker)) #endif struct rm_args { struct rmlock *ra_rm; const char *ra_desc; }; struct rm_args_flags { struct rmlock *ra_rm; const char *ra_desc; int ra_opts; }; #define RM_SYSINIT(name, rm, desc) \ static struct rm_args name##_args = { \ (rm), \ (desc), \ }; \ SYSINIT(name##_rm_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rm_sysinit, &name##_args); \ SYSUNINIT(name##_rm_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rm_destroy, (rm)) #define RM_SYSINIT_FLAGS(name, rm, desc, opts) \ static struct rm_args name##_args = { \ (rm), \ (desc), \ (opts), \ }; \ SYSINIT(name##_rm_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rm_sysinit_flags, &name##_args); \ SYSUNINIT(name##_rm_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rm_destroy, (rm)) #endif /* _KERNEL */ #endif /* !_SYS_RMLOCK_H_ */
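The interfaces added in this revision, RM_SLEEPABLE and rm_try_rlock(), might be consumed roughly as in the sketch below. It is illustrative only: cfg_lock, cfg_value and the cfg_* functions are hypothetical names, and rm_init_flags() is assumed to be called from whatever initialization path the consumer already has.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock cfg_lock;
static int cfg_value;

static void
cfg_init(void)
{

	/* Writers may sleep while holding the lock; readers may not. */
	rm_init_flags(&cfg_lock, "cfg", RM_SLEEPABLE);
}

/*
 * rm_try_rlock() lets readers avoid sleeping behind a writer of a
 * sleepable lock; a return value of 0 means the lock was not acquired
 * (which can also happen transiently, as noted in the BUGS section).
 */
static int
cfg_try_get(int *valp)
{
	struct rm_priotracker tracker;

	if (!rm_try_rlock(&cfg_lock, &tracker))
		return (0);		/* caller retries or falls back */
	*valp = cfg_value;
	rm_runlock(&cfg_lock, &tracker);
	return (1);
}

static void
cfg_set(int val)
{

	rm_wlock(&cfg_lock);
	cfg_value = val;
	rm_wunlock(&cfg_lock);
}

Whether a consumer prefers rm_try_rlock() with a fallback or a plain rm_rlock() depends on whether its read paths are allowed to block: with RM_SLEEPABLE the writer side is backed by an sx lock rather than a mutex, as the changes to kern_rmlock.c above show.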