diff --git a/share/man/man3/Makefile b/share/man/man3/Makefile index fa4f751ce553..872aec1e83ab 100644 --- a/share/man/man3/Makefile +++ b/share/man/man3/Makefile @@ -1,524 +1,525 @@ # @(#)Makefile 8.2 (Berkeley) 12/13/93 # $FreeBSD$ .include MAN= arb.3 \ assert.3 \ ATOMIC_VAR_INIT.3 \ bitstring.3 \ CMSG_DATA.3 \ end.3 \ fpgetround.3 \ intro.3 \ makedev.3 \ offsetof.3 \ ${PTHREAD_MAN} \ Q_FRAWMASK.3 \ Q_IFRAWMASK.3 \ Q_INI.3 \ Q_IRAWMASK.3 \ Q_QABS.3 \ Q_QADDI.3 \ Q_QADDQ.3 \ Q_SIGNED.3 \ Q_SIGNSHFT.3 \ qmath.3 \ queue.3 \ sigevent.3 \ siginfo.3 \ stats.3 \ stdarg.3 \ sysexits.3 \ tgmath.3 \ timeradd.3 \ tree.3 MLINKS= arb.3 ARB8_ENTRY.3 \ arb.3 ARB8_HEAD.3 \ arb.3 ARB16_ENTRY.3 \ arb.3 ARB16_HEAD.3 \ arb.3 ARB32_ENTRY.3 \ arb.3 ARB32_HEAD.3 \ arb.3 ARB_ALLOCSIZE.3 \ arb.3 ARB_CURNODES.3 \ arb.3 ARB_EMPTY.3 \ arb.3 ARB_FIND.3 \ arb.3 ARB_FOREACH.3 \ arb.3 ARB_FOREACH_FROM.3 \ arb.3 ARB_FOREACH_REVERSE.3 \ arb.3 ARB_FOREACH_REVERSE_FROM.3 \ arb.3 ARB_FOREACH_REVERSE_SAFE.3 \ arb.3 ARB_FOREACH_SAFE.3 \ arb.3 ARB_FREEIDX.3 \ arb.3 ARB_FULL.3 \ arb.3 ARB_GETFREE.3 \ arb.3 ARB_INIT.3 \ arb.3 ARB_INITIALIZER.3 \ arb.3 ARB_INSERT.3 \ arb.3 ARB_LEFT.3 \ arb.3 ARB_LEFTIDX.3 \ arb.3 ARB_MAX.3 \ arb.3 ARB_MAXNODES.3 \ arb.3 ARB_MIN.3 \ arb.3 ARB_NEXT.3 \ arb.3 ARB_NFIND.3 \ arb.3 ARB_PARENT.3 \ arb.3 ARB_PARENTIDX.3 \ arb.3 ARB_PREV.3 \ arb.3 ARB_REINSERT.3 \ arb.3 ARB_REMOVE.3 \ arb.3 ARB_RESET_TREE.3 \ arb.3 ARB_RIGHT.3 \ arb.3 ARB_RIGHTIDX.3 \ arb.3 ARB_ROOT.3 MLINKS+= assert.3 static_assert.3 MLINKS+= ATOMIC_VAR_INIT.3 atomic_compare_exchange_strong.3 \ ATOMIC_VAR_INIT.3 atomic_compare_exchange_strong_explicit.3 \ ATOMIC_VAR_INIT.3 atomic_compare_exchange_weak.3 \ ATOMIC_VAR_INIT.3 atomic_compare_exchange_weak_explicit.3 \ ATOMIC_VAR_INIT.3 atomic_exchange.3 \ ATOMIC_VAR_INIT.3 atomic_exchange_explicit.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_add.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_add_explicit.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_and.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_and_explicit.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_or.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_or_explicit.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_sub.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_sub_explicit.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_xor.3 \ ATOMIC_VAR_INIT.3 atomic_fetch_xor_explicit.3 \ ATOMIC_VAR_INIT.3 atomic_init.3 \ ATOMIC_VAR_INIT.3 atomic_is_lock_free.3 \ ATOMIC_VAR_INIT.3 atomic_load.3 \ ATOMIC_VAR_INIT.3 atomic_load_explicit.3 \ ATOMIC_VAR_INIT.3 atomic_store.3 \ ATOMIC_VAR_INIT.3 atomic_store_explicit.3 MLINKS+= bitstring.3 bit_alloc.3 \ bitstring.3 bit_clear.3 \ bitstring.3 bit_count.3 \ bitstring.3 bit_decl.3 \ bitstring.3 bit_ffc.3 \ bitstring.3 bit_ffc_area.3 \ bitstring.3 bit_ffc_area_at.3 \ bitstring.3 bit_ffc_at.3 \ bitstring.3 bit_ffs.3 \ bitstring.3 bit_ffs_area.3 \ bitstring.3 bit_ffs_area_at.3 \ bitstring.3 bit_ffs_at.3 \ bitstring.3 bit_ff_at.3 \ bitstring.3 bit_ff_area_at.3 \ bitstring.3 bit_foreach.3 \ bitstring.3 bit_foreach_at.3 \ bitstring.3 bit_foreach_unset.3 \ bitstring.3 bit_foreach_unset_at.3 \ bitstring.3 bit_nclear.3 \ bitstring.3 bit_nset.3 \ bitstring.3 bit_ntest.3 \ bitstring.3 bit_set.3 \ bitstring.3 bitstr_size.3 \ bitstring.3 bit_test.3 MLINKS+= CMSG_DATA.3 CMSG_FIRSTHDR.3 \ CMSG_DATA.3 CMSG_LEN.3 \ CMSG_DATA.3 CMSG_NEXTHDR.3 \ CMSG_DATA.3 CMSG_SPACE.3 MLINKS+= end.3 edata.3 \ end.3 etext.3 MLINKS+= fpgetround.3 fpgetmask.3 \ fpgetround.3 fpgetprec.3 \ fpgetround.3 fpgetsticky.3 \ fpgetround.3 fpresetsticky.3 \ fpgetround.3 fpsetmask.3 \ fpgetround.3 fpsetprec.3 \ fpgetround.3 fpsetround.3 MLINKS+= makedev.3 
major.3 \ makedev.3 minor.3 MLINKS+= ${PTHREAD_MLINKS} MLINKS+= Q_FRAWMASK.3 Q_GFRAW.3 \ Q_FRAWMASK.3 Q_GFABSVAL.3 \ Q_FRAWMASK.3 Q_GFVAL.3 \ Q_FRAWMASK.3 Q_SFVAL.3 MLINKS+= Q_IFRAWMASK.3 Q_IFVALIMASK.3 \ Q_IFRAWMASK.3 Q_IFVALFMASK.3 \ Q_IFRAWMASK.3 Q_GIFRAW.3 \ Q_IFRAWMASK.3 Q_GIFABSVAL.3 \ Q_IFRAWMASK.3 Q_GIFVAL.3 \ Q_IFRAWMASK.3 Q_SIFVAL.3 \ Q_IFRAWMASK.3 Q_SIFVALS.3 MLINKS+= Q_INI.3 Q_NCBITS.3 \ Q_INI.3 Q_BT.3 \ Q_INI.3 Q_TC.3 \ Q_INI.3 Q_NTBITS.3 \ Q_INI.3 Q_NFCBITS.3 \ Q_INI.3 Q_MAXNFBITS.3 \ Q_INI.3 Q_NFBITS.3 \ Q_INI.3 Q_NIBITS.3 \ Q_INI.3 Q_RPSHFT.3 \ Q_INI.3 Q_ABS.3 \ Q_INI.3 Q_MAXSTRLEN.3 \ Q_INI.3 Q_TOSTR.3 \ Q_INI.3 Q_SHL.3 \ Q_INI.3 Q_SHR.3 \ Q_INI.3 Q_DEBUG.3 \ Q_INI.3 Q_DFV2BFV.3 MLINKS+= Q_IRAWMASK.3 Q_GIRAW.3 \ Q_IRAWMASK.3 Q_GIABSVAL.3 \ Q_IRAWMASK.3 Q_GIVAL.3 \ Q_IRAWMASK.3 Q_SIVAL.3 MLINKS+= Q_QABS.3 Q_Q2D.3 \ Q_QABS.3 Q_Q2F.3 MLINKS+= Q_QADDI.3 Q_QDIVI.3 \ Q_QADDI.3 Q_QMULI.3 \ Q_QADDI.3 Q_QSUBI.3 \ Q_QADDI.3 Q_QFRACI.3 \ Q_QADDI.3 Q_QCPYVALI.3 MLINKS+= Q_QADDQ.3 Q_QDIVQ.3 \ Q_QADDQ.3 Q_QMULQ.3 \ Q_QADDQ.3 Q_QSUBQ.3 \ Q_QADDQ.3 Q_NORMPREC.3 \ Q_QADDQ.3 Q_QMAXQ.3 \ Q_QADDQ.3 Q_QMINQ.3 \ Q_QADDQ.3 Q_QCLONEQ.3 \ Q_QADDQ.3 Q_QCPYVALQ.3 MLINKS+= Q_SIGNED.3 Q_LTZ.3 \ Q_SIGNED.3 Q_PRECEQ.3 \ Q_SIGNED.3 Q_QLTQ.3 \ Q_SIGNED.3 Q_QLEQ.3 \ Q_SIGNED.3 Q_QGTQ.3 \ Q_SIGNED.3 Q_QGEQ.3 \ Q_SIGNED.3 Q_QEQ.3 \ Q_SIGNED.3 Q_QNEQ.3 \ Q_SIGNED.3 Q_OFLOW.3 \ Q_SIGNED.3 Q_RELPREC.3 MLINKS+= Q_SIGNSHFT.3 Q_SSIGN.3 \ Q_SIGNSHFT.3 Q_CRAWMASK.3 \ Q_SIGNSHFT.3 Q_SRAWMASK.3 \ Q_SIGNSHFT.3 Q_GCRAW.3 \ Q_SIGNSHFT.3 Q_GCVAL.3 \ Q_SIGNSHFT.3 Q_SCVAL.3 MLINKS+= queue.3 LIST_CLASS_ENTRY.3 \ queue.3 LIST_CLASS_HEAD.3 \ queue.3 LIST_EMPTY.3 \ queue.3 LIST_ENTRY.3 \ queue.3 LIST_FIRST.3 \ queue.3 LIST_FOREACH.3 \ queue.3 LIST_FOREACH_FROM.3 \ queue.3 LIST_FOREACH_FROM_SAFE.3 \ queue.3 LIST_FOREACH_SAFE.3 \ queue.3 LIST_HEAD.3 \ queue.3 LIST_HEAD_INITIALIZER.3 \ queue.3 LIST_INIT.3 \ queue.3 LIST_INSERT_AFTER.3 \ queue.3 LIST_INSERT_BEFORE.3 \ queue.3 LIST_INSERT_HEAD.3 \ queue.3 LIST_NEXT.3 \ queue.3 LIST_PREV.3 \ queue.3 LIST_REMOVE.3 \ queue.3 LIST_SWAP.3 \ queue.3 SLIST_CLASS_ENTRY.3 \ queue.3 SLIST_CLASS_HEAD.3 \ queue.3 SLIST_EMPTY.3 \ queue.3 SLIST_ENTRY.3 \ queue.3 SLIST_FIRST.3 \ queue.3 SLIST_FOREACH.3 \ queue.3 SLIST_FOREACH_FROM.3 \ queue.3 SLIST_FOREACH_FROM_SAFE.3 \ queue.3 SLIST_FOREACH_SAFE.3 \ queue.3 SLIST_HEAD.3 \ queue.3 SLIST_HEAD_INITIALIZER.3 \ queue.3 SLIST_INIT.3 \ queue.3 SLIST_INSERT_AFTER.3 \ queue.3 SLIST_INSERT_HEAD.3 \ queue.3 SLIST_NEXT.3 \ queue.3 SLIST_REMOVE.3 \ queue.3 SLIST_REMOVE_AFTER.3 \ queue.3 SLIST_REMOVE_HEAD.3 \ queue.3 SLIST_REMOVE_PREVPTR.3 \ queue.3 SLIST_SWAP.3 \ queue.3 STAILQ_CLASS_ENTRY.3 \ queue.3 STAILQ_CLASS_HEAD.3 \ queue.3 STAILQ_CONCAT.3 \ queue.3 STAILQ_EMPTY.3 \ queue.3 STAILQ_ENTRY.3 \ queue.3 STAILQ_FIRST.3 \ queue.3 STAILQ_FOREACH.3 \ queue.3 STAILQ_FOREACH_FROM.3 \ queue.3 STAILQ_FOREACH_FROM_SAFE.3 \ queue.3 STAILQ_FOREACH_SAFE.3 \ queue.3 STAILQ_HEAD.3 \ queue.3 STAILQ_HEAD_INITIALIZER.3 \ queue.3 STAILQ_INIT.3 \ queue.3 STAILQ_INSERT_AFTER.3 \ queue.3 STAILQ_INSERT_HEAD.3 \ queue.3 STAILQ_INSERT_TAIL.3 \ queue.3 STAILQ_LAST.3 \ queue.3 STAILQ_NEXT.3 \ queue.3 STAILQ_REMOVE.3 \ queue.3 STAILQ_REMOVE_AFTER.3 \ queue.3 STAILQ_REMOVE_HEAD.3 \ queue.3 STAILQ_SWAP.3 \ queue.3 TAILQ_CLASS_ENTRY.3 \ queue.3 TAILQ_CLASS_HEAD.3 \ queue.3 TAILQ_CONCAT.3 \ queue.3 TAILQ_EMPTY.3 \ queue.3 TAILQ_ENTRY.3 \ queue.3 TAILQ_FIRST.3 \ queue.3 TAILQ_FOREACH.3 \ queue.3 TAILQ_FOREACH_FROM.3 \ queue.3 TAILQ_FOREACH_FROM_SAFE.3 \ queue.3 TAILQ_FOREACH_REVERSE.3 \ 
queue.3 TAILQ_FOREACH_REVERSE_FROM.3 \ queue.3 TAILQ_FOREACH_REVERSE_FROM_SAFE.3 \ queue.3 TAILQ_FOREACH_REVERSE_SAFE.3 \ queue.3 TAILQ_FOREACH_SAFE.3 \ queue.3 TAILQ_HEAD.3 \ queue.3 TAILQ_HEAD_INITIALIZER.3 \ queue.3 TAILQ_INIT.3 \ queue.3 TAILQ_INSERT_AFTER.3 \ queue.3 TAILQ_INSERT_BEFORE.3 \ queue.3 TAILQ_INSERT_HEAD.3 \ queue.3 TAILQ_INSERT_TAIL.3 \ queue.3 TAILQ_LAST.3 \ queue.3 TAILQ_NEXT.3 \ queue.3 TAILQ_PREV.3 \ queue.3 TAILQ_REMOVE.3 \ queue.3 TAILQ_SWAP.3 MLINKS+= stats.3 stats_tpl_alloc.3 \ stats.3 stats_tpl_fetch_allocid.3 \ stats.3 stats_tpl_fetch.3 \ stats.3 stats_tpl_id2name.3 \ stats.3 stats_tpl_sample_rates.3 \ stats.3 stats_tpl_sample_rollthedice.3 \ stats.3 STATS_VSS_SUM.3 \ stats.3 STATS_VSS_MAX.3 \ stats.3 STATS_VSS_MIN.3 \ stats.3 STATS_VSS_CRHIST32_LIN.3 \ stats.3 STATS_VSS_CRHIST64_LIN.3 \ stats.3 stats_tpl_add_voistats.3 \ stats.3 stats_blob_alloc.3 \ stats.3 stats_blob_init.3 \ stats.3 stats_blob_clone.3 \ stats.3 stats_blob_destroy.3 \ stats.3 stats_voistat_fetch_dptr.3 \ stats.3 stats_blob_snapshot.3 \ stats.3 stats_blob_tostr.3 \ stats.3 stats_voistatdata_tostr.3 \ stats.3 stats_blob_visit.3 MLINKS+= stdarg.3 va_arg.3 \ stdarg.3 va_copy.3 \ stdarg.3 va_end.3 \ stdarg.3 varargs.3 \ stdarg.3 va_start.3 MLINKS+= timeradd.3 timerclear.3 \ timeradd.3 timercmp.3 \ timeradd.3 timerisset.3 \ timeradd.3 timersub.3 \ timeradd.3 timespecadd.3 \ timeradd.3 timespecsub.3 \ timeradd.3 timespecclear.3 \ timeradd.3 timespecisset.3 \ timeradd.3 timespeccmp.3 MLINKS+= tree.3 RB_AUGMENT.3 \ + tree.3 RB_AUGMENT_CHECK.3 \ tree.3 RB_EMPTY.3 \ tree.3 RB_ENTRY.3 \ tree.3 RB_FIND.3 \ tree.3 RB_FOREACH.3 \ tree.3 RB_FOREACH_FROM.3 \ tree.3 RB_FOREACH_REVERSE.3 \ tree.3 RB_FOREACH_REVERSE_FROM.3 \ tree.3 RB_FOREACH_REVERSE_SAFE.3 \ tree.3 RB_FOREACH_SAFE.3 \ tree.3 RB_GENERATE.3 \ tree.3 RB_GENERATE_FIND.3 \ tree.3 RB_GENERATE_INSERT.3 \ tree.3 RB_GENERATE_INSERT_COLOR.3 \ tree.3 RB_GENERATE_MINMAX.3 \ tree.3 RB_GENERATE_NEXT.3 \ tree.3 RB_GENERATE_NFIND.3 \ tree.3 RB_GENERATE_PREV.3 \ tree.3 RB_GENERATE_REMOVE.3 \ tree.3 RB_GENERATE_REMOVE_COLOR.3 \ tree.3 RB_GENERATE_STATIC.3 \ tree.3 RB_HEAD.3 \ tree.3 RB_INIT.3 \ tree.3 RB_INITIALIZER.3 \ tree.3 RB_INSERT.3 \ tree.3 RB_LEFT.3 \ tree.3 RB_MAX.3 \ tree.3 RB_MIN.3 \ tree.3 RB_NEXT.3 \ tree.3 RB_NFIND.3 \ tree.3 RB_PARENT.3 \ tree.3 RB_PREV.3 \ tree.3 RB_PROTOTYPE.3 \ tree.3 RB_PROTOTYPE_FIND.3 \ tree.3 RB_PROTOTYPE_INSERT.3 \ tree.3 RB_PROTOTYPE_INSERT_COLOR.3 \ tree.3 RB_PROTOTYPE_MINMAX.3 \ tree.3 RB_PROTOTYPE_NEXT.3 \ tree.3 RB_PROTOTYPE_NFIND.3 \ tree.3 RB_PROTOTYPE_PREV.3 \ tree.3 RB_PROTOTYPE_REMOVE.3 \ tree.3 RB_PROTOTYPE_REMOVE_COLOR.3 \ tree.3 RB_PROTOTYPE_STATIC.3 \ tree.3 RB_REINSERT.3 \ tree.3 RB_REMOVE.3 \ tree.3 RB_RIGHT.3 \ tree.3 RB_ROOT.3 \ tree.3 SPLAY_EMPTY.3 \ tree.3 SPLAY_ENTRY.3 \ tree.3 SPLAY_FIND.3 \ tree.3 SPLAY_FOREACH.3 \ tree.3 SPLAY_GENERATE.3 \ tree.3 SPLAY_HEAD.3 \ tree.3 SPLAY_INIT.3 \ tree.3 SPLAY_INITIALIZER.3 \ tree.3 SPLAY_INSERT.3 \ tree.3 SPLAY_LEFT.3 \ tree.3 SPLAY_MAX.3 \ tree.3 SPLAY_MIN.3 \ tree.3 SPLAY_NEXT.3 \ tree.3 SPLAY_PROTOTYPE.3 \ tree.3 SPLAY_REMOVE.3 \ tree.3 SPLAY_RIGHT.3 \ tree.3 SPLAY_ROOT.3 PTHREAD_MAN= pthread.3 \ pthread_affinity_np.3 \ pthread_atfork.3 \ pthread_attr.3 \ pthread_attr_affinity_np.3 \ pthread_attr_get_np.3 \ pthread_attr_setcreatesuspend_np.3 \ pthread_barrierattr.3 \ pthread_barrier_destroy.3 \ pthread_cancel.3 \ pthread_cleanup_pop.3 \ pthread_cleanup_push.3 \ pthread_condattr.3 \ pthread_cond_broadcast.3 \ pthread_cond_destroy.3 \ pthread_cond_init.3 \ 
pthread_cond_signal.3 \ pthread_cond_timedwait.3 \ pthread_cond_wait.3 \ pthread_create.3 \ pthread_detach.3 \ pthread_equal.3 \ pthread_exit.3 \ pthread_getconcurrency.3 \ pthread_getcpuclockid.3 \ pthread_getspecific.3 \ pthread_getthreadid_np.3 \ pthread_join.3 \ pthread_key_create.3 \ pthread_key_delete.3 \ pthread_kill.3 \ pthread_main_np.3 \ pthread_multi_np.3 \ pthread_mutexattr.3 \ pthread_mutexattr_getkind_np.3 \ pthread_mutex_consistent.3 \ pthread_mutex_destroy.3 \ pthread_mutex_init.3 \ pthread_mutex_lock.3 \ pthread_mutex_timedlock.3 \ pthread_mutex_trylock.3 \ pthread_mutex_unlock.3 \ pthread_np.3 \ pthread_once.3 \ pthread_resume_all_np.3 \ pthread_resume_np.3 \ pthread_rwlockattr_destroy.3 \ pthread_rwlockattr_getpshared.3 \ pthread_rwlockattr_init.3 \ pthread_rwlockattr_setpshared.3 \ pthread_rwlock_destroy.3 \ pthread_rwlock_init.3 \ pthread_rwlock_rdlock.3 \ pthread_rwlock_timedrdlock.3 \ pthread_rwlock_timedwrlock.3 \ pthread_rwlock_unlock.3 \ pthread_rwlock_wrlock.3 \ pthread_schedparam.3 \ pthread_self.3 \ pthread_set_name_np.3 \ pthread_setspecific.3 \ pthread_sigmask.3 \ pthread_spin_init.3 \ pthread_spin_lock.3 \ pthread_suspend_all_np.3 \ pthread_suspend_np.3 \ pthread_switch_add_np.3 \ pthread_testcancel.3 \ pthread_yield.3 PTHREAD_MLINKS= pthread_affinity_np.3 pthread_getaffinity_np.3 \ pthread_affinity_np.3 pthread_setaffinity_np.3 PTHREAD_MLINKS+=pthread_attr.3 pthread_attr_destroy.3 \ pthread_attr.3 pthread_attr_getdetachstate.3 \ pthread_attr.3 pthread_attr_getguardsize.3 \ pthread_attr.3 pthread_attr_getinheritsched.3 \ pthread_attr.3 pthread_attr_getschedparam.3 \ pthread_attr.3 pthread_attr_getschedpolicy.3 \ pthread_attr.3 pthread_attr_getscope.3 \ pthread_attr.3 pthread_attr_getstack.3 \ pthread_attr.3 pthread_attr_getstackaddr.3 \ pthread_attr.3 pthread_attr_getstacksize.3 \ pthread_attr.3 pthread_attr_init.3 \ pthread_attr.3 pthread_attr_setdetachstate.3 \ pthread_attr.3 pthread_attr_setguardsize.3 \ pthread_attr.3 pthread_attr_setinheritsched.3 \ pthread_attr.3 pthread_attr_setschedparam.3 \ pthread_attr.3 pthread_attr_setschedpolicy.3 \ pthread_attr.3 pthread_attr_setscope.3 \ pthread_attr.3 pthread_attr_setstack.3 \ pthread_attr.3 pthread_attr_setstackaddr.3 \ pthread_attr.3 pthread_attr_setstacksize.3 PTHREAD_MLINKS+=pthread_attr_affinity_np.3 pthread_attr_getaffinity_np.3 \ pthread_attr_affinity_np.3 pthread_attr_setaffinity_np.3 PTHREAD_MLINKS+=pthread_barrierattr.3 pthread_barrierattr_destroy.3 \ pthread_barrierattr.3 pthread_barrierattr_getpshared.3 \ pthread_barrierattr.3 pthread_barrierattr_init.3 \ pthread_barrierattr.3 pthread_barrierattr_setpshared.3 PTHREAD_MLINKS+=pthread_barrier_destroy.3 pthread_barrier_init.3 \ pthread_barrier_destroy.3 pthread_barrier_wait.3 PTHREAD_MLINKS+=pthread_condattr.3 pthread_condattr_destroy.3 \ pthread_condattr.3 pthread_condattr_init.3 \ pthread_condattr.3 pthread_condattr_getclock.3 \ pthread_condattr.3 pthread_condattr_setclock.3 \ pthread_condattr.3 pthread_condattr_getpshared.3 \ pthread_condattr.3 pthread_condattr_setpshared.3 PTHREAD_MLINKS+=pthread_getconcurrency.3 pthread_setconcurrency.3 PTHREAD_MLINKS+=pthread_multi_np.3 pthread_single_np.3 PTHREAD_MLINKS+=pthread_mutexattr.3 pthread_mutexattr_destroy.3 \ pthread_mutexattr.3 pthread_mutexattr_getprioceiling.3 \ pthread_mutexattr.3 pthread_mutexattr_getprotocol.3 \ pthread_mutexattr.3 pthread_mutexattr_getpshared.3 \ pthread_mutexattr.3 pthread_mutexattr_getrobust.3 \ pthread_mutexattr.3 pthread_mutexattr_gettype.3 \ pthread_mutexattr.3 
pthread_mutexattr_init.3 \ pthread_mutexattr.3 pthread_mutexattr_setprioceiling.3 \ pthread_mutexattr.3 pthread_mutexattr_setprotocol.3 \ pthread_mutexattr.3 pthread_mutexattr_setpshared.3 \ pthread_mutexattr.3 pthread_mutexattr_setrobust.3 \ pthread_mutexattr.3 pthread_mutexattr_settype.3 PTHREAD_MLINKS+=pthread_mutexattr_getkind_np.3 pthread_mutexattr_setkind_np.3 PTHREAD_MLINKS+=pthread_rwlock_rdlock.3 pthread_rwlock_tryrdlock.3 PTHREAD_MLINKS+=pthread_rwlock_wrlock.3 pthread_rwlock_trywrlock.3 PTHREAD_MLINKS+=pthread_schedparam.3 pthread_getschedparam.3 \ pthread_schedparam.3 pthread_setschedparam.3 PTHREAD_MLINKS+=pthread_set_name_np.3 pthread_get_name_np.3 \ pthread_set_name_np.3 pthread_getname_np.3 \ pthread_set_name_np.3 pthread_setname_np.3 PTHREAD_MLINKS+=pthread_spin_init.3 pthread_spin_destroy.3 \ pthread_spin_lock.3 pthread_spin_trylock.3 \ pthread_spin_lock.3 pthread_spin_unlock.3 PTHREAD_MLINKS+=pthread_switch_add_np.3 pthread_switch_delete_np.3 PTHREAD_MLINKS+=pthread_testcancel.3 pthread_setcancelstate.3 \ pthread_testcancel.3 pthread_setcanceltype.3 PTHREAD_MLINKS+=pthread_join.3 pthread_peekjoin_np.3 \ pthread_join.3 pthread_timedjoin_np.3 .include diff --git a/share/man/man3/tree.3 b/share/man/man3/tree.3 index 7e3727b2c365..27bba268da62 100644 --- a/share/man/man3/tree.3 +++ b/share/man/man3/tree.3 @@ -1,781 +1,800 @@ .\" $OpenBSD: tree.3,v 1.7 2002/06/12 01:09:20 provos Exp $ .\" .\" Copyright 2002 Niels Provos .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. All advertising materials mentioning features or use of this software .\" must display the following acknowledgement: .\" This product includes software developed by Niels Provos. .\" 4. The name of the author may not be used to endorse or promote products .\" derived from this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.\" .\" $FreeBSD$ .\" .Dd July 27, 2020 .Dt TREE 3 .Os .Sh NAME .Nm SPLAY_PROTOTYPE , .Nm SPLAY_GENERATE , .Nm SPLAY_ENTRY , .Nm SPLAY_HEAD , .Nm SPLAY_INITIALIZER , .Nm SPLAY_ROOT , .Nm SPLAY_EMPTY , .Nm SPLAY_NEXT , .Nm SPLAY_MIN , .Nm SPLAY_MAX , .Nm SPLAY_FIND , .Nm SPLAY_LEFT , .Nm SPLAY_RIGHT , .Nm SPLAY_FOREACH , .Nm SPLAY_INIT , .Nm SPLAY_INSERT , .Nm SPLAY_REMOVE , .Nm RB_PROTOTYPE , .Nm RB_PROTOTYPE_STATIC , .Nm RB_PROTOTYPE_INSERT , .Nm RB_PROTOTYPE_INSERT_COLOR , .Nm RB_PROTOTYPE_REMOVE , .Nm RB_PROTOTYPE_REMOVE_COLOR , .Nm RB_PROTOTYPE_FIND , .Nm RB_PROTOTYPE_NFIND , .Nm RB_PROTOTYPE_NEXT , .Nm RB_PROTOTYPE_PREV , .Nm RB_PROTOTYPE_MINMAX , .Nm RB_PROTOTYPE_REINSERT , .Nm RB_GENERATE , .Nm RB_GENERATE_STATIC , .Nm RB_GENERATE_INSERT , .Nm RB_GENERATE_INSERT_COLOR , .Nm RB_GENERATE_REMOVE , .Nm RB_GENERATE_REMOVE_COLOR , .Nm RB_GENERATE_FIND , .Nm RB_GENERATE_NFIND , .Nm RB_GENERATE_NEXT , .Nm RB_GENERATE_PREV , .Nm RB_GENERATE_MINMAX , .Nm RB_GENERATE_REINSERT , .Nm RB_ENTRY , .Nm RB_HEAD , .Nm RB_INITIALIZER , .Nm RB_ROOT , .Nm RB_EMPTY , .Nm RB_NEXT , .Nm RB_PREV , .Nm RB_MIN , .Nm RB_MAX , .Nm RB_FIND , .Nm RB_NFIND , .Nm RB_LEFT , .Nm RB_RIGHT , .Nm RB_PARENT , .Nm RB_FOREACH , .Nm RB_FOREACH_FROM , .Nm RB_FOREACH_SAFE , .Nm RB_FOREACH_REVERSE , .Nm RB_FOREACH_REVERSE_FROM , .Nm RB_FOREACH_REVERSE_SAFE , .Nm RB_INIT , .Nm RB_INSERT , .Nm RB_REMOVE , .Nm RB_REINSERT , .Nm RB_AUGMENT +.Nm RB_AUGMENT_CHECK, .Nm RB_UPDATE_AUGMENT .Nd "implementations of splay and rank-balanced (wavl) trees" .Sh SYNOPSIS .In sys/tree.h .Fn SPLAY_PROTOTYPE NAME TYPE FIELD CMP .Fn SPLAY_GENERATE NAME TYPE FIELD CMP .Fn SPLAY_ENTRY TYPE .Fn SPLAY_HEAD HEADNAME TYPE .Ft "struct TYPE *" .Fn SPLAY_INITIALIZER "SPLAY_HEAD *head" .Fn SPLAY_ROOT "SPLAY_HEAD *head" .Ft bool .Fn SPLAY_EMPTY "SPLAY_HEAD *head" .Ft "struct TYPE *" .Fn SPLAY_NEXT NAME "SPLAY_HEAD *head" "struct TYPE *elm" .Ft "struct TYPE *" .Fn SPLAY_MIN NAME "SPLAY_HEAD *head" .Ft "struct TYPE *" .Fn SPLAY_MAX NAME "SPLAY_HEAD *head" .Ft "struct TYPE *" .Fn SPLAY_FIND NAME "SPLAY_HEAD *head" "struct TYPE *elm" .Ft "struct TYPE *" .Fn SPLAY_LEFT "struct TYPE *elm" "SPLAY_ENTRY NAME" .Ft "struct TYPE *" .Fn SPLAY_RIGHT "struct TYPE *elm" "SPLAY_ENTRY NAME" .Fn SPLAY_FOREACH VARNAME NAME "SPLAY_HEAD *head" .Ft void .Fn SPLAY_INIT "SPLAY_HEAD *head" .Ft "struct TYPE *" .Fn SPLAY_INSERT NAME "SPLAY_HEAD *head" "struct TYPE *elm" .Ft "struct TYPE *" .Fn SPLAY_REMOVE NAME "SPLAY_HEAD *head" "struct TYPE *elm" .Fn RB_PROTOTYPE NAME TYPE FIELD CMP .Fn RB_PROTOTYPE_STATIC NAME TYPE FIELD CMP .Fn RB_PROTOTYPE_INSERT NAME TYPE ATTR .Fn RB_PROTOTYPE_INSERT_COLOR NAME TYPE ATTR .Fn RB_PROTOTYPE_REMOVE NAME TYPE ATTR .Fn RB_PROTOTYPE_REMOVE_COLOR NAME TYPE ATTR .Fn RB_PROTOTYPE_FIND NAME TYPE ATTR .Fn RB_PROTOTYPE_NFIND NAME TYPE ATTR .Fn RB_PROTOTYPE_NEXT NAME TYPE ATTR .Fn RB_PROTOTYPE_PREV NAME TYPE ATTR .Fn RB_PROTOTYPE_MINMAX NAME TYPE ATTR .Fn RB_PROTOTYPE_REINSERT NAME TYPE ATTR .Fn RB_GENERATE NAME TYPE FIELD CMP .Fn RB_GENERATE_STATIC NAME TYPE FIELD CMP .Fn RB_GENERATE_INSERT NAME TYPE FIELD CMP ATTR .Fn RB_GENERATE_INSERT_COLOR NAME TYPE FIELD ATTR .Fn RB_GENERATE_REMOVE NAME TYPE FIELD ATTR .Fn RB_GENERATE_REMOVE_COLOR NAME TYPE FIELD ATTR .Fn RB_GENERATE_FIND NAME TYPE FIELD CMP ATTR .Fn RB_GENERATE_NFIND NAME TYPE FIELD CMP ATTR .Fn RB_GENERATE_NEXT NAME TYPE FIELD ATTR .Fn RB_GENERATE_PREV NAME TYPE FIELD ATTR .Fn RB_GENERATE_MINMAX NAME TYPE FIELD ATTR .Fn RB_GENERATE_REINSERT NAME TYPE FIELD CMP ATTR .Fn RB_ENTRY TYPE .Fn RB_HEAD HEADNAME 
TYPE .Fn RB_INITIALIZER "RB_HEAD *head" .Ft "struct TYPE *" .Fn RB_ROOT "RB_HEAD *head" .Ft "bool" .Fn RB_EMPTY "RB_HEAD *head" .Ft "struct TYPE *" .Fn RB_NEXT NAME "RB_HEAD *head" "struct TYPE *elm" .Ft "struct TYPE *" .Fn RB_PREV NAME "RB_HEAD *head" "struct TYPE *elm" .Ft "struct TYPE *" .Fn RB_MIN NAME "RB_HEAD *head" .Ft "struct TYPE *" .Fn RB_MAX NAME "RB_HEAD *head" .Ft "struct TYPE *" .Fn RB_FIND NAME "RB_HEAD *head" "struct TYPE *elm" .Ft "struct TYPE *" .Fn RB_NFIND NAME "RB_HEAD *head" "struct TYPE *elm" .Ft "struct TYPE *" .Fn RB_LEFT "struct TYPE *elm" "RB_ENTRY NAME" .Ft "struct TYPE *" .Fn RB_RIGHT "struct TYPE *elm" "RB_ENTRY NAME" .Ft "struct TYPE *" .Fn RB_PARENT "struct TYPE *elm" "RB_ENTRY NAME" .Fn RB_FOREACH VARNAME NAME "RB_HEAD *head" .Fn RB_FOREACH_FROM "VARNAME" "NAME" "POS_VARNAME" .Fn RB_FOREACH_SAFE "VARNAME" "NAME" "RB_HEAD *head" "TEMP_VARNAME" .Fn RB_FOREACH_REVERSE VARNAME NAME "RB_HEAD *head" .Fn RB_FOREACH_REVERSE_FROM "VARNAME" "NAME" "POS_VARNAME" .Fn RB_FOREACH_REVERSE_SAFE "VARNAME" "NAME" "RB_HEAD *head" "TEMP_VARNAME" .Ft void .Fn RB_INIT "RB_HEAD *head" .Ft "struct TYPE *" .Fn RB_INSERT NAME "RB_HEAD *head" "struct TYPE *elm" .Ft "struct TYPE *" .Fn RB_REMOVE NAME "RB_HEAD *head" "struct TYPE *elm" .Ft "struct TYPE *" .Fn RB_REINSERT NAME "RB_HEAD *head" "struct TYPE *elm" .Ft "void" .Fn RB_AUGMENT NAME "struct TYPE *elm" +.Ft "bool" +.Fn RB_AUGMENT_CHECK NAME "struct TYPE *elm" .Ft "void" .Fn RB_UPDATE_AUGMENT NAME "struct TYPE *elm" .Sh DESCRIPTION These macros define data structures for different types of trees: splay trees and rank-balanced (wavl) trees. .Pp In the macro definitions, .Fa TYPE is the name tag of a user defined structure that must contain a field of type .Vt SPLAY_ENTRY , or .Vt RB_ENTRY , named .Fa ENTRYNAME . The argument .Fa HEADNAME is the name tag of a user defined structure that must be declared using the macros .Fn SPLAY_HEAD , or .Fn RB_HEAD . The argument .Fa NAME has to be a unique name prefix for every tree that is defined. .Pp The function prototypes are declared with .Fn SPLAY_PROTOTYPE , .Fn RB_PROTOTYPE , or .Fn RB_PROTOTYPE_STATIC . The function bodies are generated with .Fn SPLAY_GENERATE , .Fn RB_GENERATE , or .Fn RB_GENERATE_STATIC . See the examples below for further explanation of how these macros are used. .Sh SPLAY TREES A splay tree is a self-organizing data structure. Every operation on the tree causes a splay to happen. The splay moves the requested node to the root of the tree and partly rebalances it. .Pp This has the benefit that request locality causes faster lookups as the requested nodes move to the top of the tree. On the other hand, every lookup causes memory writes. .Pp The Balance Theorem bounds the total access time for .Ar m operations and .Ar n inserts on an initially empty tree as .Fn O "\*[lp]m + n\*[rp]lg n" . The amortized cost for a sequence of .Ar m accesses to a splay tree is .Fn O "lg n" . .Pp A splay tree is headed by a structure defined by the .Fn SPLAY_HEAD macro. A structure is declared as follows: .Bd -ragged -offset indent .Fn SPLAY_HEAD HEADNAME TYPE .Va head ; .Ed .Pp where .Fa HEADNAME is the name of the structure to be defined, and struct .Fa TYPE is the type of the elements to be inserted into the tree. .Pp The .Fn SPLAY_ENTRY macro declares a structure that allows elements to be connected in the tree. 
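+.Pp
+For example, a splay tree of elements of a hypothetical
+.Vt struct node
+type, keyed by an integer, could be declared as follows (the names
+.Vt node ,
+.Vt nodetree ,
+and
+.Va key
+are illustrative only, not part of the API):
+.Bd -literal -offset indent
+struct node {
+	SPLAY_ENTRY(node) entry;	/* tree linkage */
+	int key;			/* search key */
+};
+SPLAY_HEAD(nodetree, node) head;
+.Ed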
.Pp In order to use the functions that manipulate the tree structure, their prototypes need to be declared with the .Fn SPLAY_PROTOTYPE macro, where .Fa NAME is a unique identifier for this particular tree. The .Fa TYPE argument is the type of the structure that is being managed by the tree. The .Fa FIELD argument is the name of the element defined by .Fn SPLAY_ENTRY . .Pp The function bodies are generated with the .Fn SPLAY_GENERATE macro. It takes the same arguments as the .Fn SPLAY_PROTOTYPE macro, but should be used only once. .Pp Finally, the .Fa CMP argument is the name of a function used to compare tree nodes with each other. The function takes two arguments of type .Vt "struct TYPE *" . If the first argument is smaller than the second, the function returns a value smaller than zero. If they are equal, the function returns zero. Otherwise, it should return a value greater than zero. The compare function defines the order of the tree elements. .Pp The .Fn SPLAY_INIT macro initializes the tree referenced by .Fa head . .Pp The splay tree can also be initialized statically by using the .Fn SPLAY_INITIALIZER macro like this: .Bd -ragged -offset indent .Fn SPLAY_HEAD HEADNAME TYPE .Va head = .Fn SPLAY_INITIALIZER &head ; .Ed .Pp The .Fn SPLAY_INSERT macro inserts the new element .Fa elm into the tree. .Pp The .Fn SPLAY_REMOVE macro removes the element .Fa elm from the tree pointed to by .Fa head . .Pp The .Fn SPLAY_FIND macro can be used to find a particular element in the tree. .Bd -literal -offset indent struct TYPE find, *res; find.key = 30; res = SPLAY_FIND(NAME, head, &find); .Ed .Pp The .Fn SPLAY_ROOT , .Fn SPLAY_MIN , .Fn SPLAY_MAX , and .Fn SPLAY_NEXT macros can be used to traverse the tree: .Bd -literal -offset indent for (np = SPLAY_MIN(NAME, &head); np != NULL; np = SPLAY_NEXT(NAME, &head, np)) .Ed .Pp Or, for simplicity, one can use the .Fn SPLAY_FOREACH macro: .Bd -ragged -offset indent .Fn SPLAY_FOREACH np NAME head .Ed .Pp The .Fn SPLAY_EMPTY macro should be used to check whether a splay tree is empty. .Sh RANK-BALANCED TREES Rank-balanced (RB) trees are a framework for defining height-balanced binary search trees, including AVL and red-black trees. Each tree node has an associated rank. Balance conditions are expressed by conditions on the differences in rank between any node and its children. Rank differences are stored in each tree node. .Pp The balance conditions implemented by the RB macros lead to weak AVL (wavl) trees, which combine the best aspects of AVL and red-black trees. Wavl trees rebalance after an insertion in the same way AVL trees do, with the same worst-case time as red-black trees offer, and with better balance in the resulting tree. Wavl trees rebalance after a removal in a way that requires less restructuring, in the worst case, than either AVL or red-black trees do. Removals can lead to a tree almost as unbalanced as a red-black tree; insertions lead to a tree becoming as balanced as an AVL tree. .Pp A rank-balanced tree is headed by a structure defined by the .Fn RB_HEAD macro. A structure is declared as follows: .Bd -ragged -offset indent .Fn RB_HEAD HEADNAME TYPE .Va head ; .Ed .Pp where .Fa HEADNAME is the name of the structure to be defined, and struct .Fa TYPE is the type of the elements to be inserted into the tree. .Pp The .Fn RB_ENTRY macro declares a structure that allows elements to be connected in the tree.
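+.Pp
+For example, a rank-balanced tree of elements of a hypothetical
+.Vt struct node
+type could be declared as follows (again, the names are illustrative
+only):
+.Bd -literal -offset indent
+struct node {
+	RB_ENTRY(node) entry;	/* tree linkage */
+	int key;			/* search key */
+};
+RB_HEAD(nodetree, node) head;
+.Ed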
.Pp In order to use the functions that manipulate the tree structure, their prototypes need to be declared with the .Fn RB_PROTOTYPE or .Fn RB_PROTOTYPE_STATIC macro, where .Fa NAME is a unique identifier for this particular tree. The .Fa TYPE argument is the type of the structure that is being managed by the tree. The .Fa FIELD argument is the name of the element defined by .Fn RB_ENTRY . Individual prototypes can be declared with .Fn RB_PROTOTYPE_INSERT , .Fn RB_PROTOTYPE_INSERT_COLOR , .Fn RB_PROTOTYPE_REMOVE , .Fn RB_PROTOTYPE_REMOVE_COLOR , .Fn RB_PROTOTYPE_FIND , .Fn RB_PROTOTYPE_NFIND , .Fn RB_PROTOTYPE_NEXT , .Fn RB_PROTOTYPE_PREV , .Fn RB_PROTOTYPE_MINMAX , and .Fn RB_PROTOTYPE_REINSERT in case not all functions are required. The individual prototype macros expect .Fa NAME , .Fa TYPE , and .Fa ATTR arguments. The .Fa ATTR argument must be empty for global functions or .Fa static for static functions. .Pp The function bodies are generated with the .Fn RB_GENERATE or .Fn RB_GENERATE_STATIC macro. These macros take the same arguments as the .Fn RB_PROTOTYPE and .Fn RB_PROTOTYPE_STATIC macros, but should be used only once. As an alternative, individual function bodies can be generated with the .Fn RB_GENERATE_INSERT , .Fn RB_GENERATE_INSERT_COLOR , .Fn RB_GENERATE_REMOVE , .Fn RB_GENERATE_REMOVE_COLOR , .Fn RB_GENERATE_FIND , .Fn RB_GENERATE_NFIND , .Fn RB_GENERATE_NEXT , .Fn RB_GENERATE_PREV , .Fn RB_GENERATE_MINMAX , and .Fn RB_GENERATE_REINSERT macros. .Pp Finally, the .Fa CMP argument is the name of a function used to compare tree nodes with each other. The function takes two arguments of type .Vt "struct TYPE *" . If the first argument is smaller than the second, the function returns a value smaller than zero. If they are equal, the function returns zero. Otherwise, it should return a value greater than zero. The compare function defines the order of the tree elements. .Pp The .Fn RB_INIT macro initializes the tree referenced by .Fa head . .Pp The rank-balanced tree can also be initialized statically by using the .Fn RB_INITIALIZER macro like this: .Bd -ragged -offset indent .Fn RB_HEAD HEADNAME TYPE .Va head = .Fn RB_INITIALIZER &head ; .Ed .Pp The .Fn RB_INSERT macro inserts the new element .Fa elm into the tree. .Pp The .Fn RB_REMOVE macro removes the element .Fa elm from the tree pointed to by .Fa head . .Pp The .Fn RB_FIND and .Fn RB_NFIND macros can be used to find a particular element in the tree. .Pp The .Fn RB_FIND macro returns the element in the tree equal to the provided key, or .Dv NULL if there is no such element. .Pp The .Fn RB_NFIND macro returns the least element greater than or equal to the provided key, or .Dv NULL if there is no such element. .Bd -literal -offset indent struct TYPE find, *res, *resn; find.key = 30; res = RB_FIND(NAME, head, &find); resn = RB_NFIND(NAME, head, &find); .Ed .Pp The .Fn RB_ROOT , .Fn RB_MIN , .Fn RB_MAX , .Fn RB_NEXT , and .Fn RB_PREV macros can be used to traverse the tree: .Pp .Dl "for (np = RB_MIN(NAME, &head); np != NULL; np = RB_NEXT(NAME, &head, np))" .Pp Or, for simplicity, one can use the .Fn RB_FOREACH or .Fn RB_FOREACH_REVERSE macro: .Bd -ragged -offset indent .Fn RB_FOREACH np NAME head .Ed .Pp The macros .Fn RB_FOREACH_SAFE and .Fn RB_FOREACH_REVERSE_SAFE traverse the tree referenced by head in a forward or reverse direction, respectively, assigning each element in turn to np.
However, unlike their unsafe counterparts, they permit the safe removal and freeing of np from within the loop without interfering with the traversal. .Pp Both .Fn RB_FOREACH_FROM and .Fn RB_FOREACH_REVERSE_FROM may be used to continue an interrupted traversal in a forward or reverse direction, respectively. The head pointer is not required. The pointer to the node from which to resume the traversal should be passed as their last argument, and will be overwritten to provide safe traversal. .Pp The .Fn RB_EMPTY macro should be used to check whether a rank-balanced tree is empty. .Pp The .Fn RB_REINSERT macro updates the position of the element .Fa elm in the tree. This must be called if a member of a .Nm tree is modified in a way that affects comparison, such as by modifying a node's key. This is a lower-overhead alternative to removing the element and reinserting it. .Pp The .Fn RB_AUGMENT macro updates augmentation data of the element .Fa elm in the tree. By default, it has no effect. It is not meant to be invoked by the RB user. If .Fn RB_AUGMENT is defined by the RB user, then when an element is inserted or removed from the tree, it is invoked for every element in the tree that is the root of an altered subtree, working from the bottom of the tree up to the top. It is typically used to maintain some associative accumulation of tree elements, such as sums, minima, maxima, and the like. .Pp The +.Fn RB_AUGMENT_CHECK +macro updates augmentation data of the element +.Fa elm +in the tree. +By default, it does nothing and returns false. +If +.Fn RB_AUGMENT_CHECK +is defined, then when an element is inserted or removed from the tree, +it is invoked for every element in the tree that is the root of an +altered subtree, working from the bottom of the tree up toward the +top, until it returns false, indicating that it did not change the +element, so that working further up the tree would change nothing. +It is typically used to maintain some associative accumulation of tree +elements, such as sums, minima, maxima, and the like. +.Pp +The .Fn RB_UPDATE_AUGMENT macro updates augmentation data of the element .Fa elm and its ancestors in the tree. If .Fn RB_AUGMENT is defined by the RB user, then when an element in the tree is changed, without changing the order of items in the tree, invoking this function on that element restores consistency of the augmentation state of the tree as if the element had been removed and inserted again.
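+.Pp
+As an illustrative sketch (reusing the
+.Vt struct node
+type from the
+.Sx EXAMPLES
+section below, with its
+.Va i
+and
+.Va sum
+fields; the function name
+.Fn sumaug_check
+is hypothetical), a subtree-sum augmentation that stops propagation as
+soon as nothing changes could be written as:
+.Bd -literal -offset indent
+int
+sumaug_check(struct node *e)
+{
+	int sum;
+
+	sum = e->i;
+	if (RB_LEFT(e, entry) != NULL)
+		sum += RB_LEFT(e, entry)->sum;
+	if (RB_RIGHT(e, entry) != NULL)
+		sum += RB_RIGHT(e, entry)->sum;
+	if (e->sum == sum)
+		return (0);	/* no change; stop walking up */
+	e->sum = sum;
+	return (1);	/* changed; ancestors must be updated too */
+}
+#define RB_AUGMENT_CHECK(entry)	sumaug_check(entry)
+.Ed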
.Sh EXAMPLES The following example demonstrates how to declare a rank-balanced tree holding integers. Values are inserted into it and the contents of the tree are printed in order. To maintain the sum of the values in the tree, each element maintains the sum of its value and the sums from its left and right subtrees. Lastly, the internal structure of the tree is printed. .Bd -literal -offset 3n #include <sys/tree.h> #include <err.h> #include <stdio.h> #include <stdlib.h> struct node { RB_ENTRY(node) entry; int i, sum; }; int intcmp(struct node *e1, struct node *e2) { return (e1->i < e2->i ? -1 : e1->i > e2->i); } void sumaug(struct node *e) { e->sum = e->i; if (RB_LEFT(e, entry) != NULL) e->sum += RB_LEFT(e, entry)->sum; if (RB_RIGHT(e, entry) != NULL) e->sum += RB_RIGHT(e, entry)->sum; } #define RB_AUGMENT(entry) sumaug(entry) RB_HEAD(inttree, node) head = RB_INITIALIZER(&head); RB_GENERATE(inttree, node, entry, intcmp) int testdata[] = { 20, 16, 17, 13, 3, 6, 1, 8, 2, 4, 10, 19, 5, 9, 12, 15, 18, 7, 11, 14 }; void print_tree(struct node *n) { struct node *left, *right; if (n == NULL) { printf("nil"); return; } left = RB_LEFT(n, entry); right = RB_RIGHT(n, entry); if (left == NULL && right == NULL) printf("%d", n->i); else { printf("%d(", n->i); print_tree(left); printf(","); print_tree(right); printf(")"); } } int main(void) { int i; struct node *n; for (i = 0; i < sizeof(testdata) / sizeof(testdata[0]); i++) { if ((n = malloc(sizeof(struct node))) == NULL) err(1, NULL); n->i = testdata[i]; RB_INSERT(inttree, &head, n); } RB_FOREACH(n, inttree, &head) { printf("%d\en", n->i); } print_tree(RB_ROOT(&head)); printf("\en"); printf("Sum of values = %d\en", RB_ROOT(&head)->sum); return (0); } .Ed .Sh NOTES Trying to free a tree in the following way is a common error: .Bd -literal -offset indent SPLAY_FOREACH(var, NAME, head) { SPLAY_REMOVE(NAME, head, var); free(var); } free(head); .Ed .Pp Since .Va var is freed, the .Fn FOREACH macro refers to a pointer that may have been reallocated already. Proper code needs a second variable. .Bd -literal -offset indent for (var = SPLAY_MIN(NAME, head); var != NULL; var = nxt) { nxt = SPLAY_NEXT(NAME, head, var); SPLAY_REMOVE(NAME, head, var); free(var); } .Ed .Pp Both .Fn RB_INSERT and .Fn SPLAY_INSERT return .Dv NULL if the element was inserted in the tree successfully; otherwise, they return a pointer to the element with the colliding key. .Pp Accordingly, .Fn RB_REMOVE and .Fn SPLAY_REMOVE return the pointer to the removed element on success; otherwise, they return .Dv NULL to indicate an error. .Sh SEE ALSO .Xr arb 3 , .Xr queue 3 .Rs .%A "Bernhard Haeupler" .%A "Siddhartha Sen" .%A "Robert E. Tarjan" .%T "Rank-Balanced Trees" .%U "http://sidsen.azurewebsites.net/papers/rb-trees-talg.pdf" .%J "ACM Transactions on Algorithms" .%V "11" .%N "4" .%D "June 2015" .Re .Sh HISTORY The tree macros first appeared in .Fx 4.6 . .Sh AUTHORS The author of the tree macros is .An Niels Provos . diff --git a/sys/dev/iommu/iommu_gas.c b/sys/dev/iommu/iommu_gas.c index 68e22f16c69f..c04edb8451b4 100644 --- a/sys/dev/iommu/iommu_gas.c +++ b/sys/dev/iommu/iommu_gas.c @@ -1,989 +1,1013 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * * This software was developed by Konstantin Belousov <kib@FreeBSD.org> * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); -#define RB_AUGMENT(entry) iommu_gas_augment_entry(entry) +#define RB_AUGMENT_CHECK(entry) iommu_gas_augment_entry(entry) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Guest Address Space management. */ static uma_zone_t iommu_map_entry_zone; #ifdef INVARIANTS static int iommu_check_free; #endif static void intel_gas_init(void) { iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY", sizeof(struct iommu_map_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP); } SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL); struct iommu_map_entry * iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags) { struct iommu_map_entry *res; KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0, ("unsupported flags %x", flags)); res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) != 0 ? M_WAITOK : M_NOWAIT) | M_ZERO); if (res != NULL && domain != NULL) { res->domain = domain; atomic_add_int(&domain->entries_cnt, 1); } return (res); } void iommu_gas_free_entry(struct iommu_map_entry *entry) { struct iommu_domain *domain; domain = entry->domain; if (domain != NULL) atomic_subtract_int(&domain->entries_cnt, 1); uma_zfree(iommu_map_entry_zone, entry); } static int iommu_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b) { /* Last entry have zero size, so <= */ KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)", a, (uintmax_t)a->start, (uintmax_t)a->end)); KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)", b, (uintmax_t)b->start, (uintmax_t)b->end)); KASSERT(a->end <= b->start || b->end <= a->start || a->end == a->start || b->end == b->start, ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)", a, (uintmax_t)a->start, (uintmax_t)a->end, b, (uintmax_t)b->start, (uintmax_t)b->end)); if (a->end < b->end) return (-1); else if (b->end < a->end) return (1); return (0); } -static void +/* + * Update augmentation data based on data from children. + * Return true if and only if the update changes the augmentation data. 
+ */ +static bool iommu_gas_augment_entry(struct iommu_map_entry *entry) { struct iommu_map_entry *child; - iommu_gaddr_t free_down; + iommu_gaddr_t bound, delta, free_down; free_down = 0; + bound = entry->start; if ((child = RB_LEFT(entry, rb_entry)) != NULL) { - free_down = MAX(free_down, child->free_down); - free_down = MAX(free_down, entry->start - child->last); - entry->first = child->first; - } else - entry->first = entry->start; - + free_down = MAX(child->free_down, bound - child->last); + bound = child->first; + } + delta = bound - entry->first; + entry->first = bound; + bound = entry->end; if ((child = RB_RIGHT(entry, rb_entry)) != NULL) { free_down = MAX(free_down, child->free_down); - free_down = MAX(free_down, child->first - entry->end); - entry->last = child->last; - } else - entry->last = entry->end; + free_down = MAX(free_down, child->first - bound); + bound = child->last; + } + delta += entry->last - bound; + if (delta == 0) + delta = entry->free_down - free_down; + entry->last = bound; entry->free_down = free_down; + + /* + * Return true if either the value of (last - first) changed + * or free_down changed. + */ + return (delta != 0); } RB_GENERATE(iommu_gas_entries_tree, iommu_map_entry, rb_entry, iommu_gas_cmp_entries); #ifdef INVARIANTS static void iommu_gas_check_free(struct iommu_domain *domain) { struct iommu_map_entry *entry, *l, *r; iommu_gaddr_t v; RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) { KASSERT(domain == entry->domain, ("mismatched free domain %p entry %p entry->domain %p", domain, entry, entry->domain)); l = RB_LEFT(entry, rb_entry); r = RB_RIGHT(entry, rb_entry); v = 0; if (l != NULL) { v = MAX(v, l->free_down); v = MAX(v, entry->start - l->last); } if (r != NULL) { v = MAX(v, r->free_down); v = MAX(v, r->first - entry->end); } MPASS(entry->free_down == v); } } #endif static bool iommu_gas_rb_insert(struct iommu_domain *domain, struct iommu_map_entry *entry) { struct iommu_map_entry *found; found = RB_INSERT(iommu_gas_entries_tree, &domain->rb_root, entry); return (found == NULL); } static void iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry) { RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry); } struct iommu_domain * iommu_get_ctx_domain(struct iommu_ctx *ctx) { return (ctx->domain); } void iommu_gas_init_domain(struct iommu_domain *domain) { struct iommu_map_entry *begin, *end; begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); IOMMU_DOMAIN_LOCK(domain); KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain)); KASSERT(RB_EMPTY(&domain->rb_root), ("non-empty entries %p", domain)); - begin->start = 0; - begin->end = IOMMU_PAGE_SIZE; - begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED; - iommu_gas_rb_insert(domain, begin); - + /* + * The end entry must be inserted first because it has a zero-length gap + * between start and end. Initially, all augmentation data for a new + * entry is zero. Function iommu_gas_augment_entry will compute no + * change in the value of (start - end) and no change in the value of + * free_down, so it will return false to suggest that nothing changed in + * the entry. Thus, if the end entry were inserted second, no + * augmentation information would be propagated to the begin entry at + * the tree root. So the end entry is inserted first.
+ */ end->start = domain->end; end->end = domain->end; end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED; iommu_gas_rb_insert(domain, end); + begin->start = 0; + begin->end = IOMMU_PAGE_SIZE; + begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED; + iommu_gas_rb_insert(domain, begin); + domain->first_place = begin; domain->last_place = end; domain->flags |= IOMMU_DOMAIN_GAS_INITED; IOMMU_DOMAIN_UNLOCK(domain); } void iommu_gas_fini_domain(struct iommu_domain *domain) { struct iommu_map_entry *entry, *entry1; IOMMU_DOMAIN_ASSERT_LOCKED(domain); KASSERT(domain->entries_cnt == 2, ("domain still in use %p", domain)); entry = RB_MIN(iommu_gas_entries_tree, &domain->rb_root); KASSERT(entry->start == 0, ("start entry start %p", domain)); KASSERT(entry->end == IOMMU_PAGE_SIZE, ("start entry end %p", domain)); KASSERT(entry->flags == (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED), ("start entry flags %p", domain)); RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry); iommu_gas_free_entry(entry); entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root); KASSERT(entry->start == domain->end, ("end entry start %p", domain)); KASSERT(entry->end == domain->end, ("end entry end %p", domain)); KASSERT(entry->flags == (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED), ("end entry flags %p", domain)); RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry); iommu_gas_free_entry(entry); RB_FOREACH_SAFE(entry, iommu_gas_entries_tree, &domain->rb_root, entry1) { KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0, ("non-RMRR entry left %p", domain)); RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry); iommu_gas_free_entry(entry); } } struct iommu_gas_match_args { struct iommu_domain *domain; iommu_gaddr_t size; int offset; const struct bus_dma_tag_common *common; u_int gas_flags; struct iommu_map_entry *entry; }; /* * The interval [beg, end) is a free interval between two iommu_map_entries. * Addresses can be allocated only in the range [lbound, ubound). Try to * allocate space in the free interval, subject to the conditions expressed by * a, and return 'true' if and only if the allocation attempt succeeds. */ static bool iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg, iommu_gaddr_t end, iommu_gaddr_t lbound, iommu_gaddr_t ubound) { struct iommu_map_entry *entry; iommu_gaddr_t first, size, start; bool found __diagused; int offset; /* * The prev->end is always aligned on the page size, which * causes page alignment for the entry->start too. * * Create IOMMU_PAGE_SIZE gaps before, after new entry * to ensure that out-of-bounds accesses fault. */ beg = MAX(beg + IOMMU_PAGE_SIZE, lbound); start = roundup2(beg, a->common->alignment); if (start < beg) return (false); end = MIN(end - IOMMU_PAGE_SIZE, ubound); offset = a->offset; size = a->size; if (start + offset + size > end) return (false); /* Check for and try to skip past boundary crossing. */ if (!vm_addr_bound_ok(start + offset, size, a->common->boundary)) { /* * The start + offset to start + offset + size region crosses * the boundary. Check if there is enough space after the next * boundary after the beg. */ first = start; beg = roundup2(start + offset + 1, a->common->boundary); start = roundup2(beg, a->common->alignment); if (start + offset + size > end || !vm_addr_bound_ok(start + offset, size, a->common->boundary)) { /* * Not enough space to align at the requested boundary, * or boundary is smaller than the size, but allowed to * split. 
We already checked that start + size does not * overlap ubound. * * XXXKIB. It is possible that beg is exactly at the * start of the next entry, then we do not have gap. * Ignore for now. */ if ((a->gas_flags & IOMMU_MF_CANSPLIT) == 0) return (false); size = beg - first - offset; start = first; } } entry = a->entry; entry->start = start; entry->end = start + roundup2(size + offset, IOMMU_PAGE_SIZE); entry->flags = IOMMU_MAP_ENTRY_MAP; found = iommu_gas_rb_insert(a->domain, entry); KASSERT(found, ("found dup %p start %jx size %jx", a->domain, (uintmax_t)start, (uintmax_t)size)); return (true); } /* Find the next entry that might abut a big-enough range. */ static struct iommu_map_entry * iommu_gas_next(struct iommu_map_entry *curr, iommu_gaddr_t min_free) { struct iommu_map_entry *next; if ((next = RB_RIGHT(curr, rb_entry)) != NULL && next->free_down >= min_free) { /* Find next entry in right subtree. */ do curr = next; while ((next = RB_LEFT(curr, rb_entry)) != NULL && next->free_down >= min_free); } else { /* Find next entry in a left-parent ancestor. */ while ((next = RB_PARENT(curr, rb_entry)) != NULL && curr == RB_RIGHT(next, rb_entry)) curr = next; curr = next; } return (curr); } static int iommu_gas_find_space(struct iommu_gas_match_args *a) { struct iommu_domain *domain; struct iommu_map_entry *curr, *first; iommu_gaddr_t addr, min_free; IOMMU_DOMAIN_ASSERT_LOCKED(a->domain); KASSERT(a->entry->flags == 0, ("dirty entry %p %p", a->domain, a->entry)); /* * If the subtree doesn't have free space for the requested allocation * plus two guard pages, skip it. */ min_free = 2 * IOMMU_PAGE_SIZE + roundup2(a->size + a->offset, IOMMU_PAGE_SIZE); /* * Find the first entry in the lower region that could abut a big-enough * range. */ curr = RB_ROOT(&a->domain->rb_root); first = NULL; while (curr != NULL && curr->free_down >= min_free) { first = curr; curr = RB_LEFT(curr, rb_entry); } /* * Walk the big-enough ranges until one satisfies alignment * requirements, or violates lowaddr address requirement. */ addr = a->common->lowaddr + 1; for (curr = first; curr != NULL; curr = iommu_gas_next(curr, min_free)) { if ((first = RB_LEFT(curr, rb_entry)) != NULL && iommu_gas_match_one(a, first->last, curr->start, 0, addr)) return (0); if (curr->end >= addr) { /* All remaining ranges >= addr */ break; } if ((first = RB_RIGHT(curr, rb_entry)) != NULL && iommu_gas_match_one(a, curr->end, first->first, 0, addr)) return (0); } /* * To resume the search at the start of the upper region, first climb to * the nearest ancestor that spans highaddr. Then find the last entry * before highaddr that could abut a big-enough range. */ addr = a->common->highaddr; while (curr != NULL && curr->last < addr) curr = RB_PARENT(curr, rb_entry); first = NULL; while (curr != NULL && curr->free_down >= min_free) { if (addr < curr->end) curr = RB_LEFT(curr, rb_entry); else { first = curr; curr = RB_RIGHT(curr, rb_entry); } } /* * Walk the remaining big-enough ranges until one satisfies alignment * requirements. 
*/ domain = a->domain; for (curr = first; curr != NULL; curr = iommu_gas_next(curr, min_free)) { if ((first = RB_LEFT(curr, rb_entry)) != NULL && iommu_gas_match_one(a, first->last, curr->start, addr + 1, domain->end)) return (0); if ((first = RB_RIGHT(curr, rb_entry)) != NULL && iommu_gas_match_one(a, curr->end, first->first, addr + 1, domain->end)) return (0); } return (ENOMEM); } static int iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entry, u_int flags) { struct iommu_map_entry *next, *prev; bool found __diagused; IOMMU_DOMAIN_ASSERT_LOCKED(domain); if ((entry->start & IOMMU_PAGE_MASK) != 0 || (entry->end & IOMMU_PAGE_MASK) != 0) return (EINVAL); if (entry->start >= entry->end) return (EINVAL); if (entry->end >= domain->end) return (EINVAL); next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry); KASSERT(next != NULL, ("next must be non-null %p %jx", domain, (uintmax_t)entry->start)); prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next); /* prev could be NULL */ /* * Adapt to broken BIOSes which specify overlapping RMRR * entries. * * XXXKIB: this does not handle a case when prev or next * entries are completely covered by the current one, which * extends both ways. */ if (prev != NULL && prev->end > entry->start && (prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) { if ((flags & IOMMU_MF_RMRR) == 0 || (prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0) return (EBUSY); entry->start = prev->end; } if (next->start < entry->end && (next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) { if ((flags & IOMMU_MF_RMRR) == 0 || (next->flags & IOMMU_MAP_ENTRY_RMRR) == 0) return (EBUSY); entry->end = next->start; } if (entry->end == entry->start) return (0); if (prev != NULL && prev->end > entry->start) { /* This assumes that prev is the placeholder entry. */ iommu_gas_rb_remove(domain, prev); prev = NULL; } if (next->start < entry->end) { iommu_gas_rb_remove(domain, next); next = NULL; } found = iommu_gas_rb_insert(domain, entry); KASSERT(found, ("found RMRR dup %p start %jx end %jx", domain, (uintmax_t)entry->start, (uintmax_t)entry->end)); if ((flags & IOMMU_MF_RMRR) != 0) entry->flags = IOMMU_MAP_ENTRY_RMRR; #ifdef INVARIANTS struct iommu_map_entry *ip, *in; ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry); in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry); KASSERT(prev == NULL || ip == prev, ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)", entry, entry->start, entry->end, prev, prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end, ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end)); KASSERT(next == NULL || in == next, ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)", entry, entry->start, entry->end, next, next == NULL ? 0 : next->start, next == NULL ? 0 : next->end, in, in == NULL ? 0 : in->start, in == NULL ? 
0 : in->end)); #endif return (0); } void iommu_gas_free_space(struct iommu_map_entry *entry) { struct iommu_domain *domain; domain = entry->domain; KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR | IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP, ("permanent entry %p %p", domain, entry)); IOMMU_DOMAIN_LOCK(domain); iommu_gas_rb_remove(domain, entry); entry->flags &= ~IOMMU_MAP_ENTRY_MAP; #ifdef INVARIANTS if (iommu_check_free) iommu_gas_check_free(domain); #endif IOMMU_DOMAIN_UNLOCK(domain); } void iommu_gas_free_region(struct iommu_map_entry *entry) { struct iommu_domain *domain; domain = entry->domain; KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR | IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR, ("non-RMRR entry %p %p", domain, entry)); IOMMU_DOMAIN_LOCK(domain); if (entry != domain->first_place && entry != domain->last_place) iommu_gas_rb_remove(domain, entry); entry->flags &= ~IOMMU_MAP_ENTRY_RMRR; IOMMU_DOMAIN_UNLOCK(domain); } static struct iommu_map_entry * iommu_gas_remove_clip_left(struct iommu_domain *domain, iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry **r) { struct iommu_map_entry *entry, *res, fentry; IOMMU_DOMAIN_ASSERT_LOCKED(domain); MPASS(start <= end); MPASS(end <= domain->end); /* * Find an entry which contains the supplied guest's address * start, or the first entry after the start. Since we * asserted that start is below domain end, entry should * exist. Then clip it if needed. */ fentry.start = start + 1; fentry.end = start + 1; entry = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, &fentry); if (entry->start >= start || (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) return (entry); res = *r; *r = NULL; *res = *entry; res->start = entry->end = start; RB_UPDATE_AUGMENT(entry, rb_entry); iommu_gas_rb_insert(domain, res); return (res); } static bool iommu_gas_remove_clip_right(struct iommu_domain *domain, iommu_gaddr_t end, struct iommu_map_entry *entry, struct iommu_map_entry *r) { if (entry->start >= end || (entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) return (false); *r = *entry; r->end = entry->start = end; RB_UPDATE_AUGMENT(entry, rb_entry); iommu_gas_rb_insert(domain, r); return (true); } static void iommu_gas_remove_unmap(struct iommu_domain *domain, struct iommu_map_entry *entry, struct iommu_map_entries_tailq *gcp) { IOMMU_DOMAIN_ASSERT_LOCKED(domain); if ((entry->flags & (IOMMU_MAP_ENTRY_UNMAPPED | IOMMU_MAP_ENTRY_REMOVING)) != 0) return; MPASS((entry->flags & IOMMU_MAP_ENTRY_PLACE) == 0); entry->flags |= IOMMU_MAP_ENTRY_REMOVING; TAILQ_INSERT_TAIL(gcp, entry, dmamap_link); } /* * Remove specified range from the GAS of the domain. Note that the * removal is not guaranteed to occur upon the function return, it * might be finalized some time after, when hardware reports that * (queued) IOTLB invalidation was performed. 
*/ void iommu_gas_remove(struct iommu_domain *domain, iommu_gaddr_t start, iommu_gaddr_t size) { struct iommu_map_entry *entry, *nentry, *r1, *r2; struct iommu_map_entries_tailq gc; iommu_gaddr_t end; end = start + size; r1 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); r2 = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK); TAILQ_INIT(&gc); IOMMU_DOMAIN_LOCK(domain); nentry = iommu_gas_remove_clip_left(domain, start, end, &r1); RB_FOREACH_FROM(entry, iommu_gas_entries_tree, nentry) { if (entry->start >= end) break; KASSERT(start <= entry->start, ("iommu_gas_remove entry (%#jx, %#jx) start %#jx", entry->start, entry->end, start)); if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) continue; iommu_gas_remove_unmap(domain, entry, &gc); } if (iommu_gas_remove_clip_right(domain, end, entry, r2)) { iommu_gas_remove_unmap(domain, r2, &gc); r2 = NULL; } #ifdef INVARIANTS RB_FOREACH(entry, iommu_gas_entries_tree, &domain->rb_root) { if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0) continue; KASSERT(entry->end <= start || entry->start >= end, ("iommu_gas_remove leftover entry (%#jx, %#jx) range " "(%#jx, %#jx)", entry->start, entry->end, start, end)); } #endif IOMMU_DOMAIN_UNLOCK(domain); if (r1 != NULL) iommu_gas_free_entry(r1); if (r2 != NULL) iommu_gas_free_entry(r2); iommu_domain_unload(domain, &gc, true); } int iommu_gas_map(struct iommu_domain *domain, const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset, u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res) { struct iommu_gas_match_args a; struct iommu_map_entry *entry; int error; KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0, ("invalid flags 0x%x", flags)); a.domain = domain; a.size = size; a.offset = offset; a.common = common; a.gas_flags = flags; entry = iommu_gas_alloc_entry(domain, (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0); if (entry == NULL) return (ENOMEM); a.entry = entry; IOMMU_DOMAIN_LOCK(domain); error = iommu_gas_find_space(&a); if (error == ENOMEM) { IOMMU_DOMAIN_UNLOCK(domain); iommu_gas_free_entry(entry); return (error); } #ifdef INVARIANTS if (iommu_check_free) iommu_gas_check_free(domain); #endif KASSERT(error == 0, ("unexpected error %d from iommu_gas_find_entry", error)); KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx", (uintmax_t)entry->end, (uintmax_t)domain->end)); entry->flags |= eflags; IOMMU_DOMAIN_UNLOCK(domain); error = domain->ops->map(domain, entry->start, entry->end - entry->start, ma, eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? 
int
iommu_gas_map(struct iommu_domain *domain,
    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
	struct iommu_gas_match_args a;
	struct iommu_map_entry *entry;
	int error;

	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
	    ("invalid flags 0x%x", flags));

	a.domain = domain;
	a.size = size;
	a.offset = offset;
	a.common = common;
	a.gas_flags = flags;
	entry = iommu_gas_alloc_entry(domain,
	    (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
	if (entry == NULL)
		return (ENOMEM);
	a.entry = entry;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_find_space(&a);
	if (error == ENOMEM) {
		IOMMU_DOMAIN_UNLOCK(domain);
		iommu_gas_free_entry(entry);
		return (error);
	}
#ifdef INVARIANTS
	if (iommu_check_free)
		iommu_gas_check_free(domain);
#endif
	KASSERT(error == 0,
	    ("unexpected error %d from iommu_gas_find_space", error));
	KASSERT(entry->end < domain->end, ("allocated GPA %jx, max GPA %jx",
	    (uintmax_t)entry->end, (uintmax_t)domain->end));
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma, eflags,
	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, true,
		    (flags & IOMMU_MF_CANWAIT) != 0);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	*res = entry;
	return (0);
}

int
iommu_gas_map_region(struct iommu_domain *domain,
    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma)
{
	iommu_gaddr_t start;
	int error;

	KASSERT(entry->domain == domain,
	    ("mismatched domain %p entry %p entry->domain %p", domain,
	    entry, entry->domain));
	KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
	    entry, entry->flags));
	KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
	    ("invalid flags 0x%x", flags));

	start = entry->start;
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_alloc_region(domain, entry, flags);
	if (error != 0) {
		IOMMU_DOMAIN_UNLOCK(domain);
		return (error);
	}
	entry->flags |= eflags;
	IOMMU_DOMAIN_UNLOCK(domain);
	if (entry->end == entry->start)
		return (0);

	error = domain->ops->map(domain, entry->start,
	    entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
	    eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
	if (error == ENOMEM) {
		iommu_domain_unload_entry(entry, false,
		    (flags & IOMMU_MF_CANWAIT) != 0);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from domain_map_buf", error));

	return (0);
}

static int
iommu_gas_reserve_region_locked(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end, struct iommu_map_entry *entry)
{
	int error;

	IOMMU_DOMAIN_ASSERT_LOCKED(domain);

	entry->start = start;
	entry->end = end;
	error = iommu_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
	if (error == 0)
		entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
	return (error);
}

int
iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
    iommu_gaddr_t end, struct iommu_map_entry **entry0)
{
	struct iommu_map_entry *entry;
	int error;

	entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
	IOMMU_DOMAIN_LOCK(domain);
	error = iommu_gas_reserve_region_locked(domain, start, end, entry);
	IOMMU_DOMAIN_UNLOCK(domain);
	if (error != 0)
		iommu_gas_free_entry(entry);
	else if (entry0 != NULL)
		*entry0 = entry;
	return (error);
}

/*
 * As in iommu_gas_reserve_region(), reserve [start, end), but allow
 * for existing entries.
 */
int
iommu_gas_reserve_region_extend(struct iommu_domain *domain,
    iommu_gaddr_t start, iommu_gaddr_t end)
{
	struct iommu_map_entry *entry, *next, *prev, key = {};
	iommu_gaddr_t entry_start, entry_end;
	int error;

	error = 0;
	entry = NULL;
	end = ummin(end, domain->end);
	while (start < end) {
		/* Preallocate an entry. */
		if (entry == NULL)
			entry = iommu_gas_alloc_entry(domain,
			    IOMMU_PGF_WAITOK);
		/* Calculate the free region from here to the next entry. */
		key.start = key.end = start;
		IOMMU_DOMAIN_LOCK(domain);
		next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root,
		    &key);
		KASSERT(next != NULL, ("domain %p with end %#jx has no entry "
		    "after %#jx", domain, (uintmax_t)domain->end,
		    (uintmax_t)start));
		entry_end = ummin(end, next->start);
		prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
		if (prev != NULL)
			entry_start = ummax(start, prev->end);
		else
			entry_start = start;
		start = next->end;
		/* Reserve the region if non-empty. */
		if (entry_start != entry_end) {
			error = iommu_gas_reserve_region_locked(domain,
			    entry_start, entry_end, entry);
			if (error != 0) {
				IOMMU_DOMAIN_UNLOCK(domain);
				break;
			}
			entry = NULL;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}
	/* Release a preallocated entry if it was not used. */
	if (entry != NULL)
		iommu_gas_free_entry(entry);
	return (error);
}
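/*
 * The hole computation at the heart of iommu_gas_reserve_region_extend(),
 * factored out: given the nearest entry at or after 'start' ('next',
 * assumed non-NULL, as the KASSERT above guarantees) and its
 * predecessor ('prev'), the reservable hole clipped to [start, end) is
 * [max(start, prev->end), min(end, next->start)).  A hypothetical
 * stand-alone helper over the same illustrative range structure as in
 * the earlier sketch:
 */
static void
gas_free_gap(unsigned long start, unsigned long end,
    const struct gas_range *prev, const struct gas_range *next,
    unsigned long *gap_start, unsigned long *gap_end)
{
	*gap_start = (prev != NULL && prev->end > start) ? prev->end : start;
	*gap_end = (next->start < end) ? next->start : end;
	/*
	 * The hole is empty whenever *gap_start >= *gap_end; the loop
	 * above skips the reservation in that case and advances to
	 * next->end.
	 */
}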
void
iommu_unmap_msi(struct iommu_ctx *ctx)
{
	struct iommu_map_entry *entry;
	struct iommu_domain *domain;

	domain = ctx->domain;
	entry = domain->msi_entry;
	if (entry == NULL)
		return;

	domain->ops->unmap(domain, entry->start, entry->end - entry->start,
	    IOMMU_PGF_WAITOK);

	iommu_gas_free_space(entry);

	iommu_gas_free_entry(entry);

	domain->msi_entry = NULL;
	domain->msi_base = 0;
	domain->msi_phys = 0;
}

int
iommu_map_msi(struct iommu_ctx *ctx, iommu_gaddr_t size, int offset,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	struct iommu_domain *domain;
	struct iommu_map_entry *entry;
	int error;

	error = 0;
	domain = ctx->domain;

	/* Check if there is already an MSI page allocated. */
	IOMMU_DOMAIN_LOCK(domain);
	entry = domain->msi_entry;
	IOMMU_DOMAIN_UNLOCK(domain);

	if (entry == NULL) {
		error = iommu_gas_map(domain, &ctx->tag->common, size, offset,
		    eflags, flags, ma, &entry);
		IOMMU_DOMAIN_LOCK(domain);
		if (error == 0) {
			if (domain->msi_entry == NULL) {
				MPASS(domain->msi_base == 0);
				MPASS(domain->msi_phys == 0);

				domain->msi_entry = entry;
				domain->msi_base = entry->start;
				domain->msi_phys = VM_PAGE_TO_PHYS(ma[0]);
			} else {
				/*
				 * We lost the race and already have an
				 * MSI page allocated.  Free the unneeded
				 * entry.
				 */
				iommu_gas_free_entry(entry);
			}
		} else if (domain->msi_entry != NULL) {
			/*
			 * Our allocation failed, but another thread's
			 * allocation succeeded.  Return success as there
			 * is a valid MSI page.
			 */
			error = 0;
		}
		IOMMU_DOMAIN_UNLOCK(domain);
	}

	return (error);
}

void
iommu_translate_msi(struct iommu_domain *domain, uint64_t *addr)
{

	*addr = (*addr - domain->msi_phys) + domain->msi_base;

	KASSERT(*addr >= domain->msi_entry->start,
	    ("%s: Address is below the MSI entry start address (%jx < %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->start));

	KASSERT(*addr + sizeof(*addr) <= domain->msi_entry->end,
	    ("%s: Address is above the MSI entry end address (%jx > %jx)",
	    __func__, (uintmax_t)*addr, (uintmax_t)domain->msi_entry->end));
}

SYSCTL_NODE(_hw, OID_AUTO, iommu, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "");

#ifdef INVARIANTS
SYSCTL_INT(_hw_iommu, OID_AUTO, check_free, CTLFLAG_RWTUN,
    &iommu_check_free, 0,
    "Check the GPA RBtree for free_down and free_after validity");
#endif
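/*
 * The address rewriting performed by iommu_translate_msi() in
 * isolation: a CPU-physical MSI doorbell address is rebased into the
 * window that iommu_map_msi() reserved in the domain's guest address
 * space.  Names are illustrative; the bounds checks correspond to the
 * KASSERTs above.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
msi_rebase(uint64_t addr, uint64_t msi_phys, uint64_t msi_base,
    uint64_t msi_size)
{
	uint64_t out;

	out = (addr - msi_phys) + msi_base;
	assert(out >= msi_base);
	assert(out + sizeof(out) <= msi_base + msi_size);
	return (out);
}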
diff --git a/sys/sys/tree.h b/sys/sys/tree.h
index d2c9cfcddd90..c03edee0f248 100644
--- a/sys/sys/tree.h
+++ b/sys/sys/tree.h
@@ -1,936 +1,970 @@
/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
/* $FreeBSD$ */

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_SYS_TREE_H_
#define	_SYS_TREE_H_

#include <sys/cdefs.h>

/*
 * This file defines data structures for different types of trees:
 * splay trees and rank-balanced trees.
 *
 * A splay tree is a self-organizing data structure.  Every operation
 * on the tree causes a splay to happen.  The splay moves the requested
 * node to the root of the tree and partly rebalances it.
 *
 * This has the benefit that request locality causes faster lookups as
 * the requested nodes move to the top of the tree.  On the other hand,
 * every lookup causes memory writes.
 *
 * The Balance Theorem bounds the total access time for m operations
 * and n inserts on an initially empty tree as O((m + n)lg n).  The
 * amortized cost for a sequence of m accesses to a splay tree is O(lg n).
 *
 * A rank-balanced tree is a binary search tree with an integer
 * rank-difference as an attribute of each pointer from parent to child.
 * The sum of the rank-differences on any path from a node down to null is
 * the same, and defines the rank of that node.  The rank of the null node
 * is -1.
 *
 * Different additional conditions define different sorts of balanced trees,
 * including "red-black" and "AVL" trees.  The set of conditions applied here
 * are the "weak-AVL" conditions of Haeupler, Sen and Tarjan presented in
 * "Rank Balanced Trees", ACM Transactions on Algorithms, Volume 11, Issue 4,
 * June 2015, Article No. 30, pp. 1–26, https://doi.org/10.1145/2689412
 * (the HST paper):
 * - every rank-difference is 1 or 2.
 * - the rank of any leaf is 1.
 *
 * For historical reasons, rank differences that are even are associated
 * with the color red (Rank-Even-Difference), and the child that a red edge
 * points to is called a red child.
 *
 * Every operation on a rank-balanced tree is bounded as O(lg n).
 * The maximum height of a rank-balanced tree is 2lg (n+1).
*/ #define SPLAY_HEAD(name, type) \ struct name { \ struct type *sph_root; /* root of the tree */ \ } #define SPLAY_INITIALIZER(root) \ { NULL } #define SPLAY_INIT(root) do { \ (root)->sph_root = NULL; \ } while (/*CONSTCOND*/ 0) #define SPLAY_ENTRY(type) \ struct { \ struct type *spe_left; /* left element */ \ struct type *spe_right; /* right element */ \ } #define SPLAY_LEFT(elm, field) (elm)->field.spe_left #define SPLAY_RIGHT(elm, field) (elm)->field.spe_right #define SPLAY_ROOT(head) (head)->sph_root #define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) /* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ #define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ (head)->sph_root = tmp; \ } while (/*CONSTCOND*/ 0) #define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ SPLAY_LEFT(tmp, field) = (head)->sph_root; \ (head)->sph_root = tmp; \ } while (/*CONSTCOND*/ 0) #define SPLAY_LINKLEFT(head, tmp, field) do { \ SPLAY_LEFT(tmp, field) = (head)->sph_root; \ tmp = (head)->sph_root; \ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ } while (/*CONSTCOND*/ 0) #define SPLAY_LINKRIGHT(head, tmp, field) do { \ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ tmp = (head)->sph_root; \ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ } while (/*CONSTCOND*/ 0) #define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ } while (/*CONSTCOND*/ 0) /* Generates prototypes and inline functions */ #define SPLAY_PROTOTYPE(name, type, field, cmp) \ void name##_SPLAY(struct name *, struct type *); \ void name##_SPLAY_MINMAX(struct name *, int); \ struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ \ /* Finds the node with the same key as elm */ \ static __unused __inline struct type * \ name##_SPLAY_FIND(struct name *head, struct type *elm) \ { \ if (SPLAY_EMPTY(head)) \ return(NULL); \ name##_SPLAY(head, elm); \ if ((cmp)(elm, (head)->sph_root) == 0) \ return (head->sph_root); \ return (NULL); \ } \ \ static __unused __inline struct type * \ name##_SPLAY_NEXT(struct name *head, struct type *elm) \ { \ name##_SPLAY(head, elm); \ if (SPLAY_RIGHT(elm, field) != NULL) { \ elm = SPLAY_RIGHT(elm, field); \ while (SPLAY_LEFT(elm, field) != NULL) { \ elm = SPLAY_LEFT(elm, field); \ } \ } else \ elm = NULL; \ return (elm); \ } \ \ static __unused __inline struct type * \ name##_SPLAY_MIN_MAX(struct name *head, int val) \ { \ name##_SPLAY_MINMAX(head, val); \ return (SPLAY_ROOT(head)); \ } /* Main splay operation. 
* Moves node close to the key of elm to top */ #define SPLAY_GENERATE(name, type, field, cmp) \ struct type * \ name##_SPLAY_INSERT(struct name *head, struct type *elm) \ { \ if (SPLAY_EMPTY(head)) { \ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ } else { \ __typeof(cmp(NULL, NULL)) __comp; \ name##_SPLAY(head, elm); \ __comp = (cmp)(elm, (head)->sph_root); \ if (__comp < 0) { \ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\ SPLAY_RIGHT(elm, field) = (head)->sph_root; \ SPLAY_LEFT((head)->sph_root, field) = NULL; \ } else if (__comp > 0) { \ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\ SPLAY_LEFT(elm, field) = (head)->sph_root; \ SPLAY_RIGHT((head)->sph_root, field) = NULL; \ } else \ return ((head)->sph_root); \ } \ (head)->sph_root = (elm); \ return (NULL); \ } \ \ struct type * \ name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ { \ struct type *__tmp; \ if (SPLAY_EMPTY(head)) \ return (NULL); \ name##_SPLAY(head, elm); \ if ((cmp)(elm, (head)->sph_root) == 0) { \ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\ } else { \ __tmp = SPLAY_RIGHT((head)->sph_root, field); \ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\ name##_SPLAY(head, elm); \ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ } \ return (elm); \ } \ return (NULL); \ } \ \ void \ name##_SPLAY(struct name *head, struct type *elm) \ { \ struct type __node, *__left, *__right, *__tmp; \ __typeof(cmp(NULL, NULL)) __comp; \ \ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ __left = __right = &__node; \ \ while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \ if (__comp < 0) { \ __tmp = SPLAY_LEFT((head)->sph_root, field); \ if (__tmp == NULL) \ break; \ if ((cmp)(elm, __tmp) < 0){ \ SPLAY_ROTATE_RIGHT(head, __tmp, field); \ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ break; \ } \ SPLAY_LINKLEFT(head, __right, field); \ } else if (__comp > 0) { \ __tmp = SPLAY_RIGHT((head)->sph_root, field); \ if (__tmp == NULL) \ break; \ if ((cmp)(elm, __tmp) > 0){ \ SPLAY_ROTATE_LEFT(head, __tmp, field); \ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ break; \ } \ SPLAY_LINKRIGHT(head, __left, field); \ } \ } \ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ } \ \ /* Splay with either the minimum or the maximum element \ * Used to find minimum or maximum element in tree. 
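/*
 * A minimal consumer of the splay-tree interface generated above,
 * under the assumption of an illustrative element type, comparator
 * and tree name (none of these are part of this header):
 */
struct snode {
	SPLAY_ENTRY(snode) link;
	int key;
};

static int
snode_cmp(struct snode *a, struct snode *b)
{
	return (a->key < b->key ? -1 : a->key > b->key);
}

SPLAY_HEAD(snode_tree, snode);
SPLAY_PROTOTYPE(snode_tree, snode, link, snode_cmp)
SPLAY_GENERATE(snode_tree, snode, link, snode_cmp)

/*
 * Note that lookups splay the found node to the root, so even a find
 * mutates the tree; the head is therefore always passed by pointer.
 */
static struct snode *
snode_lookup(struct snode_tree *head, int key)
{
	struct snode key_node;

	key_node.key = key;
	return (SPLAY_FIND(snode_tree, head, &key_node));
}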
\ */ \ void name##_SPLAY_MINMAX(struct name *head, int __comp) \ { \ struct type __node, *__left, *__right, *__tmp; \ \ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ __left = __right = &__node; \ \ while (1) { \ if (__comp < 0) { \ __tmp = SPLAY_LEFT((head)->sph_root, field); \ if (__tmp == NULL) \ break; \ if (__comp < 0){ \ SPLAY_ROTATE_RIGHT(head, __tmp, field); \ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ break; \ } \ SPLAY_LINKLEFT(head, __right, field); \ } else if (__comp > 0) { \ __tmp = SPLAY_RIGHT((head)->sph_root, field); \ if (__tmp == NULL) \ break; \ if (__comp > 0) { \ SPLAY_ROTATE_LEFT(head, __tmp, field); \ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ break; \ } \ SPLAY_LINKRIGHT(head, __left, field); \ } \ } \ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ } #define SPLAY_NEGINF -1 #define SPLAY_INF 1 #define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) #define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) #define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) #define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) #define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) #define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \ : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) #define SPLAY_FOREACH(x, name, head) \ for ((x) = SPLAY_MIN(name, head); \ (x) != NULL; \ (x) = SPLAY_NEXT(name, head, x)) /* Macros that define a rank-balanced tree */ #define RB_HEAD(name, type) \ struct name { \ struct type *rbh_root; /* root of the tree */ \ } #define RB_INITIALIZER(root) \ { NULL } #define RB_INIT(root) do { \ (root)->rbh_root = NULL; \ } while (/*CONSTCOND*/ 0) #define RB_ENTRY(type) \ struct { \ struct type *rbe_link[3]; \ } /* * With the expectation that any object of struct type has an * address that is a multiple of 4, and that therefore the * 2 least significant bits of a pointer to struct type are * always zero, this implementation sets those bits to indicate * that the left or right child of the tree node is "red". */ #define _RB_LINK(elm, dir, field) (elm)->field.rbe_link[dir] #define _RB_UP(elm, field) _RB_LINK(elm, 0, field) #define _RB_L ((__uintptr_t)1) #define _RB_R ((__uintptr_t)2) #define _RB_LR ((__uintptr_t)3) #define _RB_BITS(elm) (*(__uintptr_t *)&elm) #define _RB_BITSUP(elm, field) _RB_BITS(_RB_UP(elm, field)) #define _RB_PTR(elm) (__typeof(elm)) \ ((__uintptr_t)elm & ~_RB_LR) #define RB_PARENT(elm, field) _RB_PTR(_RB_UP(elm, field)) #define RB_LEFT(elm, field) _RB_LINK(elm, _RB_L, field) #define RB_RIGHT(elm, field) _RB_LINK(elm, _RB_R, field) #define RB_ROOT(head) (head)->rbh_root #define RB_EMPTY(head) (RB_ROOT(head) == NULL) #define RB_SET_PARENT(dst, src, field) do { \ _RB_BITSUP(dst, field) = (__uintptr_t)src | \ (_RB_BITSUP(dst, field) & _RB_LR); \ } while (/*CONSTCOND*/ 0) #define RB_SET(elm, parent, field) do { \ _RB_UP(elm, field) = parent; \ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ } while (/*CONSTCOND*/ 0) /* - * Something to be invoked in a loop at the root of every modified subtree, - * from the bottom up to the root, to update augmented node data. + * Either RB_AUGMENT or RB_AUGMENT_CHECK is invoked in a loop at the root of + * every modified subtree, from the bottom up to the root, to update augmented + * node data. RB_AUGMENT_CHECK returns true only when the update changes the + * node data, so that updating can be stopped short of the root when it returns + * false. 
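/*
 * How a consumer opts in to the augmentation contract described
 * above: RB_AUGMENT (or RB_AUGMENT_CHECK) must be visible before this
 * header is included, so that the generated functions pick it up.  An
 * illustrative subtree-size augmentation (anode and anode_augment are
 * not part of this header):
 *
 *	struct anode {
 *		RB_ENTRY(anode) link;
 *		int key;
 *		unsigned size;	// 1 + sizes of both subtrees
 *	};
 *
 *	#define RB_AUGMENT(x)	anode_augment(x)
 *	static void
 *	anode_augment(struct anode *x)
 *	{
 *		x->size = 1 +
 *		    (RB_LEFT(x, link) != NULL ? RB_LEFT(x, link)->size : 0) +
 *		    (RB_RIGHT(x, link) != NULL ? RB_RIGHT(x, link)->size : 0);
 *	}
 *
 * With the change in this diff, a consumer can instead define
 * RB_AUGMENT_CHECK to return true only when the recomputed value
 * changed, letting RB_UPDATE_AUGMENT stop walking toward the root
 * early.
 */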
*/ +#ifndef RB_AUGMENT_CHECK #ifndef RB_AUGMENT -#define RB_AUGMENT(x) break +#define RB_AUGMENT_CHECK(x) false +#else +#define RB_AUGMENT_CHECK(x) (RB_AUGMENT(x), true) +#endif #endif #define RB_UPDATE_AUGMENT(elm, field) do { \ __typeof(elm) rb_update_tmp = (elm); \ - do { \ - RB_AUGMENT(rb_update_tmp); \ - } while ((rb_update_tmp = RB_PARENT(rb_update_tmp, field)) != NULL); \ + while (RB_AUGMENT_CHECK(rb_update_tmp) && \ + (rb_update_tmp = RB_PARENT(rb_update_tmp, field)) != NULL) \ + ; \ } while (0) #define RB_SWAP_CHILD(head, par, out, in, field) do { \ if (par == NULL) \ RB_ROOT(head) = (in); \ else if ((out) == RB_LEFT(par, field)) \ RB_LEFT(par, field) = (in); \ else \ RB_RIGHT(par, field) = (in); \ } while (/*CONSTCOND*/ 0) /* * RB_ROTATE macro partially restructures the tree to improve balance. In the * case when dir is _RB_L, tmp is a right child of elm. After rotation, elm * is a left child of tmp, and the subtree that represented the items between * them, which formerly hung to the left of tmp now hangs to the right of elm. * The parent-child relationship between elm and its former parent is not * changed; where this macro once updated those fields, that is now left to the * caller of RB_ROTATE to clean up, so that a pair of rotations does not twice * update the same pair of pointer fields with distinct values. */ #define RB_ROTATE(elm, tmp, dir, field) do { \ if ((_RB_LINK(elm, dir ^ _RB_LR, field) = \ _RB_LINK(tmp, dir, field)) != NULL) \ RB_SET_PARENT(_RB_LINK(tmp, dir, field), elm, field); \ _RB_LINK(tmp, dir, field) = (elm); \ RB_SET_PARENT(elm, tmp, field); \ } while (/*CONSTCOND*/ 0) /* Generates prototypes and inline functions */ #define RB_PROTOTYPE(name, type, field, cmp) \ RB_PROTOTYPE_INTERNAL(name, type, field, cmp,) #define RB_PROTOTYPE_STATIC(name, type, field, cmp) \ RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static) #define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ RB_PROTOTYPE_RANK(name, type, attr) \ RB_PROTOTYPE_INSERT_COLOR(name, type, attr); \ RB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \ RB_PROTOTYPE_INSERT(name, type, attr); \ RB_PROTOTYPE_REMOVE(name, type, attr); \ RB_PROTOTYPE_FIND(name, type, attr); \ RB_PROTOTYPE_NFIND(name, type, attr); \ RB_PROTOTYPE_NEXT(name, type, attr); \ RB_PROTOTYPE_PREV(name, type, attr); \ RB_PROTOTYPE_MINMAX(name, type, attr); \ RB_PROTOTYPE_REINSERT(name, type, attr); #ifdef _RB_DIAGNOSTIC #define RB_PROTOTYPE_RANK(name, type, attr) \ attr int name##_RB_RANK(struct type *); #else #define RB_PROTOTYPE_RANK(name, type, attr) #endif #define RB_PROTOTYPE_INSERT_COLOR(name, type, attr) \ - attr void name##_RB_INSERT_COLOR(struct name *, \ + attr struct type *name##_RB_INSERT_COLOR(struct name *, \ struct type *, struct type *) #define RB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \ - attr void name##_RB_REMOVE_COLOR(struct name *, \ + attr struct type *name##_RB_REMOVE_COLOR(struct name *, \ struct type *, struct type *) #define RB_PROTOTYPE_REMOVE(name, type, attr) \ attr struct type *name##_RB_REMOVE(struct name *, struct type *) #define RB_PROTOTYPE_INSERT(name, type, attr) \ attr struct type *name##_RB_INSERT(struct name *, struct type *) #define RB_PROTOTYPE_FIND(name, type, attr) \ attr struct type *name##_RB_FIND(struct name *, struct type *) #define RB_PROTOTYPE_NFIND(name, type, attr) \ attr struct type *name##_RB_NFIND(struct name *, struct type *) #define RB_PROTOTYPE_NEXT(name, type, attr) \ attr struct type *name##_RB_NEXT(struct type *) #define RB_PROTOTYPE_PREV(name, type, attr) \ attr 
struct type *name##_RB_PREV(struct type *) #define RB_PROTOTYPE_MINMAX(name, type, attr) \ attr struct type *name##_RB_MINMAX(struct name *, int) #define RB_PROTOTYPE_REINSERT(name, type, attr) \ attr struct type *name##_RB_REINSERT(struct name *, struct type *) /* Main rb operation. * Moves node close to the key of elm to top */ #define RB_GENERATE(name, type, field, cmp) \ RB_GENERATE_INTERNAL(name, type, field, cmp,) #define RB_GENERATE_STATIC(name, type, field, cmp) \ RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static) #define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \ RB_GENERATE_RANK(name, type, field, attr) \ RB_GENERATE_INSERT_COLOR(name, type, field, attr) \ RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \ RB_GENERATE_INSERT(name, type, field, cmp, attr) \ RB_GENERATE_REMOVE(name, type, field, attr) \ RB_GENERATE_FIND(name, type, field, cmp, attr) \ RB_GENERATE_NFIND(name, type, field, cmp, attr) \ RB_GENERATE_NEXT(name, type, field, attr) \ RB_GENERATE_PREV(name, type, field, attr) \ RB_GENERATE_MINMAX(name, type, field, attr) \ RB_GENERATE_REINSERT(name, type, field, cmp, attr) #ifdef _RB_DIAGNOSTIC +#ifndef RB_AUGMENT +#define _RB_AUGMENT_VERIFY(x) RB_AUGMENT_CHECK(x) +#else +#define _RB_AUGMENT_VERIFY(x) false +#endif #define RB_GENERATE_RANK(name, type, field, attr) \ +/* \ + * Return the rank of the subtree rooted at elm, or -1 if the subtree \ + * is not rank-balanced, or has inconsistent augmentation data. + */ \ attr int \ name##_RB_RANK(struct type *elm) \ { \ struct type *left, *right, *up; \ int left_rank, right_rank; \ \ if (elm == NULL) \ return (0); \ up = _RB_UP(elm, field); \ left = RB_LEFT(elm, field); \ left_rank = ((_RB_BITS(up) & _RB_L) ? 2 : 1) + \ name##_RB_RANK(left); \ right = RB_RIGHT(elm, field); \ right_rank = ((_RB_BITS(up) & _RB_R) ? 2 : 1) + \ name##_RB_RANK(right); \ if (left_rank != right_rank || \ - (left_rank == 2 && left == NULL && right == NULL)) \ + (left_rank == 2 && left == NULL && right == NULL) || \ + _RB_AUGMENT_VERIFY(elm)) \ return (-1); \ return (left_rank); \ } #else #define RB_GENERATE_RANK(name, type, field, attr) #endif #define RB_GENERATE_INSERT_COLOR(name, type, field, attr) \ -attr void \ +attr struct type * \ name##_RB_INSERT_COLOR(struct name *head, \ struct type *parent, struct type *elm) \ { \ /* \ * Initially, elm is a leaf. Either its parent was previously \ * a leaf, with two black null children, or an interior node \ * with a black non-null child and a red null child. The \ * balance criterion "the rank of any leaf is 1" precludes the \ * possibility of two red null children for the initial parent. \ * So the first loop iteration cannot lead to accessing an \ * uninitialized 'child', and a later iteration can only happen \ * when a value has been assigned to 'child' in the previous \ * one. \ */ \ struct type *child, *child_up, *gpar; \ __uintptr_t elmdir, sibdir; \ \ do { \ /* the rank of the tree rooted at elm grew */ \ gpar = _RB_UP(parent, field); \ elmdir = RB_RIGHT(parent, field) == elm ? 
_RB_R : _RB_L; \ if (_RB_BITS(gpar) & elmdir) { \ /* shorten the parent-elm edge to rebalance */ \ _RB_BITSUP(parent, field) ^= elmdir; \ - return; \ + return (NULL); \ } \ sibdir = elmdir ^ _RB_LR; \ /* the other edge must change length */ \ _RB_BITSUP(parent, field) ^= sibdir; \ if ((_RB_BITS(gpar) & _RB_LR) == 0) { \ /* both edges now short, retry from parent */ \ child = elm; \ elm = parent; \ continue; \ } \ _RB_UP(parent, field) = gpar = _RB_PTR(gpar); \ if (_RB_BITSUP(elm, field) & elmdir) { \ /* \ * Exactly one of the edges descending from elm \ * is long. The long one is in the same \ * direction as the edge from parent to elm, \ * so change that by rotation. The edge from \ * parent to z was shortened above. Shorten \ * the long edge down from elm, and adjust \ * other edge lengths based on the downward \ * edges from 'child'. \ * \ * par par \ * / \ / \ \ * elm z / z \ * / \ child \ * / child / \ \ * / / \ elm \ \ * w / \ / \ y \ * x y w \ \ * x \ */ \ RB_ROTATE(elm, child, elmdir, field); \ child_up = _RB_UP(child, field); \ if (_RB_BITS(child_up) & sibdir) \ _RB_BITSUP(parent, field) ^= elmdir; \ if (_RB_BITS(child_up) & elmdir) \ _RB_BITSUP(elm, field) ^= _RB_LR; \ else \ _RB_BITSUP(elm, field) ^= elmdir; \ /* if child is a leaf, don't augment elm, \ * since it is restored to be a leaf again. */ \ if ((_RB_BITS(child_up) & _RB_LR) == 0) \ elm = child; \ } else \ child = elm; \ \ /* \ * The long edge descending from 'child' points back \ * in the direction of 'parent'. Rotate to make \ * 'parent' a child of 'child', then make both edges \ * of 'child' short to rebalance. \ * \ * par child \ * / \ / \ \ * / z x par \ * child / \ \ * / \ / z \ * x \ y \ * y \ */ \ RB_ROTATE(parent, child, sibdir, field); \ _RB_UP(child, field) = gpar; \ RB_SWAP_CHILD(head, gpar, parent, child, field); \ if (elm != child) \ - RB_AUGMENT(elm); \ - RB_AUGMENT(parent); \ - break; \ + RB_AUGMENT_CHECK(elm); \ + RB_AUGMENT_CHECK(parent); \ + return (child); \ } while ((parent = gpar) != NULL); \ + return (NULL); \ } #ifndef RB_STRICT_HST /* * In REMOVE_COLOR, the HST paper, in figure 3, in the single-rotate case, has * 'parent' with one higher rank, and then reduces its rank if 'parent' has * become a leaf. This implementation always has the parent in its new position * with lower rank, to avoid the leaf check. Define RB_STRICT_HST to 1 to get * the behavior that HST describes. */ #define RB_STRICT_HST 0 #endif #define RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \ -attr void \ +attr struct type * \ name##_RB_REMOVE_COLOR(struct name *head, \ struct type *parent, struct type *elm) \ { \ struct type *gpar, *sib, *up; \ __uintptr_t elmdir, sibdir; \ \ if (RB_RIGHT(parent, field) == elm && \ RB_LEFT(parent, field) == elm) { \ /* Deleting a leaf that is an only-child creates a \ * rank-2 leaf. Demote that leaf. */ \ _RB_UP(parent, field) = _RB_PTR(_RB_UP(parent, field)); \ elm = parent; \ if ((parent = _RB_UP(elm, field)) == NULL) \ - return; \ + return (NULL); \ } \ do { \ /* the rank of the tree rooted at elm shrank */ \ gpar = _RB_UP(parent, field); \ elmdir = RB_RIGHT(parent, field) == elm ? 
_RB_R : _RB_L; \ _RB_BITS(gpar) ^= elmdir; \ if (_RB_BITS(gpar) & elmdir) { \ /* lengthen the parent-elm edge to rebalance */ \ _RB_UP(parent, field) = gpar; \ - return; \ + return (NULL); \ } \ if (_RB_BITS(gpar) & _RB_LR) { \ /* shorten other edge, retry from parent */ \ _RB_BITS(gpar) ^= _RB_LR; \ _RB_UP(parent, field) = gpar; \ gpar = _RB_PTR(gpar); \ continue; \ } \ sibdir = elmdir ^ _RB_LR; \ sib = _RB_LINK(parent, sibdir, field); \ up = _RB_UP(sib, field); \ _RB_BITS(up) ^= _RB_LR; \ if ((_RB_BITS(up) & _RB_LR) == 0) { \ /* shorten edges descending from sib, retry */ \ _RB_UP(sib, field) = up; \ continue; \ } \ if ((_RB_BITS(up) & sibdir) == 0) { \ /* \ * The edge descending from 'sib' away from \ * 'parent' is long. The short edge descending \ * from 'sib' toward 'parent' points to 'elm*' \ * Rotate to make 'sib' a child of 'elm*' \ * then adjust the lengths of the edges \ * descending from 'sib' and 'elm*'. \ * \ * par par \ * / \ / \ \ * / sib elm \ \ * / / \ elm* \ * elm elm* \ / \ \ * / \ \ / \ \ * / \ z / \ \ * x y x sib \ * / \ \ * / z \ * y \ */ \ elm = _RB_LINK(sib, elmdir, field); \ /* elm is a 1-child. First rotate at elm. */ \ RB_ROTATE(sib, elm, sibdir, field); \ up = _RB_UP(elm, field); \ _RB_BITSUP(parent, field) ^= \ (_RB_BITS(up) & elmdir) ? _RB_LR : elmdir; \ _RB_BITSUP(sib, field) ^= \ (_RB_BITS(up) & sibdir) ? _RB_LR : sibdir; \ _RB_BITSUP(elm, field) |= _RB_LR; \ } else { \ if ((_RB_BITS(up) & elmdir) == 0 && \ RB_STRICT_HST && elm != NULL) { \ /* if parent does not become a leaf, \ do not demote parent yet. */ \ _RB_BITSUP(parent, field) ^= sibdir; \ _RB_BITSUP(sib, field) ^= _RB_LR; \ } else if ((_RB_BITS(up) & elmdir) == 0) { \ /* demote parent. */ \ _RB_BITSUP(parent, field) ^= elmdir; \ _RB_BITSUP(sib, field) ^= sibdir; \ } else \ _RB_BITSUP(sib, field) ^= sibdir; \ elm = sib; \ } \ \ /* \ * The edge descending from 'elm' away from 'parent' \ * is short. Rotate to make 'parent' a child of 'elm', \ * then lengthen the short edges descending from \ * 'parent' and 'elm' to rebalance. \ * \ * par elm \ * / \ / \ \ * e \ / \ \ * elm / \ \ * / \ par s \ * / \ / \ \ * / \ e \ \ * x s x \ */ \ RB_ROTATE(parent, elm, elmdir, field); \ RB_SET_PARENT(elm, gpar, field); \ RB_SWAP_CHILD(head, gpar, parent, elm, field); \ if (sib != elm) \ - RB_AUGMENT(sib); \ - break; \ + RB_AUGMENT_CHECK(sib); \ + return (parent); \ } while (elm = parent, (parent = gpar) != NULL); \ + return (NULL); \ } +#define _RB_AUGMENT_WALK(elm, match, field) \ +do { \ + if (match == elm) \ + match = NULL; \ +} while (RB_AUGMENT_CHECK(elm) && \ + (elm = RB_PARENT(elm, field)) != NULL) + #define RB_GENERATE_REMOVE(name, type, field, attr) \ attr struct type * \ name##_RB_REMOVE(struct name *head, struct type *out) \ { \ struct type *child, *in, *opar, *parent; \ \ child = RB_LEFT(out, field); \ in = RB_RIGHT(out, field); \ opar = _RB_UP(out, field); \ if (in == NULL || child == NULL) { \ in = child = in == NULL ? 
child : in; \ parent = opar = _RB_PTR(opar); \ } else { \ parent = in; \ while (RB_LEFT(in, field)) \ in = RB_LEFT(in, field); \ RB_SET_PARENT(child, in, field); \ RB_LEFT(in, field) = child; \ child = RB_RIGHT(in, field); \ if (parent != in) { \ RB_SET_PARENT(parent, in, field); \ RB_RIGHT(in, field) = parent; \ parent = RB_PARENT(in, field); \ RB_LEFT(parent, field) = child; \ } \ _RB_UP(in, field) = opar; \ opar = _RB_PTR(opar); \ } \ RB_SWAP_CHILD(head, opar, out, in, field); \ if (child != NULL) \ _RB_UP(child, field) = parent; \ if (parent != NULL) { \ - name##_RB_REMOVE_COLOR(head, parent, child); \ + opar = name##_RB_REMOVE_COLOR(head, parent, child); \ /* if rotation has made 'parent' the root of the same \ * subtree as before, don't re-augment it. */ \ - if (parent == in && RB_LEFT(parent, field) == NULL) \ + if (parent == in && RB_LEFT(parent, field) == NULL) { \ + opar = NULL; \ parent = RB_PARENT(parent, field); \ - RB_UPDATE_AUGMENT(parent, field); \ + } \ + _RB_AUGMENT_WALK(parent, opar, field); \ + if (opar != NULL) { \ + RB_AUGMENT_CHECK(opar); \ + RB_AUGMENT_CHECK(RB_PARENT(opar, field)); \ + } \ } \ return (out); \ } #define RB_GENERATE_INSERT(name, type, field, cmp, attr) \ /* Inserts a node into the RB tree */ \ attr struct type * \ name##_RB_INSERT(struct name *head, struct type *elm) \ { \ struct type *tmp; \ struct type **tmpp = &RB_ROOT(head); \ struct type *parent = NULL; \ \ while ((tmp = *tmpp) != NULL) { \ parent = tmp; \ __typeof(cmp(NULL, NULL)) comp = (cmp)(elm, parent); \ if (comp < 0) \ tmpp = &RB_LEFT(parent, field); \ else if (comp > 0) \ tmpp = &RB_RIGHT(parent, field); \ else \ return (parent); \ } \ RB_SET(elm, parent, field); \ *tmpp = elm; \ if (parent != NULL) \ - name##_RB_INSERT_COLOR(head, parent, elm); \ - RB_UPDATE_AUGMENT(elm, field); \ + tmp = name##_RB_INSERT_COLOR(head, parent, elm); \ + _RB_AUGMENT_WALK(elm, tmp, field); \ + if (tmp != NULL) \ + RB_AUGMENT_CHECK(tmp); \ return (NULL); \ } #define RB_GENERATE_FIND(name, type, field, cmp, attr) \ /* Finds the node with the same key as elm */ \ attr struct type * \ name##_RB_FIND(struct name *head, struct type *elm) \ { \ struct type *tmp = RB_ROOT(head); \ __typeof(cmp(NULL, NULL)) comp; \ while (tmp) { \ comp = cmp(elm, tmp); \ if (comp < 0) \ tmp = RB_LEFT(tmp, field); \ else if (comp > 0) \ tmp = RB_RIGHT(tmp, field); \ else \ return (tmp); \ } \ return (NULL); \ } #define RB_GENERATE_NFIND(name, type, field, cmp, attr) \ /* Finds the first node greater than or equal to the search key */ \ attr struct type * \ name##_RB_NFIND(struct name *head, struct type *elm) \ { \ struct type *tmp = RB_ROOT(head); \ struct type *res = NULL; \ __typeof(cmp(NULL, NULL)) comp; \ while (tmp) { \ comp = cmp(elm, tmp); \ if (comp < 0) { \ res = tmp; \ tmp = RB_LEFT(tmp, field); \ } \ else if (comp > 0) \ tmp = RB_RIGHT(tmp, field); \ else \ return (tmp); \ } \ return (res); \ } #define RB_GENERATE_NEXT(name, type, field, attr) \ /* ARGSUSED */ \ attr struct type * \ name##_RB_NEXT(struct type *elm) \ { \ if (RB_RIGHT(elm, field)) { \ elm = RB_RIGHT(elm, field); \ while (RB_LEFT(elm, field)) \ elm = RB_LEFT(elm, field); \ } else { \ while (RB_PARENT(elm, field) && \ (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ elm = RB_PARENT(elm, field); \ elm = RB_PARENT(elm, field); \ } \ return (elm); \ } #define RB_GENERATE_PREV(name, type, field, attr) \ /* ARGSUSED */ \ attr struct type * \ name##_RB_PREV(struct type *elm) \ { \ if (RB_LEFT(elm, field)) { \ elm = RB_LEFT(elm, field); \ while (RB_RIGHT(elm, 
field)) \ elm = RB_RIGHT(elm, field); \ } else { \ while (RB_PARENT(elm, field) && \ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ elm = RB_PARENT(elm, field); \ elm = RB_PARENT(elm, field); \ } \ return (elm); \ } #define RB_GENERATE_MINMAX(name, type, field, attr) \ attr struct type * \ name##_RB_MINMAX(struct name *head, int val) \ { \ struct type *tmp = RB_ROOT(head); \ struct type *parent = NULL; \ while (tmp) { \ parent = tmp; \ if (val < 0) \ tmp = RB_LEFT(tmp, field); \ else \ tmp = RB_RIGHT(tmp, field); \ } \ return (parent); \ } #define RB_GENERATE_REINSERT(name, type, field, cmp, attr) \ attr struct type * \ name##_RB_REINSERT(struct name *head, struct type *elm) \ { \ struct type *cmpelm; \ if (((cmpelm = RB_PREV(name, head, elm)) != NULL && \ cmp(cmpelm, elm) >= 0) || \ ((cmpelm = RB_NEXT(name, head, elm)) != NULL && \ cmp(elm, cmpelm) >= 0)) { \ /* XXXLAS: Remove/insert is heavy handed. */ \ RB_REMOVE(name, head, elm); \ return (RB_INSERT(name, head, elm)); \ } \ return (NULL); \ } \ #define RB_NEGINF -1 #define RB_INF 1 #define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) #define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) #define RB_FIND(name, x, y) name##_RB_FIND(x, y) #define RB_NFIND(name, x, y) name##_RB_NFIND(x, y) #define RB_NEXT(name, x, y) name##_RB_NEXT(y) #define RB_PREV(name, x, y) name##_RB_PREV(y) #define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) #define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) #define RB_REINSERT(name, x, y) name##_RB_REINSERT(x, y) #define RB_FOREACH(x, name, head) \ for ((x) = RB_MIN(name, head); \ (x) != NULL; \ (x) = name##_RB_NEXT(x)) #define RB_FOREACH_FROM(x, name, y) \ for ((x) = (y); \ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ (x) = (y)) #define RB_FOREACH_SAFE(x, name, head, y) \ for ((x) = RB_MIN(name, head); \ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ (x) = (y)) #define RB_FOREACH_REVERSE(x, name, head) \ for ((x) = RB_MAX(name, head); \ (x) != NULL; \ (x) = name##_RB_PREV(x)) #define RB_FOREACH_REVERSE_FROM(x, name, y) \ for ((x) = (y); \ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ (x) = (y)) #define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \ for ((x) = RB_MAX(name, head); \ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ (x) = (y)) #endif /* _SYS_TREE_H_ */
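/*
 * Putting the interface together: a minimal, self-contained consumer
 * of the rank-balanced tree macros above.  The element type,
 * comparator and tree name are illustrative, not part of this header.
 */
#include <stdlib.h>
#include <sys/tree.h>

struct dnode {
	RB_ENTRY(dnode) link;
	int key;
};

static int
dnode_cmp(struct dnode *a, struct dnode *b)
{
	return (a->key < b->key ? -1 : a->key > b->key);
}

RB_HEAD(dnode_tree, dnode);
RB_GENERATE_STATIC(dnode_tree, dnode, link, dnode_cmp)

static void
dnode_demo(void)
{
	struct dnode_tree head = RB_INITIALIZER(&head);
	struct dnode *n, *tmp;
	int i;

	for (i = 0; i < 10; i++) {
		n = malloc(sizeof(*n));
		if (n == NULL)
			abort();
		n->key = i;
		(void)RB_INSERT(dnode_tree, &head, n);	/* NULL on success */
	}
	/* The _SAFE variant allows removing the current element. */
	RB_FOREACH_SAFE(n, dnode_tree, &head, tmp) {
		RB_REMOVE(dnode_tree, &head, n);
		free(n);
	}
}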