Page MenuHomeFreeBSD

D34842.id104764.diff
No OneTemporary

D34842.id104764.diff

Index: contrib/bsnmp/snmp_mibII/mibII_route.c
===================================================================
--- contrib/bsnmp/snmp_mibII/mibII_route.c
+++ contrib/bsnmp/snmp_mibII/mibII_route.c
@@ -239,7 +239,7 @@
*/
r = RB_MIN(sroutes, &sroutes);
while (r != NULL) {
- r1 = RB_NEXT(sroutes, &sroutes, r);
+ r1 = RB_NEXT(sroutes, r);
RB_REMOVE(sroutes, &sroutes, r);
free(r);
r = r1;
Index: contrib/elftoolchain/addr2line/addr2line.c
===================================================================
--- contrib/elftoolchain/addr2line/addr2line.c
+++ contrib/elftoolchain/addr2line/addr2line.c
@@ -411,7 +411,7 @@
res = RB_NFIND(cutree, &cuhead, &find);
if (res != NULL) {
if (res->lopc != addr)
- res = RB_PREV(cutree, &cuhead, res);
+ res = RB_PREV(cutree, res);
if (res != NULL && addr >= res->lopc && addr < res->hipc)
return (res);
} else {
Index: contrib/elftoolchain/libelf/elf_scn.c
===================================================================
--- contrib/elftoolchain/libelf/elf_scn.c
+++ contrib/elftoolchain/libelf/elf_scn.c
@@ -251,5 +251,5 @@
}
return (s == NULL ? elf_getscn(e, (size_t) 1) :
- RB_NEXT(scntree, &e->e_u.e_elf.e_scn, s));
+ RB_NEXT(scntree, s));
}
Index: contrib/pf/libevent/event.c
===================================================================
--- contrib/pf/libevent/event.c
+++ contrib/pf/libevent/event.c
@@ -764,7 +764,7 @@
for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
if (timercmp(&ev->ev_timeout, &now, >))
break;
- next = RB_NEXT(event_tree, &base->timetree, ev);
+ next = RB_NEXT(event_tree, ev);
event_queue_remove(base, ev, EVLIST_TIMEOUT);
Index: share/man/man3/tree.3
===================================================================
--- share/man/man3/tree.3
+++ share/man/man3/tree.3
@@ -162,9 +162,9 @@
.Ft "bool"
.Fn RB_EMPTY "RB_HEAD *head"
.Ft "struct TYPE *"
-.Fn RB_NEXT NAME "RB_HEAD *head" "struct TYPE *elm"
+.Fn RB_NEXT NAME "struct TYPE *elm"
.Ft "struct TYPE *"
-.Fn RB_PREV NAME "RB_HEAD *head" "struct TYPE *elm"
+.Fn RB_PREV NAME "struct TYPE *elm"
.Ft "struct TYPE *"
.Fn RB_MIN NAME "RB_HEAD *head"
.Ft "struct TYPE *"
@@ -532,7 +532,7 @@
.Fn RB_PREV
macros can be used to traverse the tree:
.Pp
-.Dl "for (np = RB_MIN(NAME, &head); np != NULL; np = RB_NEXT(NAME, &head, np))"
+.Dl "for (np = RB_MIN(NAME, &head); np != NULL; np = RB_NEXT(NAME, np))"
.Pp
Or, for simplicity, one can use the
.Fn RB_FOREACH
Index: sys/compat/linuxkpi/common/include/linux/rbtree.h
===================================================================
--- sys/compat/linuxkpi/common/include/linux/rbtree.h
+++ sys/compat/linuxkpi/common/include/linux/rbtree.h
@@ -78,7 +78,7 @@
linux_root_RB_INSERT_COLOR((struct linux_root *)(root), (node))
#define rb_erase(node, root) \
linux_root_RB_REMOVE((struct linux_root *)(root), (node))
-#define rb_next(node) RB_NEXT(linux_root, NULL, (node))
+#define rb_next(node) RB_NEXT(linux_root, (node))
-#define rb_prev(node) RB_PREV(linux_root, NULL, (node))
+#define rb_prev(node) RB_PREV(linux_root, (node))
#define rb_first(root) RB_MIN(linux_root, (struct linux_root *)(root))
#define rb_last(root) RB_MAX(linux_root, (struct linux_root *)(root))
Index: sys/dev/iommu/iommu_gas.c
===================================================================
--- sys/dev/iommu/iommu_gas.c
+++ sys/dev/iommu/iommu_gas.c
@@ -490,7 +490,7 @@
next = RB_NFIND(iommu_gas_entries_tree, &domain->rb_root, entry);
KASSERT(next != NULL, ("next must be non-null %p %jx", domain,
(uintmax_t)entry->start));
- prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
+ prev = RB_PREV(iommu_gas_entries_tree, next);
/* prev could be NULL */
/*
@@ -536,8 +536,8 @@
#ifdef INVARIANTS
struct iommu_map_entry *ip, *in;
- ip = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
- in = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
+ ip = RB_PREV(iommu_gas_entries_tree, entry);
+ in = RB_NEXT(iommu_gas_entries_tree, entry);
KASSERT(prev == NULL || ip == prev,
("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
entry, entry->start, entry->end, prev,
@@ -580,8 +580,8 @@
IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
("non-RMRR entry %p %p", domain, entry));
- prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, entry);
- next = RB_NEXT(iommu_gas_entries_tree, &domain->rb_root, entry);
+ prev = RB_PREV(iommu_gas_entries_tree, entry);
+ next = RB_NEXT(iommu_gas_entries_tree, entry);
iommu_gas_rb_remove(domain, entry);
entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;
@@ -738,7 +738,7 @@
"after %#jx", domain, (uintmax_t)domain->end,
(uintmax_t)start));
entry_end = ummin(end, next->start);
- prev = RB_PREV(iommu_gas_entries_tree, &domain->rb_root, next);
+ prev = RB_PREV(iommu_gas_entries_tree, next);
if (prev != NULL)
entry_start = ummax(start, prev->end);
else
Index: sys/dev/xen/gntdev/gntdev.c
===================================================================
--- sys/dev/xen/gntdev/gntdev.c
+++ sys/dev/xen/gntdev/gntdev.c
@@ -166,8 +166,8 @@
mtx_lock(&priv_user->user_data_lock);
RB_INSERT(file_offset_head, &priv_user->file_offset, offset);
- offset_nxt = RB_NEXT(file_offset_head, &priv_user->file_offset, offset);
- offset_prv = RB_PREV(file_offset_head, &priv_user->file_offset, offset);
+ offset_nxt = RB_NEXT(file_offset_head, offset);
+ offset_prv = RB_PREV(file_offset_head, offset);
if (offset_nxt != NULL &&
offset_nxt->file_offset == offset->file_offset + offset->count *
PAGE_SIZE) {
@@ -318,7 +318,7 @@
mtx_lock(&priv_user->user_data_lock);
gref_start = RB_FIND(gref_tree_head, &priv_user->gref_tree, &find_gref);
for (gref = gref_start; gref != NULL && count > 0; gref =
- RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref)) {
+ RB_NEXT(gref_tree_head, gref)) {
if (index != gref->file_index)
break;
index += PAGE_SIZE;
@@ -447,7 +447,7 @@
mtx_lock(&priv_user->user_data_lock);
mtx_lock(&cleanup_data.to_kill_grefs_mtx);
for (; gref != NULL && count > 0; gref = gref_tmp) {
- gref_tmp = RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref);
+ gref_tmp = RB_NEXT(gref_tree_head, gref);
RB_REMOVE(gref_tree_head, &priv_user->gref_tree, gref);
STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs, gref,
gref_next.list);
@@ -1069,7 +1069,7 @@
mtx_lock(&priv_user->user_data_lock);
VM_OBJECT_WLOCK(mem_obj);
for (gref = gref_start; gref != NULL && count > 0; gref =
- RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref)) {
+ RB_NEXT(gref_tree_head, gref)) {
if (gref->page->object)
break;
Index: sys/fs/tmpfs/tmpfs_subr.c
===================================================================
--- sys/fs/tmpfs/tmpfs_subr.c
+++ sys/fs/tmpfs/tmpfs_subr.c
@@ -1130,8 +1130,7 @@
if (dc->tdc_current != NULL)
return (dc->tdc_current);
}
- dc->tdc_tree = dc->tdc_current = RB_NEXT(tmpfs_dir,
- &dnode->tn_dir.tn_dirhead, dc->tdc_tree);
+ dc->tdc_tree = dc->tdc_current = RB_NEXT(tmpfs_dir, dc->tdc_tree);
if ((de = dc->tdc_current) != NULL && tmpfs_dirent_duphead(de)) {
dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
MPASS(dc->tdc_current != NULL);
Index: sys/kern/subr_stats.c
===================================================================
--- sys/kern/subr_stats.c
+++ sys/kern/subr_stats.c
@@ -3379,7 +3379,7 @@
int i = 0;
ARB_FOREACH(ctd64, ctdth64, ctd64tree) {
rbctd64 = (i == 0 ? RB_MIN(rbctdth64, rbctdtree) :
- RB_NEXT(rbctdth64, rbctdtree, rbctd64));
+ RB_NEXT(rbctdth64, rbctd64));
if (i >= ARB_CURNODES(ctd64tree)
|| ctd64 != rbctd64
Index: sys/netinet/tcp_stacks/rack.c
===================================================================
--- sys/netinet/tcp_stacks/rack.c
+++ sys/netinet/tcp_stacks/rack.c
@@ -7356,7 +7356,7 @@
(ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) {
struct rack_sendmap *prsm;
- prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ prsm = RB_PREV(rack_rb_tree_head, rsm);
if (prsm)
prsm->r_one_out_nr = 1;
}
@@ -8243,7 +8243,7 @@
* the next guy and it is already sacked.
*
*/
- next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ next = RB_NEXT(rack_rb_tree_head, rsm);
if (next && (next->r_flags & RACK_ACKED) &&
SEQ_GEQ(end, next->r_start)) {
/**
@@ -8316,7 +8316,7 @@
counter_u64_add(rack_sack_used_next_merge, 1);
/* Postion for the next block */
start = next->r_end;
- rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
+ rsm = RB_NEXT(rack_rb_tree_head, next);
if (rsm == NULL)
goto out;
} else {
@@ -8372,12 +8372,12 @@
moved++;
if (end == rsm->r_end) {
/* Done with block */
- rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ rsm = RB_NEXT(rack_rb_tree_head, rsm);
goto out;
} else if (SEQ_LT(end, rsm->r_end)) {
/* A partial sack to a already sacked block */
moved++;
- rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ rsm = RB_NEXT(rack_rb_tree_head, rsm);
goto out;
} else {
/*
@@ -8386,7 +8386,7 @@
* next block.
*/
start = rsm->r_end;
- rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ rsm = RB_NEXT(rack_rb_tree_head, rsm);
if (rsm == NULL)
goto out;
}
@@ -8471,7 +8471,7 @@
* There is more not coverend by this rsm move on
* to the next block in the RB tree.
*/
- nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ nrsm = RB_NEXT(rack_rb_tree_head, rsm);
start = rsm->r_end;
rsm = nrsm;
if (rsm == NULL)
@@ -8523,7 +8523,7 @@
rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
}
}
- prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ prev = RB_PREV(rack_rb_tree_head, rsm);
if (prev &&
(prev->r_flags & RACK_ACKED)) {
/**
@@ -8695,26 +8695,26 @@
* with either the previous or
* next block?
*/
- next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ next = RB_NEXT(rack_rb_tree_head, rsm);
while (next) {
if (next->r_flags & RACK_TLP)
break;
if (next->r_flags & RACK_ACKED) {
/* yep this and next can be merged */
rsm = rack_merge_rsm(rack, rsm, next);
- next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ next = RB_NEXT(rack_rb_tree_head, rsm);
} else
break;
}
/* Now what about the previous? */
- prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ prev = RB_PREV(rack_rb_tree_head, rsm);
while (prev) {
if (prev->r_flags & RACK_TLP)
break;
if (prev->r_flags & RACK_ACKED) {
/* yep the previous and this can be merged */
rsm = rack_merge_rsm(rack, prev, rsm);
- prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ prev = RB_PREV(rack_rb_tree_head, rsm);
} else
break;
}
@@ -8726,7 +8726,7 @@
}
/* Save off the next one for quick reference. */
if (rsm)
- nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ nrsm = RB_NEXT(rack_rb_tree_head, rsm);
else
nrsm = NULL;
*prsm = rack->r_ctl.rc_sacklast = nrsm;
@@ -8760,7 +8760,7 @@
tmap = rsm;
}
tmap->r_in_tmap = 1;
- rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
+ rsm = RB_NEXT(rack_rb_tree_head, rsm);
}
/*
* Now lets possibly clear the sack filter so we start
@@ -10019,8 +10019,7 @@
else
rsm->orig_m_len = 0;
#endif
- rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
- rsm);
+ rsm = RB_NEXT(rack_rb_tree_head, rsm);
if (rsm == NULL)
break;
}
@@ -15216,12 +15215,12 @@
* next with space i.e. over 1 MSS or the one
* after that (after the app-limited).
*/
- my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
+ my_rsm = RB_NEXT(rack_rb_tree_head,
rack->r_ctl.rc_first_appl);
if (my_rsm) {
if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp))
/* Have to use the next one */
- my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
+ my_rsm = RB_NEXT(rack_rb_tree_head,
my_rsm);
else {
/* Use after the first MSS of it is acked */
Index: sys/netpfil/pf/pf.c
===================================================================
--- sys/netpfil/pf/pf.c
+++ sys/netpfil/pf/pf.c
@@ -3387,7 +3387,7 @@
PF_ANCHOR_SET_MATCH(f);
*match = 0;
}
- f->child = RB_NEXT(pf_kanchor_node, parent, f->child);
+ f->child = RB_NEXT(pf_kanchor_node, f->child);
if (f->child != NULL) {
*rs = &f->child->ruleset;
*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
@@ -3490,8 +3490,7 @@
PF_ETH_ANCHOR_SET_MATCH(f);
*match = 0;
}
- f->child = RB_NEXT(pf_keth_anchor_node, parent,
- f->child);
+ f->child = RB_NEXT(pf_keth_anchor_node, f->child);
if (f->child != NULL) {
*rs = &f->child->ruleset;
*r = TAILQ_FIRST((*rs)->active.rules);
Index: sys/netpfil/pf/pf_if.c
===================================================================
--- sys/netpfil/pf/pf_if.c
+++ sys/netpfil/pf/pf_if.c
@@ -882,7 +882,7 @@
NET_EPOCH_ENTER(et);
for (p = RB_MIN(pfi_ifhead, &V_pfi_ifs); p; p = nextp) {
- nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
+ nextp = RB_NEXT(pfi_ifhead, p);
if (pfi_skip_if(name, p))
continue;
if (*size <= n++)
@@ -890,7 +890,7 @@
if (!p->pfik_tzero)
p->pfik_tzero = time_second;
pf_kkif_to_kif(p, buf++);
- nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
+ nextp = RB_NEXT(pfi_ifhead, p);
}
*size = n;
NET_EPOCH_EXIT(et);
Index: sys/powerpc/aim/mmu_oea.c
===================================================================
--- sys/powerpc/aim/mmu_oea.c
+++ sys/powerpc/aim/mmu_oea.c
@@ -1040,7 +1040,7 @@
key.pvo_vaddr = sva;
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
pvo != NULL && PVO_VADDR(pvo) < eva;
- pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
pvo->pvo_vaddr &= ~PVO_WIRED;
@@ -1811,7 +1811,7 @@
key.pvo_vaddr = sva;
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
- tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
+ tpvo = RB_NEXT(pvo_tree, pvo);
/*
* Grab the PTE pointer before we diddle with the cached PTE
@@ -1902,7 +1902,7 @@
key.pvo_vaddr = sva;
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
- tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
+ tpvo = RB_NEXT(pvo_tree, pvo);
moea_pvo_remove(pvo, -1);
}
PMAP_UNLOCK(pm);
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -1352,7 +1352,7 @@
PMAP_LOCK(pm);
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
pvo != NULL && PVO_VADDR(pvo) < eva;
- pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
if (PVO_IS_SP(pvo)) {
if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
pvo = moea64_sp_unwire(pvo);
@@ -2609,7 +2609,7 @@
key.pvo_vaddr = sva;
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
pvo != NULL && PVO_VADDR(pvo) < eva;
- pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
if (PVO_IS_SP(pvo)) {
if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
pvo = moea64_sp_protect(pvo, prot);
@@ -2740,7 +2740,7 @@
moea64_sp_demote(pvo);
}
}
- tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
+ tpvo = RB_NEXT(pvo_tree, pvo);
/*
* For locking reasons, remove this from the page table and
@@ -3157,7 +3157,7 @@
key.pvo_vaddr = DMAP_BASE_ADDRESS + ppa;
for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
ppa < pa + size; ppa += PAGE_SIZE,
- pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
if (pvo == NULL || PVO_PADDR(pvo) != ppa) {
error = EFAULT;
break;
@@ -3873,7 +3873,7 @@
goto error;
}
- pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo);
+ pvo = RB_NEXT(pvo_tree, pvo);
}
/* All OK, promote. */
@@ -3893,7 +3893,7 @@
for (pvo = first, va_end = PVO_VADDR(pvo) + HPT_SP_SIZE;
pvo != NULL && PVO_VADDR(pvo) < va_end;
- pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
pvo->pvo_pte.pa &= ADDR_POFF | ~HPT_SP_MASK;
pvo->pvo_pte.pa |= LPTE_LP_4K_16M;
pvo->pvo_vaddr |= PVO_LARGE;
@@ -3944,7 +3944,7 @@
for (pvo = sp, va_end = va + HPT_SP_SIZE;
pvo != NULL && PVO_VADDR(pvo) < va_end;
- pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo),
+ pvo = RB_NEXT(pvo_tree, pvo),
va += PAGE_SIZE, pa += PAGE_SIZE) {
KASSERT(pvo && PVO_VADDR(pvo) == va,
("%s: missing PVO for va %#jx", __func__, (uintmax_t)va));
@@ -4007,7 +4007,7 @@
eva = PVO_VADDR(sp) + HPT_SP_SIZE;
refchg = 0;
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
- prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ prev = pvo, pvo = RB_NEXT(pvo_tree, pvo)) {
if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
panic("%s: pvo %p is missing PVO_WIRED",
__func__, pvo);
@@ -4053,7 +4053,7 @@
refchg = 0;
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
- prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ prev = pvo, pvo = RB_NEXT(pvo_tree, pvo)) {
pvo->pvo_pte.prot = prot;
/*
* If the PVO is in the page table, update mapping
@@ -4095,7 +4095,7 @@
eva = PVO_VADDR(sp) + HPT_SP_SIZE;
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
- tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
+ tpvo = RB_NEXT(pvo_tree, pvo);
/*
* For locking reasons, remove this from the page table and
@@ -4141,7 +4141,7 @@
refchg = 0;
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
- pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
ret = moea64_pte_synch(pvo);
if (ret > 0) {
refchg |= ret & (LPTE_CHG | LPTE_REF);
@@ -4221,7 +4221,7 @@
refchg = 0;
for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
- pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
ret = moea64_pte_clear(pvo, ptebit);
if (ret > 0)
refchg |= ret & (LPTE_CHG | LPTE_REF);
Index: sys/powerpc/aim/moea64_native.c
===================================================================
--- sys/powerpc/aim/moea64_native.c
+++ sys/powerpc/aim/moea64_native.c
@@ -932,7 +932,7 @@
eva = PVO_VADDR(pvo) + HPT_SP_SIZE;
for (; pvo != NULL && PVO_VADDR(pvo) < eva;
- pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
pt = moea64_pteg_table + pvo->pvo_pte.slot;
ptehi = be64toh(pt->pte_hi);
if ((ptehi & LPTE_AVPN_MASK) !=
@@ -981,7 +981,7 @@
eva = PVO_VADDR(pvo) + HPT_SP_SIZE;
for (; pvo != NULL && PVO_VADDR(pvo) < eva;
- pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
moea64_pte_from_pvo(pvo, &insertpt);
pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
Index: sys/powerpc/pseries/mmu_phyp.c
===================================================================
--- sys/powerpc/pseries/mmu_phyp.c
+++ sys/powerpc/pseries/mmu_phyp.c
@@ -600,7 +600,7 @@
eva = PVO_VADDR(pvo) + HPT_SP_SIZE;
for (; pvo != NULL && PVO_VADDR(pvo) < eva;
- pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
moea64_pte_from_pvo(pvo, &pte);
err = phyp_pft_hcall(H_REMOVE, H_AVPN, pvo->pvo_pte.slot,
@@ -637,7 +637,7 @@
rm_rlock(&mphyp_eviction_lock, &track);
for (; pvo != NULL && PVO_VADDR(pvo) < eva;
- pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
+ pvo = RB_NEXT(pvo_tree, pvo)) {
/* Initialize PTE */
moea64_pte_from_pvo(pvo, &pte);
Index: sys/sys/tree.h
===================================================================
--- sys/sys/tree.h
+++ sys/sys/tree.h
@@ -776,9 +776,9 @@
name##_RB_REINSERT(struct name *head, struct type *elm) \
{ \
struct type *cmpelm; \
- if (((cmpelm = RB_PREV(name, head, elm)) != NULL && \
+ if (((cmpelm = RB_PREV(name, elm)) != NULL && \
cmp(cmpelm, elm) >= 0) || \
- ((cmpelm = RB_NEXT(name, head, elm)) != NULL && \
+ ((cmpelm = RB_NEXT(name, elm)) != NULL && \
cmp(elm, cmpelm) >= 0)) { \
/* XXXLAS: Remove/insert is heavy handed. */ \
RB_REMOVE(name, head, elm); \
@@ -794,8 +794,8 @@
#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
-#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
-#define RB_PREV(name, x, y) name##_RB_PREV(y)
+#define RB_NEXT(name, y) name##_RB_NEXT(y)
+#define RB_PREV(name, y) name##_RB_PREV(y)
#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
#define RB_REINSERT(name, x, y) name##_RB_REINSERT(x, y)
Index: usr.sbin/ypldap/yp.c
===================================================================
--- usr.sbin/ypldap/yp.c
+++ usr.sbin/ypldap/yp.c
@@ -511,8 +511,7 @@
* and look up the next field, then remove it again.
*/
RB_INSERT(user_name_tree, env->sc_user_names, &ukey);
- if ((ue = RB_NEXT(user_name_tree, &env->sc_user_names,
- &ukey)) == NULL) {
+ if ((ue = RB_NEXT(user_name_tree, &ukey)) == NULL) {
RB_REMOVE(user_name_tree, env->sc_user_names,
&ukey);
res.stat = YP_NOKEY;
@@ -544,8 +543,7 @@
* canacar's trick reloaded.
*/
RB_INSERT(group_name_tree, env->sc_group_names, &gkey);
- if ((ge = RB_NEXT(group_name_tree, &env->sc_group_names,
- &gkey)) == NULL) {
+ if ((ge = RB_NEXT(group_name_tree, &gkey)) == NULL) {
RB_REMOVE(group_name_tree, env->sc_group_names,
&gkey);
res.stat = YP_NOKEY;

File Metadata

Mime Type
text/plain
Expires
Fri, Mar 13, 6:37 PM (2 h, 5 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
29631619
Default Alt Text
D34842.id104764.diff (21 KB)

Event Timeline