Index: sys/dev/iommu/iommu.h
===================================================================
--- sys/dev/iommu/iommu.h
+++ sys/dev/iommu/iommu.h
@@ -109,6 +109,7 @@
 	struct iommu_map_entries_tailq unload_entries; /* (d) Entries to unload */
 	struct iommu_gas_entries_tree rb_root; /* (d) */
+	struct iommu_map_entry *start_gap; /* (d) */
 	iommu_gaddr_t end; /* (c) Highest address + 1 in the guest AS */
 	struct iommu_map_entry *first_place, *last_place; /* (d) */
Index: sys/dev/iommu/iommu_gas.c
===================================================================
--- sys/dev/iommu/iommu_gas.c
+++ sys/dev/iommu/iommu_gas.c
@@ -205,6 +205,12 @@
 iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
 {
+	/*
+	 * The predecessor to the removed entry may become the new greatest
+	 * lower bound on free gaps; start_gap may be NULL during teardown.
+	 */
+	if (domain->start_gap != NULL && entry->end <= domain->start_gap->end)
+		domain->start_gap = iommu_gas_entries_tree_RB_PREV(entry);
 	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
 }
@@ -238,6 +244,7 @@
 	end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
 	iommu_gas_rb_insert(domain, end);
+	domain->start_gap = begin;
 	domain->first_place = begin;
 	domain->last_place = end;
 	domain->flags |= IOMMU_DOMAIN_GAS_INITED;
@@ -259,7 +266,7 @@
 	KASSERT(entry->flags == (IOMMU_MAP_ENTRY_PLACE |
 	    IOMMU_MAP_ENTRY_UNMAPPED),
 	    ("start entry flags %p", domain));
-	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
+	iommu_gas_rb_remove(domain, entry);
 	iommu_gas_free_entry(entry);
 
 	entry = RB_MAX(iommu_gas_entries_tree, &domain->rb_root);
@@ -268,15 +275,14 @@
 	KASSERT(entry->flags == (IOMMU_MAP_ENTRY_PLACE |
 	    IOMMU_MAP_ENTRY_UNMAPPED),
 	    ("end entry flags %p", domain));
-	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
+	iommu_gas_rb_remove(domain, entry);
 	iommu_gas_free_entry(entry);
 
 	RB_FOREACH_SAFE(entry, iommu_gas_entries_tree, &domain->rb_root,
 	    entry1) {
 		KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
 		    ("non-RMRR entry left %p", domain));
-		RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root,
-		    entry);
+		iommu_gas_rb_remove(domain, entry);
 		iommu_gas_free_entry(entry);
 	}
 }
@@ -359,6 +365,9 @@
 	found = iommu_gas_rb_insert(a->domain, entry);
 	KASSERT(found, ("found dup %p start %jx size %jx", a->domain,
 	    (uintmax_t)start, (uintmax_t)size));
+	/* The new entry may be a new, greater lower bound on free gaps. */
+	if (start == a->domain->start_gap->end)
+		a->domain->start_gap = entry;
 	return (true);
 }
@@ -396,6 +405,25 @@
 	KASSERT(a->entry->flags == 0,
 	    ("dirty entry %p %p", a->domain, a->entry));
 
+	/* Make start_gap the greatest lower bound on free gaps. */
+	first = a->domain->start_gap;
+	while (first != NULL && first->free_down == 0)
+		first = RB_PARENT(first, rb_entry);
+	for (curr = first; curr != NULL;
+	    curr = iommu_gas_next(curr, 1)) {
+		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
+		    first->last < curr->start) {
+			a->domain->start_gap =
+			    iommu_gas_entries_tree_RB_PREV(first);
+			break;
+		}
+		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
+		    curr->end < first->first) {
+			a->domain->start_gap = curr;
+			break;
+		}
+	}
+
 	/*
 	 * If the subtree doesn't have free space for the requested allocation
 	 * plus two guard pages, skip it.
@@ -407,12 +435,9 @@
 	 * Find the first entry in the lower region that could abut a big-enough
 	 * range.
 	 */
-	curr = RB_ROOT(&a->domain->rb_root);
-	first = NULL;
-	while (curr != NULL && curr->free_down >= min_free) {
-		first = curr;
-		curr = RB_LEFT(curr, rb_entry);
-	}
+	first = a->domain->start_gap;
+	while (first != NULL && first->free_down < min_free)
+		first = RB_PARENT(first, rb_entry);
 
 	/*
 	 * Walk the big-enough ranges until one satisfies alignment