Index: sys/dev/iommu/iommu.h
===================================================================
--- sys/dev/iommu/iommu.h
+++ sys/dev/iommu/iommu.h
@@ -109,6 +109,7 @@
 	struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
							 unload */
 	struct iommu_gas_entries_tree rb_root; /* (d) */
+	struct iommu_map_entry *start_gap;	/* (d) */
 	iommu_gaddr_t end;	/* (c) Highest address + 1 in
				   the guest AS */
 	struct iommu_map_entry *first_place, *last_place; /* (d) */
Index: sys/dev/iommu/iommu_gas.c
===================================================================
--- sys/dev/iommu/iommu_gas.c
+++ sys/dev/iommu/iommu_gas.c
@@ -210,6 +210,13 @@
 static void
 iommu_gas_rb_remove(struct iommu_domain *domain, struct iommu_map_entry *entry)
 {
+	/* The removed entry may open a new, smaller start gap. */
+	if (entry->end <= domain->start_gap->end) {
+		domain->start_gap = RB_RIGHT(entry, rb_entry) != NULL ?
+		    iommu_gas_entries_tree_RB_NEXT(entry) :
+		    RB_LEFT(entry, rb_entry) != NULL ?
+		    RB_LEFT(entry, rb_entry) : RB_PARENT(entry, rb_entry);
+	}
 	RB_REMOVE(iommu_gas_entries_tree, &domain->rb_root, entry);
 }
 
@@ -253,6 +260,7 @@
 	begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
 	RB_INSERT_PREV(iommu_gas_entries_tree, &domain->rb_root, end, begin);
 
+	domain->start_gap = end;
 	domain->first_place = begin;
 	domain->last_place = end;
 	domain->flags |= IOMMU_DOMAIN_GAS_INITED;
@@ -405,6 +413,26 @@
 	IOMMU_DOMAIN_ASSERT_LOCKED(a->domain);
 	KASSERT(a->entry->flags == 0,
 	    ("dirty entry %p %p", a->domain, a->entry));
+	domain = a->domain;
+
+	/*
+	 * Advance start_gap to the first free space big enough for a
+	 * minimum allocation plus two guard pages.
+	 */
+	min_free = 3 * IOMMU_PAGE_SIZE;
+	first = domain->start_gap;
+	while (first != NULL && first->free_down < min_free)
+		first = RB_PARENT(first, rb_entry);
+	for (curr = first; curr != NULL;
+	    curr = iommu_gas_next(curr, min_free)) {
+		if ((first = RB_LEFT(curr, rb_entry)) != NULL &&
+		    first->last + min_free <= curr->start)
+			break;
+		if ((first = RB_RIGHT(curr, rb_entry)) != NULL &&
+		    curr->end + min_free <= first->first)
+			break;
+	}
+	domain->start_gap = curr;
 
 	/*
 	 * If the subtree doesn't have free space for the requested allocation
@@ -413,20 +441,13 @@
 	min_free = 2 * IOMMU_PAGE_SIZE +
 	    roundup2(a->size + a->offset, IOMMU_PAGE_SIZE);
 
-	/*
-	 * Find the first entry in the lower region that could abut a big-enough
-	 * range.
-	 */
-	domain = a->domain;
-	curr = RB_ROOT(&domain->rb_root);
-	first = NULL;
-	while (curr != NULL && curr->free_down >= min_free) {
-		first = curr;
-		curr = RB_LEFT(curr, rb_entry);
-	}
+	/* Climb to find a node in the subtree of big-enough ranges. */
+	first = curr;
+	while (first != NULL && first->free_down < min_free)
+		first = RB_PARENT(first, rb_entry);
 
 	/*
-	 * Walk the big-enough ranges until one satisfies alignment
+	 * Walk the big-enough ranges tree until one satisfies alignment
	 * requirements, or violates lowaddr address requirement.
 	 */
 	addr = a->common->lowaddr + 1;
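
The patch amounts to caching a first-fit search hint: domain->start_gap remembers an entry adjacent to the lowest gap that could still satisfy a minimum-sized request (one page plus two guard pages), so the space search need not rescan from the tree root, and iommu_gas_rb_remove() backs the hint up whenever a removal may open an earlier gap. The stand-alone sketch below illustrates the same idea on a flat sorted array instead of the kernel's free_down-augmented RB tree; every name in it (struct range, gap_hint, find_gap(), and so on) is hypothetical and models only the technique, not the FreeBSD code.

/*
 * User-space sketch of the start_gap idea: keep allocated ranges
 * sorted, cache a hint to the lowest usable gap, and resume first-fit
 * searches from the hint.  All names here are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE		4096UL
#define MIN_FREE	(3 * PAGE)	/* minimum allocation + 2 guard pages */
#define NRANGES		8

struct range {
	unsigned long	start, end;	/* allocated [start, end) */
};

static struct range ranges[NRANGES] = {
	{ 0, PAGE },			/* "first place" sentinel */
	{ 2 * PAGE, 8 * PAGE },
	{ 9 * PAGE, 10 * PAGE },
	{ 20 * PAGE, 21 * PAGE },
};
static int nranges = 4;
static int gap_hint;	/* index of the entry preceding the cached start gap */

/* Size of the free gap following ranges[i]. */
static unsigned long
gap_after(int i)
{
	return ((i + 1 < nranges ? ranges[i + 1].start : ~0UL) -
	    ranges[i].end);
}

/*
 * First-fit search for a gap of at least min_free bytes, where
 * min_free >= MIN_FREE.  Like the patched search path, it first
 * advances the hint past gaps too small for any allocation, then
 * scans from the hint instead of from the lowest address.
 */
static int
find_gap(unsigned long min_free)
{
	int i;

	while (gap_hint < nranges && gap_after(gap_hint) < MIN_FREE)
		gap_hint++;
	for (i = gap_hint; i < nranges; i++)
		if (gap_after(i) >= min_free)
			return (i);
	return (-1);
}

/*
 * Remove an allocated range.  As in iommu_gas_rb_remove(), a removal
 * at or below the hint may open a new gap below it, so back the hint
 * up; the next search re-advances it lazily.
 */
static void
remove_range(int i)
{
	int j;

	assert(i > 0 && i < nranges);	/* never remove the sentinel */
	if (i <= gap_hint)
		gap_hint = i - 1;
	for (j = i; j + 1 < nranges; j++)
		ranges[j] = ranges[j + 1];
	nranges--;
}

int
main(void)
{
	/* Skips the one-page gaps at indices 0 and 1; finds index 2. */
	printf("3-page gap after index %d\n", find_gap(3 * PAGE));
	/* Freeing [2P, 8P) opens an 8-page gap below the cached hint. */
	remove_range(1);
	printf("3-page gap after index %d\n", find_gap(3 * PAGE));
	return (0);
}

The kernel version performs the same two moves against the RB tree: climb out of exhausted subtrees via RB_PARENT using the free_down maximum, then walk adjacent big-enough gaps (iommu_gas_next() in the diff) until one is found, recording it in domain->start_gap for the next caller.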