diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -103,6 +103,9 @@
 {
 	vm_page_t page;
 
+	KASSERT(order < sizeof(size_t) * NBBY,
+	    ("%s: invalid order %u", __func__, order));
+
 	if (PMAP_HAS_DMAP) {
 		unsigned long npages = 1UL << order;
 		int req = VM_ALLOC_WIRED;
@@ -158,6 +161,9 @@
 void
 linux_free_pages(vm_page_t page, unsigned int order)
 {
+	KASSERT(order < sizeof(size_t) * NBBY,
+	    ("%s: invalid order %u", __func__, order));
+
 	if (PMAP_HAS_DMAP) {
 		unsigned long npages = 1UL << order;
 		unsigned long x;
@@ -180,9 +186,13 @@
 vm_offset_t
 linux_alloc_kmem(gfp_t flags, unsigned int order)
 {
-	size_t size = ((size_t)PAGE_SIZE) << order;
+	size_t size;
 	void *addr;
 
+	KASSERT(order < sizeof(size_t) * NBBY - PAGE_SHIFT,
+	    ("%s: invalid order %u", __func__, order));
+
+	size = ((size_t)PAGE_SIZE) << order;
 	if ((flags & GFP_DMA32) == 0) {
 		addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);
 	} else {
@@ -197,6 +207,8 @@
 {
 	KASSERT((addr & PAGE_MASK) == 0,
 	    ("%s: addr %p is not page aligned", __func__, (void *)addr));
+	KASSERT(order < sizeof(size_t) * NBBY,
+	    ("%s: invalid order %u", __func__, order));
 
 	if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
 		_linux_free_kmem(addr, order);
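
For context, the bounds these assertions enforce follow from the shifts used later in each function: `1UL << order` (a page count) is only defined for order values below the bit width of the type, while `((size_t)PAGE_SIZE) << order` in linux_alloc_kmem() has PAGE_SHIFT fewer bits of headroom, hence the tighter check there. The small userland sketch below is not part of the patch; NBBY and PAGE_SHIFT are defined locally for illustration and assume a 64-bit machine with 4 KiB pages. It prints the largest order value each check still admits.

/*
 * Illustration only: the limits behind the new KASSERTs.
 * NBBY and PAGE_SHIFT are hard-coded here (assumed values), not taken
 * from <sys/param.h> or <machine/param.h>.
 */
#include <stddef.h>
#include <stdio.h>

#define NBBY		8	/* bits per byte */
#define PAGE_SHIFT	12	/* 4 KiB pages assumed */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int
main(void)
{
	unsigned int order;

	/* Largest order for which the page count 1UL << order is valid. */
	order = sizeof(size_t) * NBBY - 1;
	printf("max page-count order: %u -> %zu pages\n",
	    order, (size_t)1 << order);

	/* Largest order for which PAGE_SIZE << order fits in size_t. */
	order = sizeof(size_t) * NBBY - PAGE_SHIFT - 1;
	printf("max byte-size order:  %u -> %zu bytes\n",
	    order, (size_t)PAGE_SIZE << order);
	return (0);
}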