diff --git a/share/man/man9/contigmalloc.9 b/share/man/man9/contigmalloc.9
--- a/share/man/man9/contigmalloc.9
+++ b/share/man/man9/contigmalloc.9
@@ -23,7 +23,7 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd October 30, 2018
+.Dd July 22, 2024
 .Dt CONTIGMALLOC 9
 .Os
 .Sh NAME
@@ -113,6 +113,13 @@
 .Fn contigmalloc
 or
 .Fn contigmalloc_domainset .
+Its use is deprecated in favor of
+.Xr free 9 ,
+which no longer requires the caller to know the
+.Fa size
+and also accepts
+.Dv NULL
+as an address.
 .Sh IMPLEMENTATION NOTES
 The
 .Fn contigmalloc
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -116,6 +116,16 @@
 #define	DEBUG_REDZONE_ARG
 #endif
 
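+/*
+ * The slab pointer tracked for an allocation either is a real, suitably
+ * aligned slab pointer (low bits zero) or encodes the allocation size
+ * shifted up by SLAB_COOKIE_SHIFT with a cookie in the low bits naming
+ * the allocation type, e.g. a contigmalloc'ed 3072 byte block is tracked
+ * as (3072 << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_CONTIG_MALLOC.
+ */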
+typedef	enum {
+	SLAB_COOKIE_SLAB_PTR		= 0x0,
+	SLAB_COOKIE_MALLOC_LARGE	= 0x1,
+	SLAB_COOKIE_CONTIG_MALLOC	= 0x2,
+} slab_cookie_t;
+#define	SLAB_COOKIE_MASK		0x3
+#define	SLAB_COOKIE_SHIFT		2
+#define	GET_SLAB_COOKIE(_slab)						\
+    ((slab_cookie_t)(uintptr_t)(_slab) & SLAB_COOKIE_MASK)
+
 /*
  * When realloc() is called, if the new size is sufficiently smaller than
  * the old size, realloc() will allocate a new, smaller block to avoid
@@ -451,6 +461,21 @@
  *	If M_NOWAIT is set, this routine will not block and return NULL if
  *	the allocation fails.
  */
+#define	IS_CONTIG_MALLOC(_slab)						\
+    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_CONTIG_MALLOC)
+#define	CONTIG_MALLOC_SLAB(_size)					\
+    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_CONTIG_MALLOC))
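+/* Recover the originally requested size from a contigmalloc cookie. */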
+static inline size_t
+contigmalloc_size(uma_slab_t slab)
+{
+	uintptr_t va;
+
+	KASSERT(IS_CONTIG_MALLOC(slab),
+	    ("%s: called on non-contigmalloc allocation: %p", __func__, slab));
+	va = (uintptr_t)slab;
+	return (va >> SLAB_COOKIE_SHIFT);
+}
+
 void *
 contigmalloc(unsigned long size, struct malloc_type *type, int flags,
     vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
@@ -460,8 +485,11 @@
 
 	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
 	    boundary, VM_MEMATTR_DEFAULT);
-	if (ret != NULL)
+	if (ret != NULL) {
+		/* Use low bits unused for slab pointers. */
+		vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
 		malloc_type_allocated(type, round_page(size));
+	}
 	return (ret);
 }
 
@@ -474,25 +502,28 @@
 
 	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
 	    alignment, boundary, VM_MEMATTR_DEFAULT);
-	if (ret != NULL)
+	if (ret != NULL) {
+		/* Use low bits unused for slab pointers. */
+		vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
 		malloc_type_allocated(type, round_page(size));
+	}
 	return (ret);
 }
 
 /*
- *	contigfree:
+ *	contigfree (deprecated).
  *
  *	Free a block of memory allocated by contigmalloc.
  *
  *	This routine may not block.
  */
 void
-contigfree(void *addr, unsigned long size, struct malloc_type *type)
+contigfree(void *addr, unsigned long size __unused, struct malloc_type *type)
 {
-
-	kmem_free(addr, size);
-	malloc_type_freed(type, round_page(size));
+	free(addr, type);
 }
+#undef	IS_CONTIG_MALLOC
+#undef	CONTIG_MALLOC_SLAB
 
 #ifdef MALLOC_DEBUG
 static int
@@ -562,22 +593,19 @@
 /*
  * Handle large allocations and frees by using kmem_malloc directly.
  */
-static inline bool
-malloc_large_slab(uma_slab_t slab)
-{
-	uintptr_t va;
-
-	va = (uintptr_t)slab;
-	return ((va & 1) != 0);
-}
-
+#define	IS_MALLOC_LARGE(_slab)						\
+    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_MALLOC_LARGE)
+#define	MALLOC_LARGE_SLAB(_size)					\
+    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_MALLOC_LARGE))
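+/* Recover the page-rounded allocation size stored by malloc_large(). */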
 static inline size_t
 malloc_large_size(uma_slab_t slab)
 {
 	uintptr_t va;
 
 	va = (uintptr_t)slab;
-	return (va >> 1);
+	KASSERT(IS_MALLOC_LARGE(slab),
+	    ("%s: called on non-malloc_large allocation: %p", __func__, slab));
+	return (va >> SLAB_COOKIE_SHIFT);
 }
 
 static caddr_t __noinline
@@ -589,8 +617,8 @@
 	size = roundup(size, PAGE_SIZE);
 	va = kmem_malloc_domainset(policy, size, flags);
 	if (va != NULL) {
-		/* The low bit is unused for slab pointers. */
-		vsetzoneslab((uintptr_t)va, NULL, (void *)((size << 1) | 1));
+		/* Use low bits unused for slab pointers. */
+		vsetzoneslab((uintptr_t)va, NULL, MALLOC_LARGE_SLAB(size));
 		uma_total_inc(size);
 	}
 	malloc_type_allocated(mtp, va == NULL ? 0 : size);
@@ -613,6 +641,8 @@
 	kmem_free(addr, size);
 	uma_total_dec(size);
 }
+#undef	IS_MALLOC_LARGE
+#undef	MALLOC_LARGE_SLAB
 
 /*
  *	malloc:
@@ -914,18 +944,30 @@
 
 	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
 	if (slab == NULL)
-		panic("free: address %p(%p) has not been allocated.\n",
+		panic("free: address %p(%p) has not been allocated",
 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
-	if (__predict_true(!malloc_large_slab(slab))) {
+	switch (GET_SLAB_COOKIE(slab)) {
+	case __predict_true(SLAB_COOKIE_SLAB_PTR):
 		size = zone->uz_size;
 #if defined(INVARIANTS) && !defined(KASAN)
 		free_save_type(addr, mtp, size);
 #endif
 		uma_zfree_arg(zone, addr, slab);
-	} else {
+		break;
+	case SLAB_COOKIE_MALLOC_LARGE:
 		size = malloc_large_size(slab);
 		free_large(addr, size);
+		break;
+	case SLAB_COOKIE_CONTIG_MALLOC:
+		size = contigmalloc_size(slab);
+		kmem_free(addr, size);
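+		/* Round up for the page-based malloc_type accounting. */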
+		size = round_page(size);
+		break;
+	default:
+		panic("%s: addr %p slab %p with unknown cookie %d", __func__,
+		    addr, slab, GET_SLAB_COOKIE(slab));
+		/* NOTREACHED */
 	}
 	malloc_type_freed(mtp, size);
 }
@@ -957,7 +999,8 @@
 		panic("free: address %p(%p) has not been allocated.\n",
 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
-	if (__predict_true(!malloc_large_slab(slab))) {
+	switch (GET_SLAB_COOKIE(slab)) {
+	case __predict_true(SLAB_COOKIE_SLAB_PTR):
 		size = zone->uz_size;
 #if defined(INVARIANTS) && !defined(KASAN)
 		free_save_type(addr, mtp, size);
@@ -965,11 +1008,22 @@
 		kasan_mark(addr, size, size, 0);
 		explicit_bzero(addr, size);
 		uma_zfree_arg(zone, addr, slab);
-	} else {
+		break;
+	case SLAB_COOKIE_MALLOC_LARGE:
 		size = malloc_large_size(slab);
 		kasan_mark(addr, size, size, 0);
 		explicit_bzero(addr, size);
 		free_large(addr, size);
+		break;
+	case SLAB_COOKIE_CONTIG_MALLOC:
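+		/* Zero the entire pages backing the allocation. */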
+		size = round_page(contigmalloc_size(slab));
+		explicit_bzero(addr, size);
+		kmem_free(addr, size);
+		break;
+	default:
+		panic("%s: addr %p slab %p with unknown cookie %d", __func__,
+		    addr, slab, GET_SLAB_COOKIE(slab));
+		/* NOTREACHED */
 	}
 	malloc_type_freed(mtp, size);
 }
@@ -1016,10 +1070,20 @@
 	    ("realloc: address %p out of range", (void *)addr));
 
 	/* Get the size of the original block */
-	if (!malloc_large_slab(slab))
+	switch (GET_SLAB_COOKIE(slab)) {
+	case __predict_true(SLAB_COOKIE_SLAB_PTR):
 		alloc = zone->uz_size;
-	else
+		break;
+	case SLAB_COOKIE_MALLOC_LARGE:
 		alloc = malloc_large_size(slab);
+		break;
+	default:
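+		/* Reallocating a contigmalloc allocation is not supported. */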
+#ifdef INVARIANTS
+		panic("%s: called for addr %p of unsupported allocation type; "
+		    "slab %p cookie %d", __func__, addr, slab,
+		    GET_SLAB_COOKIE(slab));
+#endif
+		return (NULL);
+	}
 
 	/* Reuse the original block if appropriate */
 	if (size <= alloc &&
@@ -1101,10 +1165,18 @@
 		panic("malloc_usable_size: address %p(%p) is not allocated.\n",
 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
 
-	if (!malloc_large_slab(slab))
+	switch (GET_SLAB_COOKIE(slab)) {
+	case __predict_true(SLAB_COOKIE_SLAB_PTR):
 		size = zone->uz_size;
-	else
+		break;
+	case SLAB_COOKIE_MALLOC_LARGE:
 		size = malloc_large_size(slab);
+		break;
+	default:
+		__assert_unreachable();
+		size = 0;
+		break;
+	}
 #endif
 
 	/*