diff --git a/sys/compat/linuxkpi/common/include/linux/gfp.h b/sys/compat/linuxkpi/common/include/linux/gfp.h
--- a/sys/compat/linuxkpi/common/include/linux/gfp.h
+++ b/sys/compat/linuxkpi/common/include/linux/gfp.h
@@ -81,6 +81,11 @@
 CTASSERT((__GFP_DMA32 & GFP_NATIVE_MASK) == 0);
 CTASSERT((__GFP_BITS_MASK & GFP_NATIVE_MASK) == GFP_NATIVE_MASK);
 
+struct page_frag_cache {
+	void *va;
+	int pagecnt_bias;
+};
+
 /*
  * Resolve a page into a virtual address:
  *
@@ -95,6 +100,9 @@
  */
 extern vm_page_t linux_alloc_pages(gfp_t flags, unsigned int order);
 extern void linux_free_pages(vm_page_t page, unsigned int order);
+void *linuxkpi_page_frag_alloc(struct page_frag_cache *, size_t, gfp_t);
+void linuxkpi_page_frag_free(void *);
+void linuxkpi__page_frag_cache_drain(struct page *, size_t);
 
 static inline struct page *
 alloc_page(gfp_t flags)
@@ -176,6 +184,27 @@
 	linux_free_kmem(addr, 0);
 }
 
+static inline void *
+page_frag_alloc(struct page_frag_cache *pfc, size_t fragsz, gfp_t gfp)
+{
+
+	return (linuxkpi_page_frag_alloc(pfc, fragsz, gfp));
+}
+
+static inline void
+page_frag_free(void *addr)
+{
+
+	linuxkpi_page_frag_free(addr);
+}
+
+static inline void
+__page_frag_cache_drain(struct page *page, size_t count)
+{
+
+	linuxkpi__page_frag_cache_drain(page, count);
+}
+
 static inline bool
 gfpflags_allow_blocking(const gfp_t gfp_flags)
 {
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -429,3 +429,50 @@
 	free(mrdesc, M_LKMTRR);
 #endif
 }
+
+/*
+ * This is a highly simplified version of the Linux page_frag_cache.
+ * We only support fragment sizes of up to one page and we will always
+ * return a full page.  This may be wasteful on small objects but the
+ * only known consumer (mt76) is either asking for a half-page or a
+ * full page.  If this were to become a problem we could implement a
+ * more elaborate version.
+ */
+void *
+linuxkpi_page_frag_alloc(struct page_frag_cache *pfc,
+    size_t fragsz, gfp_t gfp)
+{
+	vm_page_t pages;
+
+	if (fragsz == 0)
+		return (NULL);
+
+	KASSERT(fragsz <= PAGE_SIZE, ("%s: fragsz %zu > PAGE_SIZE not yet "
+	    "supported", __func__, fragsz));
+
+	pages = alloc_pages(gfp, flsl(howmany(fragsz, PAGE_SIZE) - 1));
+	if (pages == NULL)
+		return (NULL);
+	pfc->va = linux_page_address(pages);
+
+	/* Passed in as "count" to __page_frag_cache_drain().  Unused by us. */
+	pfc->pagecnt_bias = 0;
+
+	return (pfc->va);
+}
+
+void
+linuxkpi_page_frag_free(void *addr)
+{
+	vm_page_t page;
+
+	page = PHYS_TO_VM_PAGE(vtophys(addr));
+	linux_free_pages(page, 0);
+}
+
+void
+linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
+{
+
+	linux_free_pages(page, 0);
+}
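
For reviewers unfamiliar with the Linux API being shimmed, below is a
minimal consumer sketch of how a driver such as mt76 would exercise
these entry points through the LinuxKPI.  The example_* names are
invented for illustration and are not part of this change.  Because
this implementation does no reference counting, each page handed out
must be released exactly once, either via page_frag_free() or via
__page_frag_cache_drain().

/*
 * Hypothetical consumer sketch (not part of the change); the headers
 * below are the usual LinuxKPI providers of these symbols.
 */
#include <linux/gfp.h>
#include <linux/mm.h>		/* virt_to_page() */

static void *
example_rx_buf_alloc(struct page_frag_cache *pfc)
{

	/*
	 * The shim hands back a fresh full page even for a half-page
	 * request; pfc->va tracks only the most recent allocation.
	 */
	return (page_frag_alloc(pfc, PAGE_SIZE / 2, GFP_ATOMIC));
}

static void
example_rx_buf_complete(void *buf)
{

	/* Fragments consumed by the stack are released individually. */
	page_frag_free(buf);
}

static void
example_rx_teardown(struct page_frag_cache *pfc)
{

	/*
	 * At teardown, drain the page the cache still holds.  The
	 * pagecnt_bias argument is ignored by this implementation and
	 * the page is freed outright, so only drain a page that was
	 * not already returned via page_frag_free().
	 */
	if (pfc->va != NULL)
		__page_frag_cache_drain(virt_to_page(pfc->va),
		    pfc->pagecnt_bias);
}

Since every page_frag_alloc() call overwrites pfc->va, a consumer that
keeps several fragments in flight has to track each pointer itself;
the cache only remembers the most recent page.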