Index: sys/kern/uipc_ktls.c
===================================================================
--- sys/kern/uipc_ktls.c
+++ sys/kern/uipc_ktls.c
@@ -95,6 +95,7 @@
 LIST_HEAD(, ktls_crypto_backend) ktls_backends;
 static struct rmlock ktls_backends_lock;
 static uma_zone_t ktls_session_zone;
+static uma_zone_t ktls_buffer_zone;
 static uint16_t ktls_cpuid_lookup[MAXCPU];
 
 SYSCTL_NODE(_kern_ipc, OID_AUTO, tls, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
@@ -116,7 +117,7 @@
     "Bind crypto threads to cores (1) or cores and domains (2) at boot");
 
 static u_int ktls_maxlen = 16384;
-SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RWTUN,
+SYSCTL_UINT(_kern_ipc_tls, OID_AUTO, maxlen, CTLFLAG_RDTUN,
     &ktls_maxlen, 0, "Maximum TLS record size");
 
 static int ktls_number_threads;
@@ -366,6 +367,35 @@
 }
 #endif
 
+static int
+ktls_buffer_import(void *arg, void **store, int count, int domain, int flags)
+{
+	vm_page_t m;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		m = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_NORMAL |
+		    VM_ALLOC_NODUMP | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+		if (m == NULL)
+			break;
+		store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+	}
+	return (i);
+}
+
+static void
+ktls_buffer_release(void *arg __unused, void **store, int count)
+{
+	vm_page_t m;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
+		(void)vm_page_unwire_noq(m);
+		vm_page_free(m);
+	}
+}
+
 static void
 ktls_init(void *dummy __unused)
 {
@@ -382,8 +412,11 @@
 
 	ktls_session_zone = uma_zcreate("ktls_session",
 	    sizeof(struct ktls_session),
-	    NULL, NULL, NULL, NULL,
-	    UMA_ALIGN_CACHE, 0);
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
+
+	ktls_buffer_zone = uma_zcache_create("ktls_buffers", PAGE_SIZE,
+	    NULL, NULL, NULL, NULL, ktls_buffer_import, ktls_buffer_release,
+	    NULL, UMA_ZONE_FIRSTTOUCH);
 
 	/*
 	 * Initialize the workqueues to run the TLS work.  We create a
@@ -2005,6 +2038,19 @@
 	counter_u64_add(ktls_cnt_tx_queued, 1);
 }
 
+static void
+ktls_free_mext(struct mbuf *m)
+{
+	void *buf;
+	int i;
+
+	M_ASSERTEXTPG(m);
+	for (i = 0; i < m->m_epg_npgs; i++) {
+		buf = (void *)PHYS_TO_DMAP(m->m_epg_pa[i]);
+		uma_zfree(ktls_buffer_zone, buf);
+	}
+}
+
 static __noinline void
 ktls_encrypt(struct mbuf *top)
 {
@@ -2014,7 +2060,7 @@
 	vm_paddr_t parray[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
 	struct iovec src_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
 	struct iovec dst_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
-	vm_page_t pg;
+	void *buf;
 	int error, i, len, npages, off, total_pages;
 	bool is_anon;
 
@@ -2072,24 +2118,17 @@
 			len = m_epg_pagelen(m, i, off);
 			src_iov[i].iov_len = len;
 			src_iov[i].iov_base =
-			    (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]) +
-			    off;
+			    (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]) + off;
 
 			if (is_anon) {
 				dst_iov[i].iov_base = src_iov[i].iov_base;
 				dst_iov[i].iov_len = src_iov[i].iov_len;
 				continue;
 			}
-retry_page:
-			pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-			    VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP | VM_ALLOC_WIRED);
-			if (pg == NULL) {
-				vm_wait(NULL);
-				goto retry_page;
-			}
-			parray[i] = VM_PAGE_TO_PHYS(pg);
-			dst_iov[i].iov_base =
-			    (char *)(void *)PHYS_TO_DMAP(parray[i]) + off;
+
+			buf = uma_zalloc(ktls_buffer_zone, M_WAITOK);
+			parray[i] = DMAP_TO_PHYS((vm_offset_t)buf);
+			dst_iov[i].iov_base = (char *)buf + off;
 			dst_iov[i].iov_len = len;
 		}
 
@@ -2121,8 +2160,8 @@
 		for (i = 0; i < m->m_epg_npgs; i++)
 			m->m_epg_pa[i] = parray[i];
 
-		/* Use the basic free routine. */
-		m->m_ext.ext_free = mb_free_mext_pgs;
+		/* Ensure that the pages are freed to the cache zone. */
+		m->m_ext.ext_free = ktls_free_mext;
 
 		/* Pages are now writable. */
 		m->m_epg_flags |= EPG_FLAG_ANON;
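For reference, the effect of the change is to replace the per-page vm_page_alloc() retry loop with a UMA cache zone, so steady-state encryption recycles wired, direct-mapped pages through UMA's per-CPU caches instead of hitting the physical page allocator for every record. A minimal sketch of the resulting allocation pattern, reusing ktls_buffer_zone and the callbacks defined above (illustrative only, assuming a kernel context where the DMAP macros are available):

	void *buf;
	vm_paddr_t pa;

	/*
	 * uma_zalloc() is satisfied from the per-CPU bucket cache when
	 * possible; only on a miss does UMA invoke ktls_buffer_import()
	 * to wire fresh pages.  M_WAITOK sleeps on shortage, replacing
	 * the old vm_wait()/goto retry_page loop.
	 */
	buf = uma_zalloc(ktls_buffer_zone, M_WAITOK);
	pa = DMAP_TO_PHYS((vm_offset_t)buf);	/* physical address for m_epg_pa[] */

	/* ... encrypt the TLS record into buf ... */

	/*
	 * The page stays wired and mapped while cached; it is only
	 * unwired and freed by ktls_buffer_release() when UMA trims
	 * the cache.
	 */
	uma_zfree(ktls_buffer_zone, buf);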