Index: sys/netinet/tcp_hostcache.c
===================================================================
--- sys/netinet/tcp_hostcache.c
+++ sys/netinet/tcp_hostcache.c
@@ -2,6 +2,7 @@
  * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
+ * Copyright (c) 2021 Gleb Smirnoff <glebius@FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -45,18 +46,18 @@
  * more lightwight and only carries information related to packet forwarding.
  *
  * tcp_hostcache is designed for multiple concurrent access in SMP
- * environments and high contention. All bucket rows have their own lock and
- * thus multiple lookups and modifies can be done at the same time as long as
- * they are in different bucket rows. If a request for insertion of a new
- * record can't be satisfied, it simply returns an empty structure. Nobody
- * and nothing outside of tcp_hostcache.c will ever point directly to any
- * entry in the tcp_hostcache. All communication is done in an
- * object-oriented way and only functions of tcp_hostcache will manipulate
- * hostcache entries. Otherwise, we are unable to achieve good behaviour in
- * concurrent access situations. Since tcp_hostcache is only caching
- * information, there are no fatal consequences if we either can't satisfy
- * any particular request or have to drop/overwrite an existing entry because
- * of bucket limit memory constrains.
+ * environments and high contention. It is a straight hash table. Each
+ * bucket row is protected by its own lock for modification, while readers
+ * are protected by SMR. This places certain restrictions on writers: a
+ * writer shall only insert a fully populated entry into a row, a writer
+ * can't reuse the least-used entry when a row is full, and value updates
+ * to an entry shall be atomic.
+ *
+ * TCP stacks communicate with tcp_hostcache via the KBI functions
+ * tcp_hc_*() and the hc_metrics_lite structure.
+ *
+ * Since tcp_hostcache only caches information, there are no fatal
+ * consequences if we either can't allocate a new entry, have to drop an
+ * existing entry, or return somewhat stale information.
 */

/*
@@ -79,6 +80,7 @@
 #include <sys/malloc.h>
 #include <sys/proc.h>
 #include <sys/sbuf.h>
+#include <sys/smr.h>
 #include <sys/socket.h>
 #include <sys/socketvar.h>
 #include <sys/sysctl.h>
@@ -92,18 +94,15 @@
 #include <vm/uma.h>

-TAILQ_HEAD(hc_qhead, hc_metrics);
-
 struct hc_head {
-	struct hc_qhead	hch_bucket;
+	CK_SLIST_HEAD(hc_qhead, hc_metrics) hch_bucket;
 	u_int		hch_length;
 	struct mtx	hch_mtx;
 };

 struct hc_metrics {
 	/* housekeeping */
-	TAILQ_ENTRY(hc_metrics) rmx_q;
-	struct hc_head	*rmx_head;	/* head of bucket tail queue */
+	CK_SLIST_ENTRY(hc_metrics) rmx_q;
 	struct in_addr	ip4;		/* IP address */
 	struct in6_addr	ip6;		/* IP6 address */
 	uint32_t	ip6_zoneid;	/* IPv6 scope zone id */
@@ -126,6 +125,7 @@
 struct tcp_hostcache {
 	struct hc_head	*hashbase;
 	uma_zone_t	zone;
+	smr_t		smr;
 	u_int		hashsize;
 	u_int		hashmask;
 	u_int		hashsalt;
@@ -149,8 +149,8 @@
 VNET_DEFINE_STATIC(struct callout, tcp_hc_callout);
 #define	V_tcp_hc_callout	VNET(tcp_hc_callout)

-static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *, bool);
-static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
+static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *, bool,
+    struct hc_head **, struct hc_metrics **);
 static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
 static int sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS);
 static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
@@ -270,7 +270,7 @@
	/*
	 * Initialize the hash buckets.
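+	 * Each row pairs its CK_SLIST bucket with a private mutex: writers
+	 * serialize per row on hch_mtx, while readers traverse a row within
+	 * smr_enter()/smr_exit() and never take the lock.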
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
-		TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
+		CK_SLIST_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
		    NULL, MTX_DEF);
	}
@@ -281,8 +281,9 @@
	 */
	V_tcp_hostcache.zone = uma_zcreate("hostcache", sizeof(struct hc_metrics),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
+	V_tcp_hostcache.smr = uma_zone_get_smr(V_tcp_hostcache.zone);

	/*
	 * Set up periodic cache cleanup.
@@ -313,13 +314,20 @@
 #endif

 /*
- * Internal function: look up an entry in the hostcache or return NULL.
+ * Internal function: look up an entry in the hostcache.
  *
- * If an entry has been returned, the caller becomes responsible for
- * unlocking the bucket row after he is done reading/modifying the entry.
+ * When called for an update, returns with the bucket row locked and
+ * provides pointers to the hash row and to the previous element, with the
+ * following semantics:
+ * - on success, prevp points to the element preceding the found one,
+ *   unless the found element is the very first in the row;
+ * - on failure, prevp points to the next-to-last element, unless the
+ *   hash row holds fewer than two elements.
+ *
+ * When called for a read, returns within an SMR read section.
  */
 static struct hc_metrics *
-tcp_hc_lookup(struct in_conninfo *inc, bool update)
+tcp_hc_lookup(struct in_conninfo *inc, bool update, struct hc_head **headp,
+    struct hc_metrics **prevp)
 {
	int hash;
	struct hc_head *hc_head;
@@ -337,17 +345,19 @@

	hc_head = &V_tcp_hostcache.hashbase[hash];

-	/*
-	 * Acquire lock for this bucket row; we release the lock if we don't
-	 * find an entry, otherwise the caller has to unlock after he is
-	 * done.
-	 */
-	THC_LOCK(hc_head);
+	if (update) {
+		KASSERT(headp != NULL && prevp != NULL,
+		    ("%s: NULL headp or prevp", __func__));
+		*headp = hc_head;
+		*prevp = NULL;
+		THC_LOCK(hc_head);
+	} else
+		smr_enter(V_tcp_hostcache.smr);

	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
-	TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
+	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (inc->inc_flags & INC_ISIPV6) {
			/* XXX: check ip6_zoneid */
			if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
@@ -358,12 +368,10 @@
			    sizeof(inc->inc_faddr)) == 0)
				goto found;
		}
+		if (update && CK_SLIST_NEXT(hc_entry, rmx_q) != NULL)
+			*prevp = hc_entry;
	}

-	/*
-	 * We were unsuccessful and didn't find anything.
-	 */
-	THC_UNLOCK(hc_head);
	return (NULL);

found:
@@ -373,109 +381,8 @@
 #ifdef	TCP_HC_COUNTERS
	if (update)
		hc_entry->rmx_updates++;
	else
		hc_entry->rmx_hits++;
 #endif
-	hc_entry->rmx_expire = V_tcp_hostcache.expire;
-
-	return (hc_entry);
-}
-
-/*
- * Internal function: insert an entry into the hostcache or return NULL if
- * unable to allocate a new one.
- *
- * If an entry has been returned, the caller becomes responsible for
- * unlocking the bucket row after he is done reading/modifying the entry.
- */
-static struct hc_metrics *
-tcp_hc_insert(struct in_conninfo *inc)
-{
-	int hash;
-	struct hc_head *hc_head;
-	struct hc_metrics *hc_entry;
-
-	KASSERT(inc != NULL, ("%s: NULL in_conninfo", __func__));
-
-	/*
-	 * Hash the foreign ip address.
-	 */
-	if (inc->inc_flags & INC_ISIPV6)
-		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
-	else
-		hash = HOSTCACHE_HASH(&inc->inc_faddr);
-
-	hc_head = &V_tcp_hostcache.hashbase[hash];
-
-	/*
-	 * Acquire lock for this bucket row; we release the lock if we don't
-	 * find an entry, otherwise the caller has to unlock after he is
-	 * done.
-	 */
-	THC_LOCK(hc_head);
-
-	/*
-	 * If the bucket limit is reached, reuse the least-used element.
-	 */
-	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
-	    atomic_load_int(&V_tcp_hostcache.cache_count) >= V_tcp_hostcache.cache_limit) {
-		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
-		/*
-		 * At first we were dropping the last element, just to
-		 * reacquire it in the next two lines again, which isn't very
-		 * efficient.  Instead just reuse the least used element.
-		 * We may drop something that is still "in-use" but we can be
-		 * "lossy".
-		 * Just give up if this bucket row is empty and we don't have
-		 * anything to replace.
-		 */
-		if (hc_entry == NULL) {
-			THC_UNLOCK(hc_head);
-			return (NULL);
-		}
-		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
-		KASSERT(V_tcp_hostcache.hashbase[hash].hch_length > 0 &&
-		    V_tcp_hostcache.hashbase[hash].hch_length <=
-		    V_tcp_hostcache.bucket_limit,
-		    ("tcp_hostcache: bucket length range violated at %u: %u",
-		    hash, V_tcp_hostcache.hashbase[hash].hch_length));
-		V_tcp_hostcache.hashbase[hash].hch_length--;
-		atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
-		TCPSTAT_INC(tcps_hc_bucketoverflow);
-#if 0
-		uma_zfree(V_tcp_hostcache.zone, hc_entry);
-#endif
-	} else {
-		/*
-		 * Allocate a new entry, or balk if not possible.
-		 */
-		hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
-		if (hc_entry == NULL) {
-			THC_UNLOCK(hc_head);
-			return (NULL);
-		}
-	}
-
-	/*
-	 * Initialize basic information of hostcache entry.
-	 */
-	bzero(hc_entry, sizeof(*hc_entry));
-	if (inc->inc_flags & INC_ISIPV6) {
-		hc_entry->ip6 = inc->inc6_faddr;
-		hc_entry->ip6_zoneid = inc->inc6_zoneid;
-	} else
-		hc_entry->ip4 = inc->inc_faddr;
-	hc_entry->rmx_head = hc_head;
-	hc_entry->rmx_expire = V_tcp_hostcache.expire;
-
-	/*
-	 * Put it upfront.
-	 */
-	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
-	V_tcp_hostcache.hashbase[hash].hch_length++;
-	KASSERT(V_tcp_hostcache.hashbase[hash].hch_length <
-	    V_tcp_hostcache.bucket_limit,
-	    ("tcp_hostcache: bucket length too high at %u: %u",
-	    hash, V_tcp_hostcache.hashbase[hash].hch_length));
-	atomic_add_int(&V_tcp_hostcache.cache_count, 1);
-	TCPSTAT_INC(tcps_hc_added);
+	if (hc_entry->rmx_expire != V_tcp_hostcache.expire)
+		atomic_store_int(&hc_entry->rmx_expire, V_tcp_hostcache.expire);

	return (hc_entry);
}
@@ -498,12 +405,13 @@
	/*
	 * Find the right bucket.
	 */
-	hc_entry = tcp_hc_lookup(inc, false);
+	hc_entry = tcp_hc_lookup(inc, false, NULL, NULL);

	/*
	 * If we don't have an existing object.
	 */
	if (hc_entry == NULL) {
+		smr_exit(V_tcp_hostcache.smr);
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}
@@ -516,10 +424,7 @@
	hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
	hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

-	/*
-	 * Unlock bucket row.
-	 */
-	THC_UNLOCK(hc_entry->rmx_head);
+	smr_exit(V_tcp_hostcache.smr);
}

/*
@@ -536,13 +441,15 @@
	if (!V_tcp_use_hostcache)
		return (0);

-	hc_entry = tcp_hc_lookup(inc, false);
+	hc_entry = tcp_hc_lookup(inc, false, NULL, NULL);
	if (hc_entry == NULL) {
+		smr_exit(V_tcp_hostcache.smr);
		return (0);
	}

	mtu = hc_entry->rmx_mtu;
-	THC_UNLOCK(hc_entry->rmx_head);
+	smr_exit(V_tcp_hostcache.smr);
+
	return (mtu);
}
@@ -565,75 +472,155 @@
 void
 tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
 {
-	struct hc_metrics *hc_entry;
+	struct hc_head *hc_head;
+	struct hc_metrics *hc_entry, *hc_prev;
+	uint32_t v;
+	bool new;

	if (!V_tcp_use_hostcache)
		return;

-	hc_entry = tcp_hc_lookup(inc, true);
+	hc_entry = tcp_hc_lookup(inc, true, &hc_head, &hc_prev);
	if (hc_entry == NULL) {
-		hc_entry = tcp_hc_insert(inc);
-		if (hc_entry == NULL)
+		/*
+		 * Try to allocate a new entry.  If the bucket limit is
+		 * reached, delete the least-used element, located at the end
+		 * of the CK_SLIST.  Give up if the row is empty.
+		 *
+		 * tcp_hc_lookup() has provided us with a pointer to the
+		 * next-to-last element, in case the list has at least two
+		 * elements.
+		 */
+		if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
+		    atomic_load_int(&V_tcp_hostcache.cache_count) >=
+		    V_tcp_hostcache.cache_limit) {
+			if (hc_prev != NULL) {
+				hc_entry = CK_SLIST_NEXT(hc_prev, rmx_q);
+				KASSERT(CK_SLIST_NEXT(hc_entry, rmx_q) == NULL,
+				    ("%s: %p is not the next-to-last",
+				    __func__, hc_prev));
+				CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
+			} else if ((hc_entry =
+			    CK_SLIST_FIRST(&hc_head->hch_bucket)) != NULL) {
+				KASSERT(CK_SLIST_NEXT(hc_entry, rmx_q) == NULL,
+				    ("%s: %p is not the only element",
+				    __func__, hc_entry));
+				CK_SLIST_REMOVE_HEAD(&hc_head->hch_bucket,
+				    rmx_q);
+			} else {
+				THC_UNLOCK(hc_head);
+				return;
+			}
+			KASSERT(hc_head->hch_length > 0 &&
+			    hc_head->hch_length <= V_tcp_hostcache.bucket_limit,
+			    ("tcp_hostcache: bucket length violated at %p",
+			    hc_head));
+			hc_head->hch_length--;
+			atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
+			TCPSTAT_INC(tcps_hc_bucketoverflow);
+			uma_zfree_smr(V_tcp_hostcache.zone, hc_entry);
+		}

+		/*
+		 * Allocate a new entry, or balk if not possible.
+		 */
+		hc_entry = uma_zalloc_smr(V_tcp_hostcache.zone, M_NOWAIT);
+		if (hc_entry == NULL) {
+			THC_UNLOCK(hc_head);
			return;
-	}
+		}

+		/*
+		 * Initialize basic information of hostcache entry.
+		 */
+		bzero(hc_entry, sizeof(*hc_entry));
+		if (inc->inc_flags & INC_ISIPV6) {
+			hc_entry->ip6 = inc->inc6_faddr;
+			hc_entry->ip6_zoneid = inc->inc6_zoneid;
+		} else
+			hc_entry->ip4 = inc->inc_faddr;
+		hc_entry->rmx_expire = V_tcp_hostcache.expire;
+		new = true;
+	} else
+		new = false;

+	/*
+	 * Fill in data.  Use atomics, since an existing entry is
+	 * accessible by readers within the SMR section.
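+	 * A reader racing an update may observe a mix of old and new values
+	 * across different rmx_ fields, which is harmless for a cache; the
+	 * atomic stores only guarantee that no torn 32-bit value is ever
+	 * visible.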
+ */ if (hcml->rmx_mtu != 0) { - hc_entry->rmx_mtu = hcml->rmx_mtu; + atomic_store_32(&hc_entry->rmx_mtu, hcml->rmx_mtu); } if (hcml->rmx_rtt != 0) { if (hc_entry->rmx_rtt == 0) - hc_entry->rmx_rtt = hcml->rmx_rtt; + v = hcml->rmx_rtt; else - hc_entry->rmx_rtt = ((uint64_t)hc_entry->rmx_rtt + + v = ((uint64_t)hc_entry->rmx_rtt + (uint64_t)hcml->rmx_rtt) / 2; + atomic_store_32(&hc_entry->rmx_rtt, v); TCPSTAT_INC(tcps_cachedrtt); } if (hcml->rmx_rttvar != 0) { - if (hc_entry->rmx_rttvar == 0) - hc_entry->rmx_rttvar = hcml->rmx_rttvar; + if (hc_entry->rmx_rttvar == 0) + v = hcml->rmx_rttvar; else - hc_entry->rmx_rttvar = ((uint64_t)hc_entry->rmx_rttvar + + v = ((uint64_t)hc_entry->rmx_rttvar + (uint64_t)hcml->rmx_rttvar) / 2; + atomic_store_32(&hc_entry->rmx_rttvar, v); TCPSTAT_INC(tcps_cachedrttvar); } if (hcml->rmx_ssthresh != 0) { if (hc_entry->rmx_ssthresh == 0) - hc_entry->rmx_ssthresh = hcml->rmx_ssthresh; + v = hcml->rmx_ssthresh; else - hc_entry->rmx_ssthresh = - (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2; + v = (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2; + atomic_store_32(&hc_entry->rmx_ssthresh, v); TCPSTAT_INC(tcps_cachedssthresh); } if (hcml->rmx_cwnd != 0) { if (hc_entry->rmx_cwnd == 0) - hc_entry->rmx_cwnd = hcml->rmx_cwnd; + v = hcml->rmx_cwnd; else - hc_entry->rmx_cwnd = ((uint64_t)hc_entry->rmx_cwnd + + v = ((uint64_t)hc_entry->rmx_cwnd + (uint64_t)hcml->rmx_cwnd) / 2; + atomic_store_32(&hc_entry->rmx_cwnd, v); /* TCPSTAT_INC(tcps_cachedcwnd); */ } if (hcml->rmx_sendpipe != 0) { if (hc_entry->rmx_sendpipe == 0) - hc_entry->rmx_sendpipe = hcml->rmx_sendpipe; + v = hcml->rmx_sendpipe; else - hc_entry->rmx_sendpipe = - ((uint64_t)hc_entry->rmx_sendpipe + + v = ((uint64_t)hc_entry->rmx_sendpipe + (uint64_t)hcml->rmx_sendpipe) /2; + atomic_store_32(&hc_entry->rmx_sendpipe, v); /* TCPSTAT_INC(tcps_cachedsendpipe); */ } if (hcml->rmx_recvpipe != 0) { if (hc_entry->rmx_recvpipe == 0) - hc_entry->rmx_recvpipe = hcml->rmx_recvpipe; + v = hcml->rmx_recvpipe; else - hc_entry->rmx_recvpipe = - ((uint64_t)hc_entry->rmx_recvpipe + + v = ((uint64_t)hc_entry->rmx_recvpipe + (uint64_t)hcml->rmx_recvpipe) /2; + atomic_store_32(&hc_entry->rmx_recvpipe, v); /* TCPSTAT_INC(tcps_cachedrecvpipe); */ } - TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q); - TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q); - THC_UNLOCK(hc_entry->rmx_head); + /* + * Put it upfront. 
+ */ + if (new) { + CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q); + hc_head->hch_length++; + KASSERT(hc_head->hch_length < V_tcp_hostcache.bucket_limit, + ("tcp_hostcache: bucket length too high at %p", hc_head)); + atomic_add_int(&V_tcp_hostcache.cache_count, 1); + TCPSTAT_INC(tcps_hc_added); + } else if (hc_entry != CK_SLIST_FIRST(&hc_head->hch_bucket)) { + KASSERT(CK_SLIST_NEXT(hc_prev, rmx_q) == hc_entry, + ("%s: %p next is not %p", __func__, hc_prev, hc_entry)); + CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q); + CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q); + } + THC_UNLOCK(hc_head); } /* @@ -683,8 +670,8 @@ #define msec(u) (((u) + 500) / 1000) for (i = 0; i < V_tcp_hostcache.hashsize; i++) { THC_LOCK(&V_tcp_hostcache.hashbase[i]); - TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket, - rmx_q) { + CK_SLIST_FOREACH(hc_entry, + &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q) { sbuf_printf(&sb, "%-15s %5u %8u %6lums %6lums %8u %8u %8u " #ifdef TCP_HC_COUNTERS @@ -770,29 +757,45 @@ static void tcp_hc_purge_internal(int all) { - struct hc_metrics *hc_entry, *hc_next; + struct hc_head *head; + struct hc_metrics *hc_entry, *hc_next, *hc_prev; int i; for (i = 0; i < V_tcp_hostcache.hashsize; i++) { - THC_LOCK(&V_tcp_hostcache.hashbase[i]); - TAILQ_FOREACH_SAFE(hc_entry, - &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) { - KASSERT(V_tcp_hostcache.hashbase[i].hch_length > 0 && - V_tcp_hostcache.hashbase[i].hch_length <= + head = &V_tcp_hostcache.hashbase[i]; + hc_prev = NULL; + THC_LOCK(head); + CK_SLIST_FOREACH_SAFE(hc_entry, &head->hch_bucket, rmx_q, + hc_next) { + KASSERT(head->hch_length > 0 && head->hch_length <= V_tcp_hostcache.bucket_limit, ("tcp_hostcache: " - "bucket length out of range at %u: %u", - i, V_tcp_hostcache.hashbase[i].hch_length)); + "bucket length out of range at %u: %u", i, + head->hch_length)); if (all || hc_entry->rmx_expire <= 0) { - TAILQ_REMOVE( - &V_tcp_hostcache.hashbase[i].hch_bucket, - hc_entry, rmx_q); - uma_zfree(V_tcp_hostcache.zone, hc_entry); + if (hc_prev != NULL) { + KASSERT(hc_entry == + CK_SLIST_NEXT(hc_prev, rmx_q), + ("%s: %p is not next to %p", + __func__, hc_entry, hc_prev)); + CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q); + } else { + KASSERT(hc_entry == + CK_SLIST_FIRST(&head->hch_bucket), + ("%s: %p is not first", + __func__, hc_entry)); + CK_SLIST_REMOVE_HEAD(&head->hch_bucket, + rmx_q); + } + uma_zfree_smr(V_tcp_hostcache.zone, hc_entry); V_tcp_hostcache.hashbase[i].hch_length--; atomic_subtract_int(&V_tcp_hostcache.cache_count, 1); - } else - hc_entry->rmx_expire -= V_tcp_hostcache.prune; + } else { + atomic_subtract_int(&hc_entry->rmx_expire, + V_tcp_hostcache.prune); + hc_prev = hc_entry; + } } - THC_UNLOCK(&V_tcp_hostcache.hashbase[i]); + THC_UNLOCK(head); } }