Index: head/usr.sbin/nscd/cachelib.c =================================================================== --- head/usr.sbin/nscd/cachelib.c (revision 315212) +++ head/usr.sbin/nscd/cachelib.c (revision 315213) @@ -1,1244 +1,1244 @@ /*- * Copyright (c) 2005 Michael Bushkov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include "cachelib.h" #include "debug.h" #define INITIAL_ENTRIES_CAPACITY 32 #define ENTRIES_CAPACITY_STEP 32 #define STRING_SIMPLE_HASH_BODY(in_var, var, a, M) \ for ((var) = 0; *(in_var) != '\0'; ++(in_var)) \ (var) = ((a)*(var) + *(in_var)) % (M) #define STRING_SIMPLE_MP2_HASH_BODY(in_var, var, a, M) \ for ((var) = 0; *(in_var) != 0; ++(in_var)) \ (var) = ((a)*(var) + *(in_var)) & (M - 1) static int cache_elemsize_common_continue_func(struct cache_common_entry_ *, struct cache_policy_item_ *); static int cache_lifetime_common_continue_func(struct cache_common_entry_ *, struct cache_policy_item_ *); static void clear_cache_entry(struct cache_entry_ *); static void destroy_cache_entry(struct cache_entry_ *); static void destroy_cache_mp_read_session(struct cache_mp_read_session_ *); static void destroy_cache_mp_write_session(struct cache_mp_write_session_ *); static int entries_bsearch_cmp_func(const void *, const void *); static int entries_qsort_cmp_func(const void *, const void *); static struct cache_entry_ ** find_cache_entry_p(struct cache_ *, const char *); static void flush_cache_entry(struct cache_entry_ *); static void flush_cache_policy(struct cache_common_entry_ *, struct cache_policy_ *, struct cache_policy_ *, int (*)(struct cache_common_entry_ *, struct cache_policy_item_ *)); static int ht_items_cmp_func(const void *, const void *); static int ht_items_fixed_size_left_cmp_func(const void *, const void *); static hashtable_index_t ht_item_hash_func(const void *, size_t); /* * Hashing and comparing routines, that are used with the hash tables */ static int ht_items_cmp_func(const void *p1, const void *p2) { struct cache_ht_item_data_ *hp1, *hp2; size_t min_size; int result; hp1 = (struct cache_ht_item_data_ *)p1; hp2 = (struct cache_ht_item_data_ *)p2; assert(hp1->key != NULL); assert(hp2->key != NULL); if (hp1->key_size != hp2->key_size) { min_size = (hp1->key_size < hp2->key_size) 
? hp1->key_size : hp2->key_size; result = memcmp(hp1->key, hp2->key, min_size); if (result == 0) return ((hp1->key_size < hp2->key_size) ? -1 : 1); else return (result); } else return (memcmp(hp1->key, hp2->key, hp1->key_size)); } static int ht_items_fixed_size_left_cmp_func(const void *p1, const void *p2) { struct cache_ht_item_data_ *hp1, *hp2; size_t min_size; int result; hp1 = (struct cache_ht_item_data_ *)p1; hp2 = (struct cache_ht_item_data_ *)p2; assert(hp1->key != NULL); assert(hp2->key != NULL); if (hp1->key_size != hp2->key_size) { min_size = (hp1->key_size < hp2->key_size) ? hp1->key_size : hp2->key_size; result = memcmp(hp1->key, hp2->key, min_size); if (result == 0) if (min_size == hp1->key_size) return (0); else return ((hp1->key_size < hp2->key_size) ? -1 : 1); else return (result); } else return (memcmp(hp1->key, hp2->key, hp1->key_size)); } static hashtable_index_t ht_item_hash_func(const void *p, size_t cache_entries_size) { struct cache_ht_item_data_ *hp; size_t i; hashtable_index_t retval; hp = (struct cache_ht_item_data_ *)p; assert(hp->key != NULL); retval = 0; for (i = 0; i < hp->key_size; ++i) retval = (127 * retval + (unsigned char)hp->key[i]) % cache_entries_size; return retval; } HASHTABLE_PROTOTYPE(cache_ht_, cache_ht_item_, struct cache_ht_item_data_); HASHTABLE_GENERATE(cache_ht_, cache_ht_item_, struct cache_ht_item_data_, data, ht_item_hash_func, ht_items_cmp_func); /* * Routines to sort and search the entries by name */ static int entries_bsearch_cmp_func(const void *key, const void *ent) { assert(key != NULL); assert(ent != NULL); return (strcmp((char const *)key, (*(struct cache_entry_ const **)ent)->name)); } static int entries_qsort_cmp_func(const void *e1, const void *e2) { assert(e1 != NULL); assert(e2 != NULL); return (strcmp((*(struct cache_entry_ const **)e1)->name, (*(struct cache_entry_ const **)e2)->name)); } static struct cache_entry_ ** find_cache_entry_p(struct cache_ *the_cache, const char *entry_name) { return ((struct cache_entry_ **)(bsearch(entry_name, the_cache->entries, the_cache->entries_size, sizeof(struct cache_entry_ *), entries_bsearch_cmp_func))); } static void destroy_cache_mp_write_session(struct cache_mp_write_session_ *ws) { struct cache_mp_data_item_ *data_item; TRACE_IN(destroy_cache_mp_write_session); assert(ws != NULL); while (!TAILQ_EMPTY(&ws->items)) { data_item = TAILQ_FIRST(&ws->items); TAILQ_REMOVE(&ws->items, data_item, entries); free(data_item->value); free(data_item); } free(ws); TRACE_OUT(destroy_cache_mp_write_session); } static void destroy_cache_mp_read_session(struct cache_mp_read_session_ *rs) { TRACE_IN(destroy_cache_mp_read_session); assert(rs != NULL); free(rs); TRACE_OUT(destroy_cache_mp_read_session); } static void destroy_cache_entry(struct cache_entry_ *entry) { struct cache_common_entry_ *common_entry; struct cache_mp_entry_ *mp_entry; struct cache_mp_read_session_ *rs; struct cache_mp_write_session_ *ws; struct cache_ht_item_ *ht_item; struct cache_ht_item_data_ *ht_item_data; TRACE_IN(destroy_cache_entry); assert(entry != NULL); if (entry->params->entry_type == CET_COMMON) { common_entry = (struct cache_common_entry_ *)entry; HASHTABLE_FOREACH(&(common_entry->items), ht_item) { HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data) { free(ht_item_data->key); free(ht_item_data->value); } HASHTABLE_ENTRY_CLEAR(ht_item, data); } HASHTABLE_DESTROY(&(common_entry->items), data); /* FIFO policy is always first */ destroy_cache_fifo_policy(common_entry->policies[0]); switch 
(common_entry->common_params.policy) { case CPT_LRU: destroy_cache_lru_policy(common_entry->policies[1]); break; case CPT_LFU: destroy_cache_lfu_policy(common_entry->policies[1]); break; default: break; } free(common_entry->policies); } else { mp_entry = (struct cache_mp_entry_ *)entry; while (!TAILQ_EMPTY(&mp_entry->ws_head)) { ws = TAILQ_FIRST(&mp_entry->ws_head); TAILQ_REMOVE(&mp_entry->ws_head, ws, entries); destroy_cache_mp_write_session(ws); } while (!TAILQ_EMPTY(&mp_entry->rs_head)) { rs = TAILQ_FIRST(&mp_entry->rs_head); TAILQ_REMOVE(&mp_entry->rs_head, rs, entries); destroy_cache_mp_read_session(rs); } if (mp_entry->completed_write_session != NULL) destroy_cache_mp_write_session( mp_entry->completed_write_session); if (mp_entry->pending_write_session != NULL) destroy_cache_mp_write_session( mp_entry->pending_write_session); } free(entry->name); free(entry); TRACE_OUT(destroy_cache_entry); } static void clear_cache_entry(struct cache_entry_ *entry) { struct cache_mp_entry_ *mp_entry; struct cache_common_entry_ *common_entry; struct cache_ht_item_ *ht_item; struct cache_ht_item_data_ *ht_item_data; struct cache_policy_ *policy; struct cache_policy_item_ *item, *next_item; size_t entry_size; unsigned int i; if (entry->params->entry_type == CET_COMMON) { common_entry = (struct cache_common_entry_ *)entry; entry_size = 0; HASHTABLE_FOREACH(&(common_entry->items), ht_item) { HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data) { free(ht_item_data->key); free(ht_item_data->value); } entry_size += HASHTABLE_ENTRY_SIZE(ht_item, data); HASHTABLE_ENTRY_CLEAR(ht_item, data); } common_entry->items_size -= entry_size; for (i = 0; i < common_entry->policies_size; ++i) { policy = common_entry->policies[i]; next_item = NULL; item = policy->get_first_item_func(policy); while (item != NULL) { next_item = policy->get_next_item_func(policy, item); policy->remove_item_func(policy, item); policy->destroy_item_func(item); item = next_item; } } } else { mp_entry = (struct cache_mp_entry_ *)entry; if (mp_entry->rs_size == 0) { if (mp_entry->completed_write_session != NULL) { destroy_cache_mp_write_session( mp_entry->completed_write_session); mp_entry->completed_write_session = NULL; } memset(&mp_entry->creation_time, 0, sizeof(struct timeval)); memset(&mp_entry->last_request_time, 0, sizeof(struct timeval)); } } } /* * When passed to the flush_cache_policy, ensures that all old elements are * deleted. */ static int cache_lifetime_common_continue_func(struct cache_common_entry_ *entry, struct cache_policy_item_ *item) { return ((item->last_request_time.tv_sec - item->creation_time.tv_sec > entry->common_params.max_lifetime.tv_sec) ? 1: 0); } /* * When passed to the flush_cache_policy, ensures that all elements, that * exceed the size limit, are deleted. */ static int cache_elemsize_common_continue_func(struct cache_common_entry_ *entry, struct cache_policy_item_ *item) { return ((entry->items_size > entry->common_params.satisf_elemsize) ? 1 : 0); } /* * Removes the elements from the cache entry, while the continue_func returns 1. 
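 *
 * For illustration only (this predicate is hypothetical, not part of
 * nscd), a continue_func has the same shape as the two functions above;
 * one that keeps flushing items whose last request came more than 60
 * seconds after their creation would read:
 *
 *	static int
 *	age_60s_continue_func(struct cache_common_entry_ *entry,
 *		struct cache_policy_item_ *item)
 *	{
 *		return ((item->last_request_time.tv_sec -
 *		    item->creation_time.tv_sec > 60) ? 1 : 0);
 *	}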
 */
static void
flush_cache_policy(struct cache_common_entry_ *entry,
	struct cache_policy_ *policy,
	struct cache_policy_ *connected_policy,
	int (*continue_func)(struct cache_common_entry_ *,
		struct cache_policy_item_ *))
{
	struct cache_policy_item_ *item, *next_item, *connected_item;
	struct cache_ht_item_ *ht_item;
	struct cache_ht_item_data_ *ht_item_data, ht_key;
	hashtable_index_t hash;

	assert(policy != NULL);

	next_item = NULL;
	item = policy->get_first_item_func(policy);
	while ((item != NULL) && (continue_func(entry, item) == 1)) {
		next_item = policy->get_next_item_func(policy, item);
		connected_item = item->connected_item;
		policy->remove_item_func(policy, item);

		memset(&ht_key, 0, sizeof(struct cache_ht_item_data_));
		ht_key.key = item->key;
		ht_key.key_size = item->key_size;

		hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &entry->items,
		    &ht_key);
		assert(hash < HASHTABLE_ENTRIES_COUNT(&entry->items));

		ht_item = HASHTABLE_GET_ENTRY(&(entry->items), hash);
		ht_item_data = HASHTABLE_ENTRY_FIND(cache_ht_, ht_item,
		    &ht_key);
		assert(ht_item_data != NULL);
		free(ht_item_data->key);
		free(ht_item_data->value);
		HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item, ht_item_data);
		--entry->items_size;

		policy->destroy_item_func(item);
		if (connected_item != NULL) {
			connected_policy->remove_item_func(connected_policy,
			    connected_item);
			connected_policy->destroy_item_func(connected_item);
		}
		item = next_item;
	}
}

static void
flush_cache_entry(struct cache_entry_ *entry)
{
	struct cache_mp_entry_ *mp_entry;
	struct cache_common_entry_ *common_entry;
	struct cache_policy_ *policy, *connected_policy;

	connected_policy = NULL;
	if (entry->params->entry_type == CET_COMMON) {
		common_entry = (struct cache_common_entry_ *)entry;
		if ((common_entry->common_params.max_lifetime.tv_sec != 0) ||
		    (common_entry->common_params.max_lifetime.tv_usec != 0)) {
			policy = common_entry->policies[0];
			if (common_entry->policies_size > 1)
				connected_policy = common_entry->policies[1];

			flush_cache_policy(common_entry, policy,
			    connected_policy,
			    cache_lifetime_common_continue_func);
		}

		if ((common_entry->common_params.max_elemsize != 0) &&
		    common_entry->items_size >
		    common_entry->common_params.max_elemsize) {
			if (common_entry->policies_size > 1) {
				policy = common_entry->policies[1];
				connected_policy = common_entry->policies[0];
			} else {
				policy = common_entry->policies[0];
				connected_policy = NULL;
			}

			flush_cache_policy(common_entry, policy,
			    connected_policy,
			    cache_elemsize_common_continue_func);
		}
	} else {
		mp_entry = (struct cache_mp_entry_ *)entry;
		if ((mp_entry->mp_params.max_lifetime.tv_sec != 0) ||
		    (mp_entry->mp_params.max_lifetime.tv_usec != 0)) {
			if (mp_entry->last_request_time.tv_sec -
			    mp_entry->creation_time.tv_sec >
			    mp_entry->mp_params.max_lifetime.tv_sec)
				clear_cache_entry(entry);
		}
	}
}

struct cache_ *
init_cache(struct cache_params const *params)
{
	struct cache_ *retval;

	TRACE_IN(init_cache);
	assert(params != NULL);

	retval = calloc(1, sizeof(*retval));
	assert(retval != NULL);

	memcpy(&retval->params, params, sizeof(struct cache_params));

-	retval->entries = calloc(1,
-	    sizeof(*retval->entries) * INITIAL_ENTRIES_CAPACITY);
+	retval->entries = calloc(INITIAL_ENTRIES_CAPACITY,
+	    sizeof(*retval->entries));
	assert(retval->entries != NULL);

	retval->entries_capacity = INITIAL_ENTRIES_CAPACITY;
	retval->entries_size = 0;

	TRACE_OUT(init_cache);
	return (retval);
}

void
destroy_cache(struct cache_ *the_cache)
{

	TRACE_IN(destroy_cache);
	assert(the_cache != NULL);

	if (the_cache->entries != NULL) {
		size_t i;
		for (i = 0; i <
the_cache->entries_size; ++i) destroy_cache_entry(the_cache->entries[i]); free(the_cache->entries); } free(the_cache); TRACE_OUT(destroy_cache); } int register_cache_entry(struct cache_ *the_cache, struct cache_entry_params const *params) { int policies_size; size_t entry_name_size; struct cache_common_entry_ *new_common_entry; struct cache_mp_entry_ *new_mp_entry; TRACE_IN(register_cache_entry); assert(the_cache != NULL); if (find_cache_entry(the_cache, params->entry_name) != NULL) { TRACE_OUT(register_cache_entry); return (-1); } if (the_cache->entries_size == the_cache->entries_capacity) { struct cache_entry_ **new_entries; size_t new_capacity; new_capacity = the_cache->entries_capacity + ENTRIES_CAPACITY_STEP; - new_entries = calloc(1, - sizeof(*new_entries) * new_capacity); + new_entries = calloc(new_capacity, + sizeof(*new_entries)); assert(new_entries != NULL); memcpy(new_entries, the_cache->entries, sizeof(struct cache_entry_ *) * the_cache->entries_size); free(the_cache->entries); the_cache->entries = new_entries; } entry_name_size = strlen(params->entry_name) + 1; switch (params->entry_type) { case CET_COMMON: new_common_entry = calloc(1, sizeof(*new_common_entry)); assert(new_common_entry != NULL); memcpy(&new_common_entry->common_params, params, sizeof(struct common_cache_entry_params)); new_common_entry->params = (struct cache_entry_params *)&new_common_entry->common_params; new_common_entry->common_params.cep.entry_name = calloc(1, entry_name_size); assert(new_common_entry->common_params.cep.entry_name != NULL); strlcpy(new_common_entry->common_params.cep.entry_name, params->entry_name, entry_name_size); new_common_entry->name = new_common_entry->common_params.cep.entry_name; HASHTABLE_INIT(&(new_common_entry->items), struct cache_ht_item_data_, data, new_common_entry->common_params.cache_entries_size); if (new_common_entry->common_params.policy == CPT_FIFO) policies_size = 1; else policies_size = 2; - new_common_entry->policies = calloc(1, - sizeof(*new_common_entry->policies) * policies_size); + new_common_entry->policies = calloc(policies_size, + sizeof(*new_common_entry->policies)); assert(new_common_entry->policies != NULL); new_common_entry->policies_size = policies_size; new_common_entry->policies[0] = init_cache_fifo_policy(); if (policies_size > 1) { switch (new_common_entry->common_params.policy) { case CPT_LRU: new_common_entry->policies[1] = init_cache_lru_policy(); break; case CPT_LFU: new_common_entry->policies[1] = init_cache_lfu_policy(); break; default: break; } } new_common_entry->get_time_func = the_cache->params.get_time_func; the_cache->entries[the_cache->entries_size++] = (struct cache_entry_ *)new_common_entry; break; case CET_MULTIPART: new_mp_entry = calloc(1, sizeof(*new_mp_entry)); assert(new_mp_entry != NULL); memcpy(&new_mp_entry->mp_params, params, sizeof(struct mp_cache_entry_params)); new_mp_entry->params = (struct cache_entry_params *)&new_mp_entry->mp_params; new_mp_entry->mp_params.cep.entry_name = calloc(1, entry_name_size); assert(new_mp_entry->mp_params.cep.entry_name != NULL); strlcpy(new_mp_entry->mp_params.cep.entry_name, params->entry_name, entry_name_size); new_mp_entry->name = new_mp_entry->mp_params.cep.entry_name; TAILQ_INIT(&new_mp_entry->ws_head); TAILQ_INIT(&new_mp_entry->rs_head); new_mp_entry->get_time_func = the_cache->params.get_time_func; the_cache->entries[the_cache->entries_size++] = (struct cache_entry_ *)new_mp_entry; break; } qsort(the_cache->entries, the_cache->entries_size, sizeof(struct cache_entry_ *), 
	    entries_qsort_cmp_func);

	TRACE_OUT(register_cache_entry);
	return (0);
}

int
unregister_cache_entry(struct cache_ *the_cache, const char *entry_name)
{
	struct cache_entry_ **del_ent;

	TRACE_IN(unregister_cache_entry);
	assert(the_cache != NULL);

	del_ent = find_cache_entry_p(the_cache, entry_name);
	if (del_ent != NULL) {
		destroy_cache_entry(*del_ent);
		--the_cache->entries_size;

		memmove(del_ent, del_ent + 1,
		    (&(the_cache->entries[the_cache->entries_size]) -
		    del_ent) * sizeof(struct cache_entry_ *));

		TRACE_OUT(unregister_cache_entry);
		return (0);
	} else {
		TRACE_OUT(unregister_cache_entry);
		return (-1);
	}
}

struct cache_entry_ *
find_cache_entry(struct cache_ *the_cache, const char *entry_name)
{
	struct cache_entry_ **result;

	TRACE_IN(find_cache_entry);
	result = find_cache_entry_p(the_cache, entry_name);

	if (result == NULL) {
		TRACE_OUT(find_cache_entry);
		return (NULL);
	} else {
		TRACE_OUT(find_cache_entry);
		return (*result);
	}
}

/*
 * Tries to read the element with the specified key from the cache. If the
 * value_size is too small, it will be filled with the proper number, and
 * the user will need to call cache_read again with a value buffer that is
 * large enough.
 * Function returns 0 on success, -1 on error, and -2 if the value_size is too
 * small.
 */
int
cache_read(struct cache_entry_ *entry, const char *key, size_t key_size,
	char *value, size_t *value_size)
{
	struct cache_common_entry_ *common_entry;
	struct cache_ht_item_data_ item_data, *find_res;
	struct cache_ht_item_ *item;
	hashtable_index_t hash;
	struct cache_policy_item_ *connected_item;

	TRACE_IN(cache_read);
	assert(entry != NULL);
	assert(key != NULL);
	assert(value_size != NULL);
	assert(entry->params->entry_type == CET_COMMON);

	common_entry = (struct cache_common_entry_ *)entry;

	memset(&item_data, 0, sizeof(struct cache_ht_item_data_));
	/* can't avoid the cast here */
	item_data.key = (char *)key;
	item_data.key_size = key_size;

	hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items,
	    &item_data);
	assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items));

	item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash);
	find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data);
	if (find_res == NULL) {
		TRACE_OUT(cache_read);
		return (-1);
	}
	/* pretend that entry was not found if confidence is below threshold */
	if (find_res->confidence <
	    common_entry->common_params.confidence_threshold) {
		TRACE_OUT(cache_read);
		return (-1);
	}

	if ((common_entry->common_params.max_lifetime.tv_sec != 0) ||
	    (common_entry->common_params.max_lifetime.tv_usec != 0)) {

		if (find_res->fifo_policy_item->last_request_time.tv_sec -
		    find_res->fifo_policy_item->creation_time.tv_sec >
		    common_entry->common_params.max_lifetime.tv_sec) {
			free(find_res->key);
			free(find_res->value);

			connected_item =
			    find_res->fifo_policy_item->connected_item;
			if (connected_item != NULL) {
				common_entry->policies[1]->remove_item_func(
				    common_entry->policies[1],
				    connected_item);
				common_entry->policies[1]->destroy_item_func(
				    connected_item);
			}

			common_entry->policies[0]->remove_item_func(
			    common_entry->policies[0],
			    find_res->fifo_policy_item);
			common_entry->policies[0]->destroy_item_func(
			    find_res->fifo_policy_item);

			HASHTABLE_ENTRY_REMOVE(cache_ht_, item, find_res);
			--common_entry->items_size;
		}
	}

	if ((*value_size < find_res->value_size) || (value == NULL)) {
		*value_size = find_res->value_size;
		TRACE_OUT(cache_read);
		return (-2);
	}

	*value_size = find_res->value_size;
	memcpy(value, find_res->value, find_res->value_size);

	++find_res->fifo_policy_item->request_count;
	common_entry->get_time_func(
	    &find_res->fifo_policy_item->last_request_time);
	common_entry->policies[0]->update_item_func(common_entry->policies[0],
	    find_res->fifo_policy_item);

	if (find_res->fifo_policy_item->connected_item != NULL) {
		connected_item = find_res->fifo_policy_item->connected_item;
		memcpy(&connected_item->last_request_time,
		    &find_res->fifo_policy_item->last_request_time,
		    sizeof(struct timeval));
		connected_item->request_count =
		    find_res->fifo_policy_item->request_count;

		common_entry->policies[1]->update_item_func(
		    common_entry->policies[1], connected_item);
	}

	TRACE_OUT(cache_read);
	return (0);
}

/*
 * Writes the value with the specified key into the cache entry.
 * Function returns 0 on success, and -1 on error.
 */
int
cache_write(struct cache_entry_ *entry, const char *key, size_t key_size,
	char const *value, size_t value_size)
{
	struct cache_common_entry_ *common_entry;
	struct cache_ht_item_data_ item_data, *find_res;
	struct cache_ht_item_ *item;
	hashtable_index_t hash;
	struct cache_policy_ *policy, *connected_policy;
	struct cache_policy_item_ *policy_item;
	struct cache_policy_item_ *connected_policy_item;

	TRACE_IN(cache_write);
	assert(entry != NULL);
	assert(key != NULL);
	assert(value != NULL);
	assert(entry->params->entry_type == CET_COMMON);

	common_entry = (struct cache_common_entry_ *)entry;

	memset(&item_data, 0, sizeof(struct cache_ht_item_data_));
	/* can't avoid the cast here */
	item_data.key = (char *)key;
	item_data.key_size = key_size;

	hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items,
	    &item_data);
	assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items));

	item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash);
	find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data);
	if (find_res != NULL) {
		if (find_res->confidence <
		    common_entry->common_params.confidence_threshold) {
			/* duplicate entry is no error, if confidence is low */
			if ((find_res->value_size == value_size) &&
			    (memcmp(find_res->value, value, value_size) == 0)) {
				/* increase confidence on exact match (key and values) */
				find_res->confidence++;
			} else {
				/* replace the stored value with low confidence, if the value changed */
				free(find_res->value);
				find_res->value = malloc(value_size);
				assert(find_res->value != NULL);
				memcpy(find_res->value, value, value_size);
				find_res->value_size = value_size;
				find_res->confidence = 1;
			}
			TRACE_OUT(cache_write);
			return (0);
		}
		TRACE_OUT(cache_write);
		return (-1);
	}

	item_data.key = malloc(key_size);
	assert(item_data.key != NULL);
	memcpy(item_data.key, key, key_size);

	item_data.value = malloc(value_size);
	assert(item_data.value != NULL);
	memcpy(item_data.value, value, value_size);
	item_data.value_size = value_size;
	item_data.confidence = 1;

	policy_item = common_entry->policies[0]->create_item_func();
	policy_item->key = item_data.key;
	policy_item->key_size = item_data.key_size;
	common_entry->get_time_func(&policy_item->creation_time);

	if (common_entry->policies_size > 1) {
		connected_policy_item =
		    common_entry->policies[1]->create_item_func();
		memcpy(&connected_policy_item->creation_time,
		    &policy_item->creation_time,
		    sizeof(struct timeval));
		connected_policy_item->key = policy_item->key;
		connected_policy_item->key_size = policy_item->key_size;

		connected_policy_item->connected_item = policy_item;
		policy_item->connected_item = connected_policy_item;
	}

	item_data.fifo_policy_item = policy_item;

	common_entry->policies[0]->add_item_func(common_entry->policies[0],
	    policy_item);
	if (common_entry->policies_size > 1)
		common_entry->policies[1]->add_item_func(
		    common_entry->policies[1], connected_policy_item);
HASHTABLE_ENTRY_STORE(cache_ht_, item, &item_data); ++common_entry->items_size; if ((common_entry->common_params.max_elemsize != 0) && (common_entry->items_size > common_entry->common_params.max_elemsize)) { if (common_entry->policies_size > 1) { policy = common_entry->policies[1]; connected_policy = common_entry->policies[0]; } else { policy = common_entry->policies[0]; connected_policy = NULL; } flush_cache_policy(common_entry, policy, connected_policy, cache_elemsize_common_continue_func); } TRACE_OUT(cache_write); return (0); } /* * Initializes the write session for the specified multipart entry. This * session then should be filled with data either committed or abandoned by * using close_cache_mp_write_session or abandon_cache_mp_write_session * respectively. * Returns NULL on errors (when there are too many opened write sessions for * the entry). */ struct cache_mp_write_session_ * open_cache_mp_write_session(struct cache_entry_ *entry) { struct cache_mp_entry_ *mp_entry; struct cache_mp_write_session_ *retval; TRACE_IN(open_cache_mp_write_session); assert(entry != NULL); assert(entry->params->entry_type == CET_MULTIPART); mp_entry = (struct cache_mp_entry_ *)entry; if ((mp_entry->mp_params.max_sessions > 0) && (mp_entry->ws_size == mp_entry->mp_params.max_sessions)) { TRACE_OUT(open_cache_mp_write_session); return (NULL); } retval = calloc(1, sizeof(*retval)); assert(retval != NULL); TAILQ_INIT(&retval->items); retval->parent_entry = mp_entry; TAILQ_INSERT_HEAD(&mp_entry->ws_head, retval, entries); ++mp_entry->ws_size; TRACE_OUT(open_cache_mp_write_session); return (retval); } /* * Writes data to the specified session. Return 0 on success and -1 on errors * (when write session size limit is exceeded). */ int cache_mp_write(struct cache_mp_write_session_ *ws, char *data, size_t data_size) { struct cache_mp_data_item_ *new_item; TRACE_IN(cache_mp_write); assert(ws != NULL); assert(ws->parent_entry != NULL); assert(ws->parent_entry->params->entry_type == CET_MULTIPART); if ((ws->parent_entry->mp_params.max_elemsize > 0) && (ws->parent_entry->mp_params.max_elemsize == ws->items_size)) { TRACE_OUT(cache_mp_write); return (-1); } new_item = calloc(1, sizeof(*new_item)); assert(new_item != NULL); new_item->value = malloc(data_size); assert(new_item->value != NULL); memcpy(new_item->value, data, data_size); new_item->value_size = data_size; TAILQ_INSERT_TAIL(&ws->items, new_item, entries); ++ws->items_size; TRACE_OUT(cache_mp_write); return (0); } /* * Abandons the write session and frees all the connected resources. */ void abandon_cache_mp_write_session(struct cache_mp_write_session_ *ws) { TRACE_IN(abandon_cache_mp_write_session); assert(ws != NULL); assert(ws->parent_entry != NULL); assert(ws->parent_entry->params->entry_type == CET_MULTIPART); TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries); --ws->parent_entry->ws_size; destroy_cache_mp_write_session(ws); TRACE_OUT(abandon_cache_mp_write_session); } /* * Commits the session to the entry, for which it was created. 
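 *
 * A typical producer sequence is sketched below for illustration only
 * (entry, buf and buf_size are hypothetical):
 *
 *	ws = open_cache_mp_write_session(entry);
 *	if (ws != NULL) {
 *		if (cache_mp_write(ws, buf, buf_size) == 0)
 *			close_cache_mp_write_session(ws);
 *		else
 *			abandon_cache_mp_write_session(ws);
 *	}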
 */
void
close_cache_mp_write_session(struct cache_mp_write_session_ *ws)
{

	TRACE_IN(close_cache_mp_write_session);
	assert(ws != NULL);
	assert(ws->parent_entry != NULL);
	assert(ws->parent_entry->params->entry_type == CET_MULTIPART);

	TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries);
	--ws->parent_entry->ws_size;

	if (ws->parent_entry->completed_write_session == NULL) {
		/*
		 * If there is no completed session yet, this will be the one
		 */
		ws->parent_entry->get_time_func(
		    &ws->parent_entry->creation_time);
		ws->parent_entry->completed_write_session = ws;
	} else {
		/*
		 * If there is a completed session, then we'll save our session
		 * as a pending session. If there is already a pending session,
		 * it would be destroyed.
		 */
		if (ws->parent_entry->pending_write_session != NULL)
			destroy_cache_mp_write_session(
			    ws->parent_entry->pending_write_session);

		ws->parent_entry->pending_write_session = ws;
	}
	TRACE_OUT(close_cache_mp_write_session);
}

/*
 * Opens read session for the specified entry. Returns NULL on errors (when
 * there are no data in the entry, or the data are obsolete).
 */
struct cache_mp_read_session_ *
open_cache_mp_read_session(struct cache_entry_ *entry)
{
	struct cache_mp_entry_ *mp_entry;
	struct cache_mp_read_session_ *retval;

	TRACE_IN(open_cache_mp_read_session);
	assert(entry != NULL);
	assert(entry->params->entry_type == CET_MULTIPART);
	mp_entry = (struct cache_mp_entry_ *)entry;

	if (mp_entry->completed_write_session == NULL) {
		TRACE_OUT(open_cache_mp_read_session);
		return (NULL);
	}

	if ((mp_entry->mp_params.max_lifetime.tv_sec != 0) ||
	    (mp_entry->mp_params.max_lifetime.tv_usec != 0)) {
		if (mp_entry->last_request_time.tv_sec -
		    mp_entry->creation_time.tv_sec >
		    mp_entry->mp_params.max_lifetime.tv_sec) {
			flush_cache_entry(entry);
			TRACE_OUT(open_cache_mp_read_session);
			return (NULL);
		}
	}

	retval = calloc(1, sizeof(*retval));
	assert(retval != NULL);

	retval->parent_entry = mp_entry;
	retval->current_item = TAILQ_FIRST(
	    &mp_entry->completed_write_session->items);

	TAILQ_INSERT_HEAD(&mp_entry->rs_head, retval, entries);
	++mp_entry->rs_size;

	mp_entry->get_time_func(&mp_entry->last_request_time);
	TRACE_OUT(open_cache_mp_read_session);
	return (retval);
}

/*
 * Reads the data from the read session - step by step.
 * Returns 0 on success, -1 on error (when there are no more data), and -2 if
 * the data_size is too small. In the last case, data_size would be filled
 * with the proper value.
 */
int
cache_mp_read(struct cache_mp_read_session_ *rs, char *data, size_t *data_size)
{

	TRACE_IN(cache_mp_read);
	assert(rs != NULL);

	if (rs->current_item == NULL) {
		TRACE_OUT(cache_mp_read);
		return (-1);
	}

	if (rs->current_item->value_size > *data_size) {
		*data_size = rs->current_item->value_size;
		if (data == NULL) {
			TRACE_OUT(cache_mp_read);
			return (0);
		}

		TRACE_OUT(cache_mp_read);
		return (-2);
	}

	*data_size = rs->current_item->value_size;
	memcpy(data, rs->current_item->value, rs->current_item->value_size);
	rs->current_item = TAILQ_NEXT(rs->current_item, entries);

	TRACE_OUT(cache_mp_read);
	return (0);
}

/*
 * Closes the read session. If there are no more read sessions and there is
 * a pending write session, it will be committed and old
 * completed_write_session will be destroyed.
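 *
 * A typical consumer is sketched below for illustration only (entry and
 * consume() are hypothetical); the first cache_mp_read() call with a
 * NULL buffer only reports the size of the current element:
 *
 *	rs = open_cache_mp_read_session(entry);
 *	if (rs != NULL) {
 *		size_t sz = 0;
 *
 *		if (cache_mp_read(rs, NULL, &sz) == 0 && sz > 0) {
 *			char *buf = malloc(sz);
 *
 *			assert(buf != NULL);
 *			if (cache_mp_read(rs, buf, &sz) == 0)
 *				consume(buf, sz);
 *			free(buf);
 *		}
 *		close_cache_mp_read_session(rs);
 *	}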
*/ void close_cache_mp_read_session(struct cache_mp_read_session_ *rs) { TRACE_IN(close_cache_mp_read_session); assert(rs != NULL); assert(rs->parent_entry != NULL); TAILQ_REMOVE(&rs->parent_entry->rs_head, rs, entries); --rs->parent_entry->rs_size; if ((rs->parent_entry->rs_size == 0) && (rs->parent_entry->pending_write_session != NULL)) { destroy_cache_mp_write_session( rs->parent_entry->completed_write_session); rs->parent_entry->completed_write_session = rs->parent_entry->pending_write_session; rs->parent_entry->pending_write_session = NULL; } destroy_cache_mp_read_session(rs); TRACE_OUT(close_cache_mp_read_session); } int transform_cache_entry(struct cache_entry_ *entry, enum cache_transformation_t transformation) { TRACE_IN(transform_cache_entry); switch (transformation) { case CTT_CLEAR: clear_cache_entry(entry); TRACE_OUT(transform_cache_entry); return (0); case CTT_FLUSH: flush_cache_entry(entry); TRACE_OUT(transform_cache_entry); return (0); default: TRACE_OUT(transform_cache_entry); return (-1); } } int transform_cache_entry_part(struct cache_entry_ *entry, enum cache_transformation_t transformation, const char *key_part, size_t key_part_size, enum part_position_t part_position) { struct cache_common_entry_ *common_entry; struct cache_ht_item_ *ht_item; struct cache_ht_item_data_ *ht_item_data, ht_key; struct cache_policy_item_ *item, *connected_item; TRACE_IN(transform_cache_entry_part); if (entry->params->entry_type != CET_COMMON) { TRACE_OUT(transform_cache_entry_part); return (-1); } if (transformation != CTT_CLEAR) { TRACE_OUT(transform_cache_entry_part); return (-1); } memset(&ht_key, 0, sizeof(struct cache_ht_item_data_)); ht_key.key = (char *)key_part; /* can't avoid casting here */ ht_key.key_size = key_part_size; common_entry = (struct cache_common_entry_ *)entry; HASHTABLE_FOREACH(&(common_entry->items), ht_item) { do { ht_item_data = HASHTABLE_ENTRY_FIND_SPECIAL(cache_ht_, ht_item, &ht_key, ht_items_fixed_size_left_cmp_func); if (ht_item_data != NULL) { item = ht_item_data->fifo_policy_item; connected_item = item->connected_item; common_entry->policies[0]->remove_item_func( common_entry->policies[0], item); free(ht_item_data->key); free(ht_item_data->value); HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item, ht_item_data); --common_entry->items_size; common_entry->policies[0]->destroy_item_func( item); if (common_entry->policies_size == 2) { common_entry->policies[1]->remove_item_func( common_entry->policies[1], connected_item); common_entry->policies[1]->destroy_item_func( connected_item); } } } while (ht_item_data != NULL); } TRACE_OUT(transform_cache_entry_part); return (0); } Index: head/usr.sbin/nscd/config.c =================================================================== --- head/usr.sbin/nscd/config.c (revision 315212) +++ head/usr.sbin/nscd/config.c (revision 315213) @@ -1,588 +1,586 @@ /*- * Copyright (c) 2005 Michael Bushkov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "config.h" #include "debug.h" #include "log.h" /* * Default entries, which always exist in the configuration */ const char *c_default_entries[6] = { NSDB_PASSWD, NSDB_GROUP, NSDB_HOSTS, NSDB_SERVICES, NSDB_PROTOCOLS, NSDB_RPC }; static int configuration_entry_cmp(const void *, const void *); static int configuration_entry_sort_cmp(const void *, const void *); static int configuration_entry_cache_mp_sort_cmp(const void *, const void *); static int configuration_entry_cache_mp_cmp(const void *, const void *); static int configuration_entry_cache_mp_part_cmp(const void *, const void *); static struct configuration_entry *create_configuration_entry(const char *, struct timeval const *, struct timeval const *, struct common_cache_entry_params const *, struct common_cache_entry_params const *, struct mp_cache_entry_params const *); static int configuration_entry_sort_cmp(const void *e1, const void *e2) { return (strcmp((*((struct configuration_entry **)e1))->name, (*((struct configuration_entry **)e2))->name )); } static int configuration_entry_cmp(const void *e1, const void *e2) { return (strcmp((const char *)e1, (*((struct configuration_entry **)e2))->name )); } static int configuration_entry_cache_mp_sort_cmp(const void *e1, const void *e2) { return (strcmp((*((cache_entry *)e1))->params->entry_name, (*((cache_entry *)e2))->params->entry_name )); } static int configuration_entry_cache_mp_cmp(const void *e1, const void *e2) { return (strcmp((const char *)e1, (*((cache_entry *)e2))->params->entry_name )); } static int configuration_entry_cache_mp_part_cmp(const void *e1, const void *e2) { return (strncmp((const char *)e1, (*((cache_entry *)e2))->params->entry_name, strlen((const char *)e1) )); } static struct configuration_entry * create_configuration_entry(const char *name, struct timeval const *common_timeout, struct timeval const *mp_timeout, struct common_cache_entry_params const *positive_params, struct common_cache_entry_params const *negative_params, struct mp_cache_entry_params const *mp_params) { struct configuration_entry *retval; size_t size; int res; TRACE_IN(create_configuration_entry); assert(name != NULL); assert(positive_params != NULL); assert(negative_params != NULL); assert(mp_params != NULL); retval = calloc(1, sizeof(*retval)); assert(retval != NULL); res = pthread_mutex_init(&retval->positive_cache_lock, NULL); if (res != 0) { free(retval); LOG_ERR_2("create_configuration_entry", "can't create positive cache lock"); TRACE_OUT(create_configuration_entry); return (NULL); } res = pthread_mutex_init(&retval->negative_cache_lock, NULL); if (res != 0) { pthread_mutex_destroy(&retval->positive_cache_lock); 
free(retval); LOG_ERR_2("create_configuration_entry", "can't create negative cache lock"); TRACE_OUT(create_configuration_entry); return (NULL); } res = pthread_mutex_init(&retval->mp_cache_lock, NULL); if (res != 0) { pthread_mutex_destroy(&retval->positive_cache_lock); pthread_mutex_destroy(&retval->negative_cache_lock); free(retval); LOG_ERR_2("create_configuration_entry", "can't create negative cache lock"); TRACE_OUT(create_configuration_entry); return (NULL); } memcpy(&retval->positive_cache_params, positive_params, sizeof(struct common_cache_entry_params)); memcpy(&retval->negative_cache_params, negative_params, sizeof(struct common_cache_entry_params)); memcpy(&retval->mp_cache_params, mp_params, sizeof(struct mp_cache_entry_params)); size = strlen(name); retval->name = calloc(1, size + 1); assert(retval->name != NULL); memcpy(retval->name, name, size); memcpy(&retval->common_query_timeout, common_timeout, sizeof(struct timeval)); memcpy(&retval->mp_query_timeout, mp_timeout, sizeof(struct timeval)); asprintf(&retval->positive_cache_params.cep.entry_name, "%s+", name); assert(retval->positive_cache_params.cep.entry_name != NULL); asprintf(&retval->negative_cache_params.cep.entry_name, "%s-", name); assert(retval->negative_cache_params.cep.entry_name != NULL); asprintf(&retval->mp_cache_params.cep.entry_name, "%s*", name); assert(retval->mp_cache_params.cep.entry_name != NULL); TRACE_OUT(create_configuration_entry); return (retval); } /* * Creates configuration entry and fills it with default values */ struct configuration_entry * create_def_configuration_entry(const char *name) { struct common_cache_entry_params positive_params, negative_params; struct mp_cache_entry_params mp_params; struct timeval default_common_timeout, default_mp_timeout; struct configuration_entry *res = NULL; TRACE_IN(create_def_configuration_entry); memset(&positive_params, 0, sizeof(struct common_cache_entry_params)); positive_params.cep.entry_type = CET_COMMON; positive_params.cache_entries_size = DEFAULT_CACHE_HT_SIZE; positive_params.max_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE; positive_params.satisf_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE / 2; positive_params.max_lifetime.tv_sec = DEFAULT_POSITIVE_LIFETIME; positive_params.confidence_threshold = DEFAULT_POSITIVE_CONF_THRESH; positive_params.policy = CPT_LRU; memcpy(&negative_params, &positive_params, sizeof(struct common_cache_entry_params)); negative_params.max_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE; negative_params.satisf_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE / 2; negative_params.max_lifetime.tv_sec = DEFAULT_NEGATIVE_LIFETIME; negative_params.confidence_threshold = DEFAULT_NEGATIVE_CONF_THRESH; negative_params.policy = CPT_FIFO; memset(&default_common_timeout, 0, sizeof(struct timeval)); default_common_timeout.tv_sec = DEFAULT_COMMON_ENTRY_TIMEOUT; memset(&default_mp_timeout, 0, sizeof(struct timeval)); default_mp_timeout.tv_sec = DEFAULT_MP_ENTRY_TIMEOUT; memset(&mp_params, 0, sizeof(struct mp_cache_entry_params)); mp_params.cep.entry_type = CET_MULTIPART; mp_params.max_elemsize = DEFAULT_MULTIPART_ELEMENTS_SIZE; mp_params.max_sessions = DEFAULT_MULITPART_SESSIONS_SIZE; mp_params.max_lifetime.tv_sec = DEFAULT_MULITPART_LIFETIME; res = create_configuration_entry(name, &default_common_timeout, &default_mp_timeout, &positive_params, &negative_params, &mp_params); TRACE_OUT(create_def_configuration_entry); return (res); } void destroy_configuration_entry(struct configuration_entry *entry) { TRACE_IN(destroy_configuration_entry); assert(entry 
!= NULL); pthread_mutex_destroy(&entry->positive_cache_lock); pthread_mutex_destroy(&entry->negative_cache_lock); pthread_mutex_destroy(&entry->mp_cache_lock); free(entry->name); free(entry->positive_cache_params.cep.entry_name); free(entry->negative_cache_params.cep.entry_name); free(entry->mp_cache_params.cep.entry_name); free(entry->mp_cache_entries); free(entry); TRACE_OUT(destroy_configuration_entry); } int add_configuration_entry(struct configuration *config, struct configuration_entry *entry) { TRACE_IN(add_configuration_entry); assert(entry != NULL); assert(entry->name != NULL); if (configuration_find_entry(config, entry->name) != NULL) { TRACE_OUT(add_configuration_entry); return (-1); } if (config->entries_size == config->entries_capacity) { struct configuration_entry **new_entries; config->entries_capacity *= 2; - new_entries = calloc(1, - sizeof(*new_entries) * - config->entries_capacity); + new_entries = calloc(config->entries_capacity, + sizeof(*new_entries)); assert(new_entries != NULL); memcpy(new_entries, config->entries, sizeof(struct configuration_entry *) * config->entries_size); free(config->entries); config->entries = new_entries; } config->entries[config->entries_size++] = entry; qsort(config->entries, config->entries_size, sizeof(struct configuration_entry *), configuration_entry_sort_cmp); TRACE_OUT(add_configuration_entry); return (0); } size_t configuration_get_entries_size(struct configuration *config) { TRACE_IN(configuration_get_entries_size); assert(config != NULL); TRACE_OUT(configuration_get_entries_size); return (config->entries_size); } struct configuration_entry * configuration_get_entry(struct configuration *config, size_t index) { TRACE_IN(configuration_get_entry); assert(config != NULL); assert(index < config->entries_size); TRACE_OUT(configuration_get_entry); return (config->entries[index]); } struct configuration_entry * configuration_find_entry(struct configuration *config, const char *name) { struct configuration_entry **retval; TRACE_IN(configuration_find_entry); retval = bsearch(name, config->entries, config->entries_size, sizeof(struct configuration_entry *), configuration_entry_cmp); TRACE_OUT(configuration_find_entry); return ((retval != NULL) ? *retval : NULL); } /* * All multipart cache entries are stored in the configuration_entry in the * sorted array (sorted by names). The 3 functions below manage this array. 
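 *
 * For illustration (hypothetical usage), clearing every multipart cache
 * entry whose name starts with "hosts" could use the range returned by
 * configuration_entry_find_mp_cache_entries:
 *
 *	cache_entry *start, *finish, *ce;
 *
 *	if (configuration_entry_find_mp_cache_entries(config_entry,
 *	    "hosts", &start, &finish) == 0)
 *		for (ce = start; ce != finish; ++ce)
 *			transform_cache_entry(*ce, CTT_CLEAR);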
*/ int configuration_entry_add_mp_cache_entry(struct configuration_entry *config_entry, cache_entry c_entry) { cache_entry *new_mp_entries, *old_mp_entries; TRACE_IN(configuration_entry_add_mp_cache_entry); ++config_entry->mp_cache_entries_size; new_mp_entries = malloc(sizeof(*new_mp_entries) * config_entry->mp_cache_entries_size); assert(new_mp_entries != NULL); new_mp_entries[0] = c_entry; if (config_entry->mp_cache_entries_size - 1 > 0) { memcpy(new_mp_entries + 1, config_entry->mp_cache_entries, (config_entry->mp_cache_entries_size - 1) * sizeof(cache_entry)); } old_mp_entries = config_entry->mp_cache_entries; config_entry->mp_cache_entries = new_mp_entries; free(old_mp_entries); qsort(config_entry->mp_cache_entries, config_entry->mp_cache_entries_size, sizeof(cache_entry), configuration_entry_cache_mp_sort_cmp); TRACE_OUT(configuration_entry_add_mp_cache_entry); return (0); } cache_entry configuration_entry_find_mp_cache_entry( struct configuration_entry *config_entry, const char *mp_name) { cache_entry *result; TRACE_IN(configuration_entry_find_mp_cache_entry); result = bsearch(mp_name, config_entry->mp_cache_entries, config_entry->mp_cache_entries_size, sizeof(cache_entry), configuration_entry_cache_mp_cmp); if (result == NULL) { TRACE_OUT(configuration_entry_find_mp_cache_entry); return (NULL); } else { TRACE_OUT(configuration_entry_find_mp_cache_entry); return (*result); } } /* * Searches for all multipart entries with names starting with mp_name. * Needed for cache flushing. */ int configuration_entry_find_mp_cache_entries( struct configuration_entry *config_entry, const char *mp_name, cache_entry **start, cache_entry **finish) { cache_entry *result; TRACE_IN(configuration_entry_find_mp_cache_entries); result = bsearch(mp_name, config_entry->mp_cache_entries, config_entry->mp_cache_entries_size, sizeof(cache_entry), configuration_entry_cache_mp_part_cmp); if (result == NULL) { TRACE_OUT(configuration_entry_find_mp_cache_entries); return (-1); } *start = result; *finish = result + 1; while (*start != config_entry->mp_cache_entries) { if (configuration_entry_cache_mp_part_cmp(mp_name, *start - 1) == 0) *start = *start - 1; else break; } while (*finish != config_entry->mp_cache_entries + config_entry->mp_cache_entries_size) { if (configuration_entry_cache_mp_part_cmp( mp_name, *finish) == 0) *finish = *finish + 1; else break; } TRACE_OUT(configuration_entry_find_mp_cache_entries); return (0); } /* * Configuration entry uses rwlock to handle access to its fields. */ void configuration_lock_rdlock(struct configuration *config) { TRACE_IN(configuration_lock_rdlock); pthread_rwlock_rdlock(&config->rwlock); TRACE_OUT(configuration_lock_rdlock); } void configuration_lock_wrlock(struct configuration *config) { TRACE_IN(configuration_lock_wrlock); pthread_rwlock_wrlock(&config->rwlock); TRACE_OUT(configuration_lock_wrlock); } void configuration_unlock(struct configuration *config) { TRACE_IN(configuration_unlock); pthread_rwlock_unlock(&config->rwlock); TRACE_OUT(configuration_unlock); } /* * Configuration entry uses 3 mutexes to handle cache operations. They are * acquired by configuration_lock_entry and configuration_unlock_entry * functions. 
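 *
 * Callers are expected to hold the matching lock around each cache
 * operation; an illustrative sketch (the key/value buffers are
 * hypothetical):
 *
 *	configuration_lock_entry(entry, CELT_POSITIVE);
 *	res = cache_read(entry->positive_cache_entry, key, key_size,
 *	    value, &value_size);
 *	configuration_unlock_entry(entry, CELT_POSITIVE);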
*/ void configuration_lock_entry(struct configuration_entry *entry, enum config_entry_lock_type lock_type) { TRACE_IN(configuration_lock_entry); assert(entry != NULL); switch (lock_type) { case CELT_POSITIVE: pthread_mutex_lock(&entry->positive_cache_lock); break; case CELT_NEGATIVE: pthread_mutex_lock(&entry->negative_cache_lock); break; case CELT_MULTIPART: pthread_mutex_lock(&entry->mp_cache_lock); break; default: /* should be unreachable */ break; } TRACE_OUT(configuration_lock_entry); } void configuration_unlock_entry(struct configuration_entry *entry, enum config_entry_lock_type lock_type) { TRACE_IN(configuration_unlock_entry); assert(entry != NULL); switch (lock_type) { case CELT_POSITIVE: pthread_mutex_unlock(&entry->positive_cache_lock); break; case CELT_NEGATIVE: pthread_mutex_unlock(&entry->negative_cache_lock); break; case CELT_MULTIPART: pthread_mutex_unlock(&entry->mp_cache_lock); break; default: /* should be unreachable */ break; } TRACE_OUT(configuration_unlock_entry); } struct configuration * init_configuration(void) { struct configuration *retval; TRACE_IN(init_configuration); retval = calloc(1, sizeof(*retval)); assert(retval != NULL); retval->entries_capacity = INITIAL_ENTRIES_CAPACITY; - retval->entries = calloc(1, - sizeof(*retval->entries) * - retval->entries_capacity); + retval->entries = calloc(retval->entries_capacity, + sizeof(*retval->entries)); assert(retval->entries != NULL); pthread_rwlock_init(&retval->rwlock, NULL); TRACE_OUT(init_configuration); return (retval); } void fill_configuration_defaults(struct configuration *config) { size_t len, i; TRACE_IN(fill_configuration_defaults); assert(config != NULL); if (config->socket_path != NULL) free(config->socket_path); len = strlen(DEFAULT_SOCKET_PATH); config->socket_path = calloc(1, len + 1); assert(config->socket_path != NULL); memcpy(config->socket_path, DEFAULT_SOCKET_PATH, len); len = strlen(DEFAULT_PIDFILE_PATH); config->pidfile_path = calloc(1, len + 1); assert(config->pidfile_path != NULL); memcpy(config->pidfile_path, DEFAULT_PIDFILE_PATH, len); config->socket_mode = S_IFSOCK | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; config->force_unlink = 1; config->query_timeout = DEFAULT_QUERY_TIMEOUT; config->threads_num = DEFAULT_THREADS_NUM; for (i = 0; i < config->entries_size; ++i) destroy_configuration_entry(config->entries[i]); config->entries_size = 0; TRACE_OUT(fill_configuration_defaults); } void destroy_configuration(struct configuration *config) { unsigned int i; TRACE_IN(destroy_configuration); assert(config != NULL); free(config->pidfile_path); free(config->socket_path); for (i = 0; i < config->entries_size; ++i) destroy_configuration_entry(config->entries[i]); free(config->entries); pthread_rwlock_destroy(&config->rwlock); free(config); TRACE_OUT(destroy_configuration); } Index: head/usr.sbin/nscd/hashtable.h =================================================================== --- head/usr.sbin/nscd/hashtable.h (revision 315212) +++ head/usr.sbin/nscd/hashtable.h (revision 315213) @@ -1,221 +1,221 @@ /*- * Copyright (c) 2005 Michael Bushkov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __CACHELIB_HASHTABLE_H__ #define __CACHELIB_HASHTABLE_H__ #include #define HASHTABLE_INITIAL_ENTRIES_CAPACITY 8 typedef unsigned int hashtable_index_t; /* * This file contains queue.h-like macro definitions for hash tables. * Hash table is organized as an array of the specified size of the user * defined (with HASTABLE_ENTRY_HEAD) structures. Each hash table * entry (user defined structure) stores its elements in the sorted array. * You can place elements into the hash table, retrieve elements with * specified key, traverse through all elements, and delete them. * New elements are placed into the hash table by using the compare and * hashing functions, provided by the user. */ /* * Defines the hash table entry structure, that uses specified type of * elements. */ #define HASHTABLE_ENTRY_HEAD(name, type) struct name { \ type *values; \ size_t capacity; \ size_t size; \ } /* * Defines the hash table structure, which uses the specified type of entries. * The only restriction for entries is that is that they should have the field, * defined with HASHTABLE_ENTRY_HEAD macro. */ #define HASHTABLE_HEAD(name, entry) struct name { \ struct entry *entries; \ size_t entries_size; \ } #define HASHTABLE_ENTRIES_COUNT(table) \ ((table)->entries_size) /* * Unlike most of queue.h data types, hash tables can not be initialized * statically - so there is no HASHTABLE_HEAD_INITIALIZED macro. */ #define HASHTABLE_INIT(table, type, field, _entries_size) \ do { \ hashtable_index_t var; \ - (table)->entries = calloc(1, \ - sizeof(*(table)->entries) * (_entries_size)); \ + (table)->entries = calloc(_entries_size, \ + sizeof(*(table)->entries)); \ (table)->entries_size = (_entries_size); \ for (var = 0; var < HASHTABLE_ENTRIES_COUNT(table); ++var) {\ (table)->entries[var].field.capacity = \ HASHTABLE_INITIAL_ENTRIES_CAPACITY; \ (table)->entries[var].field.size = 0; \ (table)->entries[var].field.values = malloc( \ sizeof(type) * \ HASHTABLE_INITIAL_ENTRIES_CAPACITY); \ assert((table)->entries[var].field.values != NULL);\ } \ } while (0) /* * All initialized hashtables should be destroyed with this macro. 
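 * An illustrative init/destroy pair, mirroring the usage in cachelib.c
 * (my_table and struct my_item are hypothetical):
 *
 *	HASHTABLE_INIT(&my_table, struct my_item, data, 256);
 *	...
 *	HASHTABLE_DESTROY(&my_table, data);
 *
 * Note that HASHTABLE_INIT now passes the element count as the first
 * argument of calloc(), so the allocator itself can detect overflow in
 * the count * size multiplication and return NULL instead of silently
 * allocating a short buffer.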
 */
#define HASHTABLE_DESTROY(table, field)					\
	do {								\
		hashtable_index_t var;					\
		for (var = 0; var < HASHTABLE_ENTRIES_COUNT(table); ++var) {\
			free((table)->entries[var].field.values);	\
		}							\
	} while (0)

#define HASHTABLE_GET_ENTRY(table, hash)				\
	(&((table)->entries[hash]))

/*
 * Traverses through all hash table entries
 */
#define HASHTABLE_FOREACH(table, var)					\
	for ((var) = &((table)->entries[0]);				\
		(var) < &((table)->entries[HASHTABLE_ENTRIES_COUNT(table)]);\
		++(var))

/*
 * Traverses through all elements of the specified hash table entry
 */
#define HASHTABLE_ENTRY_FOREACH(entry, field, var)			\
	for ((var) = &((entry)->field.values[0]);			\
		(var) < &((entry)->field.values[(entry)->field.size]);	\
		++(var))

#define HASHTABLE_ENTRY_CLEAR(entry, field)				\
	((entry)->field.size = 0)

#define HASHTABLE_ENTRY_SIZE(entry, field)				\
	((entry)->field.size)

#define HASHTABLE_ENTRY_CAPACITY(entry, field)				\
	((entry)->field.capacity)

#define HASHTABLE_ENTRY_CAPACITY_INCREASE(entry, field, type)		\
	do {								\
		(entry)->field.capacity *= 2;				\
		(entry)->field.values = realloc((entry)->field.values,	\
			(entry)->field.capacity * sizeof(type));	\
	} while (0)

#define HASHTABLE_ENTRY_CAPACITY_DECREASE(entry, field, type)		\
	do {								\
		(entry)->field.capacity /= 2;				\
		(entry)->field.values = realloc((entry)->field.values,	\
			(entry)->field.capacity * sizeof(type));	\
	} while (0)

/*
 * Generates prototypes for the hash table functions
 */
#define HASHTABLE_PROTOTYPE(name, entry_, type)				\
hashtable_index_t name##_CALCULATE_HASH(struct name *, type *);		\
void name##_ENTRY_STORE(struct entry_*, type *);			\
type *name##_ENTRY_FIND(struct entry_*, type *);			\
type *name##_ENTRY_FIND_SPECIAL(struct entry_ *, type *,		\
	int (*) (const void *, const void *));				\
void name##_ENTRY_REMOVE(struct entry_*, type *);

/*
 * Generates implementations of the hash table functions
 */
#define HASHTABLE_GENERATE(name, entry_, type, field, HASH, CMP)	\
hashtable_index_t name##_CALCULATE_HASH(struct name *table, type *data)\
{									\
									\
	return HASH(data, table->entries_size);				\
}									\
									\
void name##_ENTRY_STORE(struct entry_ *the_entry, type *data)		\
{									\
									\
	if (the_entry->field.size == the_entry->field.capacity)	\
		HASHTABLE_ENTRY_CAPACITY_INCREASE(the_entry, field, type);\
									\
	memcpy(&(the_entry->field.values[the_entry->field.size++]),	\
		data,							\
		sizeof(type));						\
	qsort(the_entry->field.values, the_entry->field.size,		\
		sizeof(type), CMP);					\
}									\
									\
type *name##_ENTRY_FIND(struct entry_ *the_entry, type *key)		\
{									\
									\
	return ((type *)bsearch(key, the_entry->field.values,		\
		the_entry->field.size, sizeof(type), CMP));		\
}									\
									\
type *name##_ENTRY_FIND_SPECIAL(struct entry_ *the_entry, type *key,	\
	int (*compar) (const void *, const void *))			\
{									\
	return ((type *)bsearch(key, the_entry->field.values,		\
		the_entry->field.size, sizeof(type), compar));		\
}									\
									\
void name##_ENTRY_REMOVE(struct entry_ *the_entry, type *del_elm)	\
{									\
									\
	memmove(del_elm, del_elm + 1,					\
		(&the_entry->field.values[--the_entry->field.size] - del_elm) *\
		sizeof(type));						\
}

/*
 * Macro definitions below wrap the functions, generated with
 * HASHTABLE_GENERATE macro. You should use them and avoid using generated
 * functions directly.
 */
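/*
 * An illustrative lookup through the wrappers, mirroring cache_read() in
 * cachelib.c:
 *
 *	hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items,
 *	    &item_data);
 *	item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash);
 *	find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data);
 */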
#define HASHTABLE_CALCULATE_HASH(name, table, data)			\
	(name##_CALCULATE_HASH((table), data))

#define HASHTABLE_ENTRY_STORE(name, entry, data)			\
	name##_ENTRY_STORE((entry), data)

#define HASHTABLE_ENTRY_FIND(name, entry, key)				\
	(name##_ENTRY_FIND((entry), (key)))

#define HASHTABLE_ENTRY_FIND_SPECIAL(name, entry, key, cmp)		\
	(name##_ENTRY_FIND_SPECIAL((entry), (key), (cmp)))

#define HASHTABLE_ENTRY_REMOVE(name, entry, del_elm)			\
	name##_ENTRY_REMOVE((entry), (del_elm))

#endif

Index: head/usr.sbin/nscd/nscd.c
===================================================================
--- head/usr.sbin/nscd/nscd.c	(revision 315212)
+++ head/usr.sbin/nscd/nscd.c	(revision 315213)
@@ -1,870 +1,870 @@
/*-
 * Copyright (c) 2005 Michael Bushkov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "agents/passwd.h" #include "agents/group.h" #include "agents/services.h" #include "cachelib.h" #include "config.h" #include "debug.h" #include "log.h" #include "nscdcli.h" #include "parser.h" #include "query.h" #include "singletons.h" #ifndef CONFIG_PATH #define CONFIG_PATH "/etc/nscd.conf" #endif #define DEFAULT_CONFIG_PATH "nscd.conf" #define MAX_SOCKET_IO_SIZE 4096 struct processing_thread_args { cache the_cache; struct configuration *the_configuration; struct runtime_env *the_runtime_env; }; static void accept_connection(struct kevent *, struct runtime_env *, struct configuration *); static void destroy_cache_(cache); static void destroy_runtime_env(struct runtime_env *); static cache init_cache_(struct configuration *); static struct runtime_env *init_runtime_env(struct configuration *); static void processing_loop(cache, struct runtime_env *, struct configuration *); static void process_socket_event(struct kevent *, struct runtime_env *, struct configuration *); static void process_timer_event(struct kevent *, struct runtime_env *, struct configuration *); static void *processing_thread(void *); static void usage(void); void get_time_func(struct timeval *); static void usage(void) { fprintf(stderr, "usage: nscd [-dnst] [-i cachename] [-I cachename]\n"); exit(1); } static cache init_cache_(struct configuration *config) { struct cache_params params; cache retval; struct configuration_entry *config_entry; size_t size, i; int res; TRACE_IN(init_cache_); memset(¶ms, 0, sizeof(struct cache_params)); params.get_time_func = get_time_func; retval = init_cache(¶ms); size = configuration_get_entries_size(config); for (i = 0; i < size; ++i) { config_entry = configuration_get_entry(config, i); /* * We should register common entries now - multipart entries * would be registered automatically during the queries. */ res = register_cache_entry(retval, (struct cache_entry_params *) &config_entry->positive_cache_params); config_entry->positive_cache_entry = find_cache_entry(retval, config_entry->positive_cache_params.cep.entry_name); assert(config_entry->positive_cache_entry != INVALID_CACHE_ENTRY); res = register_cache_entry(retval, (struct cache_entry_params *) &config_entry->negative_cache_params); config_entry->negative_cache_entry = find_cache_entry(retval, config_entry->negative_cache_params.cep.entry_name); assert(config_entry->negative_cache_entry != INVALID_CACHE_ENTRY); } LOG_MSG_2("cache", "cache was successfully initialized"); TRACE_OUT(init_cache_); return (retval); } static void destroy_cache_(cache the_cache) { TRACE_IN(destroy_cache_); destroy_cache(the_cache); TRACE_OUT(destroy_cache_); } /* * Socket and kqueues are prepared here. We have one global queue for both * socket and timers events. 
*/ static struct runtime_env * init_runtime_env(struct configuration *config) { int serv_addr_len; struct sockaddr_un serv_addr; struct kevent eventlist; struct timespec timeout; struct runtime_env *retval; TRACE_IN(init_runtime_env); retval = calloc(1, sizeof(*retval)); assert(retval != NULL); retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0); if (config->force_unlink == 1) unlink(config->socket_path); memset(&serv_addr, 0, sizeof(struct sockaddr_un)); serv_addr.sun_family = PF_LOCAL; strlcpy(serv_addr.sun_path, config->socket_path, sizeof(serv_addr.sun_path)); serv_addr_len = sizeof(serv_addr.sun_family) + strlen(serv_addr.sun_path) + 1; if (bind(retval->sockfd, (struct sockaddr *)&serv_addr, serv_addr_len) == -1) { close(retval->sockfd); free(retval); LOG_ERR_2("runtime environment", "can't bind socket to path: " "%s", config->socket_path); TRACE_OUT(init_runtime_env); return (NULL); } LOG_MSG_2("runtime environment", "using socket %s", config->socket_path); /* * Here we're marking the socket as non-blocking and setting its backlog * to the maximum value */ chmod(config->socket_path, config->socket_mode); listen(retval->sockfd, -1); fcntl(retval->sockfd, F_SETFL, O_NONBLOCK); retval->queue = kqueue(); assert(retval->queue != -1); EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0); memset(&timeout, 0, sizeof(struct timespec)); kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout); LOG_MSG_2("runtime environment", "successfully initialized"); TRACE_OUT(init_runtime_env); return (retval); } static void destroy_runtime_env(struct runtime_env *env) { TRACE_IN(destroy_runtime_env); close(env->queue); close(env->sockfd); free(env); TRACE_OUT(destroy_runtime_env); } static void accept_connection(struct kevent *event_data, struct runtime_env *env, struct configuration *config) { struct kevent eventlist[2]; struct timespec timeout; struct query_state *qstate; int fd; int res; uid_t euid; gid_t egid; TRACE_IN(accept_connection); fd = accept(event_data->ident, NULL, NULL); if (fd == -1) { LOG_ERR_2("accept_connection", "error %d during accept()", errno); TRACE_OUT(accept_connection); return; } if (getpeereid(fd, &euid, &egid) != 0) { LOG_ERR_2("accept_connection", "error %d during getpeereid()", errno); close(fd); TRACE_OUT(accept_connection); return; } qstate = init_query_state(fd, sizeof(int), euid, egid); if (qstate == NULL) { LOG_ERR_2("accept_connection", "can't init query_state"); TRACE_OUT(accept_connection); return; } memset(&timeout, 0, sizeof(struct timespec)); EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, qstate->timeout.tv_sec * 1000, qstate); EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT, NOTE_LOWAT, qstate->kevent_watermark, qstate); res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout); if (res < 0) LOG_ERR_2("accept_connection", "kevent error"); TRACE_OUT(accept_connection); } static void process_socket_event(struct kevent *event_data, struct runtime_env *env, struct configuration *config) { struct kevent eventlist[2]; struct timeval query_timeout; struct timespec kevent_timeout; int nevents; int eof_res, res; ssize_t io_res; struct query_state *qstate; TRACE_IN(process_socket_event); eof_res = event_data->flags & EV_EOF ?
1 : 0; res = 0; memset(&kevent_timeout, 0, sizeof(struct timespec)); EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE, 0, 0, NULL); nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout); if (nevents == -1) { if (errno == ENOENT) { /* the timer is already handling this event */ TRACE_OUT(process_socket_event); return; } else { /* some other error happened */ LOG_ERR_2("process_socket_event", "kevent error, errno" " is %d", errno); TRACE_OUT(process_socket_event); return; } } qstate = (struct query_state *)event_data->udata; /* * If the buffer that is to be sent/received is too large, * we transfer it implicitly, using the query_io_buffer_read and * query_io_buffer_write functions of the query_state. These functions * use a temporary buffer, which is later sent/received in parts. * The code below implements buffer splitting/merging for send/receive * operations. It also does the actual socket IO operations. */ if (((qstate->use_alternate_io == 0) && (qstate->kevent_watermark <= (size_t)event_data->data)) || ((qstate->use_alternate_io != 0) && (qstate->io_buffer_watermark <= (size_t)event_data->data))) { if (qstate->use_alternate_io != 0) { switch (qstate->io_buffer_filter) { case EVFILT_READ: io_res = query_socket_read(qstate, qstate->io_buffer_p, qstate->io_buffer_watermark); if (io_res < 0) { qstate->use_alternate_io = 0; qstate->process_func = NULL; } else { qstate->io_buffer_p += io_res; if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) { qstate->io_buffer_p = qstate->io_buffer; qstate->use_alternate_io = 0; } } break; default: break; } } if (qstate->use_alternate_io == 0) { do { res = qstate->process_func(qstate); } while ((qstate->kevent_watermark == 0) && (qstate->process_func != NULL) && (res == 0)); if (res != 0) qstate->process_func = NULL; } if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_filter == EVFILT_WRITE)) { io_res = query_socket_write(qstate, qstate->io_buffer_p, qstate->io_buffer_watermark); if (io_res < 0) { qstate->use_alternate_io = 0; qstate->process_func = NULL; } else qstate->io_buffer_p += io_res; } } else { /* assuming that the socket was closed */ qstate->process_func = NULL; qstate->use_alternate_io = 0; } if (((qstate->process_func == NULL) && (qstate->use_alternate_io == 0)) || (eof_res != 0) || (res != 0)) { destroy_query_state(qstate); close(event_data->ident); TRACE_OUT(process_socket_event); return; } /* updating the query_state lifetime variable */ get_time_func(&query_timeout); query_timeout.tv_usec = 0; query_timeout.tv_sec -= qstate->creation_time.tv_sec; if (query_timeout.tv_sec > qstate->timeout.tv_sec) query_timeout.tv_sec = 0; else query_timeout.tv_sec = qstate->timeout.tv_sec - query_timeout.tv_sec; if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size)) qstate->use_alternate_io = 0; if (qstate->use_alternate_io == 0) { /* * If we must send/receive a large block of data, * we should prepare the query_state's io_XXX fields. * We should also substitute its write_func and read_func * with query_io_buffer_write and query_io_buffer_read, * which will allow us to implicitly send/receive this large * buffer later (in subsequent calls to * process_socket_event). */
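/*
 * Illustrative aside, not part of the original file: the io_buffer_p
 * bookkeeping that the real code below performs, reduced to a
 * self-contained sketch.  write_fn is a hypothetical stand-in for
 * query_socket_write and chunk for MAX_SOCKET_IO_SIZE; the watermark is
 * clamped to the bytes remaining in the buffer, and the pointer advances
 * by however many bytes the socket actually took.
 */
#include <sys/types.h>
#include <stddef.h>

static int
drain_buffer(char **pos, char *end, size_t chunk,
	ssize_t (*write_fn)(char *, size_t))
{
	size_t watermark;
	ssize_t io_res;

	/* never ask for more than what is left in the buffer */
	watermark = (size_t)(end - *pos) < chunk ?
		(size_t)(end - *pos) : chunk;
	io_res = write_fn(*pos, watermark);
	if (io_res < 0)
		return (-1);		/* socket error: abort alternate IO */
	*pos += io_res;			/* partial writes simply advance */
	return (*pos == end);		/* 1 once the whole buffer is sent */
}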
if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) { if (qstate->io_buffer != NULL) free(qstate->io_buffer); qstate->io_buffer = calloc(1, qstate->kevent_watermark); assert(qstate->io_buffer != NULL); qstate->io_buffer_p = qstate->io_buffer; qstate->io_buffer_size = qstate->kevent_watermark; qstate->io_buffer_filter = qstate->kevent_filter; qstate->write_func = query_io_buffer_write; qstate->read_func = query_io_buffer_read; if (qstate->kevent_filter == EVFILT_READ) qstate->use_alternate_io = 1; qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE; EV_SET(&eventlist[1], event_data->ident, qstate->kevent_filter, EV_ADD | EV_ONESHOT, NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate); } else { EV_SET(&eventlist[1], event_data->ident, qstate->kevent_filter, EV_ADD | EV_ONESHOT, NOTE_LOWAT, qstate->kevent_watermark, qstate); } } else { if (qstate->io_buffer + qstate->io_buffer_size - qstate->io_buffer_p < MAX_SOCKET_IO_SIZE) { qstate->io_buffer_watermark = qstate->io_buffer + qstate->io_buffer_size - qstate->io_buffer_p; EV_SET(&eventlist[1], event_data->ident, qstate->io_buffer_filter, EV_ADD | EV_ONESHOT, NOTE_LOWAT, qstate->io_buffer_watermark, qstate); } else { qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE; EV_SET(&eventlist[1], event_data->ident, qstate->io_buffer_filter, EV_ADD | EV_ONESHOT, NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate); } } EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate); kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout); TRACE_OUT(process_socket_event); } /* * This routine is called if a timer event has been signaled in the kqueue. It * just closes the socket and destroys the query_state. */ static void process_timer_event(struct kevent *event_data, struct runtime_env *env, struct configuration *config) { struct query_state *qstate; TRACE_IN(process_timer_event); qstate = (struct query_state *)event_data->udata; destroy_query_state(qstate); close(event_data->ident); TRACE_OUT(process_timer_event); } /* * The processing loop is the basic processing routine that forms the body * of each processing thread */ static void processing_loop(cache the_cache, struct runtime_env *env, struct configuration *config) { struct timespec timeout; const int eventlist_size = 1; struct kevent eventlist[eventlist_size]; int nevents, i; TRACE_MSG("=> processing_loop"); memset(&timeout, 0, sizeof(struct timespec)); memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size); for (;;) { nevents = kevent(env->queue, NULL, 0, eventlist, eventlist_size, NULL); /* * we can only receive 1 event on success */ if (nevents == 1) { struct kevent *event_data; event_data = &eventlist[0]; if ((int)event_data->ident == env->sockfd) { for (i = 0; i < event_data->data; ++i) accept_connection(event_data, env, config); EV_SET(eventlist, s_runtime_env->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0); memset(&timeout, 0, sizeof(struct timespec)); kevent(s_runtime_env->queue, eventlist, 1, NULL, 0, &timeout); } else { switch (event_data->filter) { case EVFILT_READ: case EVFILT_WRITE: process_socket_event(event_data, env, config); break; case EVFILT_TIMER: process_timer_event(event_data, env, config); break; default: break; } } } else { /* this branch should currently never be executed */ } } TRACE_MSG("<= processing_loop"); } /* * Wrapper around the processing loop function. It sets the thread signal mask * to avoid SIGPIPE signals (which can happen if the client behaves * incorrectly). */
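/*
 * Illustrative aside, not part of the original file: every kevent in the
 * loop above is registered with EV_ONESHOT, so it is delivered once and
 * then dropped from the queue, which is why processing_loop re-adds the
 * listening socket's EVFILT_READ event after each accept batch.  A
 * minimal, self-contained sketch of that re-arm step:
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static void
rearm_read_event(int kq, int fd)
{
	struct kevent kev;
	struct timespec zero = { 0, 0 };

	/* one-shot: fires once, then must be EV_ADDed again */
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
	(void)kevent(kq, &kev, 1, NULL, 0, &zero);
}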
static void * processing_thread(void *data) { struct processing_thread_args *args; sigset_t new; TRACE_MSG("=> processing_thread"); args = (struct processing_thread_args *)data; sigemptyset(&new); sigaddset(&new, SIGPIPE); if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0) LOG_ERR_1("processing thread", "thread can't block the SIGPIPE signal"); processing_loop(args->the_cache, args->the_runtime_env, args->the_configuration); free(args); TRACE_MSG("<= processing_thread"); return (NULL); } void get_time_func(struct timeval *time) { struct timespec res; memset(&res, 0, sizeof(struct timespec)); clock_gettime(CLOCK_MONOTONIC, &res); time->tv_sec = res.tv_sec; time->tv_usec = 0; } /* * The idea of _nss_cache_cycle_prevention_function is that nsdispatch * will search for this symbol in the executable. This symbol is the * attribute of the caching daemon. So, if it exists, nsdispatch won't try * to connect to the caching daemon and will just ignore the 'cache' * source in the nsswitch.conf. This method helps to avoid cycles and * organize self-performing requests. * * (not actually a function; it used to be, but it doesn't make any * difference, as long as it has external linkage) */ void *_nss_cache_cycle_prevention_function; int main(int argc, char *argv[]) { struct processing_thread_args *thread_args; pthread_t *threads; struct pidfh *pidfile; pid_t pid; char const *config_file; char const *error_str; int error_line; int i, res; int trace_mode_enabled; int force_single_threaded; int do_not_daemonize; int clear_user_cache_entries, clear_all_cache_entries; char *user_config_entry_name, *global_config_entry_name; int show_statistics; int daemon_mode, interactive_mode; /* by default all debug messages are omitted */ TRACE_OFF(); /* parsing command line arguments */ trace_mode_enabled = 0; force_single_threaded = 0; do_not_daemonize = 0; clear_user_cache_entries = 0; clear_all_cache_entries = 0; show_statistics = 0; user_config_entry_name = NULL; global_config_entry_name = NULL; while ((res = getopt(argc, argv, "nstdi:I:")) != -1) { switch (res) { case 'n': do_not_daemonize = 1; break; case 's': force_single_threaded = 1; break; case 't': trace_mode_enabled = 1; break; case 'i': clear_user_cache_entries = 1; if (optarg != NULL) if (strcmp(optarg, "all") != 0) user_config_entry_name = strdup(optarg); break; case 'I': clear_all_cache_entries = 1; if (optarg != NULL) if (strcmp(optarg, "all") != 0) global_config_entry_name = strdup(optarg); break; case 'd': show_statistics = 1; break; case '?': default: usage(); /* NOT REACHED */ } } daemon_mode = do_not_daemonize | force_single_threaded | trace_mode_enabled; interactive_mode = clear_user_cache_entries | clear_all_cache_entries | show_statistics; if ((daemon_mode != 0) && (interactive_mode != 0)) { LOG_ERR_1("main", "daemon mode and interactive mode arguments " "can't be used together"); usage(); } if (interactive_mode != 0) { FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r"); char pidbuf[256]; struct nscd_connection_params connection_params; nscd_connection connection; int result; if (pidfin == NULL) errx(EXIT_FAILURE, "There is no daemon running."); memset(pidbuf, 0, sizeof(pidbuf)); fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin); if (ferror(pidfin) != 0) errx(EXIT_FAILURE, "Can't read from pidfile."); fclose(pidfin); if (sscanf(pidbuf, "%d", &pid) != 1) errx(EXIT_FAILURE, "Invalid pidfile."); LOG_MSG_1("main", "daemon PID is %d", pid); memset(&connection_params, 0, sizeof(struct nscd_connection_params)); connection_params.socket_path =
DEFAULT_SOCKET_PATH; connection = open_nscd_connection__(&connection_params); if (connection == INVALID_NSCD_CONNECTION) errx(EXIT_FAILURE, "Can't connect to the daemon."); if (clear_user_cache_entries != 0) { result = nscd_transform__(connection, user_config_entry_name, TT_USER); if (result != 0) LOG_MSG_1("main", "user cache transformation failed"); else LOG_MSG_1("main", "user cache transformation " "succeeded"); } if (clear_all_cache_entries != 0) { if (geteuid() != 0) errx(EXIT_FAILURE, "Only root can initiate " "global cache transformation."); result = nscd_transform__(connection, global_config_entry_name, TT_ALL); if (result != 0) LOG_MSG_1("main", "global cache transformation " "failed"); else LOG_MSG_1("main", "global cache transformation " "succeeded"); } close_nscd_connection__(connection); free(user_config_entry_name); free(global_config_entry_name); return (EXIT_SUCCESS); } pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid); if (pidfile == NULL) { if (errno == EEXIST) errx(EXIT_FAILURE, "Daemon already running, pid: %d.", pid); warn("Cannot open or create pidfile"); } if (trace_mode_enabled == 1) TRACE_ON(); /* blocking the main thread from receiving the SIGPIPE signal */ sigblock(sigmask(SIGPIPE)); /* daemonization */ if (do_not_daemonize == 0) { res = daemon(0, trace_mode_enabled == 0 ? 0 : 1); if (res != 0) { LOG_ERR_1("main", "can't daemonize myself: %s", strerror(errno)); pidfile_remove(pidfile); goto fin; } else LOG_MSG_1("main", "successfully daemonized"); } pidfile_write(pidfile); s_agent_table = init_agent_table(); register_agent(s_agent_table, init_passwd_agent()); register_agent(s_agent_table, init_passwd_mp_agent()); register_agent(s_agent_table, init_group_agent()); register_agent(s_agent_table, init_group_mp_agent()); register_agent(s_agent_table, init_services_agent()); register_agent(s_agent_table, init_services_mp_agent()); LOG_MSG_1("main", "request agents registered successfully"); /* * Hosts agent can't work properly until we have access to the * appropriate dtab structures, which are used in nsdispatch * calls * register_agent(s_agent_table, init_hosts_agent()); */ /* configuration initialization */ s_configuration = init_configuration(); fill_configuration_defaults(s_configuration); error_str = NULL; error_line = 0; config_file = CONFIG_PATH; res = parse_config_file(s_configuration, config_file, &error_str, &error_line); if ((res != 0) && (error_str == NULL)) { config_file = DEFAULT_CONFIG_PATH; res = parse_config_file(s_configuration, config_file, &error_str, &error_line); } if (res != 0) { if (error_str != NULL) { LOG_ERR_1("main", "error in configuration file (%s, %d): %s\n", config_file, error_line, error_str); } else { LOG_ERR_1("main", "no configuration file found " "- was looking for %s and %s", CONFIG_PATH, DEFAULT_CONFIG_PATH); } destroy_configuration(s_configuration); return (-1); } if (force_single_threaded == 1) s_configuration->threads_num = 1; /* cache initialization */ s_cache = init_cache_(s_configuration); if (s_cache == NULL) { LOG_ERR_1("main", "can't initialize the cache"); destroy_configuration(s_configuration); return (-1); } /* runtime environment initialization */ s_runtime_env = init_runtime_env(s_configuration); if (s_runtime_env == NULL) { LOG_ERR_1("main", "can't initialize the runtime environment"); destroy_configuration(s_configuration); destroy_cache_(s_cache); return (-1); } if (s_configuration->threads_num > 1) { - threads = calloc(1, sizeof(*threads) * - s_configuration->threads_num); + threads =
calloc(s_configuration->threads_num, + sizeof(*threads)); for (i = 0; i < s_configuration->threads_num; ++i) { thread_args = malloc( sizeof(*thread_args)); thread_args->the_cache = s_cache; thread_args->the_runtime_env = s_runtime_env; thread_args->the_configuration = s_configuration; LOG_MSG_1("main", "thread #%d was successfully created", i); pthread_create(&threads[i], NULL, processing_thread, thread_args); thread_args = NULL; } for (i = 0; i < s_configuration->threads_num; ++i) pthread_join(threads[i], NULL); } else { LOG_MSG_1("main", "working in single-threaded mode"); processing_loop(s_cache, s_runtime_env, s_configuration); } fin: /* runtime environment destruction */ destroy_runtime_env(s_runtime_env); /* cache destruction */ destroy_cache_(s_cache); /* configuration destruction */ destroy_configuration(s_configuration); /* agents table destruction */ destroy_agent_table(s_agent_table); pidfile_remove(pidfile); return (EXIT_SUCCESS); }
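/*
 * Note on the calloc() hunk above, which is the actual change of this
 * revision: passing the element count and element size as separate
 * arguments lets calloc() itself validate the nmemb * size
 * multiplication (FreeBSD's allocator returns NULL on overflow), whereas
 * a hand-written `sizeof(*p) * n' product can silently wrap and
 * under-allocate before the allocator ever sees it.  A minimal sketch of
 * the difference, with hypothetical helper names:
 */
#include <stdlib.h>

void *
alloc_array(size_t nmemb, size_t size)
{
	/* the allocator detects nmemb * size overflow and returns NULL */
	return (calloc(nmemb, size));
}

void *
alloc_array_risky(size_t nmemb, size_t size)
{
	/* the multiplication can wrap around SIZE_MAX unnoticed */
	return (malloc(nmemb * size));
}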