Index: stable/11/usr.bin/mkimg/qcow.c =================================================================== --- stable/11/usr.bin/mkimg/qcow.c (revision 315598) +++ stable/11/usr.bin/mkimg/qcow.c (revision 315599) @@ -1,370 +1,370 @@ /*- * Copyright (c) 2014 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "image.h" #include "format.h" #include "mkimg.h" /* Default cluster sizes. 
*/ #define QCOW1_CLSTR_LOG2SZ 12 /* 4KB */ #define QCOW2_CLSTR_LOG2SZ 16 /* 64KB */ /* Flag bits in cluster offsets */ #define QCOW_CLSTR_COMPRESSED (1ULL << 62) #define QCOW_CLSTR_COPIED (1ULL << 63) struct qcow_header { uint32_t magic; #define QCOW_MAGIC 0x514649fb uint32_t version; #define QCOW_VERSION_1 1 #define QCOW_VERSION_2 2 uint64_t path_offset; uint32_t path_length; uint32_t clstr_log2sz; /* v2 only */ uint64_t disk_size; union { struct { uint8_t clstr_log2sz; uint8_t l2_log2sz; uint16_t _pad; uint32_t encryption; uint64_t l1_offset; } v1; struct { uint32_t encryption; uint32_t l1_entries; uint64_t l1_offset; uint64_t refcnt_offset; uint32_t refcnt_clstrs; uint32_t snapshot_count; uint64_t snapshot_offset; } v2; } u; }; static u_int clstr_log2sz; static uint64_t round_clstr(uint64_t ofs) { uint64_t clstrsz; clstrsz = 1UL << clstr_log2sz; return ((ofs + clstrsz - 1) & ~(clstrsz - 1)); } static int qcow_resize(lba_t imgsz, u_int version) { uint64_t imagesz; switch (version) { case QCOW_VERSION_1: clstr_log2sz = QCOW1_CLSTR_LOG2SZ; break; case QCOW_VERSION_2: clstr_log2sz = QCOW2_CLSTR_LOG2SZ; break; default: return (EDOOFUS); } imagesz = round_clstr(imgsz * secsz); if (verbose) fprintf(stderr, "QCOW: image size = %ju, cluster size = %u\n", (uintmax_t)imagesz, (u_int)(1U << clstr_log2sz)); return (image_set_size(imagesz / secsz)); } static int qcow1_resize(lba_t imgsz) { return (qcow_resize(imgsz, QCOW_VERSION_1)); } static int qcow2_resize(lba_t imgsz) { return (qcow_resize(imgsz, QCOW_VERSION_2)); } static int qcow_write(int fd, u_int version) { struct qcow_header *hdr; uint64_t *l1tbl, *l2tbl, *rctbl; uint16_t *rcblk; uint64_t clstr_imgsz, clstr_l2tbls, clstr_l1tblsz; uint64_t clstr_rcblks, clstr_rctblsz; uint64_t n, imagesz, nclstrs, ofs, ofsflags; lba_t blk, blkofs, blk_imgsz; u_int l1clno, l2clno, rcclno; u_int blk_clstrsz, refcnt_clstrs; u_int clstrsz, l1idx, l2idx; int error; if (clstr_log2sz == 0) return (EDOOFUS); clstrsz = 1U << clstr_log2sz; 
blk_clstrsz = clstrsz / secsz; blk_imgsz = image_get_size(); imagesz = blk_imgsz * secsz; clstr_imgsz = imagesz >> clstr_log2sz; clstr_l2tbls = round_clstr(clstr_imgsz * 8) >> clstr_log2sz; clstr_l1tblsz = round_clstr(clstr_l2tbls * 8) >> clstr_log2sz; nclstrs = clstr_imgsz + clstr_l2tbls + clstr_l1tblsz + 1; clstr_rcblks = clstr_rctblsz = 0; do { n = clstr_rcblks + clstr_rctblsz; clstr_rcblks = round_clstr((nclstrs + n) * 2) >> clstr_log2sz; clstr_rctblsz = round_clstr(clstr_rcblks * 8) >> clstr_log2sz; } while (n < (clstr_rcblks + clstr_rctblsz)); /* * We got all the sizes in clusters. Start the layout. * 0 - header * 1 - L1 table * 2 - RC table (v2 only) * 3 - L2 tables * 4 - RC block (v2 only) * 5 - data */ l1clno = 1; rcclno = 0; rctbl = l2tbl = l1tbl = NULL; rcblk = NULL; hdr = calloc(1, clstrsz); if (hdr == NULL) return (errno); be32enc(&hdr->magic, QCOW_MAGIC); be32enc(&hdr->version, version); be64enc(&hdr->disk_size, imagesz); switch (version) { case QCOW_VERSION_1: ofsflags = 0; l2clno = l1clno + clstr_l1tblsz; hdr->u.v1.clstr_log2sz = clstr_log2sz; hdr->u.v1.l2_log2sz = clstr_log2sz - 3; be64enc(&hdr->u.v1.l1_offset, clstrsz * l1clno); break; case QCOW_VERSION_2: ofsflags = QCOW_CLSTR_COPIED; rcclno = l1clno + clstr_l1tblsz; l2clno = rcclno + clstr_rctblsz; be32enc(&hdr->clstr_log2sz, clstr_log2sz); be32enc(&hdr->u.v2.l1_entries, clstr_l2tbls); be64enc(&hdr->u.v2.l1_offset, clstrsz * l1clno); be64enc(&hdr->u.v2.refcnt_offset, clstrsz * rcclno); refcnt_clstrs = round_clstr(clstr_rcblks * 8) >> clstr_log2sz; be32enc(&hdr->u.v2.refcnt_clstrs, refcnt_clstrs); break; default: return (EDOOFUS); } if (sparse_write(fd, hdr, clstrsz) < 0) { error = errno; goto out; } free(hdr); hdr = NULL; ofs = clstrsz * l2clno; nclstrs = 1 + clstr_l1tblsz + clstr_rctblsz; - l1tbl = calloc(1, clstrsz * clstr_l1tblsz); + l1tbl = calloc(clstr_l1tblsz, clstrsz); if (l1tbl == NULL) { error = ENOMEM; goto out; } for (n = 0; n < clstr_imgsz; n++) { blk = n * blk_clstrsz; if 
(image_data(blk, blk_clstrsz)) { nclstrs++; l1idx = n >> (clstr_log2sz - 3); if (l1tbl[l1idx] == 0) { be64enc(l1tbl + l1idx, ofs + ofsflags); ofs += clstrsz; nclstrs++; } } } if (sparse_write(fd, l1tbl, clstrsz * clstr_l1tblsz) < 0) { error = errno; goto out; } clstr_rcblks = 0; do { n = clstr_rcblks; clstr_rcblks = round_clstr((nclstrs + n) * 2) >> clstr_log2sz; } while (n < clstr_rcblks); if (rcclno > 0) { - rctbl = calloc(1, clstrsz * clstr_rctblsz); + rctbl = calloc(clstr_rctblsz, clstrsz); if (rctbl == NULL) { error = ENOMEM; goto out; } for (n = 0; n < clstr_rcblks; n++) { be64enc(rctbl + n, ofs); ofs += clstrsz; nclstrs++; } if (sparse_write(fd, rctbl, clstrsz * clstr_rctblsz) < 0) { error = errno; goto out; } free(rctbl); rctbl = NULL; } l2tbl = malloc(clstrsz); if (l2tbl == NULL) { error = ENOMEM; goto out; } for (l1idx = 0; l1idx < clstr_l2tbls; l1idx++) { if (l1tbl[l1idx] == 0) continue; memset(l2tbl, 0, clstrsz); blkofs = (lba_t)l1idx * blk_clstrsz * (clstrsz >> 3); for (l2idx = 0; l2idx < (clstrsz >> 3); l2idx++) { blk = blkofs + (lba_t)l2idx * blk_clstrsz; if (blk >= blk_imgsz) break; if (image_data(blk, blk_clstrsz)) { be64enc(l2tbl + l2idx, ofs + ofsflags); ofs += clstrsz; } } if (sparse_write(fd, l2tbl, clstrsz) < 0) { error = errno; goto out; } } free(l2tbl); l2tbl = NULL; free(l1tbl); l1tbl = NULL; if (rcclno > 0) { - rcblk = calloc(1, clstrsz * clstr_rcblks); + rcblk = calloc(clstr_rcblks, clstrsz); if (rcblk == NULL) { error = ENOMEM; goto out; } for (n = 0; n < nclstrs; n++) be16enc(rcblk + n, 1); if (sparse_write(fd, rcblk, clstrsz * clstr_rcblks) < 0) { error = errno; goto out; } free(rcblk); rcblk = NULL; } error = 0; for (n = 0; n < clstr_imgsz; n++) { blk = n * blk_clstrsz; if (image_data(blk, blk_clstrsz)) { error = image_copyout_region(fd, blk, blk_clstrsz); if (error) break; } } if (!error) error = image_copyout_done(fd); out: if (rcblk != NULL) free(rcblk); if (l2tbl != NULL) free(l2tbl); if (rctbl != NULL) free(rctbl); if (l1tbl != 
NULL) free(l1tbl); if (hdr != NULL) free(hdr); return (error); } static int qcow1_write(int fd) { return (qcow_write(fd, QCOW_VERSION_1)); } static int qcow2_write(int fd) { return (qcow_write(fd, QCOW_VERSION_2)); } static struct mkimg_format qcow1_format = { .name = "qcow", .description = "QEMU Copy-On-Write, version 1", .resize = qcow1_resize, .write = qcow1_write, }; FORMAT_DEFINE(qcow1_format); static struct mkimg_format qcow2_format = { .name = "qcow2", .description = "QEMU Copy-On-Write, version 2", .resize = qcow2_resize, .write = qcow2_write, }; FORMAT_DEFINE(qcow2_format); Index: stable/11/usr.sbin/mpsutil/mps_cmd.c =================================================================== --- stable/11/usr.sbin/mpsutil/mps_cmd.c (revision 315598) +++ stable/11/usr.sbin/mpsutil/mps_cmd.c (revision 315599) @@ -1,731 +1,731 @@ /*- * Copyright (c) 2015 Baptiste Daroussin * * Copyright (c) 2015 Netflix, Inc. * All rights reserved. * Written by: Scott Long * * Copyright (c) 2008 Yahoo!, Inc. * All rights reserved. * Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __RCSID("$FreeBSD$"); #include #include #include #if 0 #include #else #include "mps_ioctl.h" #include "mpr_ioctl.h" #endif #include #include #include #include #include #include #include #include #include "mpsutil.h" #ifndef USE_MPT_IOCTLS #define USE_MPT_IOCTLS #endif static const char *mps_ioc_status_codes[] = { "Success", /* 0x0000 */ "Invalid function", "Busy", "Invalid scatter-gather list", "Internal error", "Reserved", "Insufficient resources", "Invalid field", "Invalid state", /* 0x0008 */ "Operation state not supported", NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 0x0010 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 0x0018 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Invalid configuration action", /* 0x0020 */ "Invalid configuration type", "Invalid configuration page", "Invalid configuration data", "No configuration defaults", "Unable to commit configuration change", NULL, NULL, NULL, /* 0x0028 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 0x0030 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 0x0038 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Recovered SCSI error", /* 0x0040 */ "Invalid SCSI bus", "Invalid SCSI target ID", "SCSI device not there", "SCSI data overrun", "SCSI data underrun", "SCSI I/O error", "SCSI protocol error", "SCSI task terminated", /* 0x0048 */ "SCSI residual mismatch", "SCSI task management failed", "SCSI I/O controller terminated", "SCSI external controller terminated", "EEDP guard 
error", "EEDP reference tag error", "EEDP application tag error", NULL, /* 0x0050 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 0x0058 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "SCSI target priority I/O", /* 0x0060 */ "Invalid SCSI target port", "Invalid SCSI target I/O index", "SCSI target aborted", "No connection retryable", "No connection", "FC aborted", "Invalid FC receive ID", "FC did invalid", /* 0x0068 */ "FC node logged out", "Transfer count mismatch", "STS data not set", "FC exchange canceled", "Data offset error", "Too much write data", "IU too short", "ACK NAK timeout", /* 0x0070 */ "NAK received", NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 0x0078 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "LAN device not found", /* 0x0080 */ "LAN device failure", "LAN transmit error", "LAN transmit aborted", "LAN receive error", "LAN receive aborted", "LAN partial packet", "LAN canceled", NULL, /* 0x0088 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "SAS SMP request failed", /* 0x0090 */ "SAS SMP data overrun", NULL, NULL, NULL, NULL, NULL, NULL, "Inband aborted", /* 0x0098 */ "No inband connection", NULL, NULL, NULL, NULL, NULL, NULL, "Diagnostic released", /* 0x00A0 */ }; struct mprs_pass_thru { uint64_t PtrRequest; uint64_t PtrReply; uint64_t PtrData; uint32_t RequestSize; uint32_t ReplySize; uint32_t DataSize; uint32_t DataDirection; uint64_t PtrDataOut; uint32_t DataOutSize; uint32_t Timeout; }; struct mprs_btdh_mapping { uint16_t TargetID; uint16_t Bus; uint16_t DevHandle; uint16_t Reserved; }; const char * mps_ioc_status(U16 IOCStatus) { static char buffer[16]; IOCStatus &= MPI2_IOCSTATUS_MASK; if (IOCStatus < sizeof(mps_ioc_status_codes) / sizeof(char *) && mps_ioc_status_codes[IOCStatus] != NULL) return (mps_ioc_status_codes[IOCStatus]); snprintf(buffer, sizeof(buffer), "Status: 0x%04x", IOCStatus); return (buffer); } #ifdef USE_MPT_IOCTLS int mps_map_btdh(int fd, uint16_t *devhandle, uint16_t *bus, uint16_t *target) { int error; struct 
mprs_btdh_mapping map; map.Bus = *bus; map.TargetID = *target; map.DevHandle = *devhandle; if ((error = ioctl(fd, MPTIOCTL_BTDH_MAPPING, &map)) != 0) { error = errno; warn("Failed to map bus/target/device"); return (error); } *bus = map.Bus; *target = map.TargetID; *devhandle = map.DevHandle; return (0); } int mps_read_config_page_header(int fd, U8 PageType, U8 PageNumber, U32 PageAddress, MPI2_CONFIG_PAGE_HEADER *header, U16 *IOCStatus) { MPI2_CONFIG_REQUEST req; MPI2_CONFIG_REPLY reply; bzero(&req, sizeof(req)); req.Function = MPI2_FUNCTION_CONFIG; req.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; req.Header.PageType = PageType; req.Header.PageNumber = PageNumber; req.PageAddress = PageAddress; if (mps_pass_command(fd, &req, sizeof(req), &reply, sizeof(reply), NULL, 0, NULL, 0, 30)) return (errno); if (!IOC_STATUS_SUCCESS(reply.IOCStatus)) { if (IOCStatus != NULL) *IOCStatus = reply.IOCStatus; return (EIO); } if (header == NULL) return (EINVAL); *header = reply.Header; return (0); } int mps_read_ext_config_page_header(int fd, U8 ExtPageType, U8 PageNumber, U32 PageAddress, MPI2_CONFIG_PAGE_HEADER *header, U16 *ExtPageLength, U16 *IOCStatus) { MPI2_CONFIG_REQUEST req; MPI2_CONFIG_REPLY reply; bzero(&req, sizeof(req)); req.Function = MPI2_FUNCTION_CONFIG; req.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; req.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; req.ExtPageType = ExtPageType; req.Header.PageNumber = PageNumber; req.PageAddress = PageAddress; if (mps_pass_command(fd, &req, sizeof(req), &reply, sizeof(reply), NULL, 0, NULL, 0, 30)) return (errno); if (!IOC_STATUS_SUCCESS(reply.IOCStatus)) { if (IOCStatus != NULL) *IOCStatus = reply.IOCStatus; return (EIO); } if ((header == NULL) || (ExtPageLength == NULL)) return (EINVAL); *header = reply.Header; *ExtPageLength = reply.ExtPageLength; return (0); } void * mps_read_config_page(int fd, U8 PageType, U8 PageNumber, U32 PageAddress, U16 *IOCStatus) { MPI2_CONFIG_REQUEST req; MPI2_CONFIG_PAGE_HEADER header; 
MPI2_CONFIG_REPLY reply; void *buf; int error, len; bzero(&header, sizeof(header)); error = mps_read_config_page_header(fd, PageType, PageNumber, PageAddress, &header, IOCStatus); if (error) { errno = error; return (NULL); } bzero(&req, sizeof(req)); req.Function = MPI2_FUNCTION_CONFIG; req.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; req.PageAddress = PageAddress; req.Header = header; req.Header.PageLength = reply.Header.PageLength; if (reply.Header.PageLength == 0) req.Header.PageLength = 4; len = req.Header.PageLength * 4; buf = malloc(len); if (mps_pass_command(fd, &req, sizeof(req), &reply, sizeof(reply), buf, len, NULL, 0, 30)) { error = errno; free(buf); errno = error; return (NULL); } if (!IOC_STATUS_SUCCESS(reply.IOCStatus)) { if (IOCStatus != NULL) *IOCStatus = reply.IOCStatus; else warnx("Reading config page failed: 0x%x %s", reply.IOCStatus, mps_ioc_status(reply.IOCStatus)); free(buf); errno = EIO; return (NULL); } return (buf); } void * mps_read_extended_config_page(int fd, U8 ExtPageType, U8 PageVersion, U8 PageNumber, U32 PageAddress, U16 *IOCStatus) { MPI2_CONFIG_REQUEST req; MPI2_CONFIG_PAGE_HEADER header; MPI2_CONFIG_REPLY reply; U16 pagelen; void *buf; int error, len; if (IOCStatus != NULL) *IOCStatus = MPI2_IOCSTATUS_SUCCESS; bzero(&header, sizeof(header)); error = mps_read_ext_config_page_header(fd, ExtPageType, PageNumber, PageAddress, &header, &pagelen, IOCStatus); if (error) { errno = error; return (NULL); } bzero(&req, sizeof(req)); req.Function = MPI2_FUNCTION_CONFIG; req.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; req.PageAddress = PageAddress; req.Header = header; if (pagelen == 0) pagelen = 4; req.ExtPageLength = pagelen; req.ExtPageType = ExtPageType; len = pagelen * 4; buf = malloc(len); if (mps_pass_command(fd, &req, sizeof(req), &reply, sizeof(reply), buf, len, NULL, 0, 30)) { error = errno; free(buf); errno = error; return (NULL); } if (!IOC_STATUS_SUCCESS(reply.IOCStatus)) { if (IOCStatus != NULL) *IOCStatus = 
reply.IOCStatus; else warnx("Reading extended config page failed: %s", mps_ioc_status(reply.IOCStatus)); free(buf); errno = EIO; return (NULL); } return (buf); } int mps_firmware_send(int fd, unsigned char *fw, uint32_t len, bool bios) { MPI2_FW_DOWNLOAD_REQUEST req; MPI2_FW_DOWNLOAD_REPLY reply; bzero(&req, sizeof(req)); bzero(&reply, sizeof(reply)); req.Function = MPI2_FUNCTION_FW_DOWNLOAD; req.ImageType = bios ? MPI2_FW_DOWNLOAD_ITYPE_BIOS : MPI2_FW_DOWNLOAD_ITYPE_FW; req.TotalImageSize = len; req.MsgFlags = MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT; if (mps_user_command(fd, &req, sizeof(req), &reply, sizeof(reply), fw, len, 0)) { return (-1); } return (0); } int mps_firmware_get(int fd, unsigned char **firmware, bool bios) { MPI2_FW_UPLOAD_REQUEST req; MPI2_FW_UPLOAD_REPLY reply; int size; *firmware = NULL; bzero(&req, sizeof(req)); bzero(&reply, sizeof(reply)); req.Function = MPI2_FUNCTION_FW_UPLOAD; req.ImageType = bios ? MPI2_FW_DOWNLOAD_ITYPE_BIOS : MPI2_FW_DOWNLOAD_ITYPE_FW; if (mps_user_command(fd, &req, sizeof(req), &reply, sizeof(reply), NULL, 0, 0)) { return (-1); } if (reply.ActualImageSize == 0) { return (-1); } size = reply.ActualImageSize; - *firmware = calloc(1, sizeof(unsigned char) * size); + *firmware = calloc(size, sizeof(unsigned char)); if (*firmware == NULL) { warn("calloc"); return (-1); } if (mps_user_command(fd, &req, sizeof(req), &reply, sizeof(reply), *firmware, size, 0)) { free(*firmware); return (-1); } return (size); } #else int mps_read_config_page_header(int fd, U8 PageType, U8 PageNumber, U32 PageAddress, MPI2_CONFIG_PAGE_HEADER *header, U16 *IOCStatus) { struct mps_cfg_page_req req; if (IOCStatus != NULL) *IOCStatus = MPI2_IOCSTATUS_SUCCESS; if (header == NULL) return (EINVAL); bzero(&req, sizeof(req)); req.header.PageType = PageType; req.header.PageNumber = PageNumber; req.page_address = PageAddress; if (ioctl(fd, MPSIO_READ_CFG_HEADER, &req) < 0) return (errno); if (!IOC_STATUS_SUCCESS(req.ioc_status)) { if (IOCStatus != NULL) 
*IOCStatus = req.ioc_status; return (EIO); } bcopy(&req.header, header, sizeof(*header)); return (0); } void * mps_read_config_page(int fd, U8 PageType, U8 PageNumber, U32 PageAddress, U16 *IOCStatus) { struct mps_cfg_page_req req; void *buf; int error; error = mps_read_config_page_header(fd, PageType, PageNumber, PageAddress, &req.header, IOCStatus); if (error) { errno = error; return (NULL); } if (req.header.PageLength == 0) req.header.PageLength = 4; req.len = req.header.PageLength * 4; buf = malloc(req.len); req.buf = buf; bcopy(&req.header, buf, sizeof(req.header)); if (ioctl(fd, MPSIO_READ_CFG_PAGE, &req) < 0) { error = errno; free(buf); errno = error; return (NULL); } if (!IOC_STATUS_SUCCESS(req.ioc_status)) { if (IOCStatus != NULL) *IOCStatus = req.ioc_status; else warnx("Reading config page failed: 0x%x %s", req.ioc_status, mps_ioc_status(req.ioc_status)); free(buf); errno = EIO; return (NULL); } return (buf); } void * mps_read_extended_config_page(int fd, U8 ExtPageType, U8 PageVersion, U8 PageNumber, U32 PageAddress, U16 *IOCStatus) { struct mps_ext_cfg_page_req req; void *buf; int error; if (IOCStatus != NULL) *IOCStatus = MPI2_IOCSTATUS_SUCCESS; bzero(&req, sizeof(req)); req.header.PageVersion = PageVersion; req.header.PageNumber = PageNumber; req.header.ExtPageType = ExtPageType; req.page_address = PageAddress; if (ioctl(fd, MPSIO_READ_EXT_CFG_HEADER, &req) < 0) return (NULL); if (!IOC_STATUS_SUCCESS(req.ioc_status)) { if (IOCStatus != NULL) *IOCStatus = req.ioc_status; else warnx("Reading extended config page header failed: %s", mps_ioc_status(req.ioc_status)); errno = EIO; return (NULL); } req.len = req.header.ExtPageLength * 4; buf = malloc(req.len); req.buf = buf; bcopy(&req.header, buf, sizeof(req.header)); if (ioctl(fd, MPSIO_READ_EXT_CFG_PAGE, &req) < 0) { error = errno; free(buf); errno = error; return (NULL); } if (!IOC_STATUS_SUCCESS(req.ioc_status)) { if (IOCStatus != NULL) *IOCStatus = req.ioc_status; else warnx("Reading extended config 
page failed: %s", mps_ioc_status(req.ioc_status)); free(buf); errno = EIO; return (NULL); } return (buf); } #endif int mps_open(int unit) { char path[MAXPATHLEN]; snprintf(path, sizeof(path), "/dev/mp%s%d", is_mps ? "s": "r", unit); return (open(path, O_RDWR)); } int mps_user_command(int fd, void *req, uint32_t req_len, void *reply, uint32_t reply_len, void *buffer, int len, uint32_t flags) { struct mps_usr_command cmd; bzero(&cmd, sizeof(struct mps_usr_command)); cmd.req = req; cmd.req_len = req_len; cmd.rpl = reply; cmd.rpl_len = reply_len; cmd.buf = buffer; cmd.len = len; cmd.flags = flags; if (ioctl(fd, is_mps ? MPSIO_MPS_COMMAND : MPRIO_MPR_COMMAND, &cmd) < 0) return (errno); return (0); } int mps_pass_command(int fd, void *req, uint32_t req_len, void *reply, uint32_t reply_len, void *data_in, uint32_t datain_len, void *data_out, uint32_t dataout_len, uint32_t timeout) { struct mprs_pass_thru pass; pass.PtrRequest = (uint64_t)(uintptr_t)req; pass.PtrReply = (uint64_t)(uintptr_t)reply; pass.PtrData = (uint64_t)(uintptr_t)data_in; pass.PtrDataOut = (uint64_t)(uintptr_t)data_out; pass.RequestSize = req_len; pass.ReplySize = reply_len; pass.DataSize = datain_len; pass.DataOutSize = dataout_len; if (datain_len && dataout_len) { if (is_mps) { pass.DataDirection = MPS_PASS_THRU_DIRECTION_BOTH; } else { pass.DataDirection = MPR_PASS_THRU_DIRECTION_BOTH; } } else if (datain_len) { if (is_mps) { pass.DataDirection = MPS_PASS_THRU_DIRECTION_READ; } else { pass.DataDirection = MPR_PASS_THRU_DIRECTION_READ; } } else if (dataout_len) { if (is_mps) { pass.DataDirection = MPS_PASS_THRU_DIRECTION_WRITE; } else { pass.DataDirection = MPR_PASS_THRU_DIRECTION_WRITE; } } else { if (is_mps) { pass.DataDirection = MPS_PASS_THRU_DIRECTION_NONE; } else { pass.DataDirection = MPR_PASS_THRU_DIRECTION_NONE; } } pass.Timeout = timeout; if (ioctl(fd, MPTIOCTL_PASS_THRU, &pass) < 0) return (errno); return (0); } MPI2_IOC_FACTS_REPLY * mps_get_iocfacts(int fd) { MPI2_IOC_FACTS_REPLY *facts; 
MPI2_IOC_FACTS_REQUEST req; int error; facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY)); if (facts == NULL) { errno = ENOMEM; return (NULL); } bzero(&req, sizeof(MPI2_IOC_FACTS_REQUEST)); req.Function = MPI2_FUNCTION_IOC_FACTS; #if 1 error = mps_pass_command(fd, &req, sizeof(MPI2_IOC_FACTS_REQUEST), facts, sizeof(MPI2_IOC_FACTS_REPLY), NULL, 0, NULL, 0, 10); #else error = mps_user_command(fd, &req, sizeof(MPI2_IOC_FACTS_REQUEST), facts, sizeof(MPI2_IOC_FACTS_REPLY), NULL, 0, 0); #endif if (error) { free(facts); return (NULL); } if (!IOC_STATUS_SUCCESS(facts->IOCStatus)) { free(facts); errno = EINVAL; return (NULL); } return (facts); } Index: stable/11/usr.sbin/nscd/cachelib.c =================================================================== --- stable/11/usr.sbin/nscd/cachelib.c (revision 315598) +++ stable/11/usr.sbin/nscd/cachelib.c (revision 315599) @@ -1,1244 +1,1244 @@ /*- * Copyright (c) 2005 Michael Bushkov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include "cachelib.h" #include "debug.h" #define INITIAL_ENTRIES_CAPACITY 32 #define ENTRIES_CAPACITY_STEP 32 #define STRING_SIMPLE_HASH_BODY(in_var, var, a, M) \ for ((var) = 0; *(in_var) != '\0'; ++(in_var)) \ (var) = ((a)*(var) + *(in_var)) % (M) #define STRING_SIMPLE_MP2_HASH_BODY(in_var, var, a, M) \ for ((var) = 0; *(in_var) != 0; ++(in_var)) \ (var) = ((a)*(var) + *(in_var)) & (M - 1) static int cache_elemsize_common_continue_func(struct cache_common_entry_ *, struct cache_policy_item_ *); static int cache_lifetime_common_continue_func(struct cache_common_entry_ *, struct cache_policy_item_ *); static void clear_cache_entry(struct cache_entry_ *); static void destroy_cache_entry(struct cache_entry_ *); static void destroy_cache_mp_read_session(struct cache_mp_read_session_ *); static void destroy_cache_mp_write_session(struct cache_mp_write_session_ *); static int entries_bsearch_cmp_func(const void *, const void *); static int entries_qsort_cmp_func(const void *, const void *); static struct cache_entry_ ** find_cache_entry_p(struct cache_ *, const char *); static void flush_cache_entry(struct cache_entry_ *); static void flush_cache_policy(struct cache_common_entry_ *, struct cache_policy_ *, struct cache_policy_ *, int (*)(struct cache_common_entry_ *, struct cache_policy_item_ *)); static int ht_items_cmp_func(const void *, const void *); static int 
ht_items_fixed_size_left_cmp_func(const void *, const void *); static hashtable_index_t ht_item_hash_func(const void *, size_t); /* * Hashing and comparing routines, that are used with the hash tables */ static int ht_items_cmp_func(const void *p1, const void *p2) { struct cache_ht_item_data_ *hp1, *hp2; size_t min_size; int result; hp1 = (struct cache_ht_item_data_ *)p1; hp2 = (struct cache_ht_item_data_ *)p2; assert(hp1->key != NULL); assert(hp2->key != NULL); if (hp1->key_size != hp2->key_size) { min_size = (hp1->key_size < hp2->key_size) ? hp1->key_size : hp2->key_size; result = memcmp(hp1->key, hp2->key, min_size); if (result == 0) return ((hp1->key_size < hp2->key_size) ? -1 : 1); else return (result); } else return (memcmp(hp1->key, hp2->key, hp1->key_size)); } static int ht_items_fixed_size_left_cmp_func(const void *p1, const void *p2) { struct cache_ht_item_data_ *hp1, *hp2; size_t min_size; int result; hp1 = (struct cache_ht_item_data_ *)p1; hp2 = (struct cache_ht_item_data_ *)p2; assert(hp1->key != NULL); assert(hp2->key != NULL); if (hp1->key_size != hp2->key_size) { min_size = (hp1->key_size < hp2->key_size) ? hp1->key_size : hp2->key_size; result = memcmp(hp1->key, hp2->key, min_size); if (result == 0) if (min_size == hp1->key_size) return (0); else return ((hp1->key_size < hp2->key_size) ? 
-1 : 1); else return (result); } else return (memcmp(hp1->key, hp2->key, hp1->key_size)); } static hashtable_index_t ht_item_hash_func(const void *p, size_t cache_entries_size) { struct cache_ht_item_data_ *hp; size_t i; hashtable_index_t retval; hp = (struct cache_ht_item_data_ *)p; assert(hp->key != NULL); retval = 0; for (i = 0; i < hp->key_size; ++i) retval = (127 * retval + (unsigned char)hp->key[i]) % cache_entries_size; return retval; } HASHTABLE_PROTOTYPE(cache_ht_, cache_ht_item_, struct cache_ht_item_data_); HASHTABLE_GENERATE(cache_ht_, cache_ht_item_, struct cache_ht_item_data_, data, ht_item_hash_func, ht_items_cmp_func); /* * Routines to sort and search the entries by name */ static int entries_bsearch_cmp_func(const void *key, const void *ent) { assert(key != NULL); assert(ent != NULL); return (strcmp((char const *)key, (*(struct cache_entry_ const **)ent)->name)); } static int entries_qsort_cmp_func(const void *e1, const void *e2) { assert(e1 != NULL); assert(e2 != NULL); return (strcmp((*(struct cache_entry_ const **)e1)->name, (*(struct cache_entry_ const **)e2)->name)); } static struct cache_entry_ ** find_cache_entry_p(struct cache_ *the_cache, const char *entry_name) { return ((struct cache_entry_ **)(bsearch(entry_name, the_cache->entries, the_cache->entries_size, sizeof(struct cache_entry_ *), entries_bsearch_cmp_func))); } static void destroy_cache_mp_write_session(struct cache_mp_write_session_ *ws) { struct cache_mp_data_item_ *data_item; TRACE_IN(destroy_cache_mp_write_session); assert(ws != NULL); while (!TAILQ_EMPTY(&ws->items)) { data_item = TAILQ_FIRST(&ws->items); TAILQ_REMOVE(&ws->items, data_item, entries); free(data_item->value); free(data_item); } free(ws); TRACE_OUT(destroy_cache_mp_write_session); } static void destroy_cache_mp_read_session(struct cache_mp_read_session_ *rs) { TRACE_IN(destroy_cache_mp_read_session); assert(rs != NULL); free(rs); TRACE_OUT(destroy_cache_mp_read_session); } static void destroy_cache_entry(struct 
cache_entry_ *entry) { struct cache_common_entry_ *common_entry; struct cache_mp_entry_ *mp_entry; struct cache_mp_read_session_ *rs; struct cache_mp_write_session_ *ws; struct cache_ht_item_ *ht_item; struct cache_ht_item_data_ *ht_item_data; TRACE_IN(destroy_cache_entry); assert(entry != NULL); if (entry->params->entry_type == CET_COMMON) { common_entry = (struct cache_common_entry_ *)entry; HASHTABLE_FOREACH(&(common_entry->items), ht_item) { HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data) { free(ht_item_data->key); free(ht_item_data->value); } HASHTABLE_ENTRY_CLEAR(ht_item, data); } HASHTABLE_DESTROY(&(common_entry->items), data); /* FIFO policy is always first */ destroy_cache_fifo_policy(common_entry->policies[0]); switch (common_entry->common_params.policy) { case CPT_LRU: destroy_cache_lru_policy(common_entry->policies[1]); break; case CPT_LFU: destroy_cache_lfu_policy(common_entry->policies[1]); break; default: break; } free(common_entry->policies); } else { mp_entry = (struct cache_mp_entry_ *)entry; while (!TAILQ_EMPTY(&mp_entry->ws_head)) { ws = TAILQ_FIRST(&mp_entry->ws_head); TAILQ_REMOVE(&mp_entry->ws_head, ws, entries); destroy_cache_mp_write_session(ws); } while (!TAILQ_EMPTY(&mp_entry->rs_head)) { rs = TAILQ_FIRST(&mp_entry->rs_head); TAILQ_REMOVE(&mp_entry->rs_head, rs, entries); destroy_cache_mp_read_session(rs); } if (mp_entry->completed_write_session != NULL) destroy_cache_mp_write_session( mp_entry->completed_write_session); if (mp_entry->pending_write_session != NULL) destroy_cache_mp_write_session( mp_entry->pending_write_session); } free(entry->name); free(entry); TRACE_OUT(destroy_cache_entry); } static void clear_cache_entry(struct cache_entry_ *entry) { struct cache_mp_entry_ *mp_entry; struct cache_common_entry_ *common_entry; struct cache_ht_item_ *ht_item; struct cache_ht_item_data_ *ht_item_data; struct cache_policy_ *policy; struct cache_policy_item_ *item, *next_item; size_t entry_size; unsigned int i; if 
(entry->params->entry_type == CET_COMMON) { common_entry = (struct cache_common_entry_ *)entry; entry_size = 0; HASHTABLE_FOREACH(&(common_entry->items), ht_item) { HASHTABLE_ENTRY_FOREACH(ht_item, data, ht_item_data) { free(ht_item_data->key); free(ht_item_data->value); } entry_size += HASHTABLE_ENTRY_SIZE(ht_item, data); HASHTABLE_ENTRY_CLEAR(ht_item, data); } common_entry->items_size -= entry_size; for (i = 0; i < common_entry->policies_size; ++i) { policy = common_entry->policies[i]; next_item = NULL; item = policy->get_first_item_func(policy); while (item != NULL) { next_item = policy->get_next_item_func(policy, item); policy->remove_item_func(policy, item); policy->destroy_item_func(item); item = next_item; } } } else { mp_entry = (struct cache_mp_entry_ *)entry; if (mp_entry->rs_size == 0) { if (mp_entry->completed_write_session != NULL) { destroy_cache_mp_write_session( mp_entry->completed_write_session); mp_entry->completed_write_session = NULL; } memset(&mp_entry->creation_time, 0, sizeof(struct timeval)); memset(&mp_entry->last_request_time, 0, sizeof(struct timeval)); } } } /* * When passed to the flush_cache_policy, ensures that all old elements are * deleted. */ static int cache_lifetime_common_continue_func(struct cache_common_entry_ *entry, struct cache_policy_item_ *item) { return ((item->last_request_time.tv_sec - item->creation_time.tv_sec > entry->common_params.max_lifetime.tv_sec) ? 1: 0); } /* * When passed to the flush_cache_policy, ensures that all elements, that * exceed the size limit, are deleted. */ static int cache_elemsize_common_continue_func(struct cache_common_entry_ *entry, struct cache_policy_item_ *item) { return ((entry->items_size > entry->common_params.satisf_elemsize) ? 1 : 0); } /* * Removes the elements from the cache entry, while the continue_func returns 1. 
*/ static void flush_cache_policy(struct cache_common_entry_ *entry, struct cache_policy_ *policy, struct cache_policy_ *connected_policy, int (*continue_func)(struct cache_common_entry_ *, struct cache_policy_item_ *)) { struct cache_policy_item_ *item, *next_item, *connected_item; struct cache_ht_item_ *ht_item; struct cache_ht_item_data_ *ht_item_data, ht_key; hashtable_index_t hash; assert(policy != NULL); next_item = NULL; item = policy->get_first_item_func(policy); while ((item != NULL) && (continue_func(entry, item) == 1)) { next_item = policy->get_next_item_func(policy, item); connected_item = item->connected_item; policy->remove_item_func(policy, item); memset(&ht_key, 0, sizeof(struct cache_ht_item_data_)); ht_key.key = item->key; ht_key.key_size = item->key_size; hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &entry->items, &ht_key); assert(hash < HASHTABLE_ENTRIES_COUNT(&entry->items)); ht_item = HASHTABLE_GET_ENTRY(&(entry->items), hash); ht_item_data = HASHTABLE_ENTRY_FIND(cache_ht_, ht_item, &ht_key); assert(ht_item_data != NULL); free(ht_item_data->key); free(ht_item_data->value); HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item, ht_item_data); --entry->items_size; policy->destroy_item_func(item); if (connected_item != NULL) { connected_policy->remove_item_func(connected_policy, connected_item); connected_policy->destroy_item_func(connected_item); } item = next_item; } } static void flush_cache_entry(struct cache_entry_ *entry) { struct cache_mp_entry_ *mp_entry; struct cache_common_entry_ *common_entry; struct cache_policy_ *policy, *connected_policy; connected_policy = NULL; if (entry->params->entry_type == CET_COMMON) { common_entry = (struct cache_common_entry_ *)entry; if ((common_entry->common_params.max_lifetime.tv_sec != 0) || (common_entry->common_params.max_lifetime.tv_usec != 0)) { policy = common_entry->policies[0]; if (common_entry->policies_size > 1) connected_policy = common_entry->policies[1]; flush_cache_policy(common_entry, policy, 
connected_policy, cache_lifetime_common_continue_func); } if ((common_entry->common_params.max_elemsize != 0) && common_entry->items_size > common_entry->common_params.max_elemsize) { if (common_entry->policies_size > 1) { policy = common_entry->policies[1]; connected_policy = common_entry->policies[0]; } else { policy = common_entry->policies[0]; connected_policy = NULL; } flush_cache_policy(common_entry, policy, connected_policy, cache_elemsize_common_continue_func); } } else { mp_entry = (struct cache_mp_entry_ *)entry; if ((mp_entry->mp_params.max_lifetime.tv_sec != 0) || (mp_entry->mp_params.max_lifetime.tv_usec != 0)) { if (mp_entry->last_request_time.tv_sec - mp_entry->last_request_time.tv_sec > mp_entry->mp_params.max_lifetime.tv_sec) clear_cache_entry(entry); } } } struct cache_ * init_cache(struct cache_params const *params) { struct cache_ *retval; TRACE_IN(init_cache); assert(params != NULL); retval = calloc(1, sizeof(*retval)); assert(retval != NULL); assert(params != NULL); memcpy(&retval->params, params, sizeof(struct cache_params)); - retval->entries = calloc(1, - sizeof(*retval->entries) * INITIAL_ENTRIES_CAPACITY); + retval->entries = calloc(INITIAL_ENTRIES_CAPACITY, + sizeof(*retval->entries)); assert(retval->entries != NULL); retval->entries_capacity = INITIAL_ENTRIES_CAPACITY; retval->entries_size = 0; TRACE_OUT(init_cache); return (retval); } void destroy_cache(struct cache_ *the_cache) { TRACE_IN(destroy_cache); assert(the_cache != NULL); if (the_cache->entries != NULL) { size_t i; for (i = 0; i < the_cache->entries_size; ++i) destroy_cache_entry(the_cache->entries[i]); free(the_cache->entries); } free(the_cache); TRACE_OUT(destroy_cache); } int register_cache_entry(struct cache_ *the_cache, struct cache_entry_params const *params) { int policies_size; size_t entry_name_size; struct cache_common_entry_ *new_common_entry; struct cache_mp_entry_ *new_mp_entry; TRACE_IN(register_cache_entry); assert(the_cache != NULL); if 
(find_cache_entry(the_cache, params->entry_name) != NULL) { TRACE_OUT(register_cache_entry); return (-1); } if (the_cache->entries_size == the_cache->entries_capacity) { struct cache_entry_ **new_entries; size_t new_capacity; new_capacity = the_cache->entries_capacity + ENTRIES_CAPACITY_STEP; - new_entries = calloc(1, - sizeof(*new_entries) * new_capacity); + new_entries = calloc(new_capacity, + sizeof(*new_entries)); assert(new_entries != NULL); memcpy(new_entries, the_cache->entries, sizeof(struct cache_entry_ *) * the_cache->entries_size); free(the_cache->entries); the_cache->entries = new_entries; } entry_name_size = strlen(params->entry_name) + 1; switch (params->entry_type) { case CET_COMMON: new_common_entry = calloc(1, sizeof(*new_common_entry)); assert(new_common_entry != NULL); memcpy(&new_common_entry->common_params, params, sizeof(struct common_cache_entry_params)); new_common_entry->params = (struct cache_entry_params *)&new_common_entry->common_params; new_common_entry->common_params.cep.entry_name = calloc(1, entry_name_size); assert(new_common_entry->common_params.cep.entry_name != NULL); strlcpy(new_common_entry->common_params.cep.entry_name, params->entry_name, entry_name_size); new_common_entry->name = new_common_entry->common_params.cep.entry_name; HASHTABLE_INIT(&(new_common_entry->items), struct cache_ht_item_data_, data, new_common_entry->common_params.cache_entries_size); if (new_common_entry->common_params.policy == CPT_FIFO) policies_size = 1; else policies_size = 2; - new_common_entry->policies = calloc(1, - sizeof(*new_common_entry->policies) * policies_size); + new_common_entry->policies = calloc(policies_size, + sizeof(*new_common_entry->policies)); assert(new_common_entry->policies != NULL); new_common_entry->policies_size = policies_size; new_common_entry->policies[0] = init_cache_fifo_policy(); if (policies_size > 1) { switch (new_common_entry->common_params.policy) { case CPT_LRU: new_common_entry->policies[1] = 
init_cache_lru_policy(); break; case CPT_LFU: new_common_entry->policies[1] = init_cache_lfu_policy(); break; default: break; } } new_common_entry->get_time_func = the_cache->params.get_time_func; the_cache->entries[the_cache->entries_size++] = (struct cache_entry_ *)new_common_entry; break; case CET_MULTIPART: new_mp_entry = calloc(1, sizeof(*new_mp_entry)); assert(new_mp_entry != NULL); memcpy(&new_mp_entry->mp_params, params, sizeof(struct mp_cache_entry_params)); new_mp_entry->params = (struct cache_entry_params *)&new_mp_entry->mp_params; new_mp_entry->mp_params.cep.entry_name = calloc(1, entry_name_size); assert(new_mp_entry->mp_params.cep.entry_name != NULL); strlcpy(new_mp_entry->mp_params.cep.entry_name, params->entry_name, entry_name_size); new_mp_entry->name = new_mp_entry->mp_params.cep.entry_name; TAILQ_INIT(&new_mp_entry->ws_head); TAILQ_INIT(&new_mp_entry->rs_head); new_mp_entry->get_time_func = the_cache->params.get_time_func; the_cache->entries[the_cache->entries_size++] = (struct cache_entry_ *)new_mp_entry; break; } qsort(the_cache->entries, the_cache->entries_size, sizeof(struct cache_entry_ *), entries_qsort_cmp_func); TRACE_OUT(register_cache_entry); return (0); } int unregister_cache_entry(struct cache_ *the_cache, const char *entry_name) { struct cache_entry_ **del_ent; TRACE_IN(unregister_cache_entry); assert(the_cache != NULL); del_ent = find_cache_entry_p(the_cache, entry_name); if (del_ent != NULL) { destroy_cache_entry(*del_ent); --the_cache->entries_size; memmove(del_ent, del_ent + 1, (&(the_cache->entries[--the_cache->entries_size]) - del_ent) * sizeof(struct cache_entry_ *)); TRACE_OUT(unregister_cache_entry); return (0); } else { TRACE_OUT(unregister_cache_entry); return (-1); } } struct cache_entry_ * find_cache_entry(struct cache_ *the_cache, const char *entry_name) { struct cache_entry_ **result; TRACE_IN(find_cache_entry); result = find_cache_entry_p(the_cache, entry_name); if (result == NULL) { TRACE_OUT(find_cache_entry); 
return (NULL); } else { TRACE_OUT(find_cache_entry); return (*result); } } /* * Tries to read the element with the specified key from the cache. If the * value_size is too small, it will be filled with the proper number, and * the user will need to call cache_read again with the value buffer, that * is large enough. * Function returns 0 on success, -1 on error, and -2 if the value_size is too * small. */ int cache_read(struct cache_entry_ *entry, const char *key, size_t key_size, char *value, size_t *value_size) { struct cache_common_entry_ *common_entry; struct cache_ht_item_data_ item_data, *find_res; struct cache_ht_item_ *item; hashtable_index_t hash; struct cache_policy_item_ *connected_item; TRACE_IN(cache_read); assert(entry != NULL); assert(key != NULL); assert(value_size != NULL); assert(entry->params->entry_type == CET_COMMON); common_entry = (struct cache_common_entry_ *)entry; memset(&item_data, 0, sizeof(struct cache_ht_item_data_)); /* can't avoid the cast here */ item_data.key = (char *)key; item_data.key_size = key_size; hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items, &item_data); assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items)); item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash); find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data); if (find_res == NULL) { TRACE_OUT(cache_read); return (-1); } /* pretend that entry was not found if confidence is below threshold*/ if (find_res->confidence < common_entry->common_params.confidence_threshold) { TRACE_OUT(cache_read); return (-1); } if ((common_entry->common_params.max_lifetime.tv_sec != 0) || (common_entry->common_params.max_lifetime.tv_usec != 0)) { if (find_res->fifo_policy_item->last_request_time.tv_sec - find_res->fifo_policy_item->creation_time.tv_sec > common_entry->common_params.max_lifetime.tv_sec) { free(find_res->key); free(find_res->value); connected_item = find_res->fifo_policy_item->connected_item; if (connected_item != NULL) { 
common_entry->policies[1]->remove_item_func( common_entry->policies[1], connected_item); common_entry->policies[1]->destroy_item_func( connected_item); } common_entry->policies[0]->remove_item_func( common_entry->policies[0], find_res->fifo_policy_item); common_entry->policies[0]->destroy_item_func( find_res->fifo_policy_item); HASHTABLE_ENTRY_REMOVE(cache_ht_, item, find_res); --common_entry->items_size; } } if ((*value_size < find_res->value_size) || (value == NULL)) { *value_size = find_res->value_size; TRACE_OUT(cache_read); return (-2); } *value_size = find_res->value_size; memcpy(value, find_res->value, find_res->value_size); ++find_res->fifo_policy_item->request_count; common_entry->get_time_func( &find_res->fifo_policy_item->last_request_time); common_entry->policies[0]->update_item_func(common_entry->policies[0], find_res->fifo_policy_item); if (find_res->fifo_policy_item->connected_item != NULL) { connected_item = find_res->fifo_policy_item->connected_item; memcpy(&connected_item->last_request_time, &find_res->fifo_policy_item->last_request_time, sizeof(struct timeval)); connected_item->request_count = find_res->fifo_policy_item->request_count; common_entry->policies[1]->update_item_func( common_entry->policies[1], connected_item); } TRACE_OUT(cache_read); return (0); } /* * Writes the value with the specified key into the cache entry. * Functions returns 0 on success, and -1 on error. 
*/ int cache_write(struct cache_entry_ *entry, const char *key, size_t key_size, char const *value, size_t value_size) { struct cache_common_entry_ *common_entry; struct cache_ht_item_data_ item_data, *find_res; struct cache_ht_item_ *item; hashtable_index_t hash; struct cache_policy_ *policy, *connected_policy; struct cache_policy_item_ *policy_item; struct cache_policy_item_ *connected_policy_item; TRACE_IN(cache_write); assert(entry != NULL); assert(key != NULL); assert(value != NULL); assert(entry->params->entry_type == CET_COMMON); common_entry = (struct cache_common_entry_ *)entry; memset(&item_data, 0, sizeof(struct cache_ht_item_data_)); /* can't avoid the cast here */ item_data.key = (char *)key; item_data.key_size = key_size; hash = HASHTABLE_CALCULATE_HASH(cache_ht_, &common_entry->items, &item_data); assert(hash < HASHTABLE_ENTRIES_COUNT(&common_entry->items)); item = HASHTABLE_GET_ENTRY(&(common_entry->items), hash); find_res = HASHTABLE_ENTRY_FIND(cache_ht_, item, &item_data); if (find_res != NULL) { if (find_res->confidence < common_entry->common_params.confidence_threshold) { /* duplicate entry is no error, if confidence is low */ if ((find_res->value_size == value_size) && (memcmp(find_res->value, value, value_size) == 0)) { /* increase confidence on exact match (key and values) */ find_res->confidence++; } else { /* create new entry with low confidence, if value changed */ free(item_data.value); item_data.value = malloc(value_size); assert(item_data.value != NULL); memcpy(item_data.value, value, value_size); item_data.value_size = value_size; find_res->confidence = 1; } TRACE_OUT(cache_write); return (0); } TRACE_OUT(cache_write); return (-1); } item_data.key = malloc(key_size); memcpy(item_data.key, key, key_size); item_data.value = malloc(value_size); assert(item_data.value != NULL); memcpy(item_data.value, value, value_size); item_data.value_size = value_size; item_data.confidence = 1; policy_item = 
common_entry->policies[0]->create_item_func(); policy_item->key = item_data.key; policy_item->key_size = item_data.key_size; common_entry->get_time_func(&policy_item->creation_time); if (common_entry->policies_size > 1) { connected_policy_item = common_entry->policies[1]->create_item_func(); memcpy(&connected_policy_item->creation_time, &policy_item->creation_time, sizeof(struct timeval)); connected_policy_item->key = policy_item->key; connected_policy_item->key_size = policy_item->key_size; connected_policy_item->connected_item = policy_item; policy_item->connected_item = connected_policy_item; } item_data.fifo_policy_item = policy_item; common_entry->policies[0]->add_item_func(common_entry->policies[0], policy_item); if (common_entry->policies_size > 1) common_entry->policies[1]->add_item_func( common_entry->policies[1], connected_policy_item); HASHTABLE_ENTRY_STORE(cache_ht_, item, &item_data); ++common_entry->items_size; if ((common_entry->common_params.max_elemsize != 0) && (common_entry->items_size > common_entry->common_params.max_elemsize)) { if (common_entry->policies_size > 1) { policy = common_entry->policies[1]; connected_policy = common_entry->policies[0]; } else { policy = common_entry->policies[0]; connected_policy = NULL; } flush_cache_policy(common_entry, policy, connected_policy, cache_elemsize_common_continue_func); } TRACE_OUT(cache_write); return (0); } /* * Initializes the write session for the specified multipart entry. This * session then should be filled with data either committed or abandoned by * using close_cache_mp_write_session or abandon_cache_mp_write_session * respectively. * Returns NULL on errors (when there are too many opened write sessions for * the entry). 
*/ struct cache_mp_write_session_ * open_cache_mp_write_session(struct cache_entry_ *entry) { struct cache_mp_entry_ *mp_entry; struct cache_mp_write_session_ *retval; TRACE_IN(open_cache_mp_write_session); assert(entry != NULL); assert(entry->params->entry_type == CET_MULTIPART); mp_entry = (struct cache_mp_entry_ *)entry; if ((mp_entry->mp_params.max_sessions > 0) && (mp_entry->ws_size == mp_entry->mp_params.max_sessions)) { TRACE_OUT(open_cache_mp_write_session); return (NULL); } retval = calloc(1, sizeof(*retval)); assert(retval != NULL); TAILQ_INIT(&retval->items); retval->parent_entry = mp_entry; TAILQ_INSERT_HEAD(&mp_entry->ws_head, retval, entries); ++mp_entry->ws_size; TRACE_OUT(open_cache_mp_write_session); return (retval); } /* * Writes data to the specified session. Return 0 on success and -1 on errors * (when write session size limit is exceeded). */ int cache_mp_write(struct cache_mp_write_session_ *ws, char *data, size_t data_size) { struct cache_mp_data_item_ *new_item; TRACE_IN(cache_mp_write); assert(ws != NULL); assert(ws->parent_entry != NULL); assert(ws->parent_entry->params->entry_type == CET_MULTIPART); if ((ws->parent_entry->mp_params.max_elemsize > 0) && (ws->parent_entry->mp_params.max_elemsize == ws->items_size)) { TRACE_OUT(cache_mp_write); return (-1); } new_item = calloc(1, sizeof(*new_item)); assert(new_item != NULL); new_item->value = malloc(data_size); assert(new_item->value != NULL); memcpy(new_item->value, data, data_size); new_item->value_size = data_size; TAILQ_INSERT_TAIL(&ws->items, new_item, entries); ++ws->items_size; TRACE_OUT(cache_mp_write); return (0); } /* * Abandons the write session and frees all the connected resources. 
*/ void abandon_cache_mp_write_session(struct cache_mp_write_session_ *ws) { TRACE_IN(abandon_cache_mp_write_session); assert(ws != NULL); assert(ws->parent_entry != NULL); assert(ws->parent_entry->params->entry_type == CET_MULTIPART); TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries); --ws->parent_entry->ws_size; destroy_cache_mp_write_session(ws); TRACE_OUT(abandon_cache_mp_write_session); } /* * Commits the session to the entry, for which it was created. */ void close_cache_mp_write_session(struct cache_mp_write_session_ *ws) { TRACE_IN(close_cache_mp_write_session); assert(ws != NULL); assert(ws->parent_entry != NULL); assert(ws->parent_entry->params->entry_type == CET_MULTIPART); TAILQ_REMOVE(&ws->parent_entry->ws_head, ws, entries); --ws->parent_entry->ws_size; if (ws->parent_entry->completed_write_session == NULL) { /* * If there is no completed session yet, this will be the one */ ws->parent_entry->get_time_func( &ws->parent_entry->creation_time); ws->parent_entry->completed_write_session = ws; } else { /* * If there is a completed session, then we'll save our session * as a pending session. If there is already a pending session, * it would be destroyed. */ if (ws->parent_entry->pending_write_session != NULL) destroy_cache_mp_write_session( ws->parent_entry->pending_write_session); ws->parent_entry->pending_write_session = ws; } TRACE_OUT(close_cache_mp_write_session); } /* * Opens read session for the specified entry. Returns NULL on errors (when * there are no data in the entry, or the data are obsolete). 
*/ struct cache_mp_read_session_ * open_cache_mp_read_session(struct cache_entry_ *entry) { struct cache_mp_entry_ *mp_entry; struct cache_mp_read_session_ *retval; TRACE_IN(open_cache_mp_read_session); assert(entry != NULL); assert(entry->params->entry_type == CET_MULTIPART); mp_entry = (struct cache_mp_entry_ *)entry; if (mp_entry->completed_write_session == NULL) { TRACE_OUT(open_cache_mp_read_session); return (NULL); } if ((mp_entry->mp_params.max_lifetime.tv_sec != 0) || (mp_entry->mp_params.max_lifetime.tv_usec != 0)) { if (mp_entry->last_request_time.tv_sec - mp_entry->last_request_time.tv_sec > mp_entry->mp_params.max_lifetime.tv_sec) { flush_cache_entry(entry); TRACE_OUT(open_cache_mp_read_session); return (NULL); } } retval = calloc(1, sizeof(*retval)); assert(retval != NULL); retval->parent_entry = mp_entry; retval->current_item = TAILQ_FIRST( &mp_entry->completed_write_session->items); TAILQ_INSERT_HEAD(&mp_entry->rs_head, retval, entries); ++mp_entry->rs_size; mp_entry->get_time_func(&mp_entry->last_request_time); TRACE_OUT(open_cache_mp_read_session); return (retval); } /* * Reads the data from the read session - step by step. * Returns 0 on success, -1 on error (when there are no more data), and -2 if * the data_size is too small. In the last case, data_size would be filled * the proper value. */ int cache_mp_read(struct cache_mp_read_session_ *rs, char *data, size_t *data_size) { TRACE_IN(cache_mp_read); assert(rs != NULL); if (rs->current_item == NULL) { TRACE_OUT(cache_mp_read); return (-1); } if (rs->current_item->value_size > *data_size) { *data_size = rs->current_item->value_size; if (data == NULL) { TRACE_OUT(cache_mp_read); return (0); } TRACE_OUT(cache_mp_read); return (-2); } *data_size = rs->current_item->value_size; memcpy(data, rs->current_item->value, rs->current_item->value_size); rs->current_item = TAILQ_NEXT(rs->current_item, entries); TRACE_OUT(cache_mp_read); return (0); } /* * Closes the read session. 
If there are no more read sessions and there is * a pending write session, it will be committed and old * completed_write_session will be destroyed. */ void close_cache_mp_read_session(struct cache_mp_read_session_ *rs) { TRACE_IN(close_cache_mp_read_session); assert(rs != NULL); assert(rs->parent_entry != NULL); TAILQ_REMOVE(&rs->parent_entry->rs_head, rs, entries); --rs->parent_entry->rs_size; if ((rs->parent_entry->rs_size == 0) && (rs->parent_entry->pending_write_session != NULL)) { destroy_cache_mp_write_session( rs->parent_entry->completed_write_session); rs->parent_entry->completed_write_session = rs->parent_entry->pending_write_session; rs->parent_entry->pending_write_session = NULL; } destroy_cache_mp_read_session(rs); TRACE_OUT(close_cache_mp_read_session); } int transform_cache_entry(struct cache_entry_ *entry, enum cache_transformation_t transformation) { TRACE_IN(transform_cache_entry); switch (transformation) { case CTT_CLEAR: clear_cache_entry(entry); TRACE_OUT(transform_cache_entry); return (0); case CTT_FLUSH: flush_cache_entry(entry); TRACE_OUT(transform_cache_entry); return (0); default: TRACE_OUT(transform_cache_entry); return (-1); } } int transform_cache_entry_part(struct cache_entry_ *entry, enum cache_transformation_t transformation, const char *key_part, size_t key_part_size, enum part_position_t part_position) { struct cache_common_entry_ *common_entry; struct cache_ht_item_ *ht_item; struct cache_ht_item_data_ *ht_item_data, ht_key; struct cache_policy_item_ *item, *connected_item; TRACE_IN(transform_cache_entry_part); if (entry->params->entry_type != CET_COMMON) { TRACE_OUT(transform_cache_entry_part); return (-1); } if (transformation != CTT_CLEAR) { TRACE_OUT(transform_cache_entry_part); return (-1); } memset(&ht_key, 0, sizeof(struct cache_ht_item_data_)); ht_key.key = (char *)key_part; /* can't avoid casting here */ ht_key.key_size = key_part_size; common_entry = (struct cache_common_entry_ *)entry; 
HASHTABLE_FOREACH(&(common_entry->items), ht_item) { do { ht_item_data = HASHTABLE_ENTRY_FIND_SPECIAL(cache_ht_, ht_item, &ht_key, ht_items_fixed_size_left_cmp_func); if (ht_item_data != NULL) { item = ht_item_data->fifo_policy_item; connected_item = item->connected_item; common_entry->policies[0]->remove_item_func( common_entry->policies[0], item); free(ht_item_data->key); free(ht_item_data->value); HASHTABLE_ENTRY_REMOVE(cache_ht_, ht_item, ht_item_data); --common_entry->items_size; common_entry->policies[0]->destroy_item_func( item); if (common_entry->policies_size == 2) { common_entry->policies[1]->remove_item_func( common_entry->policies[1], connected_item); common_entry->policies[1]->destroy_item_func( connected_item); } } } while (ht_item_data != NULL); } TRACE_OUT(transform_cache_entry_part); return (0); } Index: stable/11/usr.sbin/nscd/config.c =================================================================== --- stable/11/usr.sbin/nscd/config.c (revision 315598) +++ stable/11/usr.sbin/nscd/config.c (revision 315599) @@ -1,588 +1,586 @@ /*- * Copyright (c) 2005 Michael Bushkov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "config.h" #include "debug.h" #include "log.h" /* * Default entries, which always exist in the configuration */ const char *c_default_entries[6] = { NSDB_PASSWD, NSDB_GROUP, NSDB_HOSTS, NSDB_SERVICES, NSDB_PROTOCOLS, NSDB_RPC }; static int configuration_entry_cmp(const void *, const void *); static int configuration_entry_sort_cmp(const void *, const void *); static int configuration_entry_cache_mp_sort_cmp(const void *, const void *); static int configuration_entry_cache_mp_cmp(const void *, const void *); static int configuration_entry_cache_mp_part_cmp(const void *, const void *); static struct configuration_entry *create_configuration_entry(const char *, struct timeval const *, struct timeval const *, struct common_cache_entry_params const *, struct common_cache_entry_params const *, struct mp_cache_entry_params const *); static int configuration_entry_sort_cmp(const void *e1, const void *e2) { return (strcmp((*((struct configuration_entry **)e1))->name, (*((struct configuration_entry **)e2))->name )); } static int configuration_entry_cmp(const void *e1, const void *e2) { return (strcmp((const char *)e1, (*((struct configuration_entry **)e2))->name )); } static int configuration_entry_cache_mp_sort_cmp(const void *e1, const void *e2) { return (strcmp((*((cache_entry *)e1))->params->entry_name, (*((cache_entry 
*)e2))->params->entry_name )); } static int configuration_entry_cache_mp_cmp(const void *e1, const void *e2) { return (strcmp((const char *)e1, (*((cache_entry *)e2))->params->entry_name )); } static int configuration_entry_cache_mp_part_cmp(const void *e1, const void *e2) { return (strncmp((const char *)e1, (*((cache_entry *)e2))->params->entry_name, strlen((const char *)e1) )); } static struct configuration_entry * create_configuration_entry(const char *name, struct timeval const *common_timeout, struct timeval const *mp_timeout, struct common_cache_entry_params const *positive_params, struct common_cache_entry_params const *negative_params, struct mp_cache_entry_params const *mp_params) { struct configuration_entry *retval; size_t size; int res; TRACE_IN(create_configuration_entry); assert(name != NULL); assert(positive_params != NULL); assert(negative_params != NULL); assert(mp_params != NULL); retval = calloc(1, sizeof(*retval)); assert(retval != NULL); res = pthread_mutex_init(&retval->positive_cache_lock, NULL); if (res != 0) { free(retval); LOG_ERR_2("create_configuration_entry", "can't create positive cache lock"); TRACE_OUT(create_configuration_entry); return (NULL); } res = pthread_mutex_init(&retval->negative_cache_lock, NULL); if (res != 0) { pthread_mutex_destroy(&retval->positive_cache_lock); free(retval); LOG_ERR_2("create_configuration_entry", "can't create negative cache lock"); TRACE_OUT(create_configuration_entry); return (NULL); } res = pthread_mutex_init(&retval->mp_cache_lock, NULL); if (res != 0) { pthread_mutex_destroy(&retval->positive_cache_lock); pthread_mutex_destroy(&retval->negative_cache_lock); free(retval); LOG_ERR_2("create_configuration_entry", "can't create negative cache lock"); TRACE_OUT(create_configuration_entry); return (NULL); } memcpy(&retval->positive_cache_params, positive_params, sizeof(struct common_cache_entry_params)); memcpy(&retval->negative_cache_params, negative_params, sizeof(struct common_cache_entry_params)); 
memcpy(&retval->mp_cache_params, mp_params, sizeof(struct mp_cache_entry_params)); size = strlen(name); retval->name = calloc(1, size + 1); assert(retval->name != NULL); memcpy(retval->name, name, size); memcpy(&retval->common_query_timeout, common_timeout, sizeof(struct timeval)); memcpy(&retval->mp_query_timeout, mp_timeout, sizeof(struct timeval)); asprintf(&retval->positive_cache_params.cep.entry_name, "%s+", name); assert(retval->positive_cache_params.cep.entry_name != NULL); asprintf(&retval->negative_cache_params.cep.entry_name, "%s-", name); assert(retval->negative_cache_params.cep.entry_name != NULL); asprintf(&retval->mp_cache_params.cep.entry_name, "%s*", name); assert(retval->mp_cache_params.cep.entry_name != NULL); TRACE_OUT(create_configuration_entry); return (retval); } /* * Creates configuration entry and fills it with default values */ struct configuration_entry * create_def_configuration_entry(const char *name) { struct common_cache_entry_params positive_params, negative_params; struct mp_cache_entry_params mp_params; struct timeval default_common_timeout, default_mp_timeout; struct configuration_entry *res = NULL; TRACE_IN(create_def_configuration_entry); memset(&positive_params, 0, sizeof(struct common_cache_entry_params)); positive_params.cep.entry_type = CET_COMMON; positive_params.cache_entries_size = DEFAULT_CACHE_HT_SIZE; positive_params.max_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE; positive_params.satisf_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE / 2; positive_params.max_lifetime.tv_sec = DEFAULT_POSITIVE_LIFETIME; positive_params.confidence_threshold = DEFAULT_POSITIVE_CONF_THRESH; positive_params.policy = CPT_LRU; memcpy(&negative_params, &positive_params, sizeof(struct common_cache_entry_params)); negative_params.max_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE; negative_params.satisf_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE / 2; negative_params.max_lifetime.tv_sec = DEFAULT_NEGATIVE_LIFETIME; negative_params.confidence_threshold = 
DEFAULT_NEGATIVE_CONF_THRESH; negative_params.policy = CPT_FIFO; memset(&default_common_timeout, 0, sizeof(struct timeval)); default_common_timeout.tv_sec = DEFAULT_COMMON_ENTRY_TIMEOUT; memset(&default_mp_timeout, 0, sizeof(struct timeval)); default_mp_timeout.tv_sec = DEFAULT_MP_ENTRY_TIMEOUT; memset(&mp_params, 0, sizeof(struct mp_cache_entry_params)); mp_params.cep.entry_type = CET_MULTIPART; mp_params.max_elemsize = DEFAULT_MULTIPART_ELEMENTS_SIZE; mp_params.max_sessions = DEFAULT_MULITPART_SESSIONS_SIZE; mp_params.max_lifetime.tv_sec = DEFAULT_MULITPART_LIFETIME; res = create_configuration_entry(name, &default_common_timeout, &default_mp_timeout, &positive_params, &negative_params, &mp_params); TRACE_OUT(create_def_configuration_entry); return (res); } void destroy_configuration_entry(struct configuration_entry *entry) { TRACE_IN(destroy_configuration_entry); assert(entry != NULL); pthread_mutex_destroy(&entry->positive_cache_lock); pthread_mutex_destroy(&entry->negative_cache_lock); pthread_mutex_destroy(&entry->mp_cache_lock); free(entry->name); free(entry->positive_cache_params.cep.entry_name); free(entry->negative_cache_params.cep.entry_name); free(entry->mp_cache_params.cep.entry_name); free(entry->mp_cache_entries); free(entry); TRACE_OUT(destroy_configuration_entry); } int add_configuration_entry(struct configuration *config, struct configuration_entry *entry) { TRACE_IN(add_configuration_entry); assert(entry != NULL); assert(entry->name != NULL); if (configuration_find_entry(config, entry->name) != NULL) { TRACE_OUT(add_configuration_entry); return (-1); } if (config->entries_size == config->entries_capacity) { struct configuration_entry **new_entries; config->entries_capacity *= 2; - new_entries = calloc(1, - sizeof(*new_entries) * - config->entries_capacity); + new_entries = calloc(config->entries_capacity, + sizeof(*new_entries)); assert(new_entries != NULL); memcpy(new_entries, config->entries, sizeof(struct configuration_entry *) * 
config->entries_size); free(config->entries); config->entries = new_entries; } config->entries[config->entries_size++] = entry; qsort(config->entries, config->entries_size, sizeof(struct configuration_entry *), configuration_entry_sort_cmp); TRACE_OUT(add_configuration_entry); return (0); } size_t configuration_get_entries_size(struct configuration *config) { TRACE_IN(configuration_get_entries_size); assert(config != NULL); TRACE_OUT(configuration_get_entries_size); return (config->entries_size); } struct configuration_entry * configuration_get_entry(struct configuration *config, size_t index) { TRACE_IN(configuration_get_entry); assert(config != NULL); assert(index < config->entries_size); TRACE_OUT(configuration_get_entry); return (config->entries[index]); } struct configuration_entry * configuration_find_entry(struct configuration *config, const char *name) { struct configuration_entry **retval; TRACE_IN(configuration_find_entry); retval = bsearch(name, config->entries, config->entries_size, sizeof(struct configuration_entry *), configuration_entry_cmp); TRACE_OUT(configuration_find_entry); return ((retval != NULL) ? *retval : NULL); } /* * All multipart cache entries are stored in the configuration_entry in the * sorted array (sorted by names). The 3 functions below manage this array. 
*/
/*
 * Inserts c_entry into the configuration entry's array of multipart
 * cache entries.  A new, one-element-larger array is allocated, the new
 * entry is placed in front of the old contents, and the whole array is
 * re-sorted by name so the bsearch()-based lookups below keep working.
 * Always returns 0 (allocation failure trips the assert instead).
 */
int
configuration_entry_add_mp_cache_entry(struct configuration_entry *config_entry,
	cache_entry c_entry)
{
	cache_entry *new_mp_entries, *old_mp_entries;

	TRACE_IN(configuration_entry_add_mp_cache_entry);
	++config_entry->mp_cache_entries_size;
	new_mp_entries = malloc(sizeof(*new_mp_entries) *
		config_entry->mp_cache_entries_size);
	assert(new_mp_entries != NULL);
	new_mp_entries[0] = c_entry;
	/* Copy the pre-existing entries, if any, after the new one. */
	if (config_entry->mp_cache_entries_size - 1 > 0) {
		memcpy(new_mp_entries + 1,
		    config_entry->mp_cache_entries,
		    (config_entry->mp_cache_entries_size - 1) *
		    sizeof(cache_entry));
	}

	/* Swap in the new array before freeing the old one. */
	old_mp_entries = config_entry->mp_cache_entries;
	config_entry->mp_cache_entries = new_mp_entries;
	free(old_mp_entries);

	/* Keep the array sorted by entry name for bsearch(). */
	qsort(config_entry->mp_cache_entries,
	    config_entry->mp_cache_entries_size,
	    sizeof(cache_entry),
	    configuration_entry_cache_mp_sort_cmp);

	TRACE_OUT(configuration_entry_add_mp_cache_entry);
	return (0);
}

/*
 * Looks up the multipart cache entry with the given name in the sorted
 * array; returns the entry, or NULL when no such entry is registered.
 */
cache_entry
configuration_entry_find_mp_cache_entry(
	struct configuration_entry *config_entry, const char *mp_name)
{
	cache_entry *result;

	TRACE_IN(configuration_entry_find_mp_cache_entry);
	result = bsearch(mp_name, config_entry->mp_cache_entries,
	    config_entry->mp_cache_entries_size,
	    sizeof(cache_entry), configuration_entry_cache_mp_cmp);

	if (result == NULL) {
		TRACE_OUT(configuration_entry_find_mp_cache_entry);
		return (NULL);
	} else {
		TRACE_OUT(configuration_entry_find_mp_cache_entry);
		return (*result);
	}
}

/*
 * Searches for all multipart entries with names starting with mp_name.
 * Needed for cache flushing.
*/
/*
 * Computes the half-open range [*start, *finish) of multipart cache
 * entries whose names match the prefix mp_name (per
 * configuration_entry_cache_mp_part_cmp).  Returns 0 and sets
 * *start/*finish on success, or -1 when nothing matches.  Relies on the
 * array being sorted, so all matching entries are contiguous around the
 * bsearch() hit.
 */
int
configuration_entry_find_mp_cache_entries(
	struct configuration_entry *config_entry, const char *mp_name,
	cache_entry **start, cache_entry **finish)
{
	cache_entry *result;

	TRACE_IN(configuration_entry_find_mp_cache_entries);
	result = bsearch(mp_name, config_entry->mp_cache_entries,
	    config_entry->mp_cache_entries_size,
	    sizeof(cache_entry), configuration_entry_cache_mp_part_cmp);

	if (result == NULL) {
		TRACE_OUT(configuration_entry_find_mp_cache_entries);
		return (-1);
	}

	*start = result;
	*finish = result + 1;

	/* Widen the range leftwards while preceding entries still match. */
	while (*start != config_entry->mp_cache_entries) {
		if (configuration_entry_cache_mp_part_cmp(mp_name, *start - 1) == 0)
			*start = *start - 1;
		else
			break;
	}

	/* Widen the range rightwards while following entries still match. */
	while (*finish != config_entry->mp_cache_entries +
		config_entry->mp_cache_entries_size) {
		if (configuration_entry_cache_mp_part_cmp(
			mp_name, *finish) == 0)
			*finish = *finish + 1;
		else
			break;
	}

	TRACE_OUT(configuration_entry_find_mp_cache_entries);
	return (0);
}

/*
 * Configuration entry uses rwlock to handle access to its fields.
 */
/* Acquires the configuration-wide lock for shared (read) access. */
void
configuration_lock_rdlock(struct configuration *config)
{
	TRACE_IN(configuration_lock_rdlock);
	pthread_rwlock_rdlock(&config->rwlock);
	TRACE_OUT(configuration_lock_rdlock);
}

/* Acquires the configuration-wide lock for exclusive (write) access. */
void
configuration_lock_wrlock(struct configuration *config)
{
	TRACE_IN(configuration_lock_wrlock);
	pthread_rwlock_wrlock(&config->rwlock);
	TRACE_OUT(configuration_lock_wrlock);
}

/* Releases the configuration-wide lock taken by either call above. */
void
configuration_unlock(struct configuration *config)
{
	TRACE_IN(configuration_unlock);
	pthread_rwlock_unlock(&config->rwlock);
	TRACE_OUT(configuration_unlock);
}

/*
 * Configuration entry uses 3 mutexes to handle cache operations. They are
 * acquired by configuration_lock_entry and configuration_unlock_entry
 * functions.
*/ void configuration_lock_entry(struct configuration_entry *entry, enum config_entry_lock_type lock_type) { TRACE_IN(configuration_lock_entry); assert(entry != NULL); switch (lock_type) { case CELT_POSITIVE: pthread_mutex_lock(&entry->positive_cache_lock); break; case CELT_NEGATIVE: pthread_mutex_lock(&entry->negative_cache_lock); break; case CELT_MULTIPART: pthread_mutex_lock(&entry->mp_cache_lock); break; default: /* should be unreachable */ break; } TRACE_OUT(configuration_lock_entry); } void configuration_unlock_entry(struct configuration_entry *entry, enum config_entry_lock_type lock_type) { TRACE_IN(configuration_unlock_entry); assert(entry != NULL); switch (lock_type) { case CELT_POSITIVE: pthread_mutex_unlock(&entry->positive_cache_lock); break; case CELT_NEGATIVE: pthread_mutex_unlock(&entry->negative_cache_lock); break; case CELT_MULTIPART: pthread_mutex_unlock(&entry->mp_cache_lock); break; default: /* should be unreachable */ break; } TRACE_OUT(configuration_unlock_entry); } struct configuration * init_configuration(void) { struct configuration *retval; TRACE_IN(init_configuration); retval = calloc(1, sizeof(*retval)); assert(retval != NULL); retval->entries_capacity = INITIAL_ENTRIES_CAPACITY; - retval->entries = calloc(1, - sizeof(*retval->entries) * - retval->entries_capacity); + retval->entries = calloc(retval->entries_capacity, + sizeof(*retval->entries)); assert(retval->entries != NULL); pthread_rwlock_init(&retval->rwlock, NULL); TRACE_OUT(init_configuration); return (retval); } void fill_configuration_defaults(struct configuration *config) { size_t len, i; TRACE_IN(fill_configuration_defaults); assert(config != NULL); if (config->socket_path != NULL) free(config->socket_path); len = strlen(DEFAULT_SOCKET_PATH); config->socket_path = calloc(1, len + 1); assert(config->socket_path != NULL); memcpy(config->socket_path, DEFAULT_SOCKET_PATH, len); len = strlen(DEFAULT_PIDFILE_PATH); config->pidfile_path = calloc(1, len + 1); 
assert(config->pidfile_path != NULL); memcpy(config->pidfile_path, DEFAULT_PIDFILE_PATH, len); config->socket_mode = S_IFSOCK | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; config->force_unlink = 1; config->query_timeout = DEFAULT_QUERY_TIMEOUT; config->threads_num = DEFAULT_THREADS_NUM; for (i = 0; i < config->entries_size; ++i) destroy_configuration_entry(config->entries[i]); config->entries_size = 0; TRACE_OUT(fill_configuration_defaults); } void destroy_configuration(struct configuration *config) { unsigned int i; TRACE_IN(destroy_configuration); assert(config != NULL); free(config->pidfile_path); free(config->socket_path); for (i = 0; i < config->entries_size; ++i) destroy_configuration_entry(config->entries[i]); free(config->entries); pthread_rwlock_destroy(&config->rwlock); free(config); TRACE_OUT(destroy_configuration); } Index: stable/11/usr.sbin/nscd/hashtable.h =================================================================== --- stable/11/usr.sbin/nscd/hashtable.h (revision 315598) +++ stable/11/usr.sbin/nscd/hashtable.h (revision 315599) @@ -1,221 +1,221 @@ /*- * Copyright (c) 2005 Michael Bushkov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __CACHELIB_HASHTABLE_H__ #define __CACHELIB_HASHTABLE_H__ #include #define HASHTABLE_INITIAL_ENTRIES_CAPACITY 8 typedef unsigned int hashtable_index_t; /* * This file contains queue.h-like macro definitions for hash tables. * Hash table is organized as an array of the specified size of the user * defined (with HASTABLE_ENTRY_HEAD) structures. Each hash table * entry (user defined structure) stores its elements in the sorted array. * You can place elements into the hash table, retrieve elements with * specified key, traverse through all elements, and delete them. * New elements are placed into the hash table by using the compare and * hashing functions, provided by the user. */ /* * Defines the hash table entry structure, that uses specified type of * elements. */ #define HASHTABLE_ENTRY_HEAD(name, type) struct name { \ type *values; \ size_t capacity; \ size_t size; \ } /* * Defines the hash table structure, which uses the specified type of entries. * The only restriction for entries is that is that they should have the field, * defined with HASHTABLE_ENTRY_HEAD macro. */ #define HASHTABLE_HEAD(name, entry) struct name { \ struct entry *entries; \ size_t entries_size; \ } #define HASHTABLE_ENTRIES_COUNT(table) \ ((table)->entries_size) /* * Unlike most of queue.h data types, hash tables can not be initialized * statically - so there is no HASHTABLE_HEAD_INITIALIZED macro. 
*/ #define HASHTABLE_INIT(table, type, field, _entries_size) \ do { \ hashtable_index_t var; \ - (table)->entries = calloc(1, \ - sizeof(*(table)->entries) * (_entries_size)); \ + (table)->entries = calloc(_entries_size, \ + sizeof(*(table)->entries)); \ (table)->entries_size = (_entries_size); \ for (var = 0; var < HASHTABLE_ENTRIES_COUNT(table); ++var) {\ (table)->entries[var].field.capacity = \ HASHTABLE_INITIAL_ENTRIES_CAPACITY; \ (table)->entries[var].field.size = 0; \ (table)->entries[var].field.values = malloc( \ sizeof(type) * \ HASHTABLE_INITIAL_ENTRIES_CAPACITY); \ assert((table)->entries[var].field.values != NULL);\ } \ } while (0) /* * All initialized hashtables should be destroyed with this macro. */ #define HASHTABLE_DESTROY(table, field) \ do { \ hashtable_index_t var; \ for (var = 0; var < HASHTABLE_ENTRIES_COUNT(table); ++var) {\ free((table)->entries[var].field.values); \ } \ } while (0) #define HASHTABLE_GET_ENTRY(table, hash) \ (&((table)->entries[hash])) /* * Traverses through all hash table entries */ #define HASHTABLE_FOREACH(table, var) \ for ((var) = &((table)->entries[0]); \ (var) < &((table)->entries[HASHTABLE_ENTRIES_COUNT(table)]);\ ++(var)) /* * Traverses through all elements of the specified hash table entry */ #define HASHTABLE_ENTRY_FOREACH(entry, field, var) \ for ((var) = &((entry)->field.values[0]); \ (var) < &((entry)->field.values[(entry)->field.size]); \ ++(var)) #define HASHTABLE_ENTRY_CLEAR(entry, field) \ ((entry)->field.size = 0) #define HASHTABLE_ENTRY_SIZE(entry, field) \ ((entry)->field.size) #define HASHTABLE_ENTRY_CAPACITY(entry, field) \ ((entry)->field.capacity) #define HASHTABLE_ENTRY_CAPACITY_INCREASE(entry, field, type) \ do { \ (entry)->field.capacity *= 2; \ (entry)->field.values = realloc((entry)->field.values, \ (entry)->field.capacity * sizeof(type)); \ } while (0) #define HASHTABLE_ENTRY_CAPACITY_DECREASE(entry, field, type) \ do { \ (entry)->field.capacity /= 2; \ (entry)->field.values = 
realloc((entry)->field.values, \ (entry)->field.capacity * sizeof(type)); \ } while (0) /* * Generates prototypes for the hash table functions */ #define HASHTABLE_PROTOTYPE(name, entry_, type) \ hashtable_index_t name##_CALCULATE_HASH(struct name *, type *); \ void name##_ENTRY_STORE(struct entry_*, type *); \ type *name##_ENTRY_FIND(struct entry_*, type *); \ type *name##_ENTRY_FIND_SPECIAL(struct entry_ *, type *, \ int (*) (const void *, const void *)); \ void name##_ENTRY_REMOVE(struct entry_*, type *); /* * Generates implementations of the hash table functions */ #define HASHTABLE_GENERATE(name, entry_, type, field, HASH, CMP) \ hashtable_index_t name##_CALCULATE_HASH(struct name *table, type *data) \ { \ \ return HASH(data, table->entries_size); \ } \ \ void name##_ENTRY_STORE(struct entry_ *the_entry, type *data) \ { \ \ if (the_entry->field.size == the_entry->field.capacity) \ HASHTABLE_ENTRY_CAPACITY_INCREASE(the_entry, field, type);\ \ memcpy(&(the_entry->field.values[the_entry->field.size++]), \ data, \ sizeof(type)); \ qsort(the_entry->field.values, the_entry->field.size, \ sizeof(type), CMP); \ } \ \ type *name##_ENTRY_FIND(struct entry_ *the_entry, type *key) \ { \ \ return ((type *)bsearch(key, the_entry->field.values, \ the_entry->field.size, sizeof(type), CMP)); \ } \ \ type *name##_ENTRY_FIND_SPECIAL(struct entry_ *the_entry, type *key, \ int (*compar) (const void *, const void *)) \ { \ return ((type *)bsearch(key, the_entry->field.values, \ the_entry->field.size, sizeof(type), compar)); \ } \ \ void name##_ENTRY_REMOVE(struct entry_ *the_entry, type *del_elm) \ { \ \ memmove(del_elm, del_elm + 1, \ (&the_entry->field.values[--the_entry->field.size] - del_elm) *\ sizeof(type)); \ } /* * Macro definitions below wrap the functions, generaed with * HASHTABLE_GENERATE macro. You should use them and avoid using generated * functions directly. 
*/

#define HASHTABLE_CALCULATE_HASH(name, table, data) \
	(name##_CALCULATE_HASH((table), data))

#define HASHTABLE_ENTRY_STORE(name, entry, data) \
	name##_ENTRY_STORE((entry), data)

#define HASHTABLE_ENTRY_FIND(name, entry, key) \
	(name##_ENTRY_FIND((entry), (key)))

#define HASHTABLE_ENTRY_FIND_SPECIAL(name, entry, key, cmp) \
	(name##_ENTRY_FIND_SPECIAL((entry), (key), (cmp)))

#define HASHTABLE_ENTRY_REMOVE(name, entry, del_elm) \
	name##_ENTRY_REMOVE((entry), (del_elm))

#endif
Index: stable/11/usr.sbin/nscd/nscd.c
===================================================================
--- stable/11/usr.sbin/nscd/nscd.c (revision 315598)
+++ stable/11/usr.sbin/nscd/nscd.c (revision 315599)
@@ -1,870 +1,870 @@
/*-
 * Copyright (c) 2005 Michael Bushkov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "agents/passwd.h" #include "agents/group.h" #include "agents/services.h" #include "cachelib.h" #include "config.h" #include "debug.h" #include "log.h" #include "nscdcli.h" #include "parser.h" #include "query.h" #include "singletons.h" #ifndef CONFIG_PATH #define CONFIG_PATH "/etc/nscd.conf" #endif #define DEFAULT_CONFIG_PATH "nscd.conf" #define MAX_SOCKET_IO_SIZE 4096 struct processing_thread_args { cache the_cache; struct configuration *the_configuration; struct runtime_env *the_runtime_env; }; static void accept_connection(struct kevent *, struct runtime_env *, struct configuration *); static void destroy_cache_(cache); static void destroy_runtime_env(struct runtime_env *); static cache init_cache_(struct configuration *); static struct runtime_env *init_runtime_env(struct configuration *); static void processing_loop(cache, struct runtime_env *, struct configuration *); static void process_socket_event(struct kevent *, struct runtime_env *, struct configuration *); static void process_timer_event(struct kevent *, struct runtime_env *, struct configuration *); static void *processing_thread(void *); static void usage(void); void get_time_func(struct timeval *); static void usage(void) { fprintf(stderr, "usage: nscd [-dnst] [-i cachename] [-I cachename]\n"); 
exit(1); } static cache init_cache_(struct configuration *config) { struct cache_params params; cache retval; struct configuration_entry *config_entry; size_t size, i; int res; TRACE_IN(init_cache_); memset(¶ms, 0, sizeof(struct cache_params)); params.get_time_func = get_time_func; retval = init_cache(¶ms); size = configuration_get_entries_size(config); for (i = 0; i < size; ++i) { config_entry = configuration_get_entry(config, i); /* * We should register common entries now - multipart entries * would be registered automatically during the queries. */ res = register_cache_entry(retval, (struct cache_entry_params *) &config_entry->positive_cache_params); config_entry->positive_cache_entry = find_cache_entry(retval, config_entry->positive_cache_params.cep.entry_name); assert(config_entry->positive_cache_entry != INVALID_CACHE_ENTRY); res = register_cache_entry(retval, (struct cache_entry_params *) &config_entry->negative_cache_params); config_entry->negative_cache_entry = find_cache_entry(retval, config_entry->negative_cache_params.cep.entry_name); assert(config_entry->negative_cache_entry != INVALID_CACHE_ENTRY); } LOG_MSG_2("cache", "cache was successfully initialized"); TRACE_OUT(init_cache_); return (retval); } static void destroy_cache_(cache the_cache) { TRACE_IN(destroy_cache_); destroy_cache(the_cache); TRACE_OUT(destroy_cache_); } /* * Socket and kqueues are prepared here. We have one global queue for both * socket and timers events. 
*/ static struct runtime_env * init_runtime_env(struct configuration *config) { int serv_addr_len; struct sockaddr_un serv_addr; struct kevent eventlist; struct timespec timeout; struct runtime_env *retval; TRACE_IN(init_runtime_env); retval = calloc(1, sizeof(*retval)); assert(retval != NULL); retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0); if (config->force_unlink == 1) unlink(config->socket_path); memset(&serv_addr, 0, sizeof(struct sockaddr_un)); serv_addr.sun_family = PF_LOCAL; strlcpy(serv_addr.sun_path, config->socket_path, sizeof(serv_addr.sun_path)); serv_addr_len = sizeof(serv_addr.sun_family) + strlen(serv_addr.sun_path) + 1; if (bind(retval->sockfd, (struct sockaddr *)&serv_addr, serv_addr_len) == -1) { close(retval->sockfd); free(retval); LOG_ERR_2("runtime environment", "can't bind socket to path: " "%s", config->socket_path); TRACE_OUT(init_runtime_env); return (NULL); } LOG_MSG_2("runtime environment", "using socket %s", config->socket_path); /* * Here we're marking socket as non-blocking and setting its backlog * to the maximum value */ chmod(config->socket_path, config->socket_mode); listen(retval->sockfd, -1); fcntl(retval->sockfd, F_SETFL, O_NONBLOCK); retval->queue = kqueue(); assert(retval->queue != -1); EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0); memset(&timeout, 0, sizeof(struct timespec)); kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout); LOG_MSG_2("runtime environment", "successfully initialized"); TRACE_OUT(init_runtime_env); return (retval); } static void destroy_runtime_env(struct runtime_env *env) { TRACE_IN(destroy_runtime_env); close(env->queue); close(env->sockfd); free(env); TRACE_OUT(destroy_runtime_env); } static void accept_connection(struct kevent *event_data, struct runtime_env *env, struct configuration *config) { struct kevent eventlist[2]; struct timespec timeout; struct query_state *qstate; int fd; int res; uid_t euid; gid_t egid; TRACE_IN(accept_connection); fd = 
accept(event_data->ident, NULL, NULL); if (fd == -1) { LOG_ERR_2("accept_connection", "error %d during accept()", errno); TRACE_OUT(accept_connection); return; } if (getpeereid(fd, &euid, &egid) != 0) { LOG_ERR_2("accept_connection", "error %d during getpeereid()", errno); TRACE_OUT(accept_connection); return; } qstate = init_query_state(fd, sizeof(int), euid, egid); if (qstate == NULL) { LOG_ERR_2("accept_connection", "can't init query_state"); TRACE_OUT(accept_connection); return; } memset(&timeout, 0, sizeof(struct timespec)); EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, qstate->timeout.tv_sec * 1000, qstate); EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT, NOTE_LOWAT, qstate->kevent_watermark, qstate); res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout); if (res < 0) LOG_ERR_2("accept_connection", "kevent error"); TRACE_OUT(accept_connection); } static void process_socket_event(struct kevent *event_data, struct runtime_env *env, struct configuration *config) { struct kevent eventlist[2]; struct timeval query_timeout; struct timespec kevent_timeout; int nevents; int eof_res, res; ssize_t io_res; struct query_state *qstate; TRACE_IN(process_socket_event); eof_res = event_data->flags & EV_EOF ? 1 : 0; res = 0; memset(&kevent_timeout, 0, sizeof(struct timespec)); EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE, 0, 0, NULL); nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout); if (nevents == -1) { if (errno == ENOENT) { /* the timer is already handling this event */ TRACE_OUT(process_socket_event); return; } else { /* some other error happened */ LOG_ERR_2("process_socket_event", "kevent error, errno" " is %d", errno); TRACE_OUT(process_socket_event); return; } } qstate = (struct query_state *)event_data->udata; /* * If the buffer that is to be send/received is too large, * we send it implicitly, by using query_io_buffer_read and * query_io_buffer_write functions in the query_state. 
These functions * use the temporary buffer, which is later send/received in parts. * The code below implements buffer splitting/mergind for send/receive * operations. It also does the actual socket IO operations. */ if (((qstate->use_alternate_io == 0) && (qstate->kevent_watermark <= (size_t)event_data->data)) || ((qstate->use_alternate_io != 0) && (qstate->io_buffer_watermark <= (size_t)event_data->data))) { if (qstate->use_alternate_io != 0) { switch (qstate->io_buffer_filter) { case EVFILT_READ: io_res = query_socket_read(qstate, qstate->io_buffer_p, qstate->io_buffer_watermark); if (io_res < 0) { qstate->use_alternate_io = 0; qstate->process_func = NULL; } else { qstate->io_buffer_p += io_res; if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) { qstate->io_buffer_p = qstate->io_buffer; qstate->use_alternate_io = 0; } } break; default: break; } } if (qstate->use_alternate_io == 0) { do { res = qstate->process_func(qstate); } while ((qstate->kevent_watermark == 0) && (qstate->process_func != NULL) && (res == 0)); if (res != 0) qstate->process_func = NULL; } if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_filter == EVFILT_WRITE)) { io_res = query_socket_write(qstate, qstate->io_buffer_p, qstate->io_buffer_watermark); if (io_res < 0) { qstate->use_alternate_io = 0; qstate->process_func = NULL; } else qstate->io_buffer_p += io_res; } } else { /* assuming that socket was closed */ qstate->process_func = NULL; qstate->use_alternate_io = 0; } if (((qstate->process_func == NULL) && (qstate->use_alternate_io == 0)) || (eof_res != 0) || (res != 0)) { destroy_query_state(qstate); close(event_data->ident); TRACE_OUT(process_socket_event); return; } /* updating the query_state lifetime variable */ get_time_func(&query_timeout); query_timeout.tv_usec = 0; query_timeout.tv_sec -= qstate->creation_time.tv_sec; if (query_timeout.tv_sec > qstate->timeout.tv_sec) query_timeout.tv_sec = 0; else query_timeout.tv_sec = qstate->timeout.tv_sec - 
query_timeout.tv_sec; if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size)) qstate->use_alternate_io = 0; if (qstate->use_alternate_io == 0) { /* * If we must send/receive the large block of data, * we should prepare the query_state's io_XXX fields. * We should also substitute its write_func and read_func * with the query_io_buffer_write and query_io_buffer_read, * which will allow us to implicitly send/receive this large * buffer later (in the subsequent calls to the * process_socket_event). */ if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) { if (qstate->io_buffer != NULL) free(qstate->io_buffer); qstate->io_buffer = calloc(1, qstate->kevent_watermark); assert(qstate->io_buffer != NULL); qstate->io_buffer_p = qstate->io_buffer; qstate->io_buffer_size = qstate->kevent_watermark; qstate->io_buffer_filter = qstate->kevent_filter; qstate->write_func = query_io_buffer_write; qstate->read_func = query_io_buffer_read; if (qstate->kevent_filter == EVFILT_READ) qstate->use_alternate_io = 1; qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE; EV_SET(&eventlist[1], event_data->ident, qstate->kevent_filter, EV_ADD | EV_ONESHOT, NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate); } else { EV_SET(&eventlist[1], event_data->ident, qstate->kevent_filter, EV_ADD | EV_ONESHOT, NOTE_LOWAT, qstate->kevent_watermark, qstate); } } else { if (qstate->io_buffer + qstate->io_buffer_size - qstate->io_buffer_p < MAX_SOCKET_IO_SIZE) { qstate->io_buffer_watermark = qstate->io_buffer + qstate->io_buffer_size - qstate->io_buffer_p; EV_SET(&eventlist[1], event_data->ident, qstate->io_buffer_filter, EV_ADD | EV_ONESHOT, NOTE_LOWAT, qstate->io_buffer_watermark, qstate); } else { qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE; EV_SET(&eventlist[1], event_data->ident, qstate->io_buffer_filter, EV_ADD | EV_ONESHOT, NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate); } } EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 
query_timeout.tv_sec * 1000, qstate); kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout); TRACE_OUT(process_socket_event); } /* * This routine is called if timer event has been signaled in the kqueue. It * just closes the socket and destroys the query_state. */ static void process_timer_event(struct kevent *event_data, struct runtime_env *env, struct configuration *config) { struct query_state *qstate; TRACE_IN(process_timer_event); qstate = (struct query_state *)event_data->udata; destroy_query_state(qstate); close(event_data->ident); TRACE_OUT(process_timer_event); } /* * Processing loop is the basic processing routine, that forms a body of each * procssing thread */ static void processing_loop(cache the_cache, struct runtime_env *env, struct configuration *config) { struct timespec timeout; const int eventlist_size = 1; struct kevent eventlist[eventlist_size]; int nevents, i; TRACE_MSG("=> processing_loop"); memset(&timeout, 0, sizeof(struct timespec)); memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size); for (;;) { nevents = kevent(env->queue, NULL, 0, eventlist, eventlist_size, NULL); /* * we can only receive 1 event on success */ if (nevents == 1) { struct kevent *event_data; event_data = &eventlist[0]; if ((int)event_data->ident == env->sockfd) { for (i = 0; i < event_data->data; ++i) accept_connection(event_data, env, config); EV_SET(eventlist, s_runtime_env->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, 0); memset(&timeout, 0, sizeof(struct timespec)); kevent(s_runtime_env->queue, eventlist, 1, NULL, 0, &timeout); } else { switch (event_data->filter) { case EVFILT_READ: case EVFILT_WRITE: process_socket_event(event_data, env, config); break; case EVFILT_TIMER: process_timer_event(event_data, env, config); break; default: break; } } } else { /* this branch shouldn't be currently executed */ } } TRACE_MSG("<= processing_loop"); } /* * Wrapper above the processing loop function. 
It sets the thread signal mask
 * to avoid SIGPIPE signals (which can happen if the client works incorrectly).
 */
static void *
processing_thread(void *data)
{
	struct processing_thread_args *args;
	sigset_t new;

	TRACE_MSG("=> processing_thread");
	args = (struct processing_thread_args *)data;

	/* Block SIGPIPE so a misbehaving client can't kill this thread. */
	sigemptyset(&new);
	sigaddset(&new, SIGPIPE);
	if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
		LOG_ERR_1("processing thread",
			"thread can't block the SIGPIPE signal");

	processing_loop(args->the_cache, args->the_runtime_env,
		args->the_configuration);
	/* This thread releases the argument structure its creator passed in. */
	free(args);
	TRACE_MSG("<= processing_thread");

	return (NULL);
}

/*
 * Monotonic-clock time source handed to the cache (see init_cache_);
 * reports whole seconds only — tv_usec is always forced to 0.
 */
void
get_time_func(struct timeval *time)
{
	struct timespec res;
	memset(&res, 0, sizeof(struct timespec));
	clock_gettime(CLOCK_MONOTONIC, &res);
	time->tv_sec = res.tv_sec;
	time->tv_usec = 0;
}

/*
 * The idea of _nss_cache_cycle_prevention_function is that nsdispatch
 * will search for this symbol in the executable. This symbol is the
 * attribute of the caching daemon. So, if it exists, nsdispatch won't try
 * to connect to the caching daemon and will just ignore the 'cache'
 * source in the nsswitch.conf. This method helps to avoid cycles and
 * organize self-performing requests.
* * (not actually a function; it used to be, but it doesn't make any * difference, as long as it has external linkage) */ void *_nss_cache_cycle_prevention_function; int main(int argc, char *argv[]) { struct processing_thread_args *thread_args; pthread_t *threads; struct pidfh *pidfile; pid_t pid; char const *config_file; char const *error_str; int error_line; int i, res; int trace_mode_enabled; int force_single_threaded; int do_not_daemonize; int clear_user_cache_entries, clear_all_cache_entries; char *user_config_entry_name, *global_config_entry_name; int show_statistics; int daemon_mode, interactive_mode; /* by default all debug messages are omitted */ TRACE_OFF(); /* parsing command line arguments */ trace_mode_enabled = 0; force_single_threaded = 0; do_not_daemonize = 0; clear_user_cache_entries = 0; clear_all_cache_entries = 0; show_statistics = 0; user_config_entry_name = NULL; global_config_entry_name = NULL; while ((res = getopt(argc, argv, "nstdi:I:")) != -1) { switch (res) { case 'n': do_not_daemonize = 1; break; case 's': force_single_threaded = 1; break; case 't': trace_mode_enabled = 1; break; case 'i': clear_user_cache_entries = 1; if (optarg != NULL) if (strcmp(optarg, "all") != 0) user_config_entry_name = strdup(optarg); break; case 'I': clear_all_cache_entries = 1; if (optarg != NULL) if (strcmp(optarg, "all") != 0) global_config_entry_name = strdup(optarg); break; case 'd': show_statistics = 1; break; case '?': default: usage(); /* NOT REACHED */ } } daemon_mode = do_not_daemonize | force_single_threaded | trace_mode_enabled; interactive_mode = clear_user_cache_entries | clear_all_cache_entries | show_statistics; if ((daemon_mode != 0) && (interactive_mode != 0)) { LOG_ERR_1("main", "daemon mode and interactive_mode arguments " "can't be used together"); usage(); } if (interactive_mode != 0) { FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r"); char pidbuf[256]; struct nscd_connection_params connection_params; nscd_connection connection; int result; 
if (pidfin == NULL) errx(EXIT_FAILURE, "There is no daemon running."); memset(pidbuf, 0, sizeof(pidbuf)); fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin); fclose(pidfin); if (ferror(pidfin) != 0) errx(EXIT_FAILURE, "Can't read from pidfile."); if (sscanf(pidbuf, "%d", &pid) != 1) errx(EXIT_FAILURE, "Invalid pidfile."); LOG_MSG_1("main", "daemon PID is %d", pid); memset(&connection_params, 0, sizeof(struct nscd_connection_params)); connection_params.socket_path = DEFAULT_SOCKET_PATH; connection = open_nscd_connection__(&connection_params); if (connection == INVALID_NSCD_CONNECTION) errx(EXIT_FAILURE, "Can't connect to the daemon."); if (clear_user_cache_entries != 0) { result = nscd_transform__(connection, user_config_entry_name, TT_USER); if (result != 0) LOG_MSG_1("main", "user cache transformation failed"); else LOG_MSG_1("main", "user cache_transformation " "succeeded"); } if (clear_all_cache_entries != 0) { if (geteuid() != 0) errx(EXIT_FAILURE, "Only root can initiate " "global cache transformation."); result = nscd_transform__(connection, global_config_entry_name, TT_ALL); if (result != 0) LOG_MSG_1("main", "global cache transformation " "failed"); else LOG_MSG_1("main", "global cache transformation " "succeeded"); } close_nscd_connection__(connection); free(user_config_entry_name); free(global_config_entry_name); return (EXIT_SUCCESS); } pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid); if (pidfile == NULL) { if (errno == EEXIST) errx(EXIT_FAILURE, "Daemon already running, pid: %d.", pid); warn("Cannot open or create pidfile"); } if (trace_mode_enabled == 1) TRACE_ON(); /* blocking the main thread from receiving SIGPIPE signal */ sigblock(sigmask(SIGPIPE)); /* daemonization */ if (do_not_daemonize == 0) { res = daemon(0, trace_mode_enabled == 0 ? 
0 : 1); if (res != 0) { LOG_ERR_1("main", "can't daemonize myself: %s", strerror(errno)); pidfile_remove(pidfile); goto fin; } else LOG_MSG_1("main", "successfully daemonized"); } pidfile_write(pidfile); s_agent_table = init_agent_table(); register_agent(s_agent_table, init_passwd_agent()); register_agent(s_agent_table, init_passwd_mp_agent()); register_agent(s_agent_table, init_group_agent()); register_agent(s_agent_table, init_group_mp_agent()); register_agent(s_agent_table, init_services_agent()); register_agent(s_agent_table, init_services_mp_agent()); LOG_MSG_1("main", "request agents registered successfully"); /* * Hosts agent can't work properly until we have access to the * appropriate dtab structures, which are used in nsdispatch * calls * register_agent(s_agent_table, init_hosts_agent()); */ /* configuration initialization */ s_configuration = init_configuration(); fill_configuration_defaults(s_configuration); error_str = NULL; error_line = 0; config_file = CONFIG_PATH; res = parse_config_file(s_configuration, config_file, &error_str, &error_line); if ((res != 0) && (error_str == NULL)) { config_file = DEFAULT_CONFIG_PATH; res = parse_config_file(s_configuration, config_file, &error_str, &error_line); } if (res != 0) { if (error_str != NULL) { LOG_ERR_1("main", "error in configuration file(%s, %d): %s\n", config_file, error_line, error_str); } else { LOG_ERR_1("main", "no configuration file found " "- was looking for %s and %s", CONFIG_PATH, DEFAULT_CONFIG_PATH); } destroy_configuration(s_configuration); return (-1); } if (force_single_threaded == 1) s_configuration->threads_num = 1; /* cache initialization */ s_cache = init_cache_(s_configuration); if (s_cache == NULL) { LOG_ERR_1("main", "can't initialize the cache"); destroy_configuration(s_configuration); return (-1); } /* runtime environment initialization */ s_runtime_env = init_runtime_env(s_configuration); if (s_runtime_env == NULL) { LOG_ERR_1("main", "can't initialize the runtime environment"); 
destroy_configuration(s_configuration); destroy_cache_(s_cache); return (-1); } if (s_configuration->threads_num > 1) { - threads = calloc(1, sizeof(*threads) * - s_configuration->threads_num); + threads = calloc(s_configuration->threads_num, + sizeof(*threads)); for (i = 0; i < s_configuration->threads_num; ++i) { thread_args = malloc( sizeof(*thread_args)); thread_args->the_cache = s_cache; thread_args->the_runtime_env = s_runtime_env; thread_args->the_configuration = s_configuration; LOG_MSG_1("main", "thread #%d was successfully created", i); pthread_create(&threads[i], NULL, processing_thread, thread_args); thread_args = NULL; } for (i = 0; i < s_configuration->threads_num; ++i) pthread_join(threads[i], NULL); } else { LOG_MSG_1("main", "working in single-threaded mode"); processing_loop(s_cache, s_runtime_env, s_configuration); } fin: /* runtime environment destruction */ destroy_runtime_env(s_runtime_env); /* cache destruction */ destroy_cache_(s_cache); /* configuration destruction */ destroy_configuration(s_configuration); /* agents table destruction */ destroy_agent_table(s_agent_table); pidfile_remove(pidfile); return (EXIT_SUCCESS); } Index: stable/11/usr.sbin/ypbind/yp_ping.c =================================================================== --- stable/11/usr.sbin/ypbind/yp_ping.c (revision 315598) +++ stable/11/usr.sbin/ypbind/yp_ping.c (revision 315599) @@ -1,308 +1,308 @@ /* * Copyright (c) 1996, 1997 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2009, Sun Microsystems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of Sun Microsystems, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if 0 #ifndef lint static char *sccsid = "@(#)from: clnt_udp.c 1.39 87/08/11 Copyr 1984 Sun Micro"; static char *sccsid = "@(#)from: clnt_udp.c 2.2 88/08/01 4.0 RPCSRC"; #endif #endif #include __FBSDID("$FreeBSD$"); /* * clnt_udp.c, Implements a UDP/IP based, client side RPC. * * Copyright (C) 1984, Sun Microsystems, Inc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "yp_ping.h" /* * pmap_getport.c * Client interface to pmap rpc service. * * Copyright (C) 1984, Sun Microsystems, Inc. */ static struct timeval timeout = { 1, 0 }; static struct timeval tottimeout = { 1, 0 }; /* * Find the mapped port for program,version. * Calls the pmap service remotely to do the lookup. * Returns 0 if no map exists. 
*/ static u_short __pmap_getport(struct sockaddr_in *address, u_long program, u_long version, u_int protocol) { u_short port = 0; int sock = -1; register CLIENT *client; struct pmap parms; address->sin_port = htons(PMAPPORT); client = clntudp_bufcreate(address, PMAPPROG, PMAPVERS, timeout, &sock, RPCSMALLMSGSIZE, RPCSMALLMSGSIZE); if (client != (CLIENT *)NULL) { parms.pm_prog = program; parms.pm_vers = version; parms.pm_prot = protocol; parms.pm_port = 0; /* not needed or used */ if (CLNT_CALL(client, PMAPPROC_GETPORT, (xdrproc_t)xdr_pmap, &parms, (xdrproc_t)xdr_u_short, &port, tottimeout) != RPC_SUCCESS){ rpc_createerr.cf_stat = RPC_PMAPFAILURE; clnt_geterr(client, &rpc_createerr.cf_error); } else if (port == 0) { rpc_createerr.cf_stat = RPC_PROGNOTREGISTERED; } CLNT_DESTROY(client); } if (sock != -1) (void)close(sock); address->sin_port = 0; return (port); } /* * Transmit to YPPROC_DOMAIN_NONACK, return immediately. */ static bool_t * ypproc_domain_nonack_2_send(domainname *argp, CLIENT *clnt) { static bool_t clnt_res; struct timeval TIMEOUT = { 0, 0 }; memset((char *)&clnt_res, 0, sizeof (clnt_res)); if (clnt_call(clnt, YPPROC_DOMAIN_NONACK, (xdrproc_t) xdr_domainname, (caddr_t) argp, (xdrproc_t) xdr_bool, (caddr_t) &clnt_res, TIMEOUT) != RPC_SUCCESS) { return (NULL); } return (&clnt_res); } /* * Receive response from YPPROC_DOMAIN_NONACK asynchronously. */ static bool_t * ypproc_domain_nonack_2_recv(domainname *argp, CLIENT *clnt) { static bool_t clnt_res; struct timeval TIMEOUT = { 0, 0 }; memset((char *)&clnt_res, 0, sizeof (clnt_res)); if (clnt_call(clnt, YPPROC_DOMAIN_NONACK, (xdrproc_t) NULL, (caddr_t) argp, (xdrproc_t) xdr_bool, (caddr_t) &clnt_res, TIMEOUT) != RPC_SUCCESS) { return (NULL); } return (&clnt_res); } /* * "We have the machine that goes 'ping!'" -- Monty Python * * This function blasts packets at the YPPROC_DOMAIN_NONACK procedures * of the NIS servers listed in restricted_addrs structure. 
* Whoever replies the fastest becomes our chosen server. * * Note: THIS IS NOT A BROADCAST OPERATION! We could use clnt_broadcast() * for this, but that has the following problems: * - We only get the address of the machine that replied in the * 'eachresult' callback, and on multi-homed machines this can * lead to confusion. * - clnt_broadcast() only transmits to local networks, whereas with * NIS+ you can have a perfectly good server located anywhere on or * off the local network. * - clnt_broadcast() blocks for an arbitrary amount of time which the * caller can't control -- we want to avoid that. * * Also note that this has nothing to do with the NIS_PING procedure used * for replica updates. */ struct ping_req { struct sockaddr_in sin; u_int32_t xid; }; int __yp_ping(struct in_addr *restricted_addrs, int cnt, char *dom, short *port) { struct timeval tv = { 5, 0 }; struct ping_req **reqs; unsigned long i; int async; struct sockaddr_in sin, *any = NULL; struct netbuf addr; int winner = -1; u_int32_t xid_seed, xid_lookup; int sock, dontblock = 1; CLIENT *clnt; char *foo = dom; int validsrvs = 0; /* Set up handles. 
*/ - reqs = calloc(1, sizeof(struct ping_req *) * cnt); + reqs = calloc(cnt, sizeof(struct ping_req *)); xid_seed = time(NULL) ^ getpid(); for (i = 0; i < cnt; i++) { bzero((char *)&sin, sizeof(sin)); sin.sin_family = AF_INET; bcopy((char *)&restricted_addrs[i], (char *)&sin.sin_addr, sizeof(struct in_addr)); sin.sin_port = htons(__pmap_getport(&sin, YPPROG, YPVERS, IPPROTO_UDP)); if (sin.sin_port == 0) continue; reqs[i] = calloc(1, sizeof(struct ping_req)); bcopy((char *)&sin, (char *)&reqs[i]->sin, sizeof(sin)); any = &reqs[i]->sin; reqs[i]->xid = xid_seed; xid_seed++; validsrvs++; } /* Make sure at least one server was assigned */ if (!validsrvs) { free(reqs); return(-1); } /* Create RPC handle */ sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); clnt = clntudp_create(any, YPPROG, YPVERS, tv, &sock); if (clnt == NULL) { close(sock); for (i = 0; i < cnt; i++) if (reqs[i] != NULL) free(reqs[i]); free(reqs); return(-1); } clnt->cl_auth = authunix_create_default(); tv.tv_sec = 0; clnt_control(clnt, CLSET_TIMEOUT, (char *)&tv); async = TRUE; clnt_control(clnt, CLSET_ASYNC, (char *)&async); ioctl(sock, FIONBIO, &dontblock); /* Transmit */ for (i = 0; i < cnt; i++) { if (reqs[i] != NULL) { clnt_control(clnt, CLSET_XID, (char *)&reqs[i]->xid); addr.len = sizeof(reqs[i]->sin); addr.buf = (char *) &reqs[i]->sin; clnt_control(clnt, CLSET_SVC_ADDR, &addr); ypproc_domain_nonack_2_send(&foo, clnt); } } /* Receive reply */ ypproc_domain_nonack_2_recv(&foo, clnt); /* Got a winner -- look him up. 
*/ clnt_control(clnt, CLGET_XID, (char *)&xid_lookup); for (i = 0; i < cnt; i++) { if (reqs[i] != NULL && reqs[i]->xid == xid_lookup) { winner = i; *port = reqs[i]->sin.sin_port; } } /* Shut everything down */ auth_destroy(clnt->cl_auth); clnt_destroy(clnt); close(sock); for (i = 0; i < cnt; i++) if (reqs[i] != NULL) free(reqs[i]); free(reqs); return(winner); } Index: stable/11 =================================================================== --- stable/11 (revision 315598) +++ stable/11 (revision 315599) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r315212-315215