Index: head/usr.sbin/camdd/camdd.c =================================================================== --- head/usr.sbin/camdd/camdd.c (revision 298885) +++ head/usr.sbin/camdd/camdd.c (revision 298886) @@ -1,3423 +1,3423 @@ /*- * Copyright (c) 1997-2007 Kenneth D. Merry * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * Authors: Ken Merry (Spectra Logic Corporation) */ /* * This is eventually intended to be: * - A basic data transfer/copy utility * - A simple benchmark utility * - An example of how to use the asynchronous pass(4) driver interface. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { CAMDD_CMD_NONE = 0x00000000, CAMDD_CMD_HELP = 0x00000001, CAMDD_CMD_WRITE = 0x00000002, CAMDD_CMD_READ = 0x00000003 } camdd_cmdmask; typedef enum { CAMDD_ARG_NONE = 0x00000000, CAMDD_ARG_VERBOSE = 0x00000001, CAMDD_ARG_DEVICE = 0x00000002, CAMDD_ARG_BUS = 0x00000004, CAMDD_ARG_TARGET = 0x00000008, CAMDD_ARG_LUN = 0x00000010, CAMDD_ARG_UNIT = 0x00000020, CAMDD_ARG_TIMEOUT = 0x00000040, CAMDD_ARG_ERR_RECOVER = 0x00000080, CAMDD_ARG_RETRIES = 0x00000100 } camdd_argmask; typedef enum { CAMDD_DEV_NONE = 0x00, CAMDD_DEV_PASS = 0x01, CAMDD_DEV_FILE = 0x02 } camdd_dev_type; struct camdd_io_opts { camdd_dev_type dev_type; char *dev_name; uint64_t blocksize; uint64_t queue_depth; uint64_t offset; int min_cmd_size; int write_dev; uint64_t debug; }; typedef enum { CAMDD_BUF_NONE, CAMDD_BUF_DATA, CAMDD_BUF_INDIRECT } camdd_buf_type; struct camdd_buf_indirect { /* * Pointer to the source buffer. */ struct camdd_buf *src_buf; /* * Offset into the source buffer, in bytes. */ uint64_t offset; /* * Pointer to the starting point in the source buffer. */ uint8_t *start_ptr; /* * Length of this chunk in bytes. 
*/ size_t len; }; struct camdd_buf_data { /* * Buffer allocated when we allocate this camdd_buf. This should * be the size of the blocksize for this device. */ uint8_t *buf; /* * The amount of backing store allocated in buf. Generally this * will be the blocksize of the device. */ uint32_t alloc_len; /* * The amount of data that was put into the buffer (on reads) or * the amount of data we have put onto the src_list so far (on * writes). */ uint32_t fill_len; /* * The amount of data that was not transferred. */ uint32_t resid; /* * Starting byte offset on the reader. */ uint64_t src_start_offset; /* * CCB used for pass(4) device targets. */ union ccb ccb; /* * Number of scatter/gather segments. */ int sg_count; /* * Set if we had to tack on an extra buffer to round the transfer * up to a sector size. */ int extra_buf; /* * Scatter/gather list used generally when we're the writer for a * pass(4) device. */ bus_dma_segment_t *segs; /* * Scatter/gather list used generally when we're the writer for a * file or block device; */ struct iovec *iovec; }; union camdd_buf_types { struct camdd_buf_indirect indirect; struct camdd_buf_data data; }; typedef enum { CAMDD_STATUS_NONE, CAMDD_STATUS_OK, CAMDD_STATUS_SHORT_IO, CAMDD_STATUS_EOF, CAMDD_STATUS_ERROR } camdd_buf_status; struct camdd_buf { camdd_buf_type buf_type; union camdd_buf_types buf_type_spec; camdd_buf_status status; uint64_t lba; size_t len; /* * A reference count of how many indirect buffers point to this * buffer. */ int refcount; /* * A link back to our parent device. */ struct camdd_dev *dev; STAILQ_ENTRY(camdd_buf) links; STAILQ_ENTRY(camdd_buf) work_links; /* * A count of the buffers on the src_list. */ int src_count; /* * List of buffers from our partner thread that are the components * of this buffer for the I/O. Uses src_links. 
*/ STAILQ_HEAD(,camdd_buf) src_list; STAILQ_ENTRY(camdd_buf) src_links; }; #define NUM_DEV_TYPES 2 struct camdd_dev_pass { int scsi_dev_type; struct cam_device *dev; uint64_t max_sector; uint32_t block_len; uint32_t cpi_maxio; }; typedef enum { CAMDD_FILE_NONE, CAMDD_FILE_REG, CAMDD_FILE_STD, CAMDD_FILE_PIPE, CAMDD_FILE_DISK, CAMDD_FILE_TAPE, CAMDD_FILE_TTY, CAMDD_FILE_MEM } camdd_file_type; typedef enum { CAMDD_FF_NONE = 0x00, CAMDD_FF_CAN_SEEK = 0x01 } camdd_file_flags; struct camdd_dev_file { int fd; struct stat sb; char filename[MAXPATHLEN + 1]; camdd_file_type file_type; camdd_file_flags file_flags; uint8_t *tmp_buf; }; struct camdd_dev_block { int fd; uint64_t size_bytes; uint32_t block_len; }; union camdd_dev_spec { struct camdd_dev_pass pass; struct camdd_dev_file file; struct camdd_dev_block block; }; typedef enum { CAMDD_DEV_FLAG_NONE = 0x00, CAMDD_DEV_FLAG_EOF = 0x01, CAMDD_DEV_FLAG_PEER_EOF = 0x02, CAMDD_DEV_FLAG_ACTIVE = 0x04, CAMDD_DEV_FLAG_EOF_SENT = 0x08, CAMDD_DEV_FLAG_EOF_QUEUED = 0x10 } camdd_dev_flags; struct camdd_dev { camdd_dev_type dev_type; union camdd_dev_spec dev_spec; camdd_dev_flags flags; char device_name[MAXPATHLEN+1]; uint32_t blocksize; uint32_t sector_size; uint64_t max_sector; uint64_t sector_io_limit; int min_cmd_size; int write_dev; int retry_count; int io_timeout; int debug; uint64_t start_offset_bytes; uint64_t next_io_pos_bytes; uint64_t next_peer_pos_bytes; uint64_t next_completion_pos_bytes; uint64_t peer_bytes_queued; uint64_t bytes_transferred; uint32_t target_queue_depth; uint32_t cur_active_io; uint8_t *extra_buf; uint32_t extra_buf_len; struct camdd_dev *peer_dev; pthread_mutex_t mutex; pthread_cond_t cond; int kq; int (*run)(struct camdd_dev *dev); int (*fetch)(struct camdd_dev *dev); /* * Buffers that are available for I/O. Uses links. */ STAILQ_HEAD(,camdd_buf) free_queue; /* * Free indirect buffers. These are used for breaking a large * buffer into multiple pieces. */ STAILQ_HEAD(,camdd_buf) free_indirect_queue; /* * Buffers that have been queued to the kernel. Uses links. */ STAILQ_HEAD(,camdd_buf) active_queue; /* * Will generally contain one of our buffers that is waiting for enough * I/O from our partner thread to be able to execute. This will * generally happen when our per-I/O-size is larger than the * partner thread's per-I/O-size. Uses links. */ STAILQ_HEAD(,camdd_buf) pending_queue; /* * Number of buffers on the pending queue */ int num_pending_queue; /* * Buffers that are filled and ready to execute. This is used when * our partner (reader) thread sends us blocks that are larger than * our blocksize, and so we have to split them into multiple pieces. */ STAILQ_HEAD(,camdd_buf) run_queue; /* * Number of buffers on the run queue. */ int num_run_queue; STAILQ_HEAD(,camdd_buf) reorder_queue; int num_reorder_queue; /* * Buffers that have been queued to us by our partner thread * (generally the reader thread) to be written out. Uses * work_links. */ STAILQ_HEAD(,camdd_buf) work_queue; /* * Buffers that have been completed by our partner thread. Uses * work_links. */ STAILQ_HEAD(,camdd_buf) peer_done_queue; /* * Number of buffers on the peer done queue. */ uint32_t num_peer_done_queue; /* * A list of buffers that we have queued to our peer thread. Uses * links. */ STAILQ_HEAD(,camdd_buf) peer_work_queue; /* * Number of buffers on the peer work queue. */ uint32_t num_peer_work_queue; }; static sem_t camdd_sem; static int need_exit = 0; static int error_exit = 0; static int need_status = 0; #ifndef min #define min(a, b) (a < b) ? 
a : b #endif /* * XXX KDM private copy of timespecsub(). This is normally defined in * sys/time.h, but is only enabled in the kernel. If that definition is * enabled in userland, it breaks the build of libnetbsd. */ #ifndef timespecsub #define timespecsub(vvp, uvp) \ do { \ (vvp)->tv_sec -= (uvp)->tv_sec; \ (vvp)->tv_nsec -= (uvp)->tv_nsec; \ if ((vvp)->tv_nsec < 0) { \ (vvp)->tv_sec--; \ (vvp)->tv_nsec += 1000000000; \ } \ } while (0) #endif -/* Generically usefull offsets into the peripheral private area */ +/* Generically useful offsets into the peripheral private area */ #define ppriv_ptr0 periph_priv.entries[0].ptr #define ppriv_ptr1 periph_priv.entries[1].ptr #define ppriv_field0 periph_priv.entries[0].field #define ppriv_field1 periph_priv.entries[1].field #define ccb_buf ppriv_ptr0 #define CAMDD_FILE_DEFAULT_BLOCK 524288 #define CAMDD_FILE_DEFAULT_DEPTH 1 #define CAMDD_PASS_MAX_BLOCK 1048576 #define CAMDD_PASS_DEFAULT_DEPTH 6 #define CAMDD_PASS_RW_TIMEOUT 60 * 1000 static int parse_btl(char *tstr, int *bus, int *target, int *lun, camdd_argmask *arglst); void camdd_free_dev(struct camdd_dev *dev); struct camdd_dev *camdd_alloc_dev(camdd_dev_type dev_type, struct kevent *new_ke, int num_ke, int retry_count, int timeout); static struct camdd_buf *camdd_alloc_buf(struct camdd_dev *dev, camdd_buf_type buf_type); void camdd_release_buf(struct camdd_buf *buf); struct camdd_buf *camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type); int camdd_buf_sg_create(struct camdd_buf *buf, int iovec, uint32_t sector_size, uint32_t *num_sectors_used, int *double_buf_needed); uint32_t camdd_buf_get_len(struct camdd_buf *buf); void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf); int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran); struct camdd_dev *camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count, int timeout); struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts, camdd_argmask arglist, int probe_retry_count, int probe_timeout, int io_retry_count, int io_timeout); void *camdd_file_worker(void *arg); camdd_buf_status camdd_ccb_status(union ccb *ccb); int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf); int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf); void camdd_peer_done(struct camdd_buf *buf); void camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, int *error_count); int camdd_pass_fetch(struct camdd_dev *dev); int camdd_file_run(struct camdd_dev *dev); int camdd_pass_run(struct camdd_dev *dev); int camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len); int camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf); void camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, uint32_t *peer_depth, uint32_t *our_bytes, uint32_t *peer_bytes); void *camdd_worker(void *arg); void camdd_sig_handler(int sig); void camdd_print_status(struct camdd_dev *camdd_dev, struct camdd_dev *other_dev, struct timespec *start_time); int camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, uint64_t max_io, int retry_count, int timeout); int camdd_parse_io_opts(char *args, int is_write, struct camdd_io_opts *io_opts); void usage(void); /* * Parse out a bus, or a bus, target and lun in the following * format: * bus * bus:target * bus:target:lun * * Returns the number of parsed components, or 0. 
*/ static int parse_btl(char *tstr, int *bus, int *target, int *lun, camdd_argmask *arglst) { char *tmpstr; int convs = 0; while (isspace(*tstr) && (*tstr != '\0')) tstr++; tmpstr = (char *)strtok(tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *bus = strtol(tmpstr, NULL, 0); *arglst |= CAMDD_ARG_BUS; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *target = strtol(tmpstr, NULL, 0); *arglst |= CAMDD_ARG_TARGET; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *lun = strtol(tmpstr, NULL, 0); *arglst |= CAMDD_ARG_LUN; convs++; } } } return convs; } /* * XXX KDM clean up and free all of the buffers on the queue! */ void camdd_free_dev(struct camdd_dev *dev) { if (dev == NULL) return; switch (dev->dev_type) { case CAMDD_DEV_FILE: { struct camdd_dev_file *file_dev = &dev->dev_spec.file; if (file_dev->fd != -1) close(file_dev->fd); free(file_dev->tmp_buf); break; } case CAMDD_DEV_PASS: { struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; if (pass_dev->dev != NULL) cam_close_device(pass_dev->dev); break; } default: break; } free(dev); } struct camdd_dev * camdd_alloc_dev(camdd_dev_type dev_type, struct kevent *new_ke, int num_ke, int retry_count, int timeout) { struct camdd_dev *dev = NULL; struct kevent *ke; size_t ke_size; int retval = 0; dev = malloc(sizeof(*dev)); if (dev == NULL) { warn("%s: unable to malloc %zu bytes", __func__, sizeof(*dev)); goto bailout; } bzero(dev, sizeof(*dev)); dev->dev_type = dev_type; dev->io_timeout = timeout; dev->retry_count = retry_count; STAILQ_INIT(&dev->free_queue); STAILQ_INIT(&dev->free_indirect_queue); STAILQ_INIT(&dev->active_queue); STAILQ_INIT(&dev->pending_queue); STAILQ_INIT(&dev->run_queue); STAILQ_INIT(&dev->reorder_queue); STAILQ_INIT(&dev->work_queue); STAILQ_INIT(&dev->peer_done_queue); STAILQ_INIT(&dev->peer_work_queue); retval = pthread_mutex_init(&dev->mutex, NULL); if (retval != 0) { warnc(retval, "%s: failed to initialize mutex", __func__); goto bailout; } retval = pthread_cond_init(&dev->cond, NULL); if (retval != 0) { warnc(retval, "%s: failed to initialize condition variable", __func__); goto bailout; } dev->kq = kqueue(); if (dev->kq == -1) { warn("%s: Unable to create kqueue", __func__); goto bailout; } ke_size = sizeof(struct kevent) * (num_ke + 4); ke = malloc(ke_size); if (ke == NULL) { warn("%s: unable to malloc %zu bytes", __func__, ke_size); goto bailout; } bzero(ke, ke_size); if (num_ke > 0) bcopy(new_ke, ke, num_ke * sizeof(struct kevent)); EV_SET(&ke[num_ke++], (uintptr_t)&dev->work_queue, EVFILT_USER, EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); EV_SET(&ke[num_ke++], (uintptr_t)&dev->peer_done_queue, EVFILT_USER, EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); EV_SET(&ke[num_ke++], SIGINFO, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); EV_SET(&ke[num_ke++], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); retval = kevent(dev->kq, ke, num_ke, NULL, 0, NULL); if (retval == -1) { warn("%s: Unable to register kevents", __func__); goto bailout; } return (dev); bailout: free(dev); return (NULL); } static struct camdd_buf * camdd_alloc_buf(struct camdd_dev *dev, camdd_buf_type buf_type) { struct camdd_buf *buf = NULL; uint8_t *data_ptr = NULL; /* * We only need to allocate data space for data buffers. 
*/ switch (buf_type) { case CAMDD_BUF_DATA: data_ptr = malloc(dev->blocksize); if (data_ptr == NULL) { warn("unable to allocate %u bytes", dev->blocksize); goto bailout_error; } break; default: break; } buf = malloc(sizeof(*buf)); if (buf == NULL) { warn("unable to allocate %zu bytes", sizeof(*buf)); goto bailout_error; } bzero(buf, sizeof(*buf)); buf->buf_type = buf_type; buf->dev = dev; switch (buf_type) { case CAMDD_BUF_DATA: { struct camdd_buf_data *data; data = &buf->buf_type_spec.data; data->alloc_len = dev->blocksize; data->buf = data_ptr; break; } case CAMDD_BUF_INDIRECT: break; default: break; } STAILQ_INIT(&buf->src_list); return (buf); bailout_error: if (data_ptr != NULL) free(data_ptr); if (buf != NULL) free(buf); return (NULL); } void camdd_release_buf(struct camdd_buf *buf) { struct camdd_dev *dev; dev = buf->dev; switch (buf->buf_type) { case CAMDD_BUF_DATA: { struct camdd_buf_data *data; data = &buf->buf_type_spec.data; if (data->segs != NULL) { if (data->extra_buf != 0) { void *extra_buf; extra_buf = (void *) data->segs[data->sg_count - 1].ds_addr; free(extra_buf); data->extra_buf = 0; } free(data->segs); data->segs = NULL; data->sg_count = 0; } else if (data->iovec != NULL) { if (data->extra_buf != 0) { free(data->iovec[data->sg_count - 1].iov_base); data->extra_buf = 0; } free(data->iovec); data->iovec = NULL; data->sg_count = 0; } STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); break; } case CAMDD_BUF_INDIRECT: STAILQ_INSERT_TAIL(&dev->free_indirect_queue, buf, links); break; default: err(1, "%s: Invalid buffer type %d for released buffer", __func__, buf->buf_type); break; } } struct camdd_buf * camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type) { struct camdd_buf *buf = NULL; switch (buf_type) { case CAMDD_BUF_DATA: buf = STAILQ_FIRST(&dev->free_queue); if (buf != NULL) { struct camdd_buf_data *data; uint8_t *data_ptr; uint32_t alloc_len; STAILQ_REMOVE_HEAD(&dev->free_queue, links); data = &buf->buf_type_spec.data; data_ptr = data->buf; alloc_len = data->alloc_len; bzero(buf, sizeof(*buf)); data->buf = data_ptr; data->alloc_len = alloc_len; } break; case CAMDD_BUF_INDIRECT: buf = STAILQ_FIRST(&dev->free_indirect_queue); if (buf != NULL) { STAILQ_REMOVE_HEAD(&dev->free_indirect_queue, links); bzero(buf, sizeof(*buf)); } break; default: warnx("Unknown buffer type %d requested", buf_type); break; } if (buf == NULL) return (camdd_alloc_buf(dev, buf_type)); else { STAILQ_INIT(&buf->src_list); buf->dev = dev; buf->buf_type = buf_type; return (buf); } } int camdd_buf_sg_create(struct camdd_buf *buf, int iovec, uint32_t sector_size, uint32_t *num_sectors_used, int *double_buf_needed) { struct camdd_buf *tmp_buf; struct camdd_buf_data *data; uint8_t *extra_buf = NULL; size_t extra_buf_len = 0; int i, retval = 0; data = &buf->buf_type_spec.data; data->sg_count = buf->src_count; /* * Compose a scatter/gather list from all of the buffers in the list. * If the length of the buffer isn't a multiple of the sector size, * we'll have to add an extra buffer. This should only happen * at the end of a transfer. 
*/ if ((data->fill_len % sector_size) != 0) { extra_buf_len = sector_size - (data->fill_len % sector_size); extra_buf = calloc(extra_buf_len, 1); if (extra_buf == NULL) { warn("%s: unable to allocate %zu bytes for extra " "buffer space", __func__, extra_buf_len); retval = 1; goto bailout; } data->extra_buf = 1; data->sg_count++; } if (iovec == 0) { data->segs = calloc(data->sg_count, sizeof(bus_dma_segment_t)); if (data->segs == NULL) { warn("%s: unable to allocate %zu bytes for S/G list", __func__, sizeof(bus_dma_segment_t) * data->sg_count); retval = 1; goto bailout; } } else { data->iovec = calloc(data->sg_count, sizeof(struct iovec)); if (data->iovec == NULL) { warn("%s: unable to allocate %zu bytes for S/G list", __func__, sizeof(struct iovec) * data->sg_count); retval = 1; goto bailout; } } for (i = 0, tmp_buf = STAILQ_FIRST(&buf->src_list); i < buf->src_count && tmp_buf != NULL; i++, tmp_buf = STAILQ_NEXT(tmp_buf, src_links)) { if (tmp_buf->buf_type == CAMDD_BUF_DATA) { struct camdd_buf_data *tmp_data; tmp_data = &tmp_buf->buf_type_spec.data; if (iovec == 0) { data->segs[i].ds_addr = (bus_addr_t) tmp_data->buf; data->segs[i].ds_len = tmp_data->fill_len - tmp_data->resid; } else { data->iovec[i].iov_base = tmp_data->buf; data->iovec[i].iov_len = tmp_data->fill_len - tmp_data->resid; } if (((tmp_data->fill_len - tmp_data->resid) % sector_size) != 0) *double_buf_needed = 1; } else { struct camdd_buf_indirect *tmp_ind; tmp_ind = &tmp_buf->buf_type_spec.indirect; if (iovec == 0) { data->segs[i].ds_addr = (bus_addr_t)tmp_ind->start_ptr; data->segs[i].ds_len = tmp_ind->len; } else { data->iovec[i].iov_base = tmp_ind->start_ptr; data->iovec[i].iov_len = tmp_ind->len; } if ((tmp_ind->len % sector_size) != 0) *double_buf_needed = 1; } } if (extra_buf != NULL) { if (iovec == 0) { data->segs[i].ds_addr = (bus_addr_t)extra_buf; data->segs[i].ds_len = extra_buf_len; } else { data->iovec[i].iov_base = extra_buf; data->iovec[i].iov_len = extra_buf_len; } i++; } if ((tmp_buf != NULL) || (i != data->sg_count)) { warnx("buffer source count does not match " "number of buffers in list!"); retval = 1; goto bailout; } bailout: if (retval == 0) { *num_sectors_used = (data->fill_len + extra_buf_len) / sector_size; } return (retval); } uint32_t camdd_buf_get_len(struct camdd_buf *buf) { uint32_t len = 0; if (buf->buf_type != CAMDD_BUF_DATA) { struct camdd_buf_indirect *indirect; indirect = &buf->buf_type_spec.indirect; len = indirect->len; } else { struct camdd_buf_data *data; data = &buf->buf_type_spec.data; len = data->fill_len; } return (len); } void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf) { struct camdd_buf_data *data; assert(buf->buf_type == CAMDD_BUF_DATA); data = &buf->buf_type_spec.data; STAILQ_INSERT_TAIL(&buf->src_list, child_buf, src_links); buf->src_count++; data->fill_len += camdd_buf_get_len(child_buf); } typedef enum { CAMDD_TS_MAX_BLK, CAMDD_TS_MIN_BLK, CAMDD_TS_BLK_GRAN, CAMDD_TS_EFF_IOSIZE } camdd_status_item_index; static struct camdd_status_items { const char *name; struct mt_status_entry *entry; } req_status_items[] = { { "max_blk", NULL }, { "min_blk", NULL }, { "blk_gran", NULL }, { "max_effective_iosize", NULL } }; int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran) { struct mt_status_data status_data; char *xml_str = NULL; unsigned int i; int retval = 0; retval = mt_get_xml_str(fd, MTIOCEXTGET, &xml_str); if (retval != 0) err(1, "Couldn't get XML string from %s", filename); retval 
= mt_get_status(xml_str, &status_data); if (retval != XML_STATUS_OK) { warn("couldn't get status for %s", filename); retval = 1; goto bailout; } else retval = 0; if (status_data.error != 0) { warnx("%s", status_data.error_str); retval = 1; goto bailout; } for (i = 0; i < sizeof(req_status_items) / sizeof(req_status_items[0]); i++) { char *name; name = __DECONST(char *, req_status_items[i].name); req_status_items[i].entry = mt_status_entry_find(&status_data, name); if (req_status_items[i].entry == NULL) { errx(1, "Cannot find status entry %s", req_status_items[i].name); } } *max_iosize = req_status_items[CAMDD_TS_EFF_IOSIZE].entry->value_unsigned; *max_blk= req_status_items[CAMDD_TS_MAX_BLK].entry->value_unsigned; *min_blk= req_status_items[CAMDD_TS_MIN_BLK].entry->value_unsigned; *blk_gran = req_status_items[CAMDD_TS_BLK_GRAN].entry->value_unsigned; bailout: free(xml_str); mt_status_free(&status_data); return (retval); } struct camdd_dev * camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count, int timeout) { struct camdd_dev *dev = NULL; struct camdd_dev_file *file_dev; uint64_t blocksize = io_opts->blocksize; dev = camdd_alloc_dev(CAMDD_DEV_FILE, NULL, 0, retry_count, timeout); if (dev == NULL) goto bailout; file_dev = &dev->dev_spec.file; file_dev->fd = fd; strlcpy(file_dev->filename, io_opts->dev_name, sizeof(file_dev->filename)); strlcpy(dev->device_name, io_opts->dev_name, sizeof(dev->device_name)); if (blocksize == 0) dev->blocksize = CAMDD_FILE_DEFAULT_BLOCK; else dev->blocksize = blocksize; if ((io_opts->queue_depth != 0) && (io_opts->queue_depth != 1)) { warnx("Queue depth %ju for %s ignored, only 1 outstanding " "command supported", (uintmax_t)io_opts->queue_depth, io_opts->dev_name); } dev->target_queue_depth = CAMDD_FILE_DEFAULT_DEPTH; dev->run = camdd_file_run; dev->fetch = NULL; /* * We can effectively access files on byte boundaries. We'll reset * this for devices like disks that can be accessed on sector * boundaries. */ dev->sector_size = 1; if ((fd != STDIN_FILENO) && (fd != STDOUT_FILENO)) { int retval; retval = fstat(fd, &file_dev->sb); if (retval != 0) { warn("Cannot stat %s", dev->device_name); goto bailout; camdd_free_dev(dev); dev = NULL; } if (S_ISREG(file_dev->sb.st_mode)) { file_dev->file_type = CAMDD_FILE_REG; } else if (S_ISCHR(file_dev->sb.st_mode)) { int type; if (ioctl(fd, FIODTYPE, &type) == -1) err(1, "FIODTYPE ioctl failed on %s", dev->device_name); else { if (type & D_TAPE) file_dev->file_type = CAMDD_FILE_TAPE; else if (type & D_DISK) file_dev->file_type = CAMDD_FILE_DISK; else if (type & D_MEM) file_dev->file_type = CAMDD_FILE_MEM; else if (type & D_TTY) file_dev->file_type = CAMDD_FILE_TTY; } } else if (S_ISDIR(file_dev->sb.st_mode)) { errx(1, "cannot operate on directory %s", dev->device_name); } else if (S_ISFIFO(file_dev->sb.st_mode)) { file_dev->file_type = CAMDD_FILE_PIPE; } else errx(1, "Cannot determine file type for %s", dev->device_name); switch (file_dev->file_type) { case CAMDD_FILE_REG: if (file_dev->sb.st_size != 0) dev->max_sector = file_dev->sb.st_size - 1; else dev->max_sector = 0; file_dev->file_flags |= CAMDD_FF_CAN_SEEK; break; case CAMDD_FILE_TAPE: { uint64_t max_iosize, max_blk, min_blk, blk_gran; /* * Check block limits and maximum effective iosize. * Make sure the blocksize is within the block * limits (and a multiple of the minimum blocksize) * and that the blocksize is <= maximum effective * iosize. 
			 */
			retval = camdd_probe_tape(fd, dev->device_name,
			    &max_iosize, &max_blk, &min_blk, &blk_gran);
			if (retval != 0)
				errx(1, "Unable to probe tape %s",
				    dev->device_name);

			/*
			 * The blocksize needs to be <= the maximum
			 * effective I/O size of the tape device.  Note
			 * that this also takes into account the maximum
			 * blocksize reported by READ BLOCK LIMITS.
			 */
			if (dev->blocksize > max_iosize) {
				warnx("Blocksize %u too big for %s, limiting "
				    "to %ju", dev->blocksize, dev->device_name,
				    max_iosize);
				dev->blocksize = max_iosize;
			}

			/*
			 * The blocksize needs to be at least min_blk;
			 */
			if (dev->blocksize < min_blk) {
				warnx("Blocksize %u too small for %s, "
				    "increasing to %ju", dev->blocksize,
				    dev->device_name, min_blk);
				dev->blocksize = min_blk;
			}

			/*
			 * And the blocksize needs to be a multiple of
			 * the block granularity.
			 */
			if ((blk_gran != 0) && (dev->blocksize % (1 << blk_gran))) {
				warnx("Blocksize %u for %s not a multiple of "
				    "%d, adjusting to %d", dev->blocksize,
				    dev->device_name, (1 << blk_gran),
				    dev->blocksize & ~((1 << blk_gran) - 1));
				dev->blocksize &= ~((1 << blk_gran) - 1);
			}

			if (dev->blocksize == 0) {
				errx(1, "Unable to derive valid blocksize for "
				    "%s", dev->device_name);
			}

			/*
			 * For tape drives, set the sector size to the
			 * blocksize so that we make sure not to write
			 * less than the blocksize out to the drive.
			 */
			dev->sector_size = dev->blocksize;
			break;
		}
		case CAMDD_FILE_DISK: {
			off_t media_size;
			unsigned int sector_size;

			file_dev->file_flags |= CAMDD_FF_CAN_SEEK;

			if (ioctl(fd, DIOCGSECTORSIZE, &sector_size) == -1) {
				err(1, "DIOCGSECTORSIZE ioctl failed on %s",
				    dev->device_name);
			}

			if (sector_size == 0) {
				errx(1, "DIOCGSECTORSIZE ioctl returned "
				    "invalid sector size %u for %s",
				    sector_size, dev->device_name);
			}

			if (ioctl(fd, DIOCGMEDIASIZE, &media_size) == -1) {
				err(1, "DIOCGMEDIASIZE ioctl failed on %s",
				    dev->device_name);
			}

			if (media_size == 0) {
				errx(1, "DIOCGMEDIASIZE ioctl returned "
				    "invalid media size %ju for %s",
				    (uintmax_t)media_size, dev->device_name);
			}

			if (dev->blocksize % sector_size) {
				errx(1, "%s blocksize %u not a multiple of "
				    "sector size %u", dev->device_name,
				    dev->blocksize, sector_size);
			}

			dev->sector_size = sector_size;
			dev->max_sector = (media_size / sector_size) - 1;
			break;
		}
		case CAMDD_FILE_MEM:
			file_dev->file_flags |= CAMDD_FF_CAN_SEEK;
			break;
		default:
			break;
		}
	}

	if ((io_opts->offset != 0)
	 && ((file_dev->file_flags & CAMDD_FF_CAN_SEEK) == 0)) {
		warnx("Offset %ju specified for %s, but we cannot seek on %s",
		    io_opts->offset, io_opts->dev_name, io_opts->dev_name);
		goto bailout_error;
	}
#if 0
	else if ((io_opts->offset != 0)
	 && ((io_opts->offset % dev->sector_size) != 0)) {
		warnx("Offset %ju for %s is not a multiple of the "
		    "sector size %u", io_opts->offset,
		    io_opts->dev_name, dev->sector_size);
		goto bailout_error;
	} else {
		dev->start_offset_bytes = io_opts->offset;
	}
#endif

bailout:
	return (dev);

bailout_error:
	camdd_free_dev(dev);
	return (NULL);
}

/*
 * Need to implement this. Do a basic probe:
 * - Check the inquiry data, make sure we're talking to a device that we
 *   can reasonably expect to talk to -- direct, RBC, CD, WORM.
 * - Send a test unit ready, make sure the device is available.
 * - Get the capacity and block size.
*/ struct camdd_dev * camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts, camdd_argmask arglist, int probe_retry_count, int probe_timeout, int io_retry_count, int io_timeout) { union ccb *ccb; uint64_t maxsector; uint32_t cpi_maxio, max_iosize, pass_numblocks; uint32_t block_len; struct scsi_read_capacity_data rcap; struct scsi_read_capacity_data_long rcaplong; struct camdd_dev *dev; struct camdd_dev_pass *pass_dev; struct kevent ke; int scsi_dev_type; dev = NULL; scsi_dev_type = SID_TYPE(&cam_dev->inq_data); maxsector = 0; block_len = 0; /* * For devices that support READ CAPACITY, we'll attempt to get the * capacity. Otherwise, we really don't support tape or other * devices via SCSI passthrough, so just return an error in that case. */ switch (scsi_dev_type) { case T_DIRECT: case T_WORM: case T_CDROM: case T_OPTICAL: case T_RBC: break; default: errx(1, "Unsupported SCSI device type %d", scsi_dev_type); break; /*NOTREACHED*/ } ccb = cam_getccb(cam_dev); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); goto bailout; } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); scsi_read_capacity(&ccb->csio, /*retries*/ probe_retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, &rcap, SSD_FULL_SIZE, /*timeout*/ probe_timeout ? probe_timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAMDD_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(cam_dev, ccb) < 0) { warn("error sending READ CAPACITY command"); cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } maxsector = scsi_4btoul(rcap.addr); block_len = scsi_4btoul(rcap.length); /* * A last block of 2^32-1 means that the true capacity is over 2TB, * and we need to issue the long READ CAPACITY to get the real * capacity. Otherwise, we're all set. */ if (maxsector != 0xffffffff) goto rcap_done; scsi_read_capacity_16(&ccb->csio, /*retries*/ probe_retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladdr*/ 0, /*pmi*/ 0, (uint8_t *)&rcaplong, sizeof(rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ probe_timeout ? 
probe_timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAMDD_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(cam_dev, ccb) < 0) { warn("error sending READ CAPACITY (16) command"); cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } maxsector = scsi_8btou64(rcaplong.addr); block_len = scsi_4btoul(rcaplong.length); rcap_done: bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_PATH_INQ; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 1; if (cam_send_ccb(cam_dev, ccb) < 0) { warn("error sending XPT_PATH_INQ CCB"); cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } EV_SET(&ke, cam_dev->fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0); dev = camdd_alloc_dev(CAMDD_DEV_PASS, &ke, 1, io_retry_count, io_timeout); if (dev == NULL) goto bailout; pass_dev = &dev->dev_spec.pass; pass_dev->scsi_dev_type = scsi_dev_type; pass_dev->dev = cam_dev; pass_dev->max_sector = maxsector; pass_dev->block_len = block_len; pass_dev->cpi_maxio = ccb->cpi.maxio; snprintf(dev->device_name, sizeof(dev->device_name), "%s%u", pass_dev->dev->device_name, pass_dev->dev->dev_unit_num); dev->sector_size = block_len; dev->max_sector = maxsector; /* * Determine the optimal blocksize to use for this device. */ /* * If the controller has not specified a maximum I/O size, * just go with 128K as a somewhat conservative value. */ if (pass_dev->cpi_maxio == 0) cpi_maxio = 131072; else cpi_maxio = pass_dev->cpi_maxio; /* * If the controller has a large maximum I/O size, limit it * to something smaller so that the kernel doesn't have trouble * allocating buffers to copy data in and out for us. * XXX KDM this is until we have unmapped I/O support in the kernel. */ max_iosize = min(cpi_maxio, CAMDD_PASS_MAX_BLOCK); /* * If we weren't able to get a block size for some reason, * default to 512 bytes. */ block_len = pass_dev->block_len; if (block_len == 0) block_len = 512; /* * Figure out how many blocksize chunks will fit in the * maximum I/O size. 
*/ pass_numblocks = max_iosize / block_len; /* * And finally, multiple the number of blocks by the LBA * length to get our maximum block size; */ dev->blocksize = pass_numblocks * block_len; if (io_opts->blocksize != 0) { if ((io_opts->blocksize % dev->sector_size) != 0) { warnx("Blocksize %ju for %s is not a multiple of " "sector size %u", (uintmax_t)io_opts->blocksize, dev->device_name, dev->sector_size); goto bailout_error; } dev->blocksize = io_opts->blocksize; } dev->target_queue_depth = CAMDD_PASS_DEFAULT_DEPTH; if (io_opts->queue_depth != 0) dev->target_queue_depth = io_opts->queue_depth; if (io_opts->offset != 0) { if (io_opts->offset > (dev->max_sector * dev->sector_size)) { warnx("Offset %ju is past the end of device %s", io_opts->offset, dev->device_name); goto bailout_error; } #if 0 else if ((io_opts->offset % dev->sector_size) != 0) { warnx("Offset %ju for %s is not a multiple of the " "sector size %u", io_opts->offset, dev->device_name, dev->sector_size); goto bailout_error; } dev->start_offset_bytes = io_opts->offset; #endif } dev->min_cmd_size = io_opts->min_cmd_size; dev->run = camdd_pass_run; dev->fetch = camdd_pass_fetch; bailout: cam_freeccb(ccb); return (dev); bailout_error: cam_freeccb(ccb); camdd_free_dev(dev); return (NULL); } void * camdd_worker(void *arg) { struct camdd_dev *dev = arg; struct camdd_buf *buf; struct timespec ts, *kq_ts; ts.tv_sec = 0; ts.tv_nsec = 0; pthread_mutex_lock(&dev->mutex); dev->flags |= CAMDD_DEV_FLAG_ACTIVE; for (;;) { struct kevent ke; int retval = 0; /* * XXX KDM check the reorder queue depth? */ if (dev->write_dev == 0) { uint32_t our_depth, peer_depth, peer_bytes, our_bytes; uint32_t target_depth = dev->target_queue_depth; uint32_t peer_target_depth = dev->peer_dev->target_queue_depth; uint32_t peer_blocksize = dev->peer_dev->blocksize; camdd_get_depth(dev, &our_depth, &peer_depth, &our_bytes, &peer_bytes); #if 0 while (((our_depth < target_depth) && (peer_depth < peer_target_depth)) || ((peer_bytes + our_bytes) < (peer_blocksize * 2))) { #endif while (((our_depth + peer_depth) < (target_depth + peer_target_depth)) || ((peer_bytes + our_bytes) < (peer_blocksize * 3))) { retval = camdd_queue(dev, NULL); if (retval == 1) break; else if (retval != 0) { error_exit = 1; goto bailout; } camdd_get_depth(dev, &our_depth, &peer_depth, &our_bytes, &peer_bytes); } } /* * See if we have any I/O that is ready to execute. */ buf = STAILQ_FIRST(&dev->run_queue); if (buf != NULL) { while (dev->target_queue_depth > dev->cur_active_io) { retval = dev->run(dev); if (retval == -1) { dev->flags |= CAMDD_DEV_FLAG_EOF; error_exit = 1; break; } else if (retval != 0) { break; } } } /* * We've reached EOF, or our partner has reached EOF. */ if ((dev->flags & CAMDD_DEV_FLAG_EOF) || (dev->flags & CAMDD_DEV_FLAG_PEER_EOF)) { if (dev->write_dev != 0) { if ((STAILQ_EMPTY(&dev->work_queue)) && (dev->num_run_queue == 0) && (dev->cur_active_io == 0)) { goto bailout; } } else { /* * If we're the reader, and the writer * got EOF, he is already done. If we got * the EOF, then we need to wait until * everything is flushed out for the writer. */ if (dev->flags & CAMDD_DEV_FLAG_PEER_EOF) { goto bailout; } else if ((dev->num_peer_work_queue == 0) && (dev->num_peer_done_queue == 0) && (dev->cur_active_io == 0) && (dev->num_run_queue == 0)) { goto bailout; } } /* * XXX KDM need to do something about the pending * queue and cleanup resources. 
*/ } if ((dev->write_dev == 0) && (dev->cur_active_io == 0) && (dev->peer_bytes_queued < dev->peer_dev->blocksize)) kq_ts = &ts; else kq_ts = NULL; /* * Run kevent to see if there are events to process. */ pthread_mutex_unlock(&dev->mutex); retval = kevent(dev->kq, NULL, 0, &ke, 1, kq_ts); pthread_mutex_lock(&dev->mutex); if (retval == -1) { warn("%s: error returned from kevent",__func__); goto bailout; } else if (retval != 0) { switch (ke.filter) { case EVFILT_READ: if (dev->fetch != NULL) { retval = dev->fetch(dev); if (retval == -1) { error_exit = 1; goto bailout; } } break; case EVFILT_SIGNAL: /* * We register for this so we don't get * an error as a result of a SIGINFO or a * SIGINT. It will actually get handled * by the signal handler. If we get a * SIGINT, bail out without printing an * error message. Any other signals * will result in the error message above. */ if (ke.ident == SIGINT) goto bailout; break; case EVFILT_USER: retval = 0; /* * Check to see if the other thread has * queued any I/O for us to do. (In this * case we're the writer.) */ for (buf = STAILQ_FIRST(&dev->work_queue); buf != NULL; buf = STAILQ_FIRST(&dev->work_queue)) { STAILQ_REMOVE_HEAD(&dev->work_queue, work_links); retval = camdd_queue(dev, buf); /* * We keep going unless we get an * actual error. If we get EOF, we * still want to remove the buffers * from the queue and send the back * to the reader thread. */ if (retval == -1) { error_exit = 1; goto bailout; } else retval = 0; } /* * Next check to see if the other thread has * queued any completed buffers back to us. * (In this case we're the reader.) */ for (buf = STAILQ_FIRST(&dev->peer_done_queue); buf != NULL; buf = STAILQ_FIRST(&dev->peer_done_queue)){ STAILQ_REMOVE_HEAD( &dev->peer_done_queue, work_links); dev->num_peer_done_queue--; camdd_peer_done(buf); } break; default: warnx("%s: unknown kevent filter %d", __func__, ke.filter); break; } } } bailout: dev->flags &= ~CAMDD_DEV_FLAG_ACTIVE; /* XXX KDM cleanup resources here? */ pthread_mutex_unlock(&dev->mutex); need_exit = 1; sem_post(&camdd_sem); return (NULL); } /* * Simplistic translation of CCB status to our local status. */ camdd_buf_status camdd_ccb_status(union ccb *ccb) { camdd_buf_status status = CAMDD_STATUS_NONE; cam_status ccb_status; ccb_status = ccb->ccb_h.status & CAM_STATUS_MASK; switch (ccb_status) { case CAM_REQ_CMP: { if (ccb->csio.resid == 0) { status = CAMDD_STATUS_OK; } else if (ccb->csio.dxfer_len > ccb->csio.resid) { status = CAMDD_STATUS_SHORT_IO; } else { status = CAMDD_STATUS_EOF; } break; } case CAM_SCSI_STATUS_ERROR: { switch (ccb->csio.scsi_status) { case SCSI_STATUS_OK: case SCSI_STATUS_COND_MET: case SCSI_STATUS_INTERMED: case SCSI_STATUS_INTERMED_COND_MET: status = CAMDD_STATUS_OK; break; case SCSI_STATUS_CMD_TERMINATED: case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_QUEUE_FULL: case SCSI_STATUS_BUSY: case SCSI_STATUS_RESERV_CONFLICT: default: status = CAMDD_STATUS_ERROR; break; } break; } default: status = CAMDD_STATUS_ERROR; break; } return (status); } /* * Queue a buffer to our peer's work thread for writing. * * Returns 0 for success, -1 for failure, 1 if the other thread exited. 
*/ int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf) { struct kevent ke; STAILQ_HEAD(, camdd_buf) local_queue; struct camdd_buf *buf1, *buf2; struct camdd_buf_data *data = NULL; uint64_t peer_bytes_queued = 0; int active = 1; int retval = 0; STAILQ_INIT(&local_queue); /* * Since we're the reader, we need to queue our I/O to the writer * in sequential order in order to make sure it gets written out * in sequential order. * * Check the next expected I/O starting offset. If this doesn't * match, put it on the reorder queue. */ if ((buf->lba * dev->sector_size) != dev->next_completion_pos_bytes) { /* * If there is nothing on the queue, there is no sorting * needed. */ if (STAILQ_EMPTY(&dev->reorder_queue)) { STAILQ_INSERT_TAIL(&dev->reorder_queue, buf, links); dev->num_reorder_queue++; goto bailout; } /* * Sort in ascending order by starting LBA. There should * be no identical LBAs. */ for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; buf1 = buf2) { buf2 = STAILQ_NEXT(buf1, links); if (buf->lba < buf1->lba) { /* * If we're less than the first one, then * we insert at the head of the list * because this has to be the first element * on the list. */ STAILQ_INSERT_HEAD(&dev->reorder_queue, buf, links); dev->num_reorder_queue++; break; } else if (buf->lba > buf1->lba) { if (buf2 == NULL) { STAILQ_INSERT_TAIL(&dev->reorder_queue, buf, links); dev->num_reorder_queue++; break; } else if (buf->lba < buf2->lba) { STAILQ_INSERT_AFTER(&dev->reorder_queue, buf1, buf, links); dev->num_reorder_queue++; break; } } else { errx(1, "Found buffers with duplicate LBA %ju!", buf->lba); } } goto bailout; } else { /* * We're the next expected I/O completion, so put ourselves * on the local queue to be sent to the writer. We use * work_links here so that we can queue this to the * peer_work_queue before taking the buffer off of the * local_queue. */ dev->next_completion_pos_bytes += buf->len; STAILQ_INSERT_TAIL(&local_queue, buf, work_links); /* * Go through the reorder queue looking for more sequential * I/O and add it to the local queue. */ for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; buf1 = STAILQ_FIRST(&dev->reorder_queue)) { /* * As soon as we see an I/O that is out of sequence, * we're done. */ if ((buf1->lba * dev->sector_size) != dev->next_completion_pos_bytes) break; STAILQ_REMOVE_HEAD(&dev->reorder_queue, links); dev->num_reorder_queue--; STAILQ_INSERT_TAIL(&local_queue, buf1, work_links); dev->next_completion_pos_bytes += buf1->len; } } /* * Setup the event to let the other thread know that it has work * pending. */ EV_SET(&ke, (uintptr_t)&dev->peer_dev->work_queue, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); /* * Put this on our shadow queue so that we know what we've queued * to the other thread. */ STAILQ_FOREACH_SAFE(buf1, &local_queue, work_links, buf2) { if (buf1->buf_type != CAMDD_BUF_DATA) { errx(1, "%s: should have a data buffer, not an " "indirect buffer", __func__); } data = &buf1->buf_type_spec.data; /* * We only need to send one EOF to the writer, and don't * need to continue sending EOFs after that. 
*/ if (buf1->status == CAMDD_STATUS_EOF) { if (dev->flags & CAMDD_DEV_FLAG_EOF_SENT) { STAILQ_REMOVE(&local_queue, buf1, camdd_buf, work_links); camdd_release_buf(buf1); retval = 1; continue; } dev->flags |= CAMDD_DEV_FLAG_EOF_SENT; } STAILQ_INSERT_TAIL(&dev->peer_work_queue, buf1, links); peer_bytes_queued += (data->fill_len - data->resid); dev->peer_bytes_queued += (data->fill_len - data->resid); dev->num_peer_work_queue++; } if (STAILQ_FIRST(&local_queue) == NULL) goto bailout; /* * Drop our mutex and pick up the other thread's mutex. We need to * do this to avoid deadlocks. */ pthread_mutex_unlock(&dev->mutex); pthread_mutex_lock(&dev->peer_dev->mutex); if (dev->peer_dev->flags & CAMDD_DEV_FLAG_ACTIVE) { /* * Put the buffers on the other thread's incoming work queue. */ for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; buf1 = STAILQ_FIRST(&local_queue)) { STAILQ_REMOVE_HEAD(&local_queue, work_links); STAILQ_INSERT_TAIL(&dev->peer_dev->work_queue, buf1, work_links); } /* * Send an event to the other thread's kqueue to let it know * that there is something on the work queue. */ retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); if (retval == -1) warn("%s: unable to add peer work_queue kevent", __func__); else retval = 0; } else active = 0; pthread_mutex_unlock(&dev->peer_dev->mutex); pthread_mutex_lock(&dev->mutex); /* * If the other side isn't active, run through the queue and * release all of the buffers. */ if (active == 0) { for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; buf1 = STAILQ_FIRST(&local_queue)) { STAILQ_REMOVE_HEAD(&local_queue, work_links); STAILQ_REMOVE(&dev->peer_work_queue, buf1, camdd_buf, links); dev->num_peer_work_queue--; camdd_release_buf(buf1); } dev->peer_bytes_queued -= peer_bytes_queued; retval = 1; } bailout: return (retval); } /* * Return a buffer to the reader thread when we have completed writing it. */ int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf) { struct kevent ke; int retval = 0; /* * Setup the event to let the other thread know that we have * completed a buffer. */ EV_SET(&ke, (uintptr_t)&dev->peer_dev->peer_done_queue, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); /* * Drop our lock and acquire the other thread's lock before * manipulating */ pthread_mutex_unlock(&dev->mutex); pthread_mutex_lock(&dev->peer_dev->mutex); /* * Put the buffer on the reader thread's peer done queue now that * we have completed it. */ STAILQ_INSERT_TAIL(&dev->peer_dev->peer_done_queue, peer_buf, work_links); dev->peer_dev->num_peer_done_queue++; /* * Send an event to the peer thread to let it know that we've added * something to its peer done queue. */ retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); if (retval == -1) warn("%s: unable to add peer_done_queue kevent", __func__); else retval = 0; /* * Drop the other thread's lock and reacquire ours. */ pthread_mutex_unlock(&dev->peer_dev->mutex); pthread_mutex_lock(&dev->mutex); return (retval); } /* * Free a buffer that was written out by the writer thread and returned to * the reader thread. 
*/ void camdd_peer_done(struct camdd_buf *buf) { struct camdd_dev *dev; struct camdd_buf_data *data; dev = buf->dev; if (buf->buf_type != CAMDD_BUF_DATA) { errx(1, "%s: should have a data buffer, not an " "indirect buffer", __func__); } data = &buf->buf_type_spec.data; STAILQ_REMOVE(&dev->peer_work_queue, buf, camdd_buf, links); dev->num_peer_work_queue--; dev->peer_bytes_queued -= (data->fill_len - data->resid); if (buf->status == CAMDD_STATUS_EOF) dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); } /* * Assumes caller holds the lock for this device. */ void camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, int *error_count) { int retval = 0; /* * If we're the reader, we need to send the completed I/O * to the writer. If we're the writer, we need to just * free up resources, or let the reader know if we've * encountered an error. */ if (dev->write_dev == 0) { retval = camdd_queue_peer_buf(dev, buf); if (retval != 0) (*error_count)++; } else { struct camdd_buf *tmp_buf, *next_buf; STAILQ_FOREACH_SAFE(tmp_buf, &buf->src_list, src_links, next_buf) { struct camdd_buf *src_buf; struct camdd_buf_indirect *indirect; STAILQ_REMOVE(&buf->src_list, tmp_buf, camdd_buf, src_links); tmp_buf->status = buf->status; if (tmp_buf->buf_type == CAMDD_BUF_DATA) { camdd_complete_peer_buf(dev, tmp_buf); continue; } indirect = &tmp_buf->buf_type_spec.indirect; src_buf = indirect->src_buf; src_buf->refcount--; /* * XXX KDM we probably need to account for * exactly how many bytes we were able to * write. Allocate the residual to the * first N buffers? Or just track the * number of bytes written? Right now the reader * doesn't do anything with a residual. */ src_buf->status = buf->status; if (src_buf->refcount <= 0) camdd_complete_peer_buf(dev, src_buf); STAILQ_INSERT_TAIL(&dev->free_indirect_queue, tmp_buf, links); } STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); } } /* * Fetch all completed commands from the pass(4) device. * * Returns the number of commands received, or -1 if any of the commands * completed with an error. Returns 0 if no commands are available. */ int camdd_pass_fetch(struct camdd_dev *dev) { struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; union ccb ccb; int retval = 0, num_fetched = 0, error_count = 0; pthread_mutex_unlock(&dev->mutex); /* * XXX KDM we don't distinguish between EFAULT and ENOENT. */ while ((retval = ioctl(pass_dev->dev->fd, CAMIOGET, &ccb)) != -1) { struct camdd_buf *buf; struct camdd_buf_data *data; cam_status ccb_status; union ccb *buf_ccb; buf = ccb.ccb_h.ccb_buf; data = &buf->buf_type_spec.data; buf_ccb = &data->ccb; num_fetched++; /* * Copy the CCB back out so we get status, sense data, etc. */ bcopy(&ccb, buf_ccb, sizeof(ccb)); pthread_mutex_lock(&dev->mutex); /* * We're now done, so take this off the active queue. */ STAILQ_REMOVE(&dev->active_queue, buf, camdd_buf, links); dev->cur_active_io--; ccb_status = ccb.ccb_h.status & CAM_STATUS_MASK; if (ccb_status != CAM_REQ_CMP) { cam_error_print(pass_dev->dev, &ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } data->resid = ccb.csio.resid; dev->bytes_transferred += (ccb.csio.dxfer_len - ccb.csio.resid); if (buf->status == CAMDD_STATUS_NONE) buf->status = camdd_ccb_status(&ccb); if (buf->status == CAMDD_STATUS_ERROR) error_count++; else if (buf->status == CAMDD_STATUS_EOF) { /* * Once we queue this buffer to our partner thread, * he will know that we've hit EOF. 
*/ dev->flags |= CAMDD_DEV_FLAG_EOF; } camdd_complete_buf(dev, buf, &error_count); /* * Unlock in preparation for the ioctl call. */ pthread_mutex_unlock(&dev->mutex); } pthread_mutex_lock(&dev->mutex); if (error_count > 0) return (-1); else return (num_fetched); } /* * Returns -1 for error, 0 for success/continue, and 1 for resource * shortage/stop processing. */ int camdd_file_run(struct camdd_dev *dev) { struct camdd_dev_file *file_dev = &dev->dev_spec.file; struct camdd_buf_data *data; struct camdd_buf *buf; off_t io_offset; int retval = 0, write_dev = dev->write_dev; int error_count = 0, no_resources = 0, double_buf_needed = 0; uint32_t num_sectors = 0, db_len = 0; buf = STAILQ_FIRST(&dev->run_queue); if (buf == NULL) { no_resources = 1; goto bailout; } else if ((dev->write_dev == 0) && (dev->flags & (CAMDD_DEV_FLAG_EOF | CAMDD_DEV_FLAG_EOF_SENT))) { STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); dev->num_run_queue--; buf->status = CAMDD_STATUS_EOF; error_count++; goto bailout; } /* * If we're writing, we need to go through the source buffer list * and create an S/G list. */ if (write_dev != 0) { retval = camdd_buf_sg_create(buf, /*iovec*/ 1, dev->sector_size, &num_sectors, &double_buf_needed); if (retval != 0) { no_resources = 1; goto bailout; } } STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); dev->num_run_queue--; data = &buf->buf_type_spec.data; /* * pread(2) and pwrite(2) offsets are byte offsets. */ io_offset = buf->lba * dev->sector_size; /* * Unlock the mutex while we read or write. */ pthread_mutex_unlock(&dev->mutex); /* * Note that we don't need to double buffer if we're the reader * because in that case, we have allocated a single buffer of * sufficient size to do the read. This copy is necessary on * writes because if one of the components of the S/G list is not * a sector size multiple, the kernel will reject the write. This * is unfortunate but not surprising. So this will make sure that * we're using a single buffer that is a multiple of the sector size. */ if ((double_buf_needed != 0) && (data->sg_count > 1) && (write_dev != 0)) { uint32_t cur_offset; int i; if (file_dev->tmp_buf == NULL) file_dev->tmp_buf = calloc(dev->blocksize, 1); if (file_dev->tmp_buf == NULL) { buf->status = CAMDD_STATUS_ERROR; error_count++; goto bailout; } for (i = 0, cur_offset = 0; i < data->sg_count; i++) { bcopy(data->iovec[i].iov_base, &file_dev->tmp_buf[cur_offset], data->iovec[i].iov_len); cur_offset += data->iovec[i].iov_len; } db_len = cur_offset; } if (file_dev->file_flags & CAMDD_FF_CAN_SEEK) { if (write_dev == 0) { /* * XXX KDM is there any way we would need a S/G * list here? */ retval = pread(file_dev->fd, data->buf, buf->len, io_offset); } else { if (double_buf_needed != 0) { retval = pwrite(file_dev->fd, file_dev->tmp_buf, db_len, io_offset); } else if (data->sg_count == 0) { retval = pwrite(file_dev->fd, data->buf, data->fill_len, io_offset); } else { retval = pwritev(file_dev->fd, data->iovec, data->sg_count, io_offset); } } } else { if (write_dev == 0) { /* * XXX KDM is there any way we would need a S/G * list here? 
			 */
			retval = read(file_dev->fd, data->buf, buf->len);
		} else {
			if (double_buf_needed != 0) {
				retval = write(file_dev->fd, file_dev->tmp_buf,
				    db_len);
			} else if (data->sg_count == 0) {
				retval = write(file_dev->fd, data->buf,
				    data->fill_len);
			} else {
				retval = writev(file_dev->fd, data->iovec,
				    data->sg_count);
			}
		}
	}

	/* We're done, re-acquire the lock */
	pthread_mutex_lock(&dev->mutex);

	if (retval >= (ssize_t)data->fill_len) {
		/*
		 * If the bytes transferred is more than the request size,
		 * that indicates an overrun, which should only happen at
		 * the end of a transfer if we have to round up to a sector
		 * boundary.
		 */
		if (buf->status == CAMDD_STATUS_NONE)
			buf->status = CAMDD_STATUS_OK;
		data->resid = 0;
		dev->bytes_transferred += retval;
	} else if (retval == -1) {
		warn("Error %s %s", (write_dev) ? "writing to" :
		    "reading from", file_dev->filename);
		buf->status = CAMDD_STATUS_ERROR;
		data->resid = data->fill_len;
		error_count++;

		if (dev->debug == 0)
			goto bailout;

		if ((double_buf_needed != 0) && (write_dev != 0)) {
			fprintf(stderr, "%s: fd %d, DB buf %p, len %u lba %ju "
			    "offset %ju\n", __func__, file_dev->fd,
			    file_dev->tmp_buf, db_len, (uintmax_t)buf->lba,
			    (uintmax_t)io_offset);
		} else if (data->sg_count == 0) {
			fprintf(stderr, "%s: fd %d, buf %p, len %u, lba %ju "
			    "offset %ju\n", __func__, file_dev->fd, data->buf,
			    data->fill_len, (uintmax_t)buf->lba,
			    (uintmax_t)io_offset);
		} else {
			int i;

			fprintf(stderr, "%s: fd %d, len %u, lba %ju "
			    "offset %ju\n", __func__, file_dev->fd,
			    data->fill_len, (uintmax_t)buf->lba,
			    (uintmax_t)io_offset);
			for (i = 0; i < data->sg_count; i++) {
				fprintf(stderr, "index %d ptr %p len %zu\n",
				    i, data->iovec[i].iov_base,
				    data->iovec[i].iov_len);
			}
		}
	} else if (retval == 0) {
		buf->status = CAMDD_STATUS_EOF;
		if (dev->debug != 0)
			printf("%s: got EOF from %s!\n", __func__,
			    file_dev->filename);
		data->resid = data->fill_len;
		error_count++;
	} else if (retval < (ssize_t)data->fill_len) {
		if (buf->status == CAMDD_STATUS_NONE)
			buf->status = CAMDD_STATUS_SHORT_IO;
		data->resid = data->fill_len - retval;
		dev->bytes_transferred += retval;
	}

bailout:
	if (buf != NULL) {
		if (buf->status == CAMDD_STATUS_EOF) {
			struct camdd_buf *buf2;
			dev->flags |= CAMDD_DEV_FLAG_EOF;
			STAILQ_FOREACH(buf2, &dev->run_queue, links)
				buf2->status = CAMDD_STATUS_EOF;
		}
		camdd_complete_buf(dev, buf, &error_count);
	}

	if (error_count != 0)
		return (-1);
	else if (no_resources != 0)
		return (1);
	else
		return (0);
}

/*
 * Execute one command from the run queue.  Returns 0 for success, 1 for
 * stop processing, and -1 for error.
 */
int
camdd_pass_run(struct camdd_dev *dev)
{
	struct camdd_buf *buf = NULL;
	struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass;
	struct camdd_buf_data *data;
	uint32_t num_blocks, sectors_used = 0;
	union ccb *ccb;
	int retval = 0, is_write = dev->write_dev;
	int double_buf_needed = 0;

	buf = STAILQ_FIRST(&dev->run_queue);
	if (buf == NULL) {
		retval = 1;
		goto bailout;
	}

	/*
	 * If we're writing, we need to go through the source buffer list
	 * and create an S/G list.
	 */
	if (is_write != 0) {
		retval = camdd_buf_sg_create(buf, /*iovec*/ 0, dev->sector_size,
		    &sectors_used, &double_buf_needed);
		if (retval != 0) {
			retval = -1;
			goto bailout;
		}
	}

	STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links);
	dev->num_run_queue--;

	data = &buf->buf_type_spec.data;

	ccb = &data->ccb;

	bzero(&(&ccb->ccb_h)[1],
	    sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr));

	/*
	 * In almost every case the number of blocks should be the device
	 * block size. The exception may be at the end of an I/O stream
	 * for a partial block or at the end of a device.
*/ if (is_write != 0) num_blocks = sectors_used; else num_blocks = data->fill_len / pass_dev->block_len; scsi_read_write(&ccb->csio, /*retries*/ dev->retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*readop*/ (dev->write_dev == 0) ? SCSI_RW_READ : SCSI_RW_WRITE, /*byte2*/ 0, /*minimum_cmd_size*/ dev->min_cmd_size, /*lba*/ buf->lba, /*block_count*/ num_blocks, /*data_ptr*/ (data->sg_count != 0) ? (uint8_t *)data->segs : data->buf, /*dxfer_len*/ (num_blocks * pass_dev->block_len), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ dev->io_timeout); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (dev->retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (data->sg_count != 0) { ccb->csio.sglist_cnt = data->sg_count; ccb->ccb_h.flags |= CAM_DATA_SG; } /* * Store a pointer to the buffer in the CCB. The kernel will * restore this when we get it back, and we'll use it to identify * the buffer this CCB came from. */ ccb->ccb_h.ccb_buf = buf; /* * Unlock our mutex in preparation for issuing the ioctl. */ pthread_mutex_unlock(&dev->mutex); /* * Queue the CCB to the pass(4) driver. */ if (ioctl(pass_dev->dev->fd, CAMIOQUEUE, ccb) == -1) { pthread_mutex_lock(&dev->mutex); warn("%s: error sending CAMIOQUEUE ioctl to %s%u", __func__, pass_dev->dev->device_name, pass_dev->dev->dev_unit_num); warn("%s: CCB address is %p", __func__, ccb); retval = -1; STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); } else { pthread_mutex_lock(&dev->mutex); dev->cur_active_io++; STAILQ_INSERT_TAIL(&dev->active_queue, buf, links); } bailout: return (retval); } int camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len) { struct camdd_dev_pass *pass_dev; uint32_t num_blocks; int retval = 0; pass_dev = &dev->dev_spec.pass; *lba = dev->next_io_pos_bytes / dev->sector_size; *len = dev->blocksize; num_blocks = *len / dev->sector_size; /* * If max_sector is 0, then we have no set limit. This can happen * if we're writing to a file in a filesystem, or reading from * something like /dev/zero. */ if ((dev->max_sector != 0) || (dev->sector_io_limit != 0)) { uint64_t max_sector; if ((dev->max_sector != 0) && (dev->sector_io_limit != 0)) max_sector = min(dev->sector_io_limit, dev->max_sector); else if (dev->max_sector != 0) max_sector = dev->max_sector; else max_sector = dev->sector_io_limit; /* * Check to see whether we're starting off past the end of * the device. If so, we need to just send an EOF * notification to the writer. */ if (*lba > max_sector) { *len = 0; retval = 1; } else if (((*lba + num_blocks) > max_sector + 1) || ((*lba + num_blocks) < *lba)) { /* * If we get here (but pass the first check), we * can trim the request length down to go to the * end of the device. */ num_blocks = (max_sector + 1) - *lba; *len = num_blocks * dev->sector_size; retval = 1; } } dev->next_io_pos_bytes += *len; return (retval); } /* * Returns 0 for success, 1 for EOF detected, and -1 for failure. */ int camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf) { struct camdd_buf *buf = NULL; struct camdd_buf_data *data; struct camdd_dev_pass *pass_dev; size_t new_len; struct camdd_buf_data *rb_data; int is_write = dev->write_dev; int eof_flush_needed = 0; int retval = 0; int error; pass_dev = &dev->dev_spec.pass; /* * If we've gotten EOF or our partner has, we should not continue * queueing I/O. If we're a writer, though, we should continue * to write any buffers that don't have EOF status. 
*/ if ((dev->flags & CAMDD_DEV_FLAG_EOF) || ((dev->flags & CAMDD_DEV_FLAG_PEER_EOF) && (is_write == 0))) { /* * Tell the worker thread that we have seen EOF. */ retval = 1; /* * If we're the writer, send the buffer back with EOF status. */ if (is_write) { read_buf->status = CAMDD_STATUS_EOF; error = camdd_complete_peer_buf(dev, read_buf); } goto bailout; } if (is_write == 0) { buf = camdd_get_buf(dev, CAMDD_BUF_DATA); if (buf == NULL) { retval = -1; goto bailout; } data = &buf->buf_type_spec.data; retval = camdd_get_next_lba_len(dev, &buf->lba, &buf->len); if (retval != 0) { buf->status = CAMDD_STATUS_EOF; if ((buf->len == 0) && ((dev->flags & (CAMDD_DEV_FLAG_EOF_SENT | CAMDD_DEV_FLAG_EOF_QUEUED)) != 0)) { camdd_release_buf(buf); goto bailout; } dev->flags |= CAMDD_DEV_FLAG_EOF_QUEUED; } data->fill_len = buf->len; data->src_start_offset = buf->lba * dev->sector_size; /* * Put this on the run queue. */ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); dev->num_run_queue++; /* We're done. */ goto bailout; } /* * Check for new EOF status from the reader. */ if ((read_buf->status == CAMDD_STATUS_EOF) || (read_buf->status == CAMDD_STATUS_ERROR)) { dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; if ((STAILQ_FIRST(&dev->pending_queue) == NULL) && (read_buf->len == 0)) { camdd_complete_peer_buf(dev, read_buf); retval = 1; goto bailout; } else eof_flush_needed = 1; } /* * See if we have a buffer we're composing with pieces from our * partner thread. */ buf = STAILQ_FIRST(&dev->pending_queue); if (buf == NULL) { uint64_t lba; ssize_t len; retval = camdd_get_next_lba_len(dev, &lba, &len); if (retval != 0) { read_buf->status = CAMDD_STATUS_EOF; if (len == 0) { dev->flags |= CAMDD_DEV_FLAG_EOF; error = camdd_complete_peer_buf(dev, read_buf); goto bailout; } } /* * If we don't have a pending buffer, we need to grab a new * one from the free list or allocate another one. */ buf = camdd_get_buf(dev, CAMDD_BUF_DATA); if (buf == NULL) { retval = 1; goto bailout; } buf->lba = lba; buf->len = len; STAILQ_INSERT_TAIL(&dev->pending_queue, buf, links); dev->num_pending_queue++; } data = &buf->buf_type_spec.data; rb_data = &read_buf->buf_type_spec.data; if ((rb_data->src_start_offset != dev->next_peer_pos_bytes) && (dev->debug != 0)) { printf("%s: WARNING: reader offset %#jx != expected offset " "%#jx\n", __func__, (uintmax_t)rb_data->src_start_offset, (uintmax_t)dev->next_peer_pos_bytes); } dev->next_peer_pos_bytes = rb_data->src_start_offset + (rb_data->fill_len - rb_data->resid); new_len = (rb_data->fill_len - rb_data->resid) + data->fill_len; if (new_len < buf->len) { /* * There are three cases here: * 1. We need more data to fill up a block, so we put * this I/O on the queue and wait for more I/O. * 2. We have a pending buffer in the queue that is * smaller than our blocksize, but we got an EOF. So we * need to go ahead and flush the write out. * 3. We got an error. */ /* * Increment our fill length. */ data->fill_len += (rb_data->fill_len - rb_data->resid); /* * Add the new read buffer to the list for writing. */ STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); /* Increment the count */ buf->src_count++; if (eof_flush_needed == 0) { /* * We need to exit, because we don't have enough * data yet. */ goto bailout; } else { /* * Take the buffer off of the pending queue. */ STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links); dev->num_pending_queue--; /* * If we need an EOF flush, but there is no data * to flush, go ahead and return this buffer. 
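 * (That is, fill_len is still 0: nothing was accumulated before
 * the EOF arrived, so the buffer is completed immediately rather
 * than being queued for a zero-length write.)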
*/ if (data->fill_len == 0) { camdd_complete_buf(dev, buf, /*error_count*/0); retval = 1; goto bailout; } /* * Put this on the next queue for execution. */ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); dev->num_run_queue++; } } else if (new_len == buf->len) { /* * We have enough data to completey fill one block, * so we're ready to issue the I/O. */ /* * Take the buffer off of the pending queue. */ STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links); dev->num_pending_queue--; /* * Add the new read buffer to the list for writing. */ STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); /* Increment the count */ buf->src_count++; /* * Increment our fill length. */ data->fill_len += (rb_data->fill_len - rb_data->resid); /* * Put this on the next queue for execution. */ STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); dev->num_run_queue++; } else { struct camdd_buf *idb; struct camdd_buf_indirect *indirect; uint32_t len_to_go, cur_offset; idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); if (idb == NULL) { retval = 1; goto bailout; } indirect = &idb->buf_type_spec.indirect; indirect->src_buf = read_buf; read_buf->refcount++; indirect->offset = 0; indirect->start_ptr = rb_data->buf; /* * We've already established that there is more * data in read_buf than we have room for in our * current write request. So this particular chunk * of the request should just be the remainder * needed to fill up a block. */ indirect->len = buf->len - (data->fill_len - data->resid); camdd_buf_add_child(buf, idb); /* * This buffer is ready to execute, so we can take * it off the pending queue and put it on the run * queue. */ STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links); dev->num_pending_queue--; STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); dev->num_run_queue++; cur_offset = indirect->offset + indirect->len; /* * The resulting I/O would be too large to fit in * one block. We need to split this I/O into * multiple pieces. Allocate as many buffers as needed. */ for (len_to_go = rb_data->fill_len - rb_data->resid - indirect->len; len_to_go > 0;) { struct camdd_buf *new_buf; struct camdd_buf_data *new_data; uint64_t lba; ssize_t len; retval = camdd_get_next_lba_len(dev, &lba, &len); if ((retval != 0) && (len == 0)) { /* * The device has already been marked * as EOF, and there is no space left. 
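 * (camdd_get_next_lba_len() returned non-zero with len == 0, so
 * there are no sectors left to hold the rest of the read data;
 * drop out of the split loop.)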
*/ goto bailout; } new_buf = camdd_get_buf(dev, CAMDD_BUF_DATA); if (new_buf == NULL) { retval = 1; goto bailout; } new_buf->lba = lba; new_buf->len = len; idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); if (idb == NULL) { retval = 1; goto bailout; } indirect = &idb->buf_type_spec.indirect; indirect->src_buf = read_buf; read_buf->refcount++; indirect->offset = cur_offset; indirect->start_ptr = rb_data->buf + cur_offset; indirect->len = min(len_to_go, new_buf->len); #if 0 if (((indirect->len % dev->sector_size) != 0) || ((indirect->offset % dev->sector_size) != 0)) { warnx("offset %ju len %ju not aligned with " "sector size %u", indirect->offset, (uintmax_t)indirect->len, dev->sector_size); } #endif cur_offset += indirect->len; len_to_go -= indirect->len; camdd_buf_add_child(new_buf, idb); new_data = &new_buf->buf_type_spec.data; if ((new_data->fill_len == new_buf->len) || (eof_flush_needed != 0)) { STAILQ_INSERT_TAIL(&dev->run_queue, new_buf, links); dev->num_run_queue++; } else if (new_data->fill_len < buf->len) { STAILQ_INSERT_TAIL(&dev->pending_queue, new_buf, links); dev->num_pending_queue++; } else { warnx("%s: too much data in new " "buffer!", __func__); retval = 1; goto bailout; } } } bailout: return (retval); } void camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, uint32_t *peer_depth, uint32_t *our_bytes, uint32_t *peer_bytes) { *our_depth = dev->cur_active_io + dev->num_run_queue; if (dev->num_peer_work_queue > dev->num_peer_done_queue) *peer_depth = dev->num_peer_work_queue - dev->num_peer_done_queue; else *peer_depth = 0; *our_bytes = *our_depth * dev->blocksize; *peer_bytes = dev->peer_bytes_queued; } void camdd_sig_handler(int sig) { if (sig == SIGINFO) need_status = 1; else { need_exit = 1; error_exit = 1; } sem_post(&camdd_sem); } void camdd_print_status(struct camdd_dev *camdd_dev, struct camdd_dev *other_dev, struct timespec *start_time) { struct timespec done_time; uint64_t total_ns; long double mb_sec, total_sec; int error = 0; error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &done_time); if (error != 0) { warn("Unable to get done time"); return; } timespecsub(&done_time, start_time); total_ns = done_time.tv_nsec + (done_time.tv_sec * 1000000000); total_sec = total_ns; total_sec /= 1000000000; fprintf(stderr, "%ju bytes %s %s\n%ju bytes %s %s\n" "%.4Lf seconds elapsed\n", (uintmax_t)camdd_dev->bytes_transferred, (camdd_dev->write_dev == 0) ? "read from" : "written to", camdd_dev->device_name, (uintmax_t)other_dev->bytes_transferred, (other_dev->write_dev == 0) ? 
"read from" : "written to", other_dev->device_name, total_sec); mb_sec = min(other_dev->bytes_transferred,camdd_dev->bytes_transferred); mb_sec /= 1024 * 1024; mb_sec *= 1000000000; mb_sec /= total_ns; fprintf(stderr, "%.2Lf MB/sec\n", mb_sec); } int camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, uint64_t max_io, int retry_count, int timeout) { char *device = NULL; struct cam_device *new_cam_dev = NULL; struct camdd_dev *devs[2]; struct timespec start_time; pthread_t threads[2]; int unit = 0; int error = 0; int i; if (num_io_opts != 2) { warnx("Must have one input and one output path"); error = 1; goto bailout; } bzero(devs, sizeof(devs)); for (i = 0; i < num_io_opts; i++) { switch (io_opts[i].dev_type) { case CAMDD_DEV_PASS: { camdd_argmask new_arglist = CAMDD_ARG_NONE; int bus = 0, target = 0, lun = 0; char name[30]; int rv; if (isdigit(io_opts[i].dev_name[0])) { /* device specified as bus:target[:lun] */ rv = parse_btl(io_opts[i].dev_name, &bus, &target, &lun, &new_arglist); if (rv < 2) { warnx("numeric device specification " "must be either bus:target, or " "bus:target:lun"); error = 1; goto bailout; } /* default to 0 if lun was not specified */ if ((new_arglist & CAMDD_ARG_LUN) == 0) { lun = 0; new_arglist |= CAMDD_ARG_LUN; } } else { if (cam_get_device(io_opts[i].dev_name, name, sizeof name, &unit) == -1) { warnx("%s", cam_errbuf); error = 1; goto bailout; } device = strdup(name); new_arglist |= CAMDD_ARG_DEVICE |CAMDD_ARG_UNIT; } if (new_arglist & (CAMDD_ARG_BUS | CAMDD_ARG_TARGET)) new_cam_dev = cam_open_btl(bus, target, lun, O_RDWR, NULL); else new_cam_dev = cam_open_spec_device(device, unit, O_RDWR, NULL); if (new_cam_dev == NULL) { warnx("%s", cam_errbuf); error = 1; goto bailout; } devs[i] = camdd_probe_pass(new_cam_dev, /*io_opts*/ &io_opts[i], CAMDD_ARG_ERR_RECOVER, /*probe_retry_count*/ 3, /*probe_timeout*/ 5000, /*io_retry_count*/ retry_count, /*io_timeout*/ timeout); if (devs[i] == NULL) { warn("Unable to probe device %s%u", new_cam_dev->device_name, new_cam_dev->dev_unit_num); error = 1; goto bailout; } break; } case CAMDD_DEV_FILE: { int fd = -1; if (io_opts[i].dev_name[0] == '-') { if (io_opts[i].write_dev != 0) fd = STDOUT_FILENO; else fd = STDIN_FILENO; } else { if (io_opts[i].write_dev != 0) { fd = open(io_opts[i].dev_name, O_RDWR | O_CREAT, S_IWUSR |S_IRUSR); } else { fd = open(io_opts[i].dev_name, O_RDONLY); } } if (fd == -1) { warn("error opening file %s", io_opts[i].dev_name); error = 1; goto bailout; } devs[i] = camdd_probe_file(fd, &io_opts[i], retry_count, timeout); if (devs[i] == NULL) { error = 1; goto bailout; } break; } default: warnx("Unknown device type %d (%s)", io_opts[i].dev_type, io_opts[i].dev_name); error = 1; goto bailout; break; /*NOTREACHED */ } devs[i]->write_dev = io_opts[i].write_dev; devs[i]->start_offset_bytes = io_opts[i].offset; if (max_io != 0) { devs[i]->sector_io_limit = (devs[i]->start_offset_bytes / devs[i]->sector_size) + (max_io / devs[i]->sector_size) - 1; devs[i]->sector_io_limit = (devs[i]->start_offset_bytes / devs[i]->sector_size) + (max_io / devs[i]->sector_size) - 1; } devs[i]->next_io_pos_bytes = devs[i]->start_offset_bytes; devs[i]->next_completion_pos_bytes =devs[i]->start_offset_bytes; } devs[0]->peer_dev = devs[1]; devs[1]->peer_dev = devs[0]; devs[0]->next_peer_pos_bytes = devs[0]->peer_dev->next_io_pos_bytes; devs[1]->next_peer_pos_bytes = devs[1]->peer_dev->next_io_pos_bytes; sem_init(&camdd_sem, /*pshared*/ 0, 0); signal(SIGINFO, camdd_sig_handler); signal(SIGINT, camdd_sig_handler); error = 
clock_gettime(CLOCK_MONOTONIC_PRECISE, &start_time); if (error != 0) { warn("Unable to get start time"); goto bailout; } for (i = 0; i < num_io_opts; i++) { error = pthread_create(&threads[i], NULL, camdd_worker, (void *)devs[i]); if (error != 0) { warnc(error, "pthread_create() failed"); goto bailout; } } for (;;) { if ((sem_wait(&camdd_sem) == -1) || (need_exit != 0)) { struct kevent ke; for (i = 0; i < num_io_opts; i++) { EV_SET(&ke, (uintptr_t)&devs[i]->work_queue, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); devs[i]->flags |= CAMDD_DEV_FLAG_EOF; error = kevent(devs[i]->kq, &ke, 1, NULL, 0, NULL); if (error == -1) warn("%s: unable to wake up thread", __func__); error = 0; } break; } else if (need_status != 0) { camdd_print_status(devs[0], devs[1], &start_time); need_status = 0; } } for (i = 0; i < num_io_opts; i++) { pthread_join(threads[i], NULL); } camdd_print_status(devs[0], devs[1], &start_time); bailout: for (i = 0; i < num_io_opts; i++) camdd_free_dev(devs[i]); return (error + error_exit); } void usage(void) { fprintf(stderr, "usage: camdd <-i|-o pass=pass0,bs=1M,offset=1M,depth=4>\n" " <-i|-o file=/tmp/file,bs=512K,offset=1M>\n" " <-i|-o file=/dev/da0,bs=512K,offset=1M>\n" " <-i|-o file=/dev/nsa0,bs=512K>\n" " [-C retry_count][-E][-m max_io_amt][-t timeout_secs][-v][-h]\n" "Option description\n" "-i Specify input device/file and parameters\n" "-o Specify output device/file and parameters\n" "Input and Output parameters\n" "pass=name Specify a pass(4) device like pass0 or /dev/pass0\n" "file=name Specify a file or device, /tmp/foo, /dev/da0, /dev/null\n" " or - for stdin/stdout\n" "bs=blocksize Specify blocksize in bytes, or using K, M, G, etc. suffix\n" "offset=len Specify starting offset in bytes or using K, M, G suffix\n" " NOTE: offset cannot be specified on tapes, pipes, stdin/out\n" "depth=N Specify a numeric queue depth. This only applies to pass(4)\n" "mcs=N Specify a minimum cmd size for pass(4) read/write commands\n" "Optional arguments\n" "-C retry_cnt Specify a retry count for pass(4) devices\n" "-E Enable CAM error recovery for pass(4) devices\n" "-m max_io Specify the maximum amount to be transferred in bytes or\n" " using K, G, M, etc. suffixes\n" "-t timeout Specify the I/O timeout to use with pass(4) devices\n" "-v Enable verbose error recovery\n" "-h Print this message\n"); } int camdd_parse_io_opts(char *args, int is_write, struct camdd_io_opts *io_opts) { char *tmpstr, *tmpstr2; char *orig_tmpstr = NULL; int retval = 0; io_opts->write_dev = is_write; tmpstr = strdup(args); if (tmpstr == NULL) { warn("strdup failed"); retval = 1; goto bailout; } orig_tmpstr = tmpstr; while ((tmpstr2 = strsep(&tmpstr, ",")) != NULL) { char *name, *value; /* * If the user creates an empty parameter by putting in two * commas, skip over it and look for the next field. 
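 * A (hypothetical) example: "file=/dev/da0,,bs=512k" parses the
 * same as "file=/dev/da0,bs=512k"; the empty token between the
 * two commas is simply skipped.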
*/ if (*tmpstr2 == '\0') continue; name = strsep(&tmpstr2, "="); if (*name == '\0') { warnx("Got empty I/O parameter name"); retval = 1; goto bailout; } value = strsep(&tmpstr2, "="); if ((value == NULL) || (*value == '\0')) { warnx("Empty I/O parameter value for %s", name); retval = 1; goto bailout; } if (strncasecmp(name, "file", 4) == 0) { io_opts->dev_type = CAMDD_DEV_FILE; io_opts->dev_name = strdup(value); if (io_opts->dev_name == NULL) { warn("Error allocating memory"); retval = 1; goto bailout; } } else if (strncasecmp(name, "pass", 4) == 0) { io_opts->dev_type = CAMDD_DEV_PASS; io_opts->dev_name = strdup(value); if (io_opts->dev_name == NULL) { warn("Error allocating memory"); retval = 1; goto bailout; } } else if ((strncasecmp(name, "bs", 2) == 0) || (strncasecmp(name, "blocksize", 9) == 0)) { retval = expand_number(value, &io_opts->blocksize); if (retval == -1) { warn("expand_number(3) failed on %s=%s", name, value); retval = 1; goto bailout; } } else if (strncasecmp(name, "depth", 5) == 0) { char *endptr; io_opts->queue_depth = strtoull(value, &endptr, 0); if (*endptr != '\0') { warnx("invalid queue depth %s", value); retval = 1; goto bailout; } } else if (strncasecmp(name, "mcs", 3) == 0) { char *endptr; io_opts->min_cmd_size = strtol(value, &endptr, 0); if ((*endptr != '\0') || ((io_opts->min_cmd_size > 16) || (io_opts->min_cmd_size < 0))) { warnx("invalid minimum cmd size %s", value); retval = 1; goto bailout; } } else if (strncasecmp(name, "offset", 6) == 0) { retval = expand_number(value, &io_opts->offset); if (retval == -1) { warn("expand_number(3) failed on %s=%s", name, value); retval = 1; goto bailout; } } else if (strncasecmp(name, "debug", 5) == 0) { char *endptr; io_opts->debug = strtoull(value, &endptr, 0); if (*endptr != '\0') { warnx("invalid debug level %s", value); retval = 1; goto bailout; } } else { warnx("Unrecognized parameter %s=%s", name, value); } } bailout: free(orig_tmpstr); return (retval); } int main(int argc, char **argv) { int c; camdd_argmask arglist = CAMDD_ARG_NONE; int timeout = 0, retry_count = 1; int error = 0; uint64_t max_io = 0; struct camdd_io_opts *opt_list = NULL; if (argc == 1) { usage(); exit(1); } opt_list = calloc(2, sizeof(struct camdd_io_opts)); if (opt_list == NULL) { warn("Unable to allocate option list"); error = 1; goto bailout; } while ((c = getopt(argc, argv, "C:Ehi:m:o:t:v")) != -1){ switch (c) { case 'C': retry_count = strtol(optarg, NULL, 0); if (retry_count < 0) errx(1, "retry count %d is < 0", retry_count); arglist |= CAMDD_ARG_RETRIES; break; case 'E': arglist |= CAMDD_ARG_ERR_RECOVER; break; case 'i': case 'o': if (((c == 'i') && (opt_list[0].dev_type != CAMDD_DEV_NONE)) || ((c == 'o') && (opt_list[1].dev_type != CAMDD_DEV_NONE))) { errx(1, "Only one input and output path " "allowed"); } error = camdd_parse_io_opts(optarg, (c == 'o') ? 1 : 0, (c == 'o') ? 
&opt_list[1] : &opt_list[0]); if (error != 0) goto bailout; break; case 'm': error = expand_number(optarg, &max_io); if (error == -1) { warn("invalid maximum I/O amount %s", optarg); error = 1; goto bailout; } break; case 't': timeout = strtol(optarg, NULL, 0); if (timeout < 0) errx(1, "invalid timeout %d", timeout); /* Convert the timeout from seconds to ms */ timeout *= 1000; arglist |= CAMDD_ARG_TIMEOUT; break; case 'v': arglist |= CAMDD_ARG_VERBOSE; break; case 'h': default: usage(); exit(1); break; /*NOTREACHED*/ } } if ((opt_list[0].dev_type == CAMDD_DEV_NONE) || (opt_list[1].dev_type == CAMDD_DEV_NONE)) errx(1, "Must specify both -i and -o"); /* * Set the timeout if the user hasn't specified one. */ if (timeout == 0) timeout = CAMDD_PASS_RW_TIMEOUT; error = camdd_rw(opt_list, 2, max_io, retry_count, timeout); bailout: free(opt_list); exit(error); } Index: head/usr.sbin/cron/cron/cron.c =================================================================== --- head/usr.sbin/cron/cron/cron.c (revision 298885) +++ head/usr.sbin/cron/cron/cron.c (revision 298886) @@ -1,561 +1,561 @@ /* Copyright 1988,1990,1993,1994 by Paul Vixie * All rights reserved * * Distribute freely, except: don't remove my name from the source or * documentation (don't take credit for my work), mark your changes (don't * get me blamed for your possible bugs), don't alter or remove this * notice. May be sold if buildable source is provided to buyer. No * warrantee of any kind, express or implied, is included with this * software; use at your own risk, responsibility for damages (if any) to * anyone resulting from the use of this software rests entirely with the * user. * * Send bug reports, bug fixes, enhancements, requests, flames, etc., and * I'll try to keep a version up to date. 
I can be reached as follows: * Paul Vixie uunet!decwrl!vixie!paul */ #if !defined(lint) && !defined(LINT) static const char rcsid[] = "$FreeBSD$"; #endif #define MAIN_PROGRAM #include "cron.h" #include #include #if SYS_TIME_H # include #else # include #endif static void usage(void), run_reboot_jobs(cron_db *), cron_tick(cron_db *, int), cron_sync(int), cron_sleep(cron_db *, int), cron_clean(cron_db *), #ifdef USE_SIGCHLD sigchld_handler(int), #endif sighup_handler(int), parse_args(int c, char *v[]); static int run_at_secres(cron_db *); static time_t last_time = 0; static int dst_enabled = 0; struct pidfh *pfh; static void usage() { #if DEBUGGING char **dflags; #endif fprintf(stderr, "usage: cron [-j jitter] [-J rootjitter] " "[-m mailto] [-s] [-o] [-x debugflag[,...]]\n"); #if DEBUGGING fprintf(stderr, "\ndebugflags: "); for(dflags = DebugFlagNames; *dflags; dflags++) { fprintf(stderr, "%s ", *dflags); } fprintf(stderr, "\n"); #endif exit(ERROR_EXIT); } static void open_pidfile(void) { char pidfile[MAX_FNAME]; char buf[MAX_TEMPSTR]; int otherpid; (void) snprintf(pidfile, sizeof(pidfile), PIDFILE, PIDDIR); pfh = pidfile_open(pidfile, 0600, &otherpid); if (pfh == NULL) { if (errno == EEXIST) { snprintf(buf, sizeof(buf), "cron already running, pid: %d", otherpid); } else { snprintf(buf, sizeof(buf), "can't open or create %s: %s", pidfile, strerror(errno)); } log_it("CRON", getpid(), "DEATH", buf); errx(ERROR_EXIT, "%s", buf); } } int main(argc, argv) int argc; char *argv[]; { cron_db database; int runnum; int secres1, secres2; struct tm *tm; ProgramName = argv[0]; #if defined(BSD) setlinebuf(stdout); setlinebuf(stderr); #endif parse_args(argc, argv); #ifdef USE_SIGCHLD (void) signal(SIGCHLD, sigchld_handler); #else (void) signal(SIGCLD, SIG_IGN); #endif (void) signal(SIGHUP, sighup_handler); open_pidfile(); set_cron_uid(); set_cron_cwd(); #if defined(POSIX) setenv("PATH", _PATH_DEFPATH, 1); #endif /* if there are no debug flags turned on, fork as a daemon should. */ # if DEBUGGING if (DebugFlags) { # else if (0) { # endif (void) fprintf(stderr, "[%d] cron started\n", getpid()); } else { if (daemon(1, 0) == -1) { pidfile_remove(pfh); log_it("CRON",getpid(),"DEATH","can't become daemon"); exit(0); } } if (madvise(NULL, 0, MADV_PROTECT) != 0) log_it("CRON", getpid(), "WARNING", "madvise() failed"); pidfile_write(pfh); database.head = NULL; database.tail = NULL; database.mtime = (time_t) 0; load_database(&database); secres1 = secres2 = run_at_secres(&database); run_reboot_jobs(&database); cron_sync(secres1); runnum = 0; while (TRUE) { # if DEBUGGING /* if (!(DebugFlags & DTEST)) */ # endif /*DEBUGGING*/ cron_sleep(&database, secres1); if (secres1 == 0 || runnum % 60 == 0) { load_database(&database); secres2 = run_at_secres(&database); if (secres2 != secres1) { secres1 = secres2; if (secres1 != 0) { runnum = 0; } else { /* * Going from 1 sec to 60 sec res. If we * are already at minute's boundary, so * let it run, otherwise schedule for the * next minute. */ tm = localtime(&TargetTime); if (tm->tm_sec > 0) { cron_sync(secres2); continue; } } } } /* do this iteration */ cron_tick(&database, secres1); /* sleep 1 or 60 seconds */ TargetTime += (secres1 != 0) ? 
1 : 60; runnum += 1; } } static void run_reboot_jobs(db) cron_db *db; { register user *u; register entry *e; for (u = db->head; u != NULL; u = u->next) { for (e = u->crontab; e != NULL; e = e->next) { if (e->flags & WHEN_REBOOT) { job_add(e, u); } } } (void) job_runqueue(); } static void cron_tick(cron_db *db, int secres) { static struct tm lasttm; static time_t diff = 0, /* time difference in seconds from the last offset change */ difflimit = 0; /* end point for the time zone correction */ struct tm otztm; /* time in the old time zone */ int otzsecond, otzminute, otzhour, otzdom, otzmonth, otzdow; register struct tm *tm = localtime(&TargetTime); register int second, minute, hour, dom, month, dow; register user *u; register entry *e; - /* make 0-based values out of these so we can use them as indicies + /* make 0-based values out of these so we can use them as indices */ second = (secres == 0) ? 0 : tm->tm_sec -FIRST_SECOND; minute = tm->tm_min -FIRST_MINUTE; hour = tm->tm_hour -FIRST_HOUR; dom = tm->tm_mday -FIRST_DOM; month = tm->tm_mon +1 /* 0..11 -> 1..12 */ -FIRST_MONTH; dow = tm->tm_wday -FIRST_DOW; Debug(DSCH, ("[%d] tick(%d,%d,%d,%d,%d,%d)\n", getpid(), second, minute, hour, dom, month, dow)) if (dst_enabled && last_time != 0 && TargetTime > last_time /* exclude stepping back */ && tm->tm_gmtoff != lasttm.tm_gmtoff ) { diff = tm->tm_gmtoff - lasttm.tm_gmtoff; if ( diff > 0 ) { /* ST->DST */ /* mark jobs for an earlier run */ difflimit = TargetTime + diff; for (u = db->head; u != NULL; u = u->next) { for (e = u->crontab; e != NULL; e = e->next) { e->flags &= ~NOT_UNTIL; if ( e->lastrun >= TargetTime ) e->lastrun = 0; /* not include the ends of hourly ranges */ if ( e->lastrun < TargetTime - 3600 ) e->flags |= RUN_AT; else e->flags &= ~RUN_AT; } } } else { /* diff < 0 : DST->ST */ /* mark jobs for skipping */ difflimit = TargetTime - diff; for (u = db->head; u != NULL; u = u->next) { for (e = u->crontab; e != NULL; e = e->next) { e->flags |= NOT_UNTIL; e->flags &= ~RUN_AT; } } } } if (diff != 0) { /* if the time was reset of the end of special zone is reached */ if (last_time == 0 || TargetTime >= difflimit) { /* disable the TZ switch checks */ diff = 0; difflimit = 0; for (u = db->head; u != NULL; u = u->next) { for (e = u->crontab; e != NULL; e = e->next) { e->flags &= ~(RUN_AT|NOT_UNTIL); } } } else { /* get the time in the old time zone */ time_t difftime = TargetTime + tm->tm_gmtoff - diff; gmtime_r(&difftime, &otztm); - /* make 0-based values out of these so we can use them as indicies + /* make 0-based values out of these so we can use them as indices */ otzsecond = (secres == 0) ? 0 : otztm.tm_sec -FIRST_SECOND; otzminute = otztm.tm_min -FIRST_MINUTE; otzhour = otztm.tm_hour -FIRST_HOUR; otzdom = otztm.tm_mday -FIRST_DOM; otzmonth = otztm.tm_mon +1 /* 0..11 -> 1..12 */ -FIRST_MONTH; otzdow = otztm.tm_wday -FIRST_DOW; } } /* the dom/dow situation is odd. '* * 1,15 * Sun' will run on the * first and fifteenth AND every Sunday; '* * * * Sun' will run *only* * on Sundays; '* * 1,15 * *' will run *only* the 1st and 15th. this * is why we keep 'e->dow_star' and 'e->dom_star'. yes, it's bizarre. * like many bizarre things, it's the standard. */ for (u = db->head; u != NULL; u = u->next) { for (e = u->crontab; e != NULL; e = e->next) { Debug(DSCH|DEXT, ("user [%s:%d:%d:...] 
cmd=\"%s\"\n", env_get("LOGNAME", e->envp), e->uid, e->gid, e->cmd)) if ( diff != 0 && (e->flags & (RUN_AT|NOT_UNTIL)) ) { if (bit_test(e->second, otzsecond) && bit_test(e->minute, otzminute) && bit_test(e->hour, otzhour) && bit_test(e->month, otzmonth) && ( ((e->flags & DOM_STAR) || (e->flags & DOW_STAR)) ? (bit_test(e->dow,otzdow) && bit_test(e->dom,otzdom)) : (bit_test(e->dow,otzdow) || bit_test(e->dom,otzdom)) ) ) { if ( e->flags & RUN_AT ) { e->flags &= ~RUN_AT; e->lastrun = TargetTime; job_add(e, u); continue; } else e->flags &= ~NOT_UNTIL; } else if ( e->flags & NOT_UNTIL ) continue; } if (bit_test(e->second, second) && bit_test(e->minute, minute) && bit_test(e->hour, hour) && bit_test(e->month, month) && ( ((e->flags & DOM_STAR) || (e->flags & DOW_STAR)) ? (bit_test(e->dow,dow) && bit_test(e->dom,dom)) : (bit_test(e->dow,dow) || bit_test(e->dom,dom)) ) ) { e->flags &= ~RUN_AT; e->lastrun = TargetTime; job_add(e, u); } } } last_time = TargetTime; lasttm = *tm; } /* the task here is to figure out how long it's going to be until :00 of the * following minute and initialize TargetTime to this value. TargetTime * will subsequently slide 60 seconds at a time, with correction applied * implicitly in cron_sleep(). it would be nice to let cron execute in * the "current minute" before going to sleep, but by restarting cron you * could then get it to execute a given minute's jobs more than once. * instead we have the chance of missing a minute's jobs completely, but * that's something sysadmin's know to expect what with crashing computers.. */ static void cron_sync(int secres) { struct tm *tm; TargetTime = time((time_t*)0); if (secres != 0) { TargetTime += 1; } else { tm = localtime(&TargetTime); TargetTime += (60 - tm->tm_sec); } } static void timespec_subtract(struct timespec *result, struct timespec *x, struct timespec *y) { *result = *x; result->tv_sec -= y->tv_sec; result->tv_nsec -= y->tv_nsec; if (result->tv_nsec < 0) { result->tv_sec--; result->tv_nsec += 1000000000; } } static void cron_sleep(cron_db *db, int secres) { int seconds_to_wait; int rval; struct timespec ctime, ttime, stime, remtime; /* * Loop until we reach the top of the next minute, sleep when possible. */ for (;;) { clock_gettime(CLOCK_REALTIME, &ctime); ttime.tv_sec = TargetTime; ttime.tv_nsec = 0; timespec_subtract(&stime, &ttime, &ctime); /* * If the seconds_to_wait value is insane, jump the cron */ if (stime.tv_sec < -600 || stime.tv_sec > 600) { cron_clean(db); cron_sync(secres); continue; } seconds_to_wait = (stime.tv_nsec > 0) ? 
stime.tv_sec + 1 : stime.tv_sec; Debug(DSCH, ("[%d] TargetTime=%ld, sec-to-wait=%d\n", getpid(), (long)TargetTime, seconds_to_wait)) /* * If we've run out of wait time or there are no jobs left * to run, break */ if (stime.tv_sec < 0) break; if (job_runqueue() == 0) { Debug(DSCH, ("[%d] sleeping for %d seconds\n", getpid(), seconds_to_wait)) for (;;) { rval = nanosleep(&stime, &remtime); if (rval == 0 || errno != EINTR) break; stime.tv_sec = remtime.tv_sec; stime.tv_nsec = remtime.tv_nsec; } } } } /* if the time was changed abruptly, clear the flags related * to the daylight time switch handling to avoid strange effects */ static void cron_clean(db) cron_db *db; { user *u; entry *e; last_time = 0; for (u = db->head; u != NULL; u = u->next) { for (e = u->crontab; e != NULL; e = e->next) { e->flags &= ~(RUN_AT|NOT_UNTIL); } } } #ifdef USE_SIGCHLD static void sigchld_handler(int x) { WAIT_T waiter; PID_T pid; for (;;) { #ifdef POSIX pid = waitpid(-1, &waiter, WNOHANG); #else pid = wait3(&waiter, WNOHANG, (struct rusage *)0); #endif switch (pid) { case -1: Debug(DPROC, ("[%d] sigchld...no children\n", getpid())) return; case 0: Debug(DPROC, ("[%d] sigchld...no dead kids\n", getpid())) return; default: Debug(DPROC, ("[%d] sigchld...pid #%d died, stat=%d\n", getpid(), pid, WEXITSTATUS(waiter))) } } } #endif /*USE_SIGCHLD*/ static void sighup_handler(int x) { log_close(); } static void parse_args(argc, argv) int argc; char *argv[]; { int argch; char *endp; while ((argch = getopt(argc, argv, "j:J:m:osx:")) != -1) { switch (argch) { case 'j': Jitter = strtoul(optarg, &endp, 10); if (*optarg == '\0' || *endp != '\0' || Jitter > 60) errx(ERROR_EXIT, "bad value for jitter: %s", optarg); break; case 'J': RootJitter = strtoul(optarg, &endp, 10); if (*optarg == '\0' || *endp != '\0' || RootJitter > 60) errx(ERROR_EXIT, "bad value for root jitter: %s", optarg); break; case 'm': defmailto = optarg; break; case 'o': dst_enabled = 0; break; case 's': dst_enabled = 1; break; case 'x': if (!set_debug_flags(optarg)) usage(); break; default: usage(); } } } static int run_at_secres(cron_db *db) { user *u; entry *e; for (u = db->head; u != NULL; u = u->next) { for (e = u->crontab; e != NULL; e = e->next) { if ((e->flags & SEC_RES) != 0) return 1; } } return 0; } Index: head/usr.sbin/cron/cron/popen.c =================================================================== --- head/usr.sbin/cron/cron/popen.c (revision 298885) +++ head/usr.sbin/cron/cron/popen.c (revision 298886) @@ -1,246 +1,246 @@ /* * Copyright (c) 1988 The Regents of the University of California. * All rights reserved. * * This code is derived from software written by Ken Arnold and * published in UNIX Review, Vol. 6, No. 8. * * Redistribution and use in source and binary forms are permitted * provided that the above copyright notice and this paragraph are * duplicated in all such forms and that any documentation, * advertising materials, and other materials related to such * distribution and use acknowledge that the software was developed * by the University of California, Berkeley. The name of the * University may not be used to endorse or promote products derived * from this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * */ /* this came out of the ftpd sources; it's been modified to avoid the * globbing stuff since we don't need it. 
also execvp instead of execv. */ #ifndef lint #if 0 static char sccsid[] = "@(#)popen.c 5.7 (Berkeley) 2/14/89"; #endif static const char rcsid[] = "$FreeBSD$"; #endif /* not lint */ #include "cron.h" #include #include #include #if defined(SYSLOG) # include #endif #if defined(LOGIN_CAP) # include #endif #define MAX_ARGS 100 #define WANT_GLOBBING 0 /* - * Special version of popen which avoids call to shell. This insures noone + * Special version of popen which avoids call to shell. This insures no one * may create a pipe to a hidden program as a side effect of a list or dir * command. */ static PID_T *pids; static int fds; FILE * cron_popen(program, type, e) char *program, *type; entry *e; { register char *cp; FILE *iop; int argc, pdes[2]; PID_T pid; char *usernm; char *argv[MAX_ARGS + 1]; # if defined(LOGIN_CAP) struct passwd *pwd; login_cap_t *lc; # endif #if WANT_GLOBBING char **pop, *vv[2]; int gargc; char *gargv[1000]; extern char **glob(), **copyblk(); #endif if ((*type != 'r' && *type != 'w') || type[1]) return(NULL); if (!pids) { if ((fds = getdtablesize()) <= 0) return(NULL); if (!(pids = calloc(fds, sizeof(PID_T)))) return(NULL); } if (pipe(pdes) < 0) return(NULL); /* break up string into pieces */ for (argc = 0, cp = program; argc < MAX_ARGS; cp = NULL) if (!(argv[argc++] = strtok(cp, " \t\n"))) break; argv[MAX_ARGS] = NULL; #if WANT_GLOBBING /* glob each piece */ gargv[0] = argv[0]; for (gargc = argc = 1; argv[argc]; argc++) { if (!(pop = glob(argv[argc]))) { /* globbing failed */ vv[0] = argv[argc]; vv[1] = NULL; pop = copyblk(vv); } argv[argc] = (char *)pop; /* save to free later */ while (*pop && gargc < 1000) gargv[gargc++] = *pop++; } gargv[gargc] = NULL; #endif iop = NULL; switch(pid = vfork()) { case -1: /* error */ (void)close(pdes[0]); (void)close(pdes[1]); goto pfree; /* NOTREACHED */ case 0: /* child */ if (e != NULL) { #ifdef SYSLOG closelog(); #endif /* get new pgrp, void tty, etc. */ (void) setsid(); } if (*type == 'r') { /* Do not share our parent's stdin */ (void)close(0); (void)open(_PATH_DEVNULL, O_RDWR); if (pdes[1] != 1) { dup2(pdes[1], 1); dup2(pdes[1], 2); /* stderr, too! */ (void)close(pdes[1]); } (void)close(pdes[0]); } else { if (pdes[0] != 0) { dup2(pdes[0], 0); (void)close(pdes[0]); } /* Hack: stdout gets revoked */ (void)close(1); (void)open(_PATH_DEVNULL, O_RDWR); (void)close(2); (void)open(_PATH_DEVNULL, O_RDWR); (void)close(pdes[1]); } if (e != NULL) { /* Set user's entire context, but skip the environment * as cron provides a separate interface for this */ usernm = env_get("LOGNAME", e->envp); # if defined(LOGIN_CAP) if ((pwd = getpwnam(usernm)) == NULL) pwd = getpwuid(e->uid); lc = NULL; if (pwd != NULL) { pwd->pw_gid = e->gid; if (e->class != NULL) lc = login_getclass(e->class); } if (pwd && setusercontext(lc, pwd, e->uid, LOGIN_SETALL & ~(LOGIN_SETPATH|LOGIN_SETENV)) == 0) (void) endpwent(); else { /* fall back to the old method */ (void) endpwent(); # endif /* * Set our directory, uid and gid. Set gid * first since once we set uid, we've lost * root privileges. */ if (setgid(e->gid) != 0) _exit(ERROR_EXIT); # if defined(BSD) if (initgroups(usernm, e->gid) != 0) _exit(ERROR_EXIT); # endif if (setlogin(usernm) != 0) _exit(ERROR_EXIT); if (setuid(e->uid) != 0) _exit(ERROR_EXIT); /* we aren't root after this..*/ #if defined(LOGIN_CAP) } if (lc != NULL) login_close(lc); #endif chdir(env_get("HOME", e->envp)); } #if WANT_GLOBBING execvp(gargv[0], gargv); #else execvp(argv[0], argv); #endif _exit(1); } /* parent; assume fdopen can't fail... 
*/ if (*type == 'r') { iop = fdopen(pdes[0], type); (void)close(pdes[1]); } else { iop = fdopen(pdes[1], type); (void)close(pdes[0]); } pids[fileno(iop)] = pid; pfree: #if WANT_GLOBBING for (argc = 1; argv[argc] != NULL; argc++) { /* blkfree((char **)argv[argc]); */ free((char *)argv[argc]); } #endif return(iop); } int cron_pclose(iop) FILE *iop; { register int fdes; int omask; WAIT_T stat_loc; PID_T pid; /* * pclose returns -1 if stream is not associated with a * `popened' command, or, if already `pclosed'. */ if (pids == 0 || pids[fdes = fileno(iop)] == 0) return(-1); (void)fclose(iop); omask = sigblock(sigmask(SIGINT)|sigmask(SIGQUIT)|sigmask(SIGHUP)); while ((pid = wait(&stat_loc)) != pids[fdes] && pid != -1) ; (void)sigsetmask(omask); pids[fdes] = 0; return (pid == -1 ? -1 : WEXITSTATUS(stat_loc)); } Index: head/usr.sbin/ctladm/ctladm.c =================================================================== --- head/usr.sbin/ctladm/ctladm.c (revision 298885) +++ head/usr.sbin/ctladm/ctladm.c (revision 298886) @@ -1,4262 +1,4262 @@ /*- * Copyright (c) 2003, 2004 Silicon Graphics International Corp. * Copyright (c) 1997-2007 Kenneth D. Merry * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.c#4 $ */ /* * CAM Target Layer exercise program. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ctladm.h" #ifdef min #undef min #endif #define min(x,y) (x < y) ? 
x : y typedef enum { CTLADM_CMD_TUR, CTLADM_CMD_INQUIRY, CTLADM_CMD_REQ_SENSE, CTLADM_CMD_ARRAYLIST, CTLADM_CMD_REPORT_LUNS, CTLADM_CMD_HELP, CTLADM_CMD_DEVLIST, CTLADM_CMD_ADDDEV, CTLADM_CMD_RM, CTLADM_CMD_CREATE, CTLADM_CMD_READ, CTLADM_CMD_WRITE, CTLADM_CMD_PORT, CTLADM_CMD_PORTLIST, CTLADM_CMD_READCAPACITY, CTLADM_CMD_MODESENSE, CTLADM_CMD_DUMPOOA, CTLADM_CMD_DUMPSTRUCTS, CTLADM_CMD_START, CTLADM_CMD_STOP, CTLADM_CMD_SYNC_CACHE, CTLADM_CMD_LUNLIST, CTLADM_CMD_DELAY, CTLADM_CMD_ERR_INJECT, CTLADM_CMD_PRES_IN, CTLADM_CMD_PRES_OUT, CTLADM_CMD_INQ_VPD_DEVID, CTLADM_CMD_RTPG, CTLADM_CMD_MODIFY, CTLADM_CMD_ISLIST, CTLADM_CMD_ISLOGOUT, CTLADM_CMD_ISTERMINATE, CTLADM_CMD_LUNMAP } ctladm_cmdfunction; typedef enum { CTLADM_ARG_NONE = 0x0000000, CTLADM_ARG_AUTOSENSE = 0x0000001, CTLADM_ARG_DEVICE = 0x0000002, CTLADM_ARG_ARRAYSIZE = 0x0000004, CTLADM_ARG_BACKEND = 0x0000008, CTLADM_ARG_CDBSIZE = 0x0000010, CTLADM_ARG_DATALEN = 0x0000020, CTLADM_ARG_FILENAME = 0x0000040, CTLADM_ARG_LBA = 0x0000080, CTLADM_ARG_PC = 0x0000100, CTLADM_ARG_PAGE_CODE = 0x0000200, CTLADM_ARG_PAGE_LIST = 0x0000400, CTLADM_ARG_SUBPAGE = 0x0000800, CTLADM_ARG_PAGELIST = 0x0001000, CTLADM_ARG_DBD = 0x0002000, CTLADM_ARG_TARG_LUN = 0x0004000, CTLADM_ARG_BLOCKSIZE = 0x0008000, CTLADM_ARG_IMMED = 0x0010000, CTLADM_ARG_RELADR = 0x0020000, CTLADM_ARG_RETRIES = 0x0040000, CTLADM_ARG_ONOFFLINE = 0x0080000, CTLADM_ARG_ONESHOT = 0x0100000, CTLADM_ARG_TIMEOUT = 0x0200000, CTLADM_ARG_INITIATOR = 0x0400000, CTLADM_ARG_NOCOPY = 0x0800000, CTLADM_ARG_NEED_TL = 0x1000000 } ctladm_cmdargs; struct ctladm_opts { const char *optname; uint32_t cmdnum; ctladm_cmdargs argnum; const char *subopt; }; typedef enum { CC_OR_NOT_FOUND, CC_OR_AMBIGUOUS, CC_OR_FOUND } ctladm_optret; static const char rw_opts[] = "Nb:c:d:f:l:"; static const char startstop_opts[] = "i"; static struct ctladm_opts option_table[] = { {"adddev", CTLADM_CMD_ADDDEV, CTLADM_ARG_NONE, NULL}, {"create", CTLADM_CMD_CREATE, CTLADM_ARG_NONE, "b:B:d:l:o:s:S:t:"}, {"delay", CTLADM_CMD_DELAY, CTLADM_ARG_NEED_TL, "T:l:t:"}, {"devid", CTLADM_CMD_INQ_VPD_DEVID, CTLADM_ARG_NEED_TL, NULL}, {"devlist", CTLADM_CMD_DEVLIST, CTLADM_ARG_NONE, "b:vx"}, {"dumpooa", CTLADM_CMD_DUMPOOA, CTLADM_ARG_NONE, NULL}, {"dumpstructs", CTLADM_CMD_DUMPSTRUCTS, CTLADM_ARG_NONE, NULL}, {"help", CTLADM_CMD_HELP, CTLADM_ARG_NONE, NULL}, {"inject", CTLADM_CMD_ERR_INJECT, CTLADM_ARG_NEED_TL, "cd:i:p:r:s:"}, {"inquiry", CTLADM_CMD_INQUIRY, CTLADM_ARG_NEED_TL, NULL}, {"islist", CTLADM_CMD_ISLIST, CTLADM_ARG_NONE, "vx"}, {"islogout", CTLADM_CMD_ISLOGOUT, CTLADM_ARG_NONE, "ac:i:p:"}, {"isterminate", CTLADM_CMD_ISTERMINATE, CTLADM_ARG_NONE, "ac:i:p:"}, {"lunlist", CTLADM_CMD_LUNLIST, CTLADM_ARG_NONE, NULL}, {"lunmap", CTLADM_CMD_LUNMAP, CTLADM_ARG_NONE, "p:l:L:"}, {"modesense", CTLADM_CMD_MODESENSE, CTLADM_ARG_NEED_TL, "P:S:dlm:c:"}, {"modify", CTLADM_CMD_MODIFY, CTLADM_ARG_NONE, "b:l:o:s:"}, {"port", CTLADM_CMD_PORT, CTLADM_ARG_NONE, "lo:p:qt:w:W:x"}, {"portlist", CTLADM_CMD_PORTLIST, CTLADM_ARG_NONE, "f:ilp:qvx"}, {"prin", CTLADM_CMD_PRES_IN, CTLADM_ARG_NEED_TL, "a:"}, {"prout", CTLADM_CMD_PRES_OUT, CTLADM_ARG_NEED_TL, "a:k:r:s:"}, {"read", CTLADM_CMD_READ, CTLADM_ARG_NEED_TL, rw_opts}, {"readcapacity", CTLADM_CMD_READCAPACITY, CTLADM_ARG_NEED_TL, "c:"}, {"remove", CTLADM_CMD_RM, CTLADM_ARG_NONE, "b:l:o:"}, {"reportluns", CTLADM_CMD_REPORT_LUNS, CTLADM_ARG_NEED_TL, NULL}, {"reqsense", CTLADM_CMD_REQ_SENSE, CTLADM_ARG_NEED_TL, NULL}, {"rtpg", CTLADM_CMD_RTPG, CTLADM_ARG_NEED_TL, NULL}, {"start", CTLADM_CMD_START, 
CTLADM_ARG_NEED_TL, startstop_opts}, {"stop", CTLADM_CMD_STOP, CTLADM_ARG_NEED_TL, startstop_opts}, {"synccache", CTLADM_CMD_SYNC_CACHE, CTLADM_ARG_NEED_TL, "b:c:il:r"}, {"tur", CTLADM_CMD_TUR, CTLADM_ARG_NEED_TL, NULL}, {"write", CTLADM_CMD_WRITE, CTLADM_ARG_NEED_TL, rw_opts}, {"-?", CTLADM_CMD_HELP, CTLADM_ARG_NONE, NULL}, {"-h", CTLADM_CMD_HELP, CTLADM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; ctladm_optret getoption(struct ctladm_opts *table, char *arg, uint32_t *cmdnum, ctladm_cmdargs *argnum, const char **subopt); static int cctl_dump_ooa(int fd, int argc, char **argv); static int cctl_port(int fd, int argc, char **argv, char *combinedopt); static int cctl_do_io(int fd, int retries, union ctl_io *io, const char *func); static int cctl_delay(int fd, int lun, int argc, char **argv, char *combinedopt); static int cctl_lunlist(int fd); static int cctl_sync_cache(int fd, int lun, int iid, int retries, int argc, char **argv, char *combinedopt); static int cctl_start_stop(int fd, int lun, int iid, int retries, int start, int argc, char **argv, char *combinedopt); static int cctl_mode_sense(int fd, int lun, int iid, int retries, int argc, char **argv, char *combinedopt); static int cctl_read_capacity(int fd, int lun, int iid, int retries, int argc, char **argv, char *combinedopt); static int cctl_read_write(int fd, int lun, int iid, int retries, int argc, char **argv, char *combinedopt, ctladm_cmdfunction command); static int cctl_get_luns(int fd, int lun, int iid, int retries, struct scsi_report_luns_data **lun_data, uint32_t *num_luns); static int cctl_report_luns(int fd, int lun, int iid, int retries); static int cctl_tur(int fd, int lun, int iid, int retries); static int cctl_get_inquiry(int fd, int lun, int iid, int retries, char *path_str, int path_len, struct scsi_inquiry_data *inq_data); static int cctl_inquiry(int fd, int lun, int iid, int retries); static int cctl_req_sense(int fd, int lun, int iid, int retries); static int cctl_persistent_reserve_in(int fd, int lun, int initiator, int argc, char **argv, char *combinedopt, int retry_count); static int cctl_persistent_reserve_out(int fd, int lun, int initiator, int argc, char **argv, char *combinedopt, int retry_count); static int cctl_create_lun(int fd, int argc, char **argv, char *combinedopt); static int cctl_inquiry_vpd_devid(int fd, int lun, int initiator); static int cctl_report_target_port_group(int fd, int lun, int initiator); static int cctl_modify_lun(int fd, int argc, char **argv, char *combinedopt); static int cctl_portlist(int fd, int argc, char **argv, char *combinedopt); ctladm_optret getoption(struct ctladm_opts *table, char *arg, uint32_t *cmdnum, ctladm_cmdargs *argnum, const char **subopt) { struct ctladm_opts *opts; int num_matches = 0; for (opts = table; (opts != NULL) && (opts->optname != NULL); opts++) { if (strncmp(opts->optname, arg, strlen(arg)) == 0) { *cmdnum = opts->cmdnum; *argnum = opts->argnum; *subopt = opts->subopt; if (strcmp(opts->optname, arg) == 0) return (CC_OR_FOUND); if (++num_matches > 1) return(CC_OR_AMBIGUOUS); } } if (num_matches > 0) return(CC_OR_FOUND); else return(CC_OR_NOT_FOUND); } static int cctl_dump_ooa(int fd, int argc, char **argv) { struct ctl_ooa ooa; long double cmd_latency; int num_entries, len, lun = -1, retval = 0; unsigned int i; num_entries = 104; if ((argc > 2) && (isdigit(argv[2][0]))) lun = strtol(argv[2], NULL, 0); retry: len = num_entries * sizeof(struct ctl_ooa_entry); bzero(&ooa, sizeof(ooa)); ooa.entries = malloc(len); if (ooa.entries == NULL) { warn("%s: error 
mallocing %d bytes", __func__, len); return (1); } if (lun >= 0) { ooa.lun_num = lun; } else ooa.flags |= CTL_OOA_FLAG_ALL_LUNS; ooa.alloc_len = len; ooa.alloc_num = num_entries; if (ioctl(fd, CTL_GET_OOA, &ooa) == -1) { warn("%s: CTL_GET_OOA ioctl failed", __func__); retval = 1; goto bailout; } if (ooa.status == CTL_OOA_NEED_MORE_SPACE) { num_entries = num_entries * 2; free(ooa.entries); ooa.entries = NULL; goto retry; } if (ooa.status != CTL_OOA_OK) { warnx("%s: CTL_GET_OOA ioctl returned error %d", __func__, ooa.status); retval = 1; goto bailout; } fprintf(stdout, "Dumping OOA queues\n"); for (i = 0; i < ooa.fill_num; i++) { struct ctl_ooa_entry *entry; char cdb_str[(SCSI_MAX_CDBLEN * 3) +1]; struct bintime delta_bt; struct timespec ts; entry = &ooa.entries[i]; delta_bt = ooa.cur_bt; bintime_sub(&delta_bt, &entry->start_bt); bintime2timespec(&delta_bt, &ts); cmd_latency = ts.tv_sec * 1000; if (ts.tv_nsec > 0) cmd_latency += ts.tv_nsec / 1000000; fprintf(stdout, "LUN %jd tag 0x%04x%s%s%s%s%s: %s. CDB: %s " "(%0.0Lf ms)\n", (intmax_t)entry->lun_num, entry->tag_num, (entry->cmd_flags & CTL_OOACMD_FLAG_BLOCKED) ? " BLOCKED" : "", (entry->cmd_flags & CTL_OOACMD_FLAG_DMA) ? " DMA" : "", (entry->cmd_flags & CTL_OOACMD_FLAG_DMA_QUEUED) ? " DMAQUEUED" : "", (entry->cmd_flags & CTL_OOACMD_FLAG_ABORT) ? " ABORT" : "", (entry->cmd_flags & CTL_OOACMD_FLAG_RTR) ? " RTR" :"", scsi_op_desc(entry->cdb[0], NULL), scsi_cdb_string(entry->cdb, cdb_str, sizeof(cdb_str)), cmd_latency); } fprintf(stdout, "OOA queues dump done\n"); bailout: free(ooa.entries); return (retval); } static int cctl_dump_structs(int fd, ctladm_cmdargs cmdargs __unused) { if (ioctl(fd, CTL_DUMP_STRUCTS) == -1) { warn(__func__); return (1); } return (0); } typedef enum { CCTL_PORT_MODE_NONE, CCTL_PORT_MODE_LIST, CCTL_PORT_MODE_SET, CCTL_PORT_MODE_ON, CCTL_PORT_MODE_OFF } cctl_port_mode; static struct ctladm_opts cctl_fe_table[] = { {"fc", CTL_PORT_FC, CTLADM_ARG_NONE, NULL}, {"scsi", CTL_PORT_SCSI, CTLADM_ARG_NONE, NULL}, {"internal", CTL_PORT_INTERNAL, CTLADM_ARG_NONE, NULL}, {"iscsi", CTL_PORT_ISCSI, CTLADM_ARG_NONE, NULL}, {"sas", CTL_PORT_SAS, CTLADM_ARG_NONE, NULL}, {"all", CTL_PORT_ALL, CTLADM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; static int cctl_port(int fd, int argc, char **argv, char *combinedopt) { int c; int32_t targ_port = -1; int retval = 0; int wwnn_set = 0, wwpn_set = 0; uint64_t wwnn = 0, wwpn = 0; cctl_port_mode port_mode = CCTL_PORT_MODE_NONE; struct ctl_port_entry entry; ctl_port_type port_type = CTL_PORT_NONE; int quiet = 0, xml = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': if (port_mode != CCTL_PORT_MODE_NONE) goto bailout_badarg; port_mode = CCTL_PORT_MODE_LIST; break; case 'o': if (port_mode != CCTL_PORT_MODE_NONE) goto bailout_badarg; if (strcasecmp(optarg, "on") == 0) port_mode = CCTL_PORT_MODE_ON; else if (strcasecmp(optarg, "off") == 0) port_mode = CCTL_PORT_MODE_OFF; else { warnx("Invalid -o argument %s, \"on\" or " "\"off\" are the only valid args", optarg); retval = 1; goto bailout; } break; case 'p': targ_port = strtol(optarg, NULL, 0); break; case 'q': quiet = 1; break; case 't': { ctladm_optret optret; ctladm_cmdargs argnum; const char *subopt; ctl_port_type tmp_port_type; optret = getoption(cctl_fe_table, optarg, &tmp_port_type, &argnum, &subopt); if (optret == CC_OR_AMBIGUOUS) { warnx("%s: ambiguous frontend type %s", __func__, optarg); retval = 1; goto bailout; } else if (optret == CC_OR_NOT_FOUND) { warnx("%s: invalid frontend type %s", __func__, optarg); retval 
= 1; goto bailout; } port_type |= tmp_port_type; break; } case 'w': if ((port_mode != CCTL_PORT_MODE_NONE) && (port_mode != CCTL_PORT_MODE_SET)) goto bailout_badarg; port_mode = CCTL_PORT_MODE_SET; wwnn = strtoull(optarg, NULL, 0); wwnn_set = 1; break; case 'W': if ((port_mode != CCTL_PORT_MODE_NONE) && (port_mode != CCTL_PORT_MODE_SET)) goto bailout_badarg; port_mode = CCTL_PORT_MODE_SET; wwpn = strtoull(optarg, NULL, 0); wwpn_set = 1; break; case 'x': xml = 1; break; } } /* * The user can specify either one or more frontend types (-t), or * a specific frontend, but not both. * * If the user didn't specify a frontend type or number, set it to * all. This is primarily needed for the enable/disable ioctls. * This will be a no-op for the listing code. For the set ioctl, * we'll throw an error, since that only works on one port at a time. */ if ((port_type != CTL_PORT_NONE) && (targ_port != -1)) { warnx("%s: can only specify one of -t or -n", __func__); retval = 1; goto bailout; } else if ((targ_port == -1) && (port_type == CTL_PORT_NONE)) port_type = CTL_PORT_ALL; bzero(&entry, sizeof(entry)); /* * These are needed for all but list/dump mode. */ entry.port_type = port_type; entry.targ_port = targ_port; switch (port_mode) { case CCTL_PORT_MODE_LIST: { char opts[] = "xq"; char argx[] = "-x"; char argq[] = "-q"; char *argvx[2]; int argcx = 0; optind = 0; optreset = 1; if (xml) argvx[argcx++] = argx; if (quiet) argvx[argcx++] = argq; cctl_portlist(fd, argcx, argvx, opts); break; } case CCTL_PORT_MODE_SET: if (targ_port == -1) { warnx("%s: -w and -W require -n", __func__); retval = 1; goto bailout; } if (wwnn_set) { entry.flags |= CTL_PORT_WWNN_VALID; entry.wwnn = wwnn; } if (wwpn_set) { entry.flags |= CTL_PORT_WWPN_VALID; entry.wwpn = wwpn; } if (ioctl(fd, CTL_SET_PORT_WWNS, &entry) == -1) { warn("%s: CTL_SET_PORT_WWNS ioctl failed", __func__); retval = 1; goto bailout; } break; case CCTL_PORT_MODE_ON: if (ioctl(fd, CTL_ENABLE_PORT, &entry) == -1) { warn("%s: CTL_ENABLE_PORT ioctl failed", __func__); retval = 1; goto bailout; } fprintf(stdout, "Front End Ports enabled\n"); break; case CCTL_PORT_MODE_OFF: if (ioctl(fd, CTL_DISABLE_PORT, &entry) == -1) { warn("%s: CTL_DISABLE_PORT ioctl failed", __func__); retval = 1; goto bailout; } fprintf(stdout, "Front End Ports disabled\n"); break; default: warnx("%s: one of -l, -o or -w/-W must be specified", __func__); retval = 1; goto bailout; break; } bailout: return (retval); bailout_badarg: warnx("%s: only one of -l, -o or -w/-W may be specified", __func__); return (1); } static int cctl_do_io(int fd, int retries, union ctl_io *io, const char *func) { do { if (ioctl(fd, CTL_IO, io) == -1) { warn("%s: error sending CTL_IO ioctl", func); return (-1); } } while (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) && (retries-- > 0)); return (0); } static int cctl_delay(int fd, int lun, int argc, char **argv, char *combinedopt) { struct ctl_io_delay_info delay_info; char *delayloc = NULL; char *delaytype = NULL; int delaytime = -1; int retval; int c; retval = 0; memset(&delay_info, 0, sizeof(delay_info)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'T': delaytype = strdup(optarg); break; case 'l': delayloc = strdup(optarg); break; case 't': delaytime = strtoul(optarg, NULL, 0); break; } } if (delaytime == -1) { warnx("%s: you must specify the delaytime with -t", __func__); retval = 1; goto bailout; } if (strcasecmp(delayloc, "datamove") == 0) delay_info.delay_loc = CTL_DELAY_LOC_DATAMOVE; else if (strcasecmp(delayloc, "done") 
== 0) delay_info.delay_loc = CTL_DELAY_LOC_DONE; else { warnx("%s: invalid delay location %s", __func__, delayloc); retval = 1; goto bailout; } if ((delaytype == NULL) || (strcmp(delaytype, "oneshot") == 0)) delay_info.delay_type = CTL_DELAY_TYPE_ONESHOT; else if (strcmp(delaytype, "cont") == 0) delay_info.delay_type = CTL_DELAY_TYPE_CONT; else { warnx("%s: invalid delay type %s", __func__, delaytype); retval = 1; goto bailout; } delay_info.lun_id = lun; delay_info.delay_secs = delaytime; if (ioctl(fd, CTL_DELAY_IO, &delay_info) == -1) { warn("%s: CTL_DELAY_IO ioctl failed", __func__); retval = 1; goto bailout; } switch (delay_info.status) { case CTL_DELAY_STATUS_NONE: warnx("%s: no delay status??", __func__); retval = 1; break; case CTL_DELAY_STATUS_OK: break; case CTL_DELAY_STATUS_INVALID_LUN: warnx("%s: invalid lun %d", __func__, lun); retval = 1; break; case CTL_DELAY_STATUS_INVALID_TYPE: warnx("%s: invalid delay type %d", __func__, delay_info.delay_type); retval = 1; break; case CTL_DELAY_STATUS_INVALID_LOC: warnx("%s: delay location %s not implemented?", __func__, delayloc); retval = 1; break; case CTL_DELAY_STATUS_NOT_IMPLEMENTED: warnx("%s: delay not implemented in the kernel", __func__); warnx("%s: recompile with the CTL_IO_DELAY flag set", __func__); retval = 1; break; default: warnx("%s: unknown delay return status %d", __func__, delay_info.status); retval = 1; break; } bailout: free(delayloc); free(delaytype); return (retval); } static struct ctladm_opts cctl_err_types[] = { {"aborted", CTL_LUN_INJ_ABORTED, CTLADM_ARG_NONE, NULL}, {"mediumerr", CTL_LUN_INJ_MEDIUM_ERR, CTLADM_ARG_NONE, NULL}, {"ua", CTL_LUN_INJ_UA, CTLADM_ARG_NONE, NULL}, {"custom", CTL_LUN_INJ_CUSTOM, CTLADM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; static struct ctladm_opts cctl_err_patterns[] = { {"read", CTL_LUN_PAT_READ, CTLADM_ARG_NONE, NULL}, {"write", CTL_LUN_PAT_WRITE, CTLADM_ARG_NONE, NULL}, {"rw", CTL_LUN_PAT_READWRITE, CTLADM_ARG_NONE, NULL}, {"readwrite", CTL_LUN_PAT_READWRITE, CTLADM_ARG_NONE, NULL}, {"readcap", CTL_LUN_PAT_READCAP, CTLADM_ARG_NONE, NULL}, {"tur", CTL_LUN_PAT_TUR, CTLADM_ARG_NONE, NULL}, {"any", CTL_LUN_PAT_ANY, CTLADM_ARG_NONE, NULL}, #if 0 {"cmd", CTL_LUN_PAT_CMD, CTLADM_ARG_NONE, NULL}, #endif {NULL, 0, 0, NULL} }; static int cctl_error_inject(int fd, uint32_t lun, int argc, char **argv, char *combinedopt) { int retval = 0; struct ctl_error_desc err_desc; uint64_t lba = 0; uint32_t len = 0; uint64_t delete_id = 0; int delete_id_set = 0; int continuous = 0; int sense_len = 0; int fd_sense = 0; int c; bzero(&err_desc, sizeof(err_desc)); err_desc.lun_id = lun; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'c': continuous = 1; break; case 'd': delete_id = strtoull(optarg, NULL, 0); delete_id_set = 1; break; case 'i': case 'p': { ctladm_optret optret; ctladm_cmdargs argnum; const char *subopt; if (c == 'i') { ctl_lun_error err_type; if (err_desc.lun_error != CTL_LUN_INJ_NONE) { warnx("%s: can't specify multiple -i " "arguments", __func__); retval = 1; goto bailout; } optret = getoption(cctl_err_types, optarg, &err_type, &argnum, &subopt); err_desc.lun_error = err_type; } else { ctl_lun_error_pattern pattern; optret = getoption(cctl_err_patterns, optarg, &pattern, &argnum, &subopt); err_desc.error_pattern |= pattern; } if (optret == CC_OR_AMBIGUOUS) { warnx("%s: ambiguous argument %s", __func__, optarg); retval = 1; goto bailout; } else if (optret == CC_OR_NOT_FOUND) { warnx("%s: argument %s not found", __func__, optarg); retval = 1; goto bailout; } break; } 
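		/*
		 * The -r handler below expects an "lba,len" pair; for example
		 * (hypothetical values) "-r 1024,8" restricts the injected error
		 * to commands touching LBAs 1024 through 1031, via
		 * err_desc.lba_range further down.
		 */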
case 'r': { char *tmpstr, *tmpstr2; tmpstr = strdup(optarg); if (tmpstr == NULL) { warn("%s: error duplicating string %s", __func__, optarg); retval = 1; goto bailout; } tmpstr2 = strsep(&tmpstr, ","); if (tmpstr2 == NULL) { warnx("%s: invalid -r argument %s", __func__, optarg); retval = 1; free(tmpstr); goto bailout; } lba = strtoull(tmpstr2, NULL, 0); tmpstr2 = strsep(&tmpstr, ","); if (tmpstr2 == NULL) { warnx("%s: no len argument for -r lba,len, got" " %s", __func__, optarg); retval = 1; free(tmpstr); goto bailout; } len = strtoul(tmpstr2, NULL, 0); free(tmpstr); break; } case 's': { struct get_hook hook; char *sensestr; sense_len = strtol(optarg, NULL, 0); if (sense_len <= 0) { warnx("invalid number of sense bytes %d", sense_len); retval = 1; goto bailout; } sense_len = MIN(sense_len, SSD_FULL_SIZE); hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; sensestr = cget(&hook, NULL); if ((sensestr != NULL) && (sensestr[0] == '-')) { fd_sense = 1; } else { buff_encode_visit( (uint8_t *)&err_desc.custom_sense, sense_len, sensestr, iget, &hook); } optind += hook.got; break; } default: break; } } if (delete_id_set != 0) { err_desc.serial = delete_id; if (ioctl(fd, CTL_ERROR_INJECT_DELETE, &err_desc) == -1) { warn("%s: error issuing CTL_ERROR_INJECT_DELETE ioctl", __func__); retval = 1; } goto bailout; } if (err_desc.lun_error == CTL_LUN_INJ_NONE) { warnx("%s: error injection command (-i) needed", __func__); retval = 1; goto bailout; } else if ((err_desc.lun_error == CTL_LUN_INJ_CUSTOM) && (sense_len == 0)) { warnx("%s: custom error requires -s", __func__); retval = 1; goto bailout; } if (continuous != 0) err_desc.lun_error |= CTL_LUN_INJ_CONTINUOUS; /* * If fd_sense is set, we need to read the sense data the user * wants returned from stdin. */ if (fd_sense == 1) { ssize_t amt_read; int amt_to_read = sense_len; u_int8_t *buf_ptr = (uint8_t *)&err_desc.custom_sense; for (amt_read = 0; amt_to_read > 0; amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read)) { if (amt_read == -1) { warn("error reading sense data from stdin"); retval = 1; goto bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (err_desc.error_pattern == CTL_LUN_PAT_NONE) { warnx("%s: command pattern (-p) needed", __func__); retval = 1; goto bailout; } if (len != 0) { err_desc.error_pattern |= CTL_LUN_PAT_RANGE; /* * We could check here to see whether it's a read/write * command, but that will be pointless once we allow * custom patterns. At that point, the user could specify * a READ(6) CDB type, and we wouldn't have an easy way here * to verify whether range checking is possible there. The * user will just figure it out when his error never gets * executed. */ #if 0 if ((err_desc.pattern & CTL_LUN_PAT_READWRITE) == 0) { warnx("%s: need read and/or write pattern if range " "is specified", __func__); retval = 1; goto bailout; } #endif err_desc.lba_range.lba = lba; err_desc.lba_range.len = len; } if (ioctl(fd, CTL_ERROR_INJECT, &err_desc) == -1) { warn("%s: error issuing CTL_ERROR_INJECT ioctl", __func__); retval = 1; } else { printf("Error injection succeeded, serial number is %ju\n", (uintmax_t)err_desc.serial); } bailout: return (retval); } static int cctl_lunlist(int fd) { struct scsi_report_luns_data *lun_data; struct scsi_inquiry_data *inq_data; uint32_t num_luns; int initid; unsigned int i; int retval; inq_data = NULL; initid = 7; /* * XXX KDM assuming LUN 0 is fine, but we may need to change this * if we ever acquire the ability to have multiple targets. 
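 * (The REPORT LUNS below is issued to LUN 0, and each LUN that
 * comes back is then probed individually with an INQUIRY via
 * cctl_get_inquiry().)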
*/ if ((retval = cctl_get_luns(fd, /*lun*/ 0, initid, /*retries*/ 2, &lun_data, &num_luns)) != 0) goto bailout; inq_data = malloc(sizeof(*inq_data)); if (inq_data == NULL) { warn("%s: couldn't allocate memory for inquiry data\n", __func__); retval = 1; goto bailout; } for (i = 0; i < num_luns; i++) { char scsi_path[40]; int lun_val; switch (lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: lun_val = lun_data->luns[i].lundata[1]; break; case RPL_LUNDATA_ATYP_FLAT: lun_val = (lun_data->luns[i].lundata[0] & RPL_LUNDATA_FLAT_LUN_MASK) | (lun_data->luns[i].lundata[1] << RPL_LUNDATA_FLAT_LUN_BITS); break; case RPL_LUNDATA_ATYP_LUN: case RPL_LUNDATA_ATYP_EXTLUN: default: fprintf(stdout, "Unsupported LUN format %d\n", lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK); lun_val = -1; break; } if (lun_val == -1) continue; if ((retval = cctl_get_inquiry(fd, lun_val, initid, /*retries*/ 2, scsi_path, sizeof(scsi_path), inq_data)) != 0) { goto bailout; } printf("%s", scsi_path); scsi_print_inquiry(inq_data); } bailout: if (lun_data != NULL) free(lun_data); if (inq_data != NULL) free(inq_data); return (retval); } static int cctl_sync_cache(int fd, int lun, int iid, int retries, int argc, char **argv, char *combinedopt) { union ctl_io *io; int cdb_size = -1; int retval; uint64_t our_lba = 0; uint32_t our_block_count = 0; int reladr = 0, immed = 0; int c; retval = 0; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warnx("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': our_block_count = strtoul(optarg, NULL, 0); break; case 'c': cdb_size = strtol(optarg, NULL, 0); break; case 'i': immed = 1; break; case 'l': our_lba = strtoull(optarg, NULL, 0); break; case 'r': reladr = 1; break; default: break; } } if (cdb_size != -1) { switch (cdb_size) { case 10: case 16: break; default: warnx("%s: invalid cdbsize %d, valid sizes are 10 " "and 16", __func__, cdb_size); retval = 1; goto bailout; break; /* NOTREACHED */ } } else cdb_size = 10; ctl_scsi_sync_cache(/*io*/ io, /*immed*/ immed, /*reladr*/ reladr, /*minimum_cdb_size*/ cdb_size, /*starting_lba*/ our_lba, /*block_count*/ our_block_count, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { fprintf(stdout, "Cache synchronized successfully\n"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); return (retval); } static int cctl_start_stop(int fd, int lun, int iid, int retries, int start, int argc, char **argv, char *combinedopt) { union ctl_io *io; char scsi_path[40]; int immed = 0; int retval, c; retval = 0; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warnx("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'i': immed = 1; break; default: break; } } /* * Use an ordered tag for the stop command, to guarantee that any * pending I/O will finish before the stop command executes. This * would normally be the case anyway, since CTL will basically * treat the start/stop command as an ordered command with respect * to any other command except an INQUIRY. (See ctl_ser_table.c.) */ ctl_scsi_start_stop(/*io*/ io, /*start*/ start, /*load_eject*/ 0, /*immediate*/ immed, /*power_conditions*/ SSS_PC_START_VALID, /*ctl_tag_type*/ start ? 
CTL_TAG_SIMPLE : CTL_TAG_ORDERED, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } ctl_scsi_path_string(io, scsi_path, sizeof(scsi_path)); if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { fprintf(stdout, "%s LUN %s successfully\n", scsi_path, (start) ? "started" : "stopped"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); return (retval); } static int cctl_mode_sense(int fd, int lun, int iid, int retries, int argc, char **argv, char *combinedopt) { union ctl_io *io; uint32_t datalen; uint8_t *dataptr; int pc = -1, cdbsize, retval, dbd = 0, subpage = -1; int list = 0; int page_code = -1; int c; cdbsize = 0; retval = 0; dataptr = NULL; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'P': pc = strtoul(optarg, NULL, 0); break; case 'S': subpage = strtoul(optarg, NULL, 0); break; case 'd': dbd = 1; break; case 'l': list = 1; break; case 'm': page_code = strtoul(optarg, NULL, 0); break; case 'c': cdbsize = strtol(optarg, NULL, 0); break; default: break; } } if (((list == 0) && (page_code == -1)) || ((list != 0) && (page_code != -1))) { warnx("%s: you must specify either a page code (-m) or -l", __func__); retval = 1; goto bailout; } if ((page_code != -1) && ((page_code > SMS_ALL_PAGES_PAGE) || (page_code < 0))) { warnx("%s: page code %d is out of range", __func__, page_code); retval = 1; goto bailout; } if (list == 1) { page_code = SMS_ALL_PAGES_PAGE; if (pc != -1) { warnx("%s: arg -P makes no sense with -l", __func__); retval = 1; goto bailout; } if (subpage != -1) { warnx("%s: arg -S makes no sense with -l", __func__); retval = 1; goto bailout; } } if (pc == -1) pc = SMS_PAGE_CTRL_CURRENT; else { if ((pc > 3) || (pc < 0)) { warnx("%s: page control value %d is out of range: 0-3", __func__, pc); retval = 1; goto bailout; } } if ((subpage != -1) && ((subpage > 255) || (subpage < 0))) { warnx("%s: subpage code %d is out of range: 0-255", __func__, subpage); retval = 1; goto bailout; } if (cdbsize != 0) { switch (cdbsize) { case 6: case 10: break; default: warnx("%s: invalid cdbsize %d, valid sizes are 6 " "and 10", __func__, cdbsize); retval = 1; goto bailout; break; } } else cdbsize = 6; if (subpage == -1) subpage = 0; if (cdbsize == 6) datalen = 255; else datalen = 65535; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_mode_sense(io, /*data_ptr*/ dataptr, /*data_len*/ datalen, /*dbd*/ dbd, /*llbaa*/ 0, /*page_code*/ page_code, /*pc*/ pc << 6, /*subpage*/ subpage, /*minimum_cdb_size*/ cdbsize, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { int pages_len, used_len; uint32_t returned_len; uint8_t *ndataptr; if (io->scsiio.cdb[0] == MODE_SENSE_6) { struct scsi_mode_hdr_6 *hdr6; int bdlen; hdr6 = (struct scsi_mode_hdr_6 *)dataptr; returned_len = hdr6->datalen + 1; bdlen = hdr6->block_descr_len; ndataptr = (uint8_t *)((uint8_t *)&hdr6[1] + bdlen); } else { struct scsi_mode_hdr_10 *hdr10; int bdlen; hdr10 = (struct scsi_mode_hdr_10 *)dataptr; returned_len = scsi_2btoul(hdr10->datalen) + 2; bdlen = 
scsi_2btoul(hdr10->block_descr_len); ndataptr = (uint8_t *)((uint8_t *)&hdr10[1] + bdlen); } /* just in case they can give us more than we allocated for */ returned_len = min(returned_len, datalen); pages_len = returned_len - (ndataptr - dataptr); #if 0 fprintf(stdout, "returned_len = %d, pages_len = %d\n", returned_len, pages_len); #endif if (list == 1) { fprintf(stdout, "Supported mode pages:\n"); for (used_len = 0; used_len < pages_len;) { struct scsi_mode_page_header *header; header = (struct scsi_mode_page_header *) &ndataptr[used_len]; fprintf(stdout, "%d\n", header->page_code); used_len += header->page_length + 2; } } else { for (used_len = 0; used_len < pages_len; used_len++) { fprintf(stdout, "0x%x ", ndataptr[used_len]); if (((used_len+1) % 16) == 0) fprintf(stdout, "\n"); } fprintf(stdout, "\n"); } } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_read_capacity(int fd, int lun, int iid, int retries, int argc, char **argv, char *combinedopt) { union ctl_io *io; struct scsi_read_capacity_data *data; struct scsi_read_capacity_data_long *longdata; int cdbsize = -1, retval; uint8_t *dataptr; int c; cdbsize = 10; dataptr = NULL; retval = 0; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warn("%s: can't allocate memory\n", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'c': cdbsize = strtol(optarg, NULL, 0); break; default: break; } } if (cdbsize != -1) { switch (cdbsize) { case 10: case 16: break; default: warnx("%s: invalid cdbsize %d, valid sizes are 10 " "and 16", __func__, cdbsize); retval = 1; goto bailout; break; /* NOTREACHED */ } } else cdbsize = 10; dataptr = (uint8_t *)malloc(sizeof(*longdata)); if (dataptr == NULL) { warn("%s: can't allocate %zd bytes\n", __func__, sizeof(*longdata)); retval = 1; goto bailout; } memset(dataptr, 0, sizeof(*longdata)); retry: switch (cdbsize) { case 10: ctl_scsi_read_capacity(io, /*data_ptr*/ dataptr, /*data_len*/ sizeof(*longdata), /*addr*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); break; case 16: ctl_scsi_read_capacity_16(io, /*data_ptr*/ dataptr, /*data_len*/ sizeof(*longdata), /*addr*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); break; } io->io_hdr.nexus.initid = iid; io->io_hdr.nexus.targ_lun = lun; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { uint64_t maxlba; uint32_t blocksize; if (cdbsize == 10) { data = (struct scsi_read_capacity_data *)dataptr; maxlba = scsi_4btoul(data->addr); blocksize = scsi_4btoul(data->length); if (maxlba == 0xffffffff) { cdbsize = 16; goto retry; } } else { longdata=(struct scsi_read_capacity_data_long *)dataptr; maxlba = scsi_8btou64(longdata->addr); blocksize = scsi_4btoul(longdata->length); } fprintf(stdout, "Disk Capacity: %ju, Blocksize: %d\n", (uintmax_t)maxlba, blocksize); } else { ctl_io_error_print(io, NULL, stderr); } bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_read_write(int fd, int lun, int iid, int retries, int argc, char **argv, char *combinedopt, ctladm_cmdfunction command) { union ctl_io *io; int file_fd, do_stdio; int cdbsize = -1, databytes; uint8_t *dataptr; char *filename = NULL; int datalen = -1, blocksize = -1; uint64_t lba = 0; int lba_set = 0; int retval; int c; retval = 0; do_stdio = 0; dataptr = NULL; file_fd = -1; io = 
ctl_scsi_alloc_io(iid); if (io == NULL) { warn("%s: can't allocate memory\n", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'N': io->io_hdr.flags |= CTL_FLAG_NO_DATAMOVE; break; case 'b': blocksize = strtoul(optarg, NULL, 0); break; case 'c': cdbsize = strtoul(optarg, NULL, 0); break; case 'd': datalen = strtoul(optarg, NULL, 0); break; case 'f': filename = strdup(optarg); break; case 'l': lba = strtoull(optarg, NULL, 0); lba_set = 1; break; default: break; } } if (filename == NULL) { warnx("%s: you must supply a filename using -f", __func__); retval = 1; goto bailout; } if (datalen == -1) { warnx("%s: you must specify the data length with -d", __func__); retval = 1; goto bailout; } if (lba_set == 0) { warnx("%s: you must specify the LBA with -l", __func__); retval = 1; goto bailout; } if (blocksize == -1) { warnx("%s: you must specify the blocksize with -b", __func__); retval = 1; goto bailout; } if (cdbsize != -1) { switch (cdbsize) { case 6: case 10: case 12: case 16: break; default: warnx("%s: invalid cdbsize %d, valid sizes are 6, " "10, 12 or 16", __func__, cdbsize); retval = 1; goto bailout; break; /* NOTREACHED */ } } else cdbsize = 6; databytes = datalen * blocksize; dataptr = (uint8_t *)malloc(databytes); if (dataptr == NULL) { warn("%s: can't allocate %d bytes\n", __func__, databytes); retval = 1; goto bailout; } if (strcmp(filename, "-") == 0) { if (command == CTLADM_CMD_READ) file_fd = STDOUT_FILENO; else file_fd = STDIN_FILENO; do_stdio = 1; } else { file_fd = open(filename, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); if (file_fd == -1) { warn("%s: can't open file %s", __func__, filename); retval = 1; goto bailout; } } memset(dataptr, 0, databytes); if (command == CTLADM_CMD_WRITE) { int bytes_read; bytes_read = read(file_fd, dataptr, databytes); if (bytes_read == -1) { warn("%s: error reading file %s", __func__, filename); retval = 1; goto bailout; } if (bytes_read != databytes) { warnx("%s: only read %d bytes from file %s", __func__, bytes_read, filename); retval = 1; goto bailout; } } ctl_scsi_read_write(io, /*data_ptr*/ dataptr, /*data_len*/ databytes, /*read_op*/ (command == CTLADM_CMD_READ) ? 1 : 0, /*byte2*/ 0, /*minimum_cdb_size*/ cdbsize, /*lba*/ lba, /*num_blocks*/ datalen, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && (command == CTLADM_CMD_READ)) { int bytes_written; bytes_written = write(file_fd, dataptr, databytes); if (bytes_written == -1) { warn("%s: can't write to %s", __func__, filename); goto bailout; } } else if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); if ((do_stdio == 0) && (file_fd != -1)) close(file_fd); return (retval); } static int cctl_get_luns(int fd, int lun, int iid, int retries, struct scsi_report_luns_data **lun_data, uint32_t *num_luns) { union ctl_io *io; uint32_t nluns; int lun_datalen; int retval; retval = 0; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warnx("%s: can't allocate memory", __func__); return (1); } /* * lun_data includes space for 1 lun, allocate space for 4 initially. * If that isn't enough, we'll allocate more. 
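* (Editor's note: the "allocate more" case is handled below; if the REPORT LUNS response indicates more LUNs than we allocated space for, the buffer is freed and the command is reissued with the larger count via the retry label.)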
*/ nluns = 4; retry: lun_datalen = sizeof(*lun_data) + (nluns * sizeof(struct scsi_report_luns_lundata)); *lun_data = malloc(lun_datalen); if (*lun_data == NULL) { warnx("%s: can't allocate memory", __func__); ctl_scsi_free_io(io); return (1); } ctl_scsi_report_luns(io, /*data_ptr*/ (uint8_t *)*lun_data, /*data_len*/ lun_datalen, /*select_report*/ RPL_REPORT_ALL, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.initid = iid; io->io_hdr.nexus.targ_lun = lun; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { uint32_t returned_len, returned_luns; returned_len = scsi_4btoul((*lun_data)->length); returned_luns = returned_len / 8; if (returned_luns > nluns) { nluns = returned_luns; free(*lun_data); goto retry; } /* These should be the same */ *num_luns = MIN(returned_luns, nluns); } else { ctl_io_error_print(io, NULL, stderr); retval = 1; } bailout: ctl_scsi_free_io(io); return (retval); } static int cctl_report_luns(int fd, int lun, int iid, int retries) { struct scsi_report_luns_data *lun_data; uint32_t num_luns, i; int retval; lun_data = NULL; if ((retval = cctl_get_luns(fd, lun, iid, retries, &lun_data, &num_luns)) != 0) goto bailout; fprintf(stdout, "%u LUNs returned\n", num_luns); for (i = 0; i < num_luns; i++) { int lun_val; /* * XXX KDM figure out a way to share this code with * cctl_lunlist()? */ switch (lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: lun_val = lun_data->luns[i].lundata[1]; break; case RPL_LUNDATA_ATYP_FLAT: lun_val = (lun_data->luns[i].lundata[0] & RPL_LUNDATA_FLAT_LUN_MASK) | (lun_data->luns[i].lundata[1] << RPL_LUNDATA_FLAT_LUN_BITS); break; case RPL_LUNDATA_ATYP_LUN: case RPL_LUNDATA_ATYP_EXTLUN: default: fprintf(stdout, "Unsupported LUN format %d\n", lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK); lun_val = -1; break; } if (lun_val == -1) continue; fprintf(stdout, "%d\n", lun_val); } bailout: if (lun_data != NULL) free(lun_data); return (retval); } static int cctl_tur(int fd, int lun, int iid, int retries) { union ctl_io *io; io = ctl_scsi_alloc_io(iid); if (io == NULL) { fprintf(stderr, "can't allocate memory\n"); return (1); } ctl_scsi_tur(io, /* tag_type */ CTL_TAG_SIMPLE, /* control */ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, retries, io, __func__) != 0) { ctl_scsi_free_io(io); return (1); } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) fprintf(stdout, "Unit is ready\n"); else ctl_io_error_print(io, NULL, stderr); return (0); } static int cctl_get_inquiry(int fd, int lun, int iid, int retries, char *path_str, int path_len, struct scsi_inquiry_data *inq_data) { union ctl_io *io; int retval; retval = 0; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warnx("cctl_inquiry: can't allocate memory\n"); return (1); } ctl_scsi_inquiry(/*io*/ io, /*data_ptr*/ (uint8_t *)inq_data, /*data_len*/ sizeof(*inq_data), /*byte2*/ 0, /*page_code*/ 0, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { retval = 1; ctl_io_error_print(io, NULL, stderr); } else if (path_str != NULL) ctl_scsi_path_string(io, path_str, path_len); bailout: ctl_scsi_free_io(io); return (retval); } static int cctl_inquiry(int fd, int lun, int iid, int retries) { struct scsi_inquiry_data *inq_data; char 
scsi_path[40]; int retval; inq_data = malloc(sizeof(*inq_data)); if (inq_data == NULL) { warnx("%s: can't allocate inquiry data", __func__); retval = 1; goto bailout; } if ((retval = cctl_get_inquiry(fd, lun, iid, retries, scsi_path, sizeof(scsi_path), inq_data)) != 0) goto bailout; printf("%s", scsi_path); scsi_print_inquiry(inq_data); bailout: if (inq_data != NULL) free(inq_data); return (retval); } static int cctl_req_sense(int fd, int lun, int iid, int retries) { union ctl_io *io; struct scsi_sense_data *sense_data; int retval; retval = 0; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warnx("cctl_req_sense: can't allocate memory\n"); return (1); } sense_data = malloc(sizeof(*sense_data)); memset(sense_data, 0, sizeof(*sense_data)); ctl_scsi_request_sense(/*io*/ io, /*data_ptr*/ (uint8_t *)sense_data, /*data_len*/ sizeof(*sense_data), /*byte2*/ 0, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { bcopy(sense_data, &io->scsiio.sense_data, sizeof(*sense_data)); io->scsiio.sense_len = sizeof(*sense_data); ctl_scsi_sense_print(&io->scsiio, NULL, stdout); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); free(sense_data); return (retval); } static int cctl_report_target_port_group(int fd, int lun, int iid) { union ctl_io *io; uint32_t datalen; uint8_t *dataptr; int retval; dataptr = NULL; retval = 0; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } datalen = 64; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_maintenance_in(/*io*/ io, /*data_ptr*/ dataptr, /*data_len*/ datalen, /*action*/ SA_RPRT_TRGT_GRP, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, 0, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { int returned_len, used_len; returned_len = scsi_4btoul(&dataptr[0]) + 4; for (used_len = 0; used_len < returned_len; used_len++) { fprintf(stdout, "0x%02x ", dataptr[used_len]); if (((used_len+1) % 8) == 0) fprintf(stdout, "\n"); } fprintf(stdout, "\n"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_inquiry_vpd_devid(int fd, int lun, int iid) { union ctl_io *io; uint32_t datalen; uint8_t *dataptr; int retval; retval = 0; dataptr = NULL; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } datalen = 256; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_inquiry(/*io*/ io, /*data_ptr*/ dataptr, /*data_len*/ datalen, /*byte2*/ SI_EVPD, /*page_code*/ SVPD_DEVICE_ID, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, 0, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { int returned_len, used_len; returned_len = scsi_2btoul(&dataptr[2]) + 4; for (used_len = 0; used_len < returned_len; used_len++) { fprintf(stdout, "0x%02x ", 
dataptr[used_len]); if (((used_len+1) % 8) == 0) fprintf(stdout, "\n"); } fprintf(stdout, "\n"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_persistent_reserve_in(int fd, int lun, int iid, int argc, char **argv, char *combinedopt, int retry_count) { union ctl_io *io; uint32_t datalen; uint8_t *dataptr; int action = -1; int retval; int c; retval = 0; dataptr = NULL; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': action = strtol(optarg, NULL, 0); break; default: break; } } if (action < 0 || action > 2) { warn("action must be specified and in the range: 0-2"); retval = 1; goto bailout; } datalen = 256; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_persistent_res_in(io, /*data_ptr*/ dataptr, /*data_len*/ datalen, /*action*/ action, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, retry_count, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { int returned_len, used_len; switch (action) { case 0: returned_len = scsi_4btoul(&dataptr[4]) + 8; returned_len = min(returned_len, 256); break; case 1: returned_len = scsi_4btoul(&dataptr[4]) + 8; break; case 2: returned_len = 8; break; default: warnx("%s: invalid action %d", __func__, action); goto bailout; break; /* NOTREACHED */ } for (used_len = 0; used_len < returned_len; used_len++) { fprintf(stdout, "0x%02x ", dataptr[used_len]); if (((used_len+1) % 8) == 0) fprintf(stdout, "\n"); } fprintf(stdout, "\n"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_persistent_reserve_out(int fd, int lun, int iid, int argc, char **argv, char *combinedopt, int retry_count) { union ctl_io *io; uint32_t datalen; uint64_t key = 0, sa_key = 0; int action = -1, restype = -1; uint8_t *dataptr; int retval; int c; retval = 0; dataptr = NULL; io = ctl_scsi_alloc_io(iid); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': action = strtol(optarg, NULL, 0); break; case 'k': key = strtoull(optarg, NULL, 0); break; case 'r': restype = strtol(optarg, NULL, 0); break; case 's': sa_key = strtoull(optarg, NULL, 0); break; default: break; } } if (action < 0 || action > 5) { warn("action must be specified and in the range: 0-5"); retval = 1; goto bailout; } if (restype < 0 || restype > 5) { if (action != 0 && action != 5 && action != 3) { warn("'restype' must be specified and in the range: 0-5"); retval = 1; goto bailout; } } datalen = 24; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_persistent_res_out(io, /*data_ptr*/ dataptr, /*data_len*/ datalen, /*action*/ action, /*type*/ restype, /*key*/ key, /*sa key*/ sa_key, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = iid; if (cctl_do_io(fd, retry_count, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) 
== CTL_SUCCESS) { char scsi_path[40]; ctl_scsi_path_string(io, scsi_path, sizeof(scsi_path)); fprintf( stdout, "%sPERSISTENT RESERVE OUT executed " "successfully\n", scsi_path); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } struct cctl_req_option { char *name; int namelen; char *value; int vallen; STAILQ_ENTRY(cctl_req_option) links; }; static int cctl_create_lun(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_req req; int device_type = -1; uint64_t lun_size = 0; uint32_t blocksize = 0, req_lun_id = 0; char *serial_num = NULL; char *device_id = NULL; int lun_size_set = 0, blocksize_set = 0, lun_id_set = 0; char *backend_name = NULL; STAILQ_HEAD(, cctl_req_option) option_list; int num_options = 0; int retval = 0, c; STAILQ_INIT(&option_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': backend_name = strdup(optarg); break; case 'B': blocksize = strtoul(optarg, NULL, 0); blocksize_set = 1; break; case 'd': device_id = strdup(optarg); break; case 'l': req_lun_id = strtoul(optarg, NULL, 0); lun_id_set = 1; break; case 'o': { struct cctl_req_option *option; char *tmpstr; char *name, *value; tmpstr = strdup(optarg); name = strsep(&tmpstr, "="); if (name == NULL) { warnx("%s: option -o takes \"name=value\"" "argument", __func__); retval = 1; goto bailout; } value = strsep(&tmpstr, "="); if (value == NULL) { warnx("%s: option -o takes \"name=value\"" "argument", __func__); retval = 1; goto bailout; } option = malloc(sizeof(*option)); if (option == NULL) { warn("%s: error allocating %zd bytes", __func__, sizeof(*option)); retval = 1; goto bailout; } option->name = strdup(name); option->namelen = strlen(name) + 1; option->value = strdup(value); option->vallen = strlen(value) + 1; free(tmpstr); STAILQ_INSERT_TAIL(&option_list, option, links); num_options++; break; } case 's': if (strcasecmp(optarg, "auto") != 0) { retval = expand_number(optarg, &lun_size); if (retval != 0) { warn("%s: invalid -s argument", __func__); retval = 1; goto bailout; } } lun_size_set = 1; break; case 'S': serial_num = strdup(optarg); break; case 't': device_type = strtoul(optarg, NULL, 0); break; default: break; } } if (backend_name == NULL) { warnx("%s: backend name (-b) must be specified", __func__); retval = 1; goto bailout; } bzero(&req, sizeof(req)); strlcpy(req.backend, backend_name, sizeof(req.backend)); req.reqtype = CTL_LUNREQ_CREATE; if (blocksize_set != 0) req.reqdata.create.blocksize_bytes = blocksize; if (lun_size_set != 0) req.reqdata.create.lun_size_bytes = lun_size; if (lun_id_set != 0) { req.reqdata.create.flags |= CTL_LUN_FLAG_ID_REQ; req.reqdata.create.req_lun_id = req_lun_id; } req.reqdata.create.flags |= CTL_LUN_FLAG_DEV_TYPE; if (device_type != -1) req.reqdata.create.device_type = device_type; else req.reqdata.create.device_type = T_DIRECT; if (serial_num != NULL) { strlcpy(req.reqdata.create.serial_num, serial_num, sizeof(req.reqdata.create.serial_num)); req.reqdata.create.flags |= CTL_LUN_FLAG_SERIAL_NUM; } if (device_id != NULL) { strlcpy(req.reqdata.create.device_id, device_id, sizeof(req.reqdata.create.device_id)); req.reqdata.create.flags |= CTL_LUN_FLAG_DEVID; } req.num_be_args = num_options; if (num_options > 0) { struct cctl_req_option *option, *next_option; int i; req.be_args = malloc(num_options * sizeof(*req.be_args)); if (req.be_args == NULL) { warn("%s: error allocating %zd bytes", __func__, num_options * sizeof(*req.be_args)); retval = 1; goto bailout; } 
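/* Editor's note: the loop below marshals each -o name=value pair collected during option parsing into a backend argument entry for the CTL_LUN_REQ ioctl; the name and value strings are duplicated because the temporary option list is freed as it is consumed. */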
for (i = 0, option = STAILQ_FIRST(&option_list); i < num_options; i++, option = next_option) { next_option = STAILQ_NEXT(option, links); req.be_args[i].namelen = option->namelen; req.be_args[i].name = strdup(option->name); req.be_args[i].vallen = option->vallen; req.be_args[i].value = strdup(option->value); /* * XXX KDM do we want a way to specify a writeable * flag of some sort? Do we want a way to specify * binary data? */ req.be_args[i].flags = CTL_BEARG_ASCII | CTL_BEARG_RD; STAILQ_REMOVE(&option_list, option, cctl_req_option, links); free(option->name); free(option->value); free(option); } } if (ioctl(fd, CTL_LUN_REQ, &req) == -1) { warn("%s: error issuing CTL_LUN_REQ ioctl", __func__); retval = 1; goto bailout; } switch (req.status) { case CTL_LUN_ERROR: warnx("LUN creation error: %s", req.error_str); retval = 1; goto bailout; case CTL_LUN_WARNING: warnx("LUN creation warning: %s", req.error_str); break; case CTL_LUN_OK: break; default: warnx("unknown LUN creation status: %d", req.status); retval = 1; goto bailout; } fprintf(stdout, "LUN created successfully\n"); fprintf(stdout, "backend: %s\n", req.backend); fprintf(stdout, "device type: %d\n",req.reqdata.create.device_type); fprintf(stdout, "LUN size: %ju bytes\n", (uintmax_t)req.reqdata.create.lun_size_bytes); fprintf(stdout, "blocksize %u bytes\n", req.reqdata.create.blocksize_bytes); fprintf(stdout, "LUN ID: %d\n", req.reqdata.create.req_lun_id); fprintf(stdout, "Serial Number: %s\n", req.reqdata.create.serial_num); fprintf(stdout, "Device ID; %s\n", req.reqdata.create.device_id); bailout: return (retval); } static int cctl_rm_lun(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_req req; uint32_t lun_id = 0; int lun_id_set = 0; char *backend_name = NULL; STAILQ_HEAD(, cctl_req_option) option_list; int num_options = 0; int retval = 0, c; STAILQ_INIT(&option_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': backend_name = strdup(optarg); break; case 'l': lun_id = strtoul(optarg, NULL, 0); lun_id_set = 1; break; case 'o': { struct cctl_req_option *option; char *tmpstr; char *name, *value; tmpstr = strdup(optarg); name = strsep(&tmpstr, "="); if (name == NULL) { warnx("%s: option -o takes \"name=value\"" "argument", __func__); retval = 1; goto bailout; } value = strsep(&tmpstr, "="); if (value == NULL) { warnx("%s: option -o takes \"name=value\"" "argument", __func__); retval = 1; goto bailout; } option = malloc(sizeof(*option)); if (option == NULL) { warn("%s: error allocating %zd bytes", __func__, sizeof(*option)); retval = 1; goto bailout; } option->name = strdup(name); option->namelen = strlen(name) + 1; option->value = strdup(value); option->vallen = strlen(value) + 1; free(tmpstr); STAILQ_INSERT_TAIL(&option_list, option, links); num_options++; break; } default: break; } } if (backend_name == NULL) errx(1, "%s: backend name (-b) must be specified", __func__); if (lun_id_set == 0) errx(1, "%s: LUN id (-l) must be specified", __func__); bzero(&req, sizeof(req)); strlcpy(req.backend, backend_name, sizeof(req.backend)); req.reqtype = CTL_LUNREQ_RM; req.reqdata.rm.lun_id = lun_id; req.num_be_args = num_options; if (num_options > 0) { struct cctl_req_option *option, *next_option; int i; req.be_args = malloc(num_options * sizeof(*req.be_args)); if (req.be_args == NULL) { warn("%s: error allocating %zd bytes", __func__, num_options * sizeof(*req.be_args)); retval = 1; goto bailout; } for (i = 0, option = STAILQ_FIRST(&option_list); i < num_options; i++, option = next_option) { 
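/* Editor's note: this is the same option-marshalling pattern used in cctl_create_lun() above. */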
next_option = STAILQ_NEXT(option, links); req.be_args[i].namelen = option->namelen; req.be_args[i].name = strdup(option->name); req.be_args[i].vallen = option->vallen; req.be_args[i].value = strdup(option->value); /* * XXX KDM do we want a way to specify a writeable * flag of some sort? Do we want a way to specify * binary data? */ req.be_args[i].flags = CTL_BEARG_ASCII | CTL_BEARG_RD; STAILQ_REMOVE(&option_list, option, cctl_req_option, links); free(option->name); free(option->value); free(option); } } if (ioctl(fd, CTL_LUN_REQ, &req) == -1) { warn("%s: error issuing CTL_LUN_REQ ioctl", __func__); retval = 1; goto bailout; } switch (req.status) { case CTL_LUN_ERROR: warnx("LUN removal error: %s", req.error_str); retval = 1; goto bailout; case CTL_LUN_WARNING: warnx("LUN removal warning: %s", req.error_str); break; case CTL_LUN_OK: break; default: warnx("unknown LUN removal status: %d", req.status); retval = 1; goto bailout; } printf("LUN %d removed successfully\n", lun_id); bailout: return (retval); } static int cctl_modify_lun(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_req req; uint64_t lun_size = 0; uint32_t lun_id = 0; int lun_id_set = 0, lun_size_set = 0; char *backend_name = NULL; STAILQ_HEAD(, cctl_req_option) option_list; int num_options = 0; int retval = 0, c; STAILQ_INIT(&option_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': backend_name = strdup(optarg); break; case 'l': lun_id = strtoul(optarg, NULL, 0); lun_id_set = 1; break; case 'o': { struct cctl_req_option *option; char *tmpstr; char *name, *value; tmpstr = strdup(optarg); name = strsep(&tmpstr, "="); if (name == NULL) { warnx("%s: option -o takes \"name=value\"" "argument", __func__); retval = 1; goto bailout; } value = strsep(&tmpstr, "="); if (value == NULL) { warnx("%s: option -o takes \"name=value\"" "argument", __func__); retval = 1; goto bailout; } option = malloc(sizeof(*option)); if (option == NULL) { warn("%s: error allocating %zd bytes", __func__, sizeof(*option)); retval = 1; goto bailout; } option->name = strdup(name); option->namelen = strlen(name) + 1; option->value = strdup(value); option->vallen = strlen(value) + 1; free(tmpstr); STAILQ_INSERT_TAIL(&option_list, option, links); num_options++; break; } case 's': if (strcasecmp(optarg, "auto") != 0) { retval = expand_number(optarg, &lun_size); if (retval != 0) { warn("%s: invalid -s argument", __func__); retval = 1; goto bailout; } } lun_size_set = 1; break; default: break; } } if (backend_name == NULL) errx(1, "%s: backend name (-b) must be specified", __func__); if (lun_id_set == 0) errx(1, "%s: LUN id (-l) must be specified", __func__); if (lun_size_set == 0 && num_options == 0) errx(1, "%s: size (-s) or options (-o) must be specified", __func__); bzero(&req, sizeof(req)); strlcpy(req.backend, backend_name, sizeof(req.backend)); req.reqtype = CTL_LUNREQ_MODIFY; req.reqdata.modify.lun_id = lun_id; req.reqdata.modify.lun_size_bytes = lun_size; req.num_be_args = num_options; if (num_options > 0) { struct cctl_req_option *option, *next_option; int i; req.be_args = malloc(num_options * sizeof(*req.be_args)); if (req.be_args == NULL) { warn("%s: error allocating %zd bytes", __func__, num_options * sizeof(*req.be_args)); retval = 1; goto bailout; } for (i = 0, option = STAILQ_FIRST(&option_list); i < num_options; i++, option = next_option) { next_option = STAILQ_NEXT(option, links); req.be_args[i].namelen = option->namelen; req.be_args[i].name = strdup(option->name); req.be_args[i].vallen = 
option->vallen; req.be_args[i].value = strdup(option->value); /* * XXX KDM do we want a way to specify a writeable * flag of some sort? Do we want a way to specify * binary data? */ req.be_args[i].flags = CTL_BEARG_ASCII | CTL_BEARG_RD; STAILQ_REMOVE(&option_list, option, cctl_req_option, links); free(option->name); free(option->value); free(option); } } if (ioctl(fd, CTL_LUN_REQ, &req) == -1) { warn("%s: error issuing CTL_LUN_REQ ioctl", __func__); retval = 1; goto bailout; } switch (req.status) { case CTL_LUN_ERROR: warnx("LUN modification error: %s", req.error_str); retval = 1; goto bailout; case CTL_LUN_WARNING: warnx("LUN modification warning: %s", req.error_str); break; case CTL_LUN_OK: break; default: warnx("unknown LUN modification status: %d", req.status); retval = 1; goto bailout; } printf("LUN %d modified successfully\n", lun_id); bailout: return (retval); } struct cctl_islist_conn { int connection_id; char *initiator; char *initiator_addr; char *initiator_alias; char *target; char *target_alias; char *header_digest; char *data_digest; char *max_data_segment_length; char *offload; int immediate_data; int iser; STAILQ_ENTRY(cctl_islist_conn) links; }; struct cctl_islist_data { int num_conns; STAILQ_HEAD(,cctl_islist_conn) conn_list; struct cctl_islist_conn *cur_conn; int level; struct sbuf *cur_sb[32]; }; static void cctl_islist_start_element(void *user_data, const char *name, const char **attr) { int i; struct cctl_islist_data *islist; struct cctl_islist_conn *cur_conn; islist = (struct cctl_islist_data *)user_data; cur_conn = islist->cur_conn; islist->level++; if ((u_int)islist->level >= (sizeof(islist->cur_sb) / sizeof(islist->cur_sb[0]))) errx(1, "%s: too many nesting levels, %zd max", __func__, sizeof(islist->cur_sb) / sizeof(islist->cur_sb[0])); islist->cur_sb[islist->level] = sbuf_new_auto(); if (islist->cur_sb[islist->level] == NULL) err(1, "%s: Unable to allocate sbuf", __func__); if (strcmp(name, "connection") == 0) { if (cur_conn != NULL) errx(1, "%s: improper connection element nesting", __func__); cur_conn = calloc(1, sizeof(*cur_conn)); if (cur_conn == NULL) err(1, "%s: cannot allocate %zd bytes", __func__, sizeof(*cur_conn)); islist->num_conns++; islist->cur_conn = cur_conn; STAILQ_INSERT_TAIL(&islist->conn_list, cur_conn, links); for (i = 0; attr[i] != NULL; i += 2) { if (strcmp(attr[i], "id") == 0) { cur_conn->connection_id = strtoull(attr[i+1], NULL, 0); } else { errx(1, "%s: invalid connection attribute %s = %s", __func__, attr[i], attr[i+1]); } } } } static void cctl_islist_end_element(void *user_data, const char *name) { struct cctl_islist_data *islist; struct cctl_islist_conn *cur_conn; char *str; islist = (struct cctl_islist_data *)user_data; cur_conn = islist->cur_conn; if ((cur_conn == NULL) && (strcmp(name, "ctlislist") != 0)) errx(1, "%s: cur_conn == NULL! 
(name = %s)", __func__, name); if (islist->cur_sb[islist->level] == NULL) errx(1, "%s: no valid sbuf at level %d (name %s)", __func__, islist->level, name); sbuf_finish(islist->cur_sb[islist->level]); str = strdup(sbuf_data(islist->cur_sb[islist->level])); if (str == NULL) err(1, "%s can't allocate %zd bytes for string", __func__, sbuf_len(islist->cur_sb[islist->level])); sbuf_delete(islist->cur_sb[islist->level]); islist->cur_sb[islist->level] = NULL; islist->level--; if (strcmp(name, "initiator") == 0) { cur_conn->initiator = str; str = NULL; } else if (strcmp(name, "initiator_addr") == 0) { cur_conn->initiator_addr = str; str = NULL; } else if (strcmp(name, "initiator_alias") == 0) { cur_conn->initiator_alias = str; str = NULL; } else if (strcmp(name, "target") == 0) { cur_conn->target = str; str = NULL; } else if (strcmp(name, "target_alias") == 0) { cur_conn->target_alias = str; str = NULL; } else if (strcmp(name, "target_portal_group_tag") == 0) { } else if (strcmp(name, "header_digest") == 0) { cur_conn->header_digest = str; str = NULL; } else if (strcmp(name, "data_digest") == 0) { cur_conn->data_digest = str; str = NULL; } else if (strcmp(name, "max_data_segment_length") == 0) { cur_conn->max_data_segment_length = str; str = NULL; } else if (strcmp(name, "offload") == 0) { cur_conn->offload = str; str = NULL; } else if (strcmp(name, "immediate_data") == 0) { cur_conn->immediate_data = atoi(str); } else if (strcmp(name, "iser") == 0) { cur_conn->iser = atoi(str); } else if (strcmp(name, "connection") == 0) { islist->cur_conn = NULL; } else if (strcmp(name, "ctlislist") == 0) { /* Nothing. */ } else { /* - * Unknown element; ignore it for forward compatiblity. + * Unknown element; ignore it for forward compatibility. */ } free(str); } static void cctl_islist_char_handler(void *user_data, const XML_Char *str, int len) { struct cctl_islist_data *islist; islist = (struct cctl_islist_data *)user_data; sbuf_bcat(islist->cur_sb[islist->level], str, len); } static int cctl_islist(int fd, int argc, char **argv, char *combinedopt) { struct ctl_iscsi req; struct cctl_islist_data islist; struct cctl_islist_conn *conn; XML_Parser parser; char *conn_str; int conn_len; int dump_xml = 0; int c, retval, verbose = 0; retval = 0; conn_len = 4096; bzero(&islist, sizeof(islist)); STAILQ_INIT(&islist.conn_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'v': verbose = 1; break; case 'x': dump_xml = 1; break; default: break; } } retry: conn_str = malloc(conn_len); bzero(&req, sizeof(req)); req.type = CTL_ISCSI_LIST; req.data.list.alloc_len = conn_len; req.data.list.conn_xml = conn_str; if (ioctl(fd, CTL_ISCSI, &req) == -1) { warn("%s: error issuing CTL_ISCSI ioctl", __func__); retval = 1; goto bailout; } if (req.status == CTL_ISCSI_ERROR) { warnx("%s: error returned from CTL_ISCSI ioctl:\n%s", __func__, req.error_str); } else if (req.status == CTL_ISCSI_LIST_NEED_MORE_SPACE) { conn_len = conn_len << 1; goto retry; } if (dump_xml != 0) { printf("%s", conn_str); goto bailout; } parser = XML_ParserCreate(NULL); if (parser == NULL) { warn("%s: Unable to create XML parser", __func__); retval = 1; goto bailout; } XML_SetUserData(parser, &islist); XML_SetElementHandler(parser, cctl_islist_start_element, cctl_islist_end_element); XML_SetCharacterDataHandler(parser, cctl_islist_char_handler); retval = XML_Parse(parser, conn_str, strlen(conn_str), 1); if (retval != 1) { warnx("%s: Unable to parse XML: Error %d", __func__, XML_GetErrorCode(parser)); XML_ParserFree(parser); retval = 1; 
goto bailout; } retval = 0; XML_ParserFree(parser); if (verbose != 0) { STAILQ_FOREACH(conn, &islist.conn_list, links) { printf("Session ID: %d\n", conn->connection_id); printf("Initiator name: %s\n", conn->initiator); printf("Initiator portal: %s\n", conn->initiator_addr); printf("Initiator alias: %s\n", conn->initiator_alias); printf("Target name: %s\n", conn->target); printf("Target alias: %s\n", conn->target_alias); printf("Header digest: %s\n", conn->header_digest); printf("Data digest: %s\n", conn->data_digest); printf("DataSegmentLen: %s\n", conn->max_data_segment_length); printf("ImmediateData: %s\n", conn->immediate_data ? "Yes" : "No"); printf("iSER (RDMA): %s\n", conn->iser ? "Yes" : "No"); printf("Offload driver: %s\n", conn->offload); printf("\n"); } } else { printf("%4s %-16s %-36s %-36s\n", "ID", "Portal", "Initiator name", "Target name"); STAILQ_FOREACH(conn, &islist.conn_list, links) { printf("%4u %-16s %-36s %-36s\n", conn->connection_id, conn->initiator_addr, conn->initiator, conn->target); } } bailout: free(conn_str); return (retval); } static int cctl_islogout(int fd, int argc, char **argv, char *combinedopt) { struct ctl_iscsi req; int retval = 0, c; int all = 0, connection_id = -1, nargs = 0; char *initiator_name = NULL, *initiator_addr = NULL; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': all = 1; nargs++; break; case 'c': connection_id = strtoul(optarg, NULL, 0); nargs++; break; case 'i': initiator_name = strdup(optarg); if (initiator_name == NULL) err(1, "%s: strdup", __func__); nargs++; break; case 'p': initiator_addr = strdup(optarg); if (initiator_addr == NULL) err(1, "%s: strdup", __func__); nargs++; break; default: break; } } if (nargs == 0) errx(1, "%s: either -a, -c, -i, or -p must be specified", __func__); if (nargs > 1) errx(1, "%s: only one of -a, -c, -i, or -p may be specified", __func__); bzero(&req, sizeof(req)); req.type = CTL_ISCSI_LOGOUT; req.data.logout.connection_id = connection_id; if (initiator_addr != NULL) strlcpy(req.data.logout.initiator_addr, initiator_addr, sizeof(req.data.logout.initiator_addr)); if (initiator_name != NULL) strlcpy(req.data.logout.initiator_name, initiator_name, sizeof(req.data.logout.initiator_name)); if (all != 0) req.data.logout.all = 1; if (ioctl(fd, CTL_ISCSI, &req) == -1) { warn("%s: error issuing CTL_ISCSI ioctl", __func__); retval = 1; goto bailout; } if (req.status != CTL_ISCSI_OK) { warnx("%s: error returned from CTL iSCSI logout request:\n%s", __func__, req.error_str); retval = 1; goto bailout; } printf("iSCSI logout requests submitted\n"); bailout: return (retval); } static int cctl_isterminate(int fd, int argc, char **argv, char *combinedopt) { struct ctl_iscsi req; int retval = 0, c; int all = 0, connection_id = -1, nargs = 0; char *initiator_name = NULL, *initiator_addr = NULL; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': all = 1; nargs++; break; case 'c': connection_id = strtoul(optarg, NULL, 0); nargs++; break; case 'i': initiator_name = strdup(optarg); if (initiator_name == NULL) err(1, "%s: strdup", __func__); nargs++; break; case 'p': initiator_addr = strdup(optarg); if (initiator_addr == NULL) err(1, "%s: strdup", __func__); nargs++; break; default: break; } } if (nargs == 0) errx(1, "%s: either -a, -c, -i, or -p must be specified", __func__); if (nargs > 1) errx(1, "%s: only one of -a, -c, -i, or -p may be specified", __func__); bzero(&req, sizeof(req)); req.type = CTL_ISCSI_TERMINATE; req.data.terminate.connection_id = connection_id; if 
(initiator_addr != NULL) strlcpy(req.data.terminate.initiator_addr, initiator_addr, sizeof(req.data.terminate.initiator_addr)); if (initiator_name != NULL) strlcpy(req.data.terminate.initiator_name, initiator_name, sizeof(req.data.terminate.initiator_name)); if (all != 0) req.data.terminate.all = 1; if (ioctl(fd, CTL_ISCSI, &req) == -1) { warn("%s: error issuing CTL_ISCSI ioctl", __func__); retval = 1; goto bailout; } if (req.status != CTL_ISCSI_OK) { warnx("%s: error returned from CTL iSCSI connection " "termination request:\n%s", __func__, req.error_str); retval = 1; goto bailout; } printf("iSCSI connections terminated\n"); bailout: return (retval); } /* * Name/value pair used for per-LUN attributes. */ struct cctl_lun_nv { char *name; char *value; STAILQ_ENTRY(cctl_lun_nv) links; }; /* * Backend LUN information. */ struct cctl_lun { uint64_t lun_id; char *backend_type; uint64_t size_blocks; uint32_t blocksize; char *serial_number; char *device_id; STAILQ_HEAD(,cctl_lun_nv) attr_list; STAILQ_ENTRY(cctl_lun) links; }; struct cctl_devlist_data { int num_luns; STAILQ_HEAD(,cctl_lun) lun_list; struct cctl_lun *cur_lun; int level; struct sbuf *cur_sb[32]; }; static void cctl_start_element(void *user_data, const char *name, const char **attr) { int i; struct cctl_devlist_data *devlist; struct cctl_lun *cur_lun; devlist = (struct cctl_devlist_data *)user_data; cur_lun = devlist->cur_lun; devlist->level++; if ((u_int)devlist->level >= (sizeof(devlist->cur_sb) / sizeof(devlist->cur_sb[0]))) errx(1, "%s: too many nesting levels, %zd max", __func__, sizeof(devlist->cur_sb) / sizeof(devlist->cur_sb[0])); devlist->cur_sb[devlist->level] = sbuf_new_auto(); if (devlist->cur_sb[devlist->level] == NULL) err(1, "%s: Unable to allocate sbuf", __func__); if (strcmp(name, "lun") == 0) { if (cur_lun != NULL) errx(1, "%s: improper lun element nesting", __func__); cur_lun = calloc(1, sizeof(*cur_lun)); if (cur_lun == NULL) err(1, "%s: cannot allocate %zd bytes", __func__, sizeof(*cur_lun)); devlist->num_luns++; devlist->cur_lun = cur_lun; STAILQ_INIT(&cur_lun->attr_list); STAILQ_INSERT_TAIL(&devlist->lun_list, cur_lun, links); for (i = 0; attr[i] != NULL; i += 2) { if (strcmp(attr[i], "id") == 0) { cur_lun->lun_id = strtoull(attr[i+1], NULL, 0); } else { errx(1, "%s: invalid LUN attribute %s = %s", __func__, attr[i], attr[i+1]); } } } } static void cctl_end_element(void *user_data, const char *name) { struct cctl_devlist_data *devlist; struct cctl_lun *cur_lun; char *str; devlist = (struct cctl_devlist_data *)user_data; cur_lun = devlist->cur_lun; if ((cur_lun == NULL) && (strcmp(name, "ctllunlist") != 0)) errx(1, "%s: cur_lun == NULL! 
(name = %s)", __func__, name); if (devlist->cur_sb[devlist->level] == NULL) errx(1, "%s: no valid sbuf at level %d (name %s)", __func__, devlist->level, name); if (sbuf_finish(devlist->cur_sb[devlist->level]) != 0) err(1, "%s: sbuf_finish", __func__); str = strdup(sbuf_data(devlist->cur_sb[devlist->level])); if (str == NULL) err(1, "%s can't allocate %zd bytes for string", __func__, sbuf_len(devlist->cur_sb[devlist->level])); if (strlen(str) == 0) { free(str); str = NULL; } sbuf_delete(devlist->cur_sb[devlist->level]); devlist->cur_sb[devlist->level] = NULL; devlist->level--; if (strcmp(name, "backend_type") == 0) { cur_lun->backend_type = str; str = NULL; } else if (strcmp(name, "size") == 0) { cur_lun->size_blocks = strtoull(str, NULL, 0); } else if (strcmp(name, "blocksize") == 0) { cur_lun->blocksize = strtoul(str, NULL, 0); } else if (strcmp(name, "serial_number") == 0) { cur_lun->serial_number = str; str = NULL; } else if (strcmp(name, "device_id") == 0) { cur_lun->device_id = str; str = NULL; } else if (strcmp(name, "lun") == 0) { devlist->cur_lun = NULL; } else if (strcmp(name, "ctllunlist") == 0) { /* Nothing. */ } else { struct cctl_lun_nv *nv; nv = calloc(1, sizeof(*nv)); if (nv == NULL) err(1, "%s: can't allocate %zd bytes for nv pair", __func__, sizeof(*nv)); nv->name = strdup(name); if (nv->name == NULL) err(1, "%s: can't allocated %zd bytes for string", __func__, strlen(name)); nv->value = str; str = NULL; STAILQ_INSERT_TAIL(&cur_lun->attr_list, nv, links); } free(str); } static void cctl_char_handler(void *user_data, const XML_Char *str, int len) { struct cctl_devlist_data *devlist; devlist = (struct cctl_devlist_data *)user_data; sbuf_bcat(devlist->cur_sb[devlist->level], str, len); } static int cctl_devlist(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_list list; struct cctl_devlist_data devlist; struct cctl_lun *lun; XML_Parser parser; char *lun_str; int lun_len; int dump_xml = 0; int retval, c; char *backend = NULL; int verbose = 0; retval = 0; lun_len = 4096; bzero(&devlist, sizeof(devlist)); STAILQ_INIT(&devlist.lun_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': backend = strdup(optarg); break; case 'v': verbose++; break; case 'x': dump_xml = 1; break; default: break; } } retry: lun_str = malloc(lun_len); bzero(&list, sizeof(list)); list.alloc_len = lun_len; list.status = CTL_LUN_LIST_NONE; list.lun_xml = lun_str; if (ioctl(fd, CTL_LUN_LIST, &list) == -1) { warn("%s: error issuing CTL_LUN_LIST ioctl", __func__); retval = 1; goto bailout; } if (list.status == CTL_LUN_LIST_ERROR) { warnx("%s: error returned from CTL_LUN_LIST ioctl:\n%s", __func__, list.error_str); } else if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE) { lun_len = lun_len << 1; goto retry; } if (dump_xml != 0) { printf("%s", lun_str); goto bailout; } parser = XML_ParserCreate(NULL); if (parser == NULL) { warn("%s: Unable to create XML parser", __func__); retval = 1; goto bailout; } XML_SetUserData(parser, &devlist); XML_SetElementHandler(parser, cctl_start_element, cctl_end_element); XML_SetCharacterDataHandler(parser, cctl_char_handler); retval = XML_Parse(parser, lun_str, strlen(lun_str), 1); if (retval != 1) { warnx("%s: Unable to parse XML: Error %d", __func__, XML_GetErrorCode(parser)); XML_ParserFree(parser); retval = 1; goto bailout; } retval = 0; XML_ParserFree(parser); printf("LUN Backend %18s %4s %-16s %-16s\n", "Size (Blocks)", "BS", "Serial Number", "Device ID"); STAILQ_FOREACH(lun, &devlist.lun_list, links) { struct cctl_lun_nv *nv; 
if ((backend != NULL) && (strcmp(lun->backend_type, backend) != 0)) continue; printf("%3ju %-8s %18ju %4u %-16s %-16s\n", (uintmax_t)lun->lun_id, lun->backend_type, (uintmax_t)lun->size_blocks, lun->blocksize, lun->serial_number, lun->device_id); if (verbose == 0) continue; STAILQ_FOREACH(nv, &lun->attr_list, links) { printf(" %s=%s\n", nv->name, nv->value); } } bailout: free(lun_str); return (retval); } /* * Port information. */ struct cctl_port { uint64_t port_id; char *online; char *frontend_type; char *name; int pp, vp; char *target, *port, *lun_map; STAILQ_HEAD(,cctl_lun_nv) init_list; STAILQ_HEAD(,cctl_lun_nv) lun_list; STAILQ_HEAD(,cctl_lun_nv) attr_list; STAILQ_ENTRY(cctl_port) links; }; struct cctl_portlist_data { int num_ports; STAILQ_HEAD(,cctl_port) port_list; struct cctl_port *cur_port; int level; uint64_t cur_id; struct sbuf *cur_sb[32]; }; static void cctl_start_pelement(void *user_data, const char *name, const char **attr) { int i; struct cctl_portlist_data *portlist; struct cctl_port *cur_port; portlist = (struct cctl_portlist_data *)user_data; cur_port = portlist->cur_port; portlist->level++; if ((u_int)portlist->level >= (sizeof(portlist->cur_sb) / sizeof(portlist->cur_sb[0]))) errx(1, "%s: too many nesting levels, %zd max", __func__, sizeof(portlist->cur_sb) / sizeof(portlist->cur_sb[0])); portlist->cur_sb[portlist->level] = sbuf_new_auto(); if (portlist->cur_sb[portlist->level] == NULL) err(1, "%s: Unable to allocate sbuf", __func__); portlist->cur_id = 0; for (i = 0; attr[i] != NULL; i += 2) { if (strcmp(attr[i], "id") == 0) { portlist->cur_id = strtoull(attr[i+1], NULL, 0); break; } } if (strcmp(name, "targ_port") == 0) { if (cur_port != NULL) errx(1, "%s: improper port element nesting", __func__); cur_port = calloc(1, sizeof(*cur_port)); if (cur_port == NULL) err(1, "%s: cannot allocate %zd bytes", __func__, sizeof(*cur_port)); portlist->num_ports++; portlist->cur_port = cur_port; STAILQ_INIT(&cur_port->init_list); STAILQ_INIT(&cur_port->lun_list); STAILQ_INIT(&cur_port->attr_list); cur_port->port_id = portlist->cur_id; STAILQ_INSERT_TAIL(&portlist->port_list, cur_port, links); } } static void cctl_end_pelement(void *user_data, const char *name) { struct cctl_portlist_data *portlist; struct cctl_port *cur_port; char *str; portlist = (struct cctl_portlist_data *)user_data; cur_port = portlist->cur_port; if ((cur_port == NULL) && (strcmp(name, "ctlportlist") != 0)) errx(1, "%s: cur_port == NULL! 
(name = %s)", __func__, name); if (portlist->cur_sb[portlist->level] == NULL) errx(1, "%s: no valid sbuf at level %d (name %s)", __func__, portlist->level, name); if (sbuf_finish(portlist->cur_sb[portlist->level]) != 0) err(1, "%s: sbuf_finish", __func__); str = strdup(sbuf_data(portlist->cur_sb[portlist->level])); if (str == NULL) err(1, "%s can't allocate %zd bytes for string", __func__, sbuf_len(portlist->cur_sb[portlist->level])); if (strlen(str) == 0) { free(str); str = NULL; } sbuf_delete(portlist->cur_sb[portlist->level]); portlist->cur_sb[portlist->level] = NULL; portlist->level--; if (strcmp(name, "frontend_type") == 0) { cur_port->frontend_type = str; str = NULL; } else if (strcmp(name, "port_name") == 0) { cur_port->name = str; str = NULL; } else if (strcmp(name, "online") == 0) { cur_port->online = str; str = NULL; } else if (strcmp(name, "physical_port") == 0) { cur_port->pp = strtoull(str, NULL, 0); } else if (strcmp(name, "virtual_port") == 0) { cur_port->vp = strtoull(str, NULL, 0); } else if (strcmp(name, "target") == 0) { cur_port->target = str; str = NULL; } else if (strcmp(name, "port") == 0) { cur_port->port = str; str = NULL; } else if (strcmp(name, "lun_map") == 0) { cur_port->lun_map = str; str = NULL; } else if (strcmp(name, "targ_port") == 0) { portlist->cur_port = NULL; } else if (strcmp(name, "ctlportlist") == 0) { /* Nothing. */ } else { struct cctl_lun_nv *nv; nv = calloc(1, sizeof(*nv)); if (nv == NULL) err(1, "%s: can't allocate %zd bytes for nv pair", __func__, sizeof(*nv)); if (strcmp(name, "initiator") == 0 || strcmp(name, "lun") == 0) asprintf(&nv->name, "%ju", portlist->cur_id); else nv->name = strdup(name); if (nv->name == NULL) err(1, "%s: can't allocated %zd bytes for string", __func__, strlen(name)); nv->value = str; str = NULL; if (strcmp(name, "initiator") == 0) STAILQ_INSERT_TAIL(&cur_port->init_list, nv, links); else if (strcmp(name, "lun") == 0) STAILQ_INSERT_TAIL(&cur_port->lun_list, nv, links); else STAILQ_INSERT_TAIL(&cur_port->attr_list, nv, links); } free(str); } static void cctl_char_phandler(void *user_data, const XML_Char *str, int len) { struct cctl_portlist_data *portlist; portlist = (struct cctl_portlist_data *)user_data; sbuf_bcat(portlist->cur_sb[portlist->level], str, len); } static int cctl_portlist(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_list list; struct cctl_portlist_data portlist; struct cctl_port *port; XML_Parser parser; char *port_str; int port_len; int dump_xml = 0; int retval, c; char *frontend = NULL; uint64_t portarg = UINT64_MAX; int verbose = 0, init = 0, lun = 0, quiet = 0; retval = 0; port_len = 4096; bzero(&portlist, sizeof(portlist)); STAILQ_INIT(&portlist.port_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'f': frontend = strdup(optarg); break; case 'i': init++; break; case 'l': lun++; break; case 'p': portarg = strtoll(optarg, NULL, 0); break; case 'q': quiet++; break; case 'v': verbose++; break; case 'x': dump_xml = 1; break; default: break; } } retry: port_str = malloc(port_len); bzero(&list, sizeof(list)); list.alloc_len = port_len; list.status = CTL_LUN_LIST_NONE; list.lun_xml = port_str; if (ioctl(fd, CTL_PORT_LIST, &list) == -1) { warn("%s: error issuing CTL_PORT_LIST ioctl", __func__); retval = 1; goto bailout; } if (list.status == CTL_LUN_LIST_ERROR) { warnx("%s: error returned from CTL_PORT_LIST ioctl:\n%s", __func__, list.error_str); } else if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE) { port_len = port_len << 1; goto retry; } if (dump_xml 
!= 0) { printf("%s", port_str); goto bailout; } parser = XML_ParserCreate(NULL); if (parser == NULL) { warn("%s: Unable to create XML parser", __func__); retval = 1; goto bailout; } XML_SetUserData(parser, &portlist); XML_SetElementHandler(parser, cctl_start_pelement, cctl_end_pelement); XML_SetCharacterDataHandler(parser, cctl_char_phandler); retval = XML_Parse(parser, port_str, strlen(port_str), 1); if (retval != 1) { warnx("%s: Unable to parse XML: Error %d", __func__, XML_GetErrorCode(parser)); XML_ParserFree(parser); retval = 1; goto bailout; } retval = 0; XML_ParserFree(parser); if (quiet == 0) printf("Port Online Frontend Name pp vp\n"); STAILQ_FOREACH(port, &portlist.port_list, links) { struct cctl_lun_nv *nv; if ((frontend != NULL) && (strcmp(port->frontend_type, frontend) != 0)) continue; if ((portarg != UINT64_MAX) && (portarg != port->port_id)) continue; printf("%-4ju %-6s %-8s %-8s %-2d %-2d %s\n", (uintmax_t)port->port_id, port->online, port->frontend_type, port->name, port->pp, port->vp, port->port ? port->port : ""); if (init || verbose) { if (port->target) printf(" Target: %s\n", port->target); STAILQ_FOREACH(nv, &port->init_list, links) { printf(" Initiator %s: %s\n", nv->name, nv->value); } } if (lun || verbose) { if (port->lun_map) { STAILQ_FOREACH(nv, &port->lun_list, links) printf(" LUN %s: %s\n", nv->name, nv->value); if (STAILQ_EMPTY(&port->lun_list)) printf(" No LUNs mapped\n"); } else printf(" All LUNs mapped\n"); } if (verbose) { STAILQ_FOREACH(nv, &port->attr_list, links) { printf(" %s=%s\n", nv->name, nv->value); } } } bailout: free(port_str); return (retval); } static int cctl_lunmap(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_map lm; int retval = 0, c; retval = 0; lm.port = UINT32_MAX; lm.plun = UINT32_MAX; lm.lun = UINT32_MAX; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'p': lm.port = strtoll(optarg, NULL, 0); break; case 'l': lm.plun = strtoll(optarg, NULL, 0); break; case 'L': lm.lun = strtoll(optarg, NULL, 0); break; default: break; } } if (ioctl(fd, CTL_LUN_MAP, &lm) == -1) { warn("%s: error issuing CTL_LUN_MAP ioctl", __func__); retval = 1; } return (retval); } void usage(int error) { fprintf(error ? 
stderr : stdout, "Usage:\n" "Primary commands:\n" " ctladm tur [dev_id][general options]\n" " ctladm inquiry [dev_id][general options]\n" " ctladm devid [dev_id][general options]\n" " ctladm reqsense [dev_id][general options]\n" " ctladm reportluns [dev_id][general options]\n" " ctladm read [dev_id][general options] <-l lba> <-d len>\n" " <-f file|-> <-b blocksize> [-c cdbsize][-N]\n" " ctladm write [dev_id][general options] <-l lba> <-d len>\n" " <-f file|-> <-b blocksize> [-c cdbsize][-N]\n" " ctladm readcap [dev_id][general options] [-c cdbsize]\n" " ctladm modesense [dev_id][general options] <-m page|-l> [-P pc]\n" " [-d] [-S subpage] [-c cdbsize]\n" " ctladm prin [dev_id][general options] <-a action>\n" " ctladm prout [dev_id][general options] <-a action>\n" " <-r restype] [-k key] [-s sa_key]\n" " ctladm rtpg [dev_id][general options]\n" " ctladm start [dev_id][general options] [-i] [-o]\n" " ctladm stop [dev_id][general options] [-i] [-o]\n" " ctladm synccache [dev_id][general options] [-l lba]\n" " [-b blockcount] [-r] [-i] [-c cdbsize]\n" " ctladm create <-b backend> [-B blocksize] [-d device_id]\n" " [-l lun_id] [-o name=value] [-s size_bytes]\n" " [-S serial_num] [-t dev_type]\n" " ctladm remove <-b backend> <-l lun_id> [-o name=value]\n" " ctladm modify <-b backend> <-l lun_id> <-s size_bytes>\n" " ctladm devlist [-b backend] [-v] [-x]\n" " ctladm lunlist\n" " ctladm lunmap -p targ_port [-l pLUN] [-L cLUN]\n" " ctladm delay [dev_id] <-l datamove|done> [-T oneshot|cont]\n" " [-t secs]\n" " ctladm inject [dev_id] <-i action> <-p pattern> [-r lba,len]\n" " [-s len fmt [args]] [-c] [-d delete_id]\n" " ctladm port <-o | [-w wwnn][-W wwpn]>\n" " [-p targ_port] [-t port_type]\n" " ctladm portlist [-f frontend] [-i] [-p targ_port] [-q] [-v] [-x]\n" " ctladm islist [-v | -x]\n" " ctladm islogout <-a | -c connection-id | -i name | -p portal>\n" " ctladm isterminate <-a | -c connection-id | -i name | -p portal>\n" " ctladm dumpooa\n" " ctladm dumpstructs\n" " ctladm help\n" "General Options:\n" "-I intiator_id : defaults to 7, used to change the initiator id\n" "-C retries : specify the number of times to retry this command\n" "-D devicename : specify the device to operate on\n" " : (default is %s)\n" "read/write options:\n" "-l lba : logical block address\n" "-d len : read/write length, in blocks\n" "-f file|- : write/read data to/from file or stdout/stdin\n" "-b blocksize : block size, in bytes\n" "-c cdbsize : specify minimum cdb size: 6, 10, 12 or 16\n" "-N : do not copy data to/from userland\n" "readcapacity options:\n" "-c cdbsize : specify minimum cdb size: 10 or 16\n" "modesense options:\n" "-m page : specify the mode page to view\n" "-l : request a list of supported pages\n" "-P pc : specify the page control value: 0-3 (current,\n" " changeable, default, saved, respectively)\n" "-d : disable block descriptors for mode sense\n" "-S subpage : specify a subpage\n" "-c cdbsize : specify minimum cdb size: 6 or 10\n" "persistent reserve in options:\n" "-a action : specify the action value: 0-2 (read key, read\n" " reservation, read capabilities, respectively)\n" "persistent reserve out options:\n" "-a action : specify the action value: 0-5 (register, reserve,\n" " release, clear, preempt, register and ignore)\n" "-k key : key value\n" "-s sa_key : service action value\n" "-r restype : specify the reservation type: 0-5(wr ex, ex ac,\n" " wr ex ro, ex ac ro, wr ex ar, ex ac ar)\n" "start/stop options:\n" "-i : set the immediate bit (CTL does not support this)\n" "-o : set the on/offline 
bit\n" "synccache options:\n" "-l lba : set the starting LBA\n" "-b blockcount : set the length to sync in blocks\n" "-r : set the relative addressing bit\n" "-i : set the immediate bit\n" "-c cdbsize : specify minimum cdb size: 10 or 16\n" "create options:\n" "-b backend : backend name (\"block\", \"ramdisk\", etc.)\n" "-B blocksize : LUN blocksize in bytes (some backends)\n" "-d device_id : SCSI VPD page 0x83 ID\n" "-l lun_id : requested LUN number\n" "-o name=value : backend-specific options, multiple allowed\n" "-s size_bytes : LUN size in bytes (some backends)\n" "-S serial_num : SCSI VPD page 0x80 serial number\n" "-t dev_type : SCSI device type (0=disk, 3=processor)\n" "remove options:\n" "-b backend : backend name (\"block\", \"ramdisk\", etc.)\n" "-l lun_id : LUN number to delete\n" "-o name=value : backend-specific options, multiple allowed\n" "devlist options:\n" "-b backend : list devices from specified backend only\n" "-v : be verbose, show backend attributes\n" "-x : dump raw XML\n" "delay options:\n" "-l datamove|done : delay command at datamove or done phase\n" "-T oneshot : delay one command, then resume normal completion\n" "-T cont : delay all commands\n" "-t secs : number of seconds to delay\n" "inject options:\n" "-i error_action : action to perform\n" "-p pattern : command pattern to look for\n" "-r lba,len : LBA range for pattern\n" "-s len fmt [args] : sense data for custom sense action\n" "-c : continuous operation\n" "-d delete_id : error id to delete\n" "port options:\n" "-l : list frontend ports\n" "-o on|off : turn frontend ports on or off\n" "-w wwnn : set WWNN for one frontend\n" "-W wwpn : set WWPN for one frontend\n" "-t port_type : specify fc, scsi, ioctl, internal frontend type\n" "-p targ_port : specify target port number\n" "-q : omit header in list output\n" "-x : output port list in XML format\n" "portlist options:\n" "-f fronetnd : specify frontend type\n" "-i : report target and initiators addresses\n" "-l : report LUN mapping\n" "-p targ_port : specify target port number\n" "-q : omit header in list output\n" "-v : verbose output (report all port options)\n" "-x : output port list in XML format\n" "lunmap options:\n" "-p targ_port : specify target port number\n" "-L pLUN : specify port-visible LUN\n" "-L cLUN : specify CTL LUN\n", CTL_DEFAULT_DEV); } int main(int argc, char **argv) { int c; ctladm_cmdfunction command; ctladm_cmdargs cmdargs; ctladm_optret optreturn; char *device; const char *mainopt = "C:D:I:"; const char *subopt = NULL; char combinedopt[256]; int lun; int optstart = 2; int retval, fd; int retries; int initid; int saved_errno; retval = 0; cmdargs = CTLADM_ARG_NONE; command = CTLADM_CMD_HELP; device = NULL; fd = -1; retries = 0; lun = 0; initid = 7; if (argc < 2) { usage(1); retval = 1; goto bailout; } /* * Get the base option. */ optreturn = getoption(option_table,argv[1], &command, &cmdargs,&subopt); if (optreturn == CC_OR_AMBIGUOUS) { warnx("ambiguous option %s", argv[1]); usage(0); exit(1); } else if (optreturn == CC_OR_NOT_FOUND) { warnx("option %s not found", argv[1]); usage(0); exit(1); } if (cmdargs & CTLADM_ARG_NEED_TL) { if ((argc < 3) || (!isdigit(argv[2][0]))) { warnx("option %s requires a lun argument", argv[1]); usage(0); exit(1); } lun = strtol(argv[2], NULL, 0); cmdargs |= CTLADM_ARG_TARG_LUN; optstart++; } /* * Ahh, getopt(3) is a pain. * * This is a gross hack. There really aren't many other good * options (excuse the pun) for parsing options in a situation like * this. 
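 * In outline, the approach used below is (a sketch only, but using the
 * same identifiers that appear in this function):
 *
 *	sprintf(combinedopt, "%s%s", mainopt, subopt);
 *	optind = optstart;
 *	while ((c = getopt(argc, argv, combinedopt)) != -1)
 *		... handle only the generic options, ignore the rest ...
 *	optind = optstart;
 *	optreset = 1;
 *	... the subfunction then runs getopt() again on the same argv with
 *	    the same combinedopt string and handles its own options ...
 *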
getopt is kinda braindead, so you end up having to run * through the options twice, and give each invocation of getopt * the option string for the other invocation. * * You would think that you could just have two groups of options. * The first group would get parsed by the first invocation of * getopt, and the second group would get parsed by the second * invocation of getopt. It doesn't quite work out that way. When * the first invocation of getopt finishes, it leaves optind pointing * to the argument _after_ the first argument in the second group. * So when the second invocation of getopt comes around, it doesn't * recognize the first argument it gets and then bails out. * * A nice alternative would be to have a flag for getopt that says * "just keep parsing arguments even when you encounter an unknown * argument", but there isn't one. So there's no real clean way to * easily parse two sets of arguments without having one invocation * of getopt know about the other. * * Without this hack, the first invocation of getopt would work as * long as the generic arguments are first, but the second invocation * (in the subfunction) would fail in one of two ways. In the case * where you don't set optreset, it would fail because optind may be * pointing to the argument after the one it should be pointing at. * In the case where you do set optreset, and reset optind, it would * fail because getopt would run into the first set of options, which * it doesn't understand. * * All of this would "sort of" work if you could somehow figure out * whether optind had been incremented one option too far. The * mechanics of that, however, are more daunting than just giving * both invocations all of the expect options for either invocation. * * Needless to say, I wouldn't mind if someone invented a better * (non-GPL!) command line parsing interface than getopt. I * wouldn't mind if someone added more knobs to getopt to make it * work better. Who knows, I may talk myself into doing it someday, * if the standards weenies let me. As it is, it just leads to * hackery like this and causes people to avoid it in some cases. * * KDM, September 8th, 1998 */ if (subopt != NULL) sprintf(combinedopt, "%s%s", mainopt, subopt); else sprintf(combinedopt, "%s", mainopt); /* * Start getopt processing at argv[2/3], since we've already * accepted argv[1..2] as the command name, and as a possible * device name. */ optind = optstart; /* * Now we run through the argument list looking for generic * options, and ignoring options that possibly belong to * subfunctions. */ while ((c = getopt(argc, argv, combinedopt))!= -1){ switch (c) { case 'C': cmdargs |= CTLADM_ARG_RETRIES; retries = strtol(optarg, NULL, 0); break; case 'D': device = strdup(optarg); cmdargs |= CTLADM_ARG_DEVICE; break; case 'I': cmdargs |= CTLADM_ARG_INITIATOR; initid = strtol(optarg, NULL, 0); break; default: break; } } if ((cmdargs & CTLADM_ARG_INITIATOR) == 0) initid = 7; optind = optstart; optreset = 1; /* * Default to opening the CTL device for now. 
*/ if (((cmdargs & CTLADM_ARG_DEVICE) == 0) && (command != CTLADM_CMD_HELP)) { device = strdup(CTL_DEFAULT_DEV); cmdargs |= CTLADM_ARG_DEVICE; } if ((cmdargs & CTLADM_ARG_DEVICE) && (command != CTLADM_CMD_HELP)) { fd = open(device, O_RDWR); if (fd == -1 && errno == ENOENT) { saved_errno = errno; retval = kldload("ctl"); if (retval != -1) fd = open(device, O_RDWR); else errno = saved_errno; } if (fd == -1) { fprintf(stderr, "%s: error opening %s: %s\n", argv[0], device, strerror(errno)); retval = 1; goto bailout; } } else if ((command != CTLADM_CMD_HELP) && ((cmdargs & CTLADM_ARG_DEVICE) == 0)) { fprintf(stderr, "%s: you must specify a device with the " "--device argument for this command\n", argv[0]); command = CTLADM_CMD_HELP; retval = 1; } switch (command) { case CTLADM_CMD_TUR: retval = cctl_tur(fd, lun, initid, retries); break; case CTLADM_CMD_INQUIRY: retval = cctl_inquiry(fd, lun, initid, retries); break; case CTLADM_CMD_REQ_SENSE: retval = cctl_req_sense(fd, lun, initid, retries); break; case CTLADM_CMD_REPORT_LUNS: retval = cctl_report_luns(fd, lun, initid, retries); break; case CTLADM_CMD_CREATE: retval = cctl_create_lun(fd, argc, argv, combinedopt); break; case CTLADM_CMD_RM: retval = cctl_rm_lun(fd, argc, argv, combinedopt); break; case CTLADM_CMD_DEVLIST: retval = cctl_devlist(fd, argc, argv, combinedopt); break; case CTLADM_CMD_READ: case CTLADM_CMD_WRITE: retval = cctl_read_write(fd, lun, initid, retries, argc, argv, combinedopt, command); break; case CTLADM_CMD_PORT: retval = cctl_port(fd, argc, argv, combinedopt); break; case CTLADM_CMD_PORTLIST: retval = cctl_portlist(fd, argc, argv, combinedopt); break; case CTLADM_CMD_LUNMAP: retval = cctl_lunmap(fd, argc, argv, combinedopt); break; case CTLADM_CMD_READCAPACITY: retval = cctl_read_capacity(fd, lun, initid, retries, argc, argv, combinedopt); break; case CTLADM_CMD_MODESENSE: retval = cctl_mode_sense(fd, lun, initid, retries, argc, argv, combinedopt); break; case CTLADM_CMD_START: case CTLADM_CMD_STOP: retval = cctl_start_stop(fd, lun, initid, retries, (command == CTLADM_CMD_START) ? 
1 : 0, argc, argv, combinedopt); break; case CTLADM_CMD_SYNC_CACHE: retval = cctl_sync_cache(fd, lun, initid, retries, argc, argv, combinedopt); break; case CTLADM_CMD_LUNLIST: retval = cctl_lunlist(fd); break; case CTLADM_CMD_DELAY: retval = cctl_delay(fd, lun, argc, argv, combinedopt); break; case CTLADM_CMD_ERR_INJECT: retval = cctl_error_inject(fd, lun, argc, argv, combinedopt); break; case CTLADM_CMD_DUMPOOA: retval = cctl_dump_ooa(fd, argc, argv); break; case CTLADM_CMD_DUMPSTRUCTS: retval = cctl_dump_structs(fd, cmdargs); break; case CTLADM_CMD_PRES_IN: retval = cctl_persistent_reserve_in(fd, lun, initid, argc, argv, combinedopt, retries); break; case CTLADM_CMD_PRES_OUT: retval = cctl_persistent_reserve_out(fd, lun, initid, argc, argv, combinedopt, retries); break; case CTLADM_CMD_INQ_VPD_DEVID: retval = cctl_inquiry_vpd_devid(fd, lun, initid); break; case CTLADM_CMD_RTPG: retval = cctl_report_target_port_group(fd, lun, initid); break; case CTLADM_CMD_MODIFY: retval = cctl_modify_lun(fd, argc, argv, combinedopt); break; case CTLADM_CMD_ISLIST: retval = cctl_islist(fd, argc, argv, combinedopt); break; case CTLADM_CMD_ISLOGOUT: retval = cctl_islogout(fd, argc, argv, combinedopt); break; case CTLADM_CMD_ISTERMINATE: retval = cctl_isterminate(fd, argc, argv, combinedopt); break; case CTLADM_CMD_HELP: default: usage(retval); break; } bailout: if (fd != -1) close(fd); exit (retval); } /* * vim: ts=8 */ Index: head/usr.sbin/gssd/gssd.c =================================================================== --- head/usr.sbin/gssd/gssd.c (revision 298885) +++ head/usr.sbin/gssd/gssd.c (revision 298886) @@ -1,1292 +1,1292 @@ /*- * Copyright (c) 2008 Isilon Inc http://www.isilon.com/ * Authors: Doug Rabson * Developed with Red Inc: Alfred Perlstein * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #ifndef WITHOUT_KERBEROS #include #endif #include #include #include #include #include #include #include #include #include #include #include "gssd.h" #ifndef _PATH_GSS_MECH #define _PATH_GSS_MECH "/etc/gss/mech" #endif #ifndef _PATH_GSSDSOCK #define _PATH_GSSDSOCK "/var/run/gssd.sock" #endif #define GSSD_CREDENTIAL_CACHE_FILE "/tmp/krb5cc_gssd" struct gss_resource { LIST_ENTRY(gss_resource) gr_link; - uint64_t gr_id; /* indentifier exported to kernel */ + uint64_t gr_id; /* identifier exported to kernel */ void* gr_res; /* GSS-API resource pointer */ }; LIST_HEAD(gss_resource_list, gss_resource) gss_resources; int gss_resource_count; uint32_t gss_next_id; uint32_t gss_start_time; int debug_level; static char ccfile_dirlist[PATH_MAX + 1], ccfile_substring[NAME_MAX + 1]; static char pref_realm[1024]; static int verbose; static int use_old_des; static int hostbased_initiator_cred; #ifndef WITHOUT_KERBEROS /* 1.2.752.43.13.14 */ static gss_OID_desc gss_krb5_set_allowable_enctypes_x_desc = {6, (void *) "\x2a\x85\x70\x2b\x0d\x0e"}; static gss_OID GSS_KRB5_SET_ALLOWABLE_ENCTYPES_X = &gss_krb5_set_allowable_enctypes_x_desc; static gss_OID_desc gss_krb5_mech_oid_x_desc = {9, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" }; static gss_OID GSS_KRB5_MECH_OID_X = &gss_krb5_mech_oid_x_desc; #endif static void gssd_load_mech(void); static int find_ccache_file(const char *, uid_t, char *); static int is_a_valid_tgt_cache(const char *, uid_t, int *, time_t *); static void gssd_verbose_out(const char *, ...); #ifndef WITHOUT_KERBEROS static krb5_error_code gssd_get_cc_from_keytab(const char *); static OM_uint32 gssd_get_user_cred(OM_uint32 *, uid_t, gss_cred_id_t *); #endif void gssd_terminate(int); extern void gssd_1(struct svc_req *rqstp, SVCXPRT *transp); extern int gssd_syscall(char *path); int main(int argc, char **argv) { /* * We provide an RPC service on a local-domain socket. The * kernel's GSS-API code will pass what it can't handle * directly to us. */ struct sockaddr_un sun; int fd, oldmask, ch, debug; SVCXPRT *xprt; /* * Initialize the credential cache file name substring and the * search directory list. */ strlcpy(ccfile_substring, "krb5cc_", sizeof(ccfile_substring)); ccfile_dirlist[0] = '\0'; pref_realm[0] = '\0'; debug = 0; verbose = 0; while ((ch = getopt(argc, argv, "dhovs:c:r:")) != -1) { switch (ch) { case 'd': debug_level++; break; case 'h': #ifndef WITHOUT_KERBEROS /* * Enable use of a host based initiator credential * in the default keytab file. */ hostbased_initiator_cred = 1; #else errx(1, "This option not available when built" " without MK_KERBEROS\n"); #endif break; case 'o': #ifndef WITHOUT_KERBEROS /* * Force use of DES and the old type of GSSAPI token. */ use_old_des = 1; #else errx(1, "This option not available when built" " without MK_KERBEROS\n"); #endif break; case 'v': verbose = 1; break; case 's': #ifndef WITHOUT_KERBEROS /* * Set the directory search list. This enables use of * find_ccache_file() to search the directories for a * suitable credentials cache file. */ strlcpy(ccfile_dirlist, optarg, sizeof(ccfile_dirlist)); #else errx(1, "This option not available when built" " without MK_KERBEROS\n"); #endif break; case 'c': /* * Specify a non-default credential cache file * substring. */ strlcpy(ccfile_substring, optarg, sizeof(ccfile_substring)); break; case 'r': /* * Set the preferred realm for the credential cache tgt. 
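 * For example, "-r MYREALM.EXAMPLE.COM" (an illustrative realm name)
 * means a credential cache whose client principal is in that realm gets
 * a higher rating when is_a_valid_tgt_cache() scores candidate cache
 * files below.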
*/ strlcpy(pref_realm, optarg, sizeof(pref_realm)); break; default: fprintf(stderr, "usage: %s [-d] [-s dir-list] [-c file-substring]" " [-r preferred-realm]\n", argv[0]); exit(1); break; } } gssd_load_mech(); if (!debug_level) { if (daemon(0, 0) != 0) err(1, "Can't daemonize"); signal(SIGINT, SIG_IGN); signal(SIGQUIT, SIG_IGN); signal(SIGHUP, SIG_IGN); } signal(SIGTERM, gssd_terminate); memset(&sun, 0, sizeof sun); sun.sun_family = AF_LOCAL; unlink(_PATH_GSSDSOCK); strcpy(sun.sun_path, _PATH_GSSDSOCK); sun.sun_len = SUN_LEN(&sun); fd = socket(AF_LOCAL, SOCK_STREAM, 0); if (fd < 0) { if (debug_level == 0) { syslog(LOG_ERR, "Can't create local gssd socket"); exit(1); } err(1, "Can't create local gssd socket"); } oldmask = umask(S_IXUSR|S_IRWXG|S_IRWXO); if (bind(fd, (struct sockaddr *) &sun, sun.sun_len) < 0) { if (debug_level == 0) { syslog(LOG_ERR, "Can't bind local gssd socket"); exit(1); } err(1, "Can't bind local gssd socket"); } umask(oldmask); if (listen(fd, SOMAXCONN) < 0) { if (debug_level == 0) { syslog(LOG_ERR, "Can't listen on local gssd socket"); exit(1); } err(1, "Can't listen on local gssd socket"); } xprt = svc_vc_create(fd, RPC_MAXDATASIZE, RPC_MAXDATASIZE); if (!xprt) { if (debug_level == 0) { syslog(LOG_ERR, "Can't create transport for local gssd socket"); exit(1); } err(1, "Can't create transport for local gssd socket"); } if (!svc_reg(xprt, GSSD, GSSDVERS, gssd_1, NULL)) { if (debug_level == 0) { syslog(LOG_ERR, "Can't register service for local gssd socket"); exit(1); } err(1, "Can't register service for local gssd socket"); } LIST_INIT(&gss_resources); gss_next_id = 1; gss_start_time = time(0); gssd_syscall(_PATH_GSSDSOCK); svc_run(); gssd_syscall(""); return (0); } static void gssd_load_mech(void) { FILE *fp; char buf[256]; char *p; char *name, *oid, *lib, *kobj; fp = fopen(_PATH_GSS_MECH, "r"); if (!fp) return; while (fgets(buf, sizeof(buf), fp)) { if (*buf == '#') continue; p = buf; name = strsep(&p, "\t\n "); if (p) while (isspace(*p)) p++; oid = strsep(&p, "\t\n "); if (p) while (isspace(*p)) p++; lib = strsep(&p, "\t\n "); if (p) while (isspace(*p)) p++; kobj = strsep(&p, "\t\n "); if (!name || !oid || !lib || !kobj) continue; if (strcmp(kobj, "-")) { /* * Attempt to load the kernel module if its * not already present. */ if (modfind(kobj) < 0) { if (kldload(kobj) < 0) { fprintf(stderr, "%s: can't find or load kernel module %s for %s\n", getprogname(), kobj, name); } } } } fclose(fp); } static void * gssd_find_resource(uint64_t id) { struct gss_resource *gr; if (!id) return (NULL); LIST_FOREACH(gr, &gss_resources, gr_link) if (gr->gr_id == id) return (gr->gr_res); return (NULL); } static uint64_t gssd_make_resource(void *res) { struct gss_resource *gr; if (!res) return (0); gr = malloc(sizeof(struct gss_resource)); if (!gr) return (0); gr->gr_id = (gss_next_id++) + ((uint64_t) gss_start_time << 32); gr->gr_res = res; LIST_INSERT_HEAD(&gss_resources, gr, gr_link); gss_resource_count++; if (debug_level > 1) printf("%d resources allocated\n", gss_resource_count); return (gr->gr_id); } static void gssd_delete_resource(uint64_t id) { struct gss_resource *gr; LIST_FOREACH(gr, &gss_resources, gr_link) { if (gr->gr_id == id) { LIST_REMOVE(gr, gr_link); free(gr); gss_resource_count--; if (debug_level > 1) printf("%d resources allocated\n", gss_resource_count); return; } } } static void gssd_verbose_out(const char *fmt, ...) 
{ va_list ap; if (verbose != 0) { va_start(ap, fmt); if (debug_level == 0) vsyslog(LOG_INFO | LOG_DAEMON, fmt, ap); else vfprintf(stderr, fmt, ap); va_end(ap); } } bool_t gssd_null_1_svc(void *argp, void *result, struct svc_req *rqstp) { gssd_verbose_out("gssd_null: done\n"); return (TRUE); } bool_t gssd_init_sec_context_1_svc(init_sec_context_args *argp, init_sec_context_res *result, struct svc_req *rqstp) { gss_cred_id_t cred = GSS_C_NO_CREDENTIAL; gss_ctx_id_t ctx = GSS_C_NO_CONTEXT; gss_name_t name = GSS_C_NO_NAME; char ccname[PATH_MAX + 5 + 1], *cp, *cp2; int gotone, gotcred; OM_uint32 min_stat; #ifndef WITHOUT_KERBEROS gss_buffer_desc principal_desc; char enctype[sizeof(uint32_t)]; int key_enctype; OM_uint32 maj_stat; #endif memset(result, 0, sizeof(*result)); if (hostbased_initiator_cred != 0 && argp->cred != 0 && argp->uid == 0) { /* * These credentials are for a host based initiator name * in a keytab file, which should now have credentials * in /tmp/krb5cc_gssd, because gss_acquire_cred() did * the equivalent of "kinit -k". */ snprintf(ccname, sizeof(ccname), "FILE:%s", GSSD_CREDENTIAL_CACHE_FILE); } else if (ccfile_dirlist[0] != '\0' && argp->cred == 0) { /* * For the "-s" case and no credentials provided as an * argument, search the directory list for an appropriate * credential cache file. If the search fails, return failure. */ gotone = 0; cp = ccfile_dirlist; do { cp2 = strchr(cp, ':'); if (cp2 != NULL) *cp2 = '\0'; gotone = find_ccache_file(cp, argp->uid, ccname); if (gotone != 0) break; if (cp2 != NULL) *cp2++ = ':'; cp = cp2; } while (cp != NULL && *cp != '\0'); if (gotone == 0) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_init_sec_context: -s no" " credential cache file found for uid=%d\n", (int)argp->uid); return (TRUE); } } else { /* * If there wasn't a "-s" option or the credentials have * been provided as an argument, do it the old way. * When credentials are provided, the uid should be root. */ if (argp->cred != 0 && argp->uid != 0) { if (debug_level == 0) syslog(LOG_ERR, "gss_init_sec_context:" " cred for non-root"); else fprintf(stderr, "gss_init_sec_context:" " cred for non-root\n"); } snprintf(ccname, sizeof(ccname), "FILE:/tmp/krb5cc_%d", (int) argp->uid); } setenv("KRB5CCNAME", ccname, TRUE); if (argp->cred) { cred = gssd_find_resource(argp->cred); if (!cred) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_init_sec_context: cred" " resource not found\n"); return (TRUE); } } if (argp->ctx) { ctx = gssd_find_resource(argp->ctx); if (!ctx) { result->major_status = GSS_S_CONTEXT_EXPIRED; gssd_verbose_out("gssd_init_sec_context: context" " resource not found\n"); return (TRUE); } } if (argp->name) { name = gssd_find_resource(argp->name); if (!name) { result->major_status = GSS_S_BAD_NAME; gssd_verbose_out("gssd_init_sec_context: name" " resource not found\n"); return (TRUE); } } gotcred = 0; #ifndef WITHOUT_KERBEROS if (use_old_des != 0) { if (cred == GSS_C_NO_CREDENTIAL) { /* Acquire a credential for the uid. 
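 * (Old-DES compatibility path: if the kernel did not hand us a
 * credential, one is acquired for the uid here and then restricted
 * below to ETYPE_DES_CBC_CRC via gss_set_cred_option() with
 * GSS_KRB5_SET_ALLOWABLE_ENCTYPES_X; the enctype value is packed
 * big-endian into a 4-byte buffer first.)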
*/ maj_stat = gssd_get_user_cred(&min_stat, argp->uid, &cred); if (maj_stat == GSS_S_COMPLETE) gotcred = 1; else gssd_verbose_out("gssd_init_sec_context: " "get user cred failed uid=%d major=0x%x " "minor=%d\n", (int)argp->uid, (unsigned int)maj_stat, (int)min_stat); } if (cred != GSS_C_NO_CREDENTIAL) { key_enctype = ETYPE_DES_CBC_CRC; enctype[0] = (key_enctype >> 24) & 0xff; enctype[1] = (key_enctype >> 16) & 0xff; enctype[2] = (key_enctype >> 8) & 0xff; enctype[3] = key_enctype & 0xff; principal_desc.length = sizeof(enctype); principal_desc.value = enctype; result->major_status = gss_set_cred_option( &result->minor_status, &cred, GSS_KRB5_SET_ALLOWABLE_ENCTYPES_X, &principal_desc); gssd_verbose_out("gssd_init_sec_context: set allowable " "enctype major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status != GSS_S_COMPLETE) { if (gotcred != 0) gss_release_cred(&min_stat, &cred); return (TRUE); } } } #endif result->major_status = gss_init_sec_context(&result->minor_status, cred, &ctx, name, argp->mech_type, argp->req_flags, argp->time_req, argp->input_chan_bindings, &argp->input_token, &result->actual_mech_type, &result->output_token, &result->ret_flags, &result->time_rec); gssd_verbose_out("gssd_init_sec_context: done major=0x%x minor=%d" " uid=%d\n", (unsigned int)result->major_status, (int)result->minor_status, (int)argp->uid); if (gotcred != 0) gss_release_cred(&min_stat, &cred); if (result->major_status == GSS_S_COMPLETE || result->major_status == GSS_S_CONTINUE_NEEDED) { if (argp->ctx) result->ctx = argp->ctx; else result->ctx = gssd_make_resource(ctx); } return (TRUE); } bool_t gssd_accept_sec_context_1_svc(accept_sec_context_args *argp, accept_sec_context_res *result, struct svc_req *rqstp) { gss_ctx_id_t ctx = GSS_C_NO_CONTEXT; gss_cred_id_t cred = GSS_C_NO_CREDENTIAL; gss_name_t src_name; gss_cred_id_t delegated_cred_handle; memset(result, 0, sizeof(*result)); if (argp->ctx) { ctx = gssd_find_resource(argp->ctx); if (!ctx) { result->major_status = GSS_S_CONTEXT_EXPIRED; gssd_verbose_out("gssd_accept_sec_context: ctx" " resource not found\n"); return (TRUE); } } if (argp->cred) { cred = gssd_find_resource(argp->cred); if (!cred) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_accept_sec_context: cred" " resource not found\n"); return (TRUE); } } memset(result, 0, sizeof(*result)); result->major_status = gss_accept_sec_context(&result->minor_status, &ctx, cred, &argp->input_token, argp->input_chan_bindings, &src_name, &result->mech_type, &result->output_token, &result->ret_flags, &result->time_rec, &delegated_cred_handle); gssd_verbose_out("gssd_accept_sec_context: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status == GSS_S_COMPLETE || result->major_status == GSS_S_CONTINUE_NEEDED) { if (argp->ctx) result->ctx = argp->ctx; else result->ctx = gssd_make_resource(ctx); result->src_name = gssd_make_resource(src_name); result->delegated_cred_handle = gssd_make_resource(delegated_cred_handle); } return (TRUE); } bool_t gssd_delete_sec_context_1_svc(delete_sec_context_args *argp, delete_sec_context_res *result, struct svc_req *rqstp) { gss_ctx_id_t ctx = gssd_find_resource(argp->ctx); if (ctx) { result->major_status = gss_delete_sec_context( &result->minor_status, &ctx, &result->output_token); gssd_delete_resource(argp->ctx); } else { result->major_status = GSS_S_COMPLETE; result->minor_status = 0; } gssd_verbose_out("gssd_delete_sec_context: 
done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_export_sec_context_1_svc(export_sec_context_args *argp, export_sec_context_res *result, struct svc_req *rqstp) { gss_ctx_id_t ctx = gssd_find_resource(argp->ctx); if (ctx) { result->major_status = gss_export_sec_context( &result->minor_status, &ctx, &result->interprocess_token); result->format = KGSS_HEIMDAL_1_1; gssd_delete_resource(argp->ctx); } else { result->major_status = GSS_S_FAILURE; result->minor_status = 0; result->interprocess_token.length = 0; result->interprocess_token.value = NULL; } gssd_verbose_out("gssd_export_sec_context: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_import_name_1_svc(import_name_args *argp, import_name_res *result, struct svc_req *rqstp) { gss_name_t name; result->major_status = gss_import_name(&result->minor_status, &argp->input_name_buffer, argp->input_name_type, &name); gssd_verbose_out("gssd_import_name: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status == GSS_S_COMPLETE) result->output_name = gssd_make_resource(name); else result->output_name = 0; return (TRUE); } bool_t gssd_canonicalize_name_1_svc(canonicalize_name_args *argp, canonicalize_name_res *result, struct svc_req *rqstp) { gss_name_t name = gssd_find_resource(argp->input_name); gss_name_t output_name; memset(result, 0, sizeof(*result)); if (!name) { result->major_status = GSS_S_BAD_NAME; return (TRUE); } result->major_status = gss_canonicalize_name(&result->minor_status, name, argp->mech_type, &output_name); gssd_verbose_out("gssd_canonicalize_name: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status == GSS_S_COMPLETE) result->output_name = gssd_make_resource(output_name); else result->output_name = 0; return (TRUE); } bool_t gssd_export_name_1_svc(export_name_args *argp, export_name_res *result, struct svc_req *rqstp) { gss_name_t name = gssd_find_resource(argp->input_name); memset(result, 0, sizeof(*result)); if (!name) { result->major_status = GSS_S_BAD_NAME; gssd_verbose_out("gssd_export_name: name resource not found\n"); return (TRUE); } result->major_status = gss_export_name(&result->minor_status, name, &result->exported_name); gssd_verbose_out("gssd_export_name: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_release_name_1_svc(release_name_args *argp, release_name_res *result, struct svc_req *rqstp) { gss_name_t name = gssd_find_resource(argp->input_name); if (name) { result->major_status = gss_release_name(&result->minor_status, &name); gssd_delete_resource(argp->input_name); } else { result->major_status = GSS_S_COMPLETE; result->minor_status = 0; } gssd_verbose_out("gssd_release_name: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_pname_to_uid_1_svc(pname_to_uid_args *argp, pname_to_uid_res *result, struct svc_req *rqstp) { gss_name_t name = gssd_find_resource(argp->pname); uid_t uid; char buf[1024], *bufp; struct passwd pwd, *pw; size_t buflen; int error; static size_t buflen_hint = 1024; memset(result, 0, sizeof(*result)); if (name) { result->major_status = gss_pname_to_uid(&result->minor_status, name, argp->mech, &uid); if (result->major_status == GSS_S_COMPLETE) { result->uid = uid; buflen = 
buflen_hint; for (;;) { pw = NULL; bufp = buf; if (buflen > sizeof(buf)) bufp = malloc(buflen); if (bufp == NULL) break; error = getpwuid_r(uid, &pwd, bufp, buflen, &pw); if (error != ERANGE) break; if (buflen > sizeof(buf)) free(bufp); buflen += 1024; if (buflen > buflen_hint) buflen_hint = buflen; } if (pw) { int len = NGROUPS; int groups[NGROUPS]; result->gid = pw->pw_gid; getgrouplist(pw->pw_name, pw->pw_gid, groups, &len); result->gidlist.gidlist_len = len; result->gidlist.gidlist_val = mem_alloc(len * sizeof(int)); memcpy(result->gidlist.gidlist_val, groups, len * sizeof(int)); gssd_verbose_out("gssd_pname_to_uid: mapped" " to uid=%d, gid=%d\n", (int)result->uid, (int)result->gid); } else { result->gid = 65534; result->gidlist.gidlist_len = 0; result->gidlist.gidlist_val = NULL; gssd_verbose_out("gssd_pname_to_uid: mapped" " to uid=%d, but no groups\n", (int)result->uid); } if (bufp != NULL && buflen > sizeof(buf)) free(bufp); } else gssd_verbose_out("gssd_pname_to_uid: failed major=0x%x" " minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); } else { result->major_status = GSS_S_BAD_NAME; result->minor_status = 0; gssd_verbose_out("gssd_pname_to_uid: no name\n"); } return (TRUE); } bool_t gssd_acquire_cred_1_svc(acquire_cred_args *argp, acquire_cred_res *result, struct svc_req *rqstp) { gss_name_t desired_name = GSS_C_NO_NAME; gss_cred_id_t cred; char ccname[PATH_MAX + 5 + 1], *cp, *cp2; int gotone; #ifndef WITHOUT_KERBEROS gss_buffer_desc namebuf; uint32_t minstat; krb5_error_code kret; #endif memset(result, 0, sizeof(*result)); if (argp->desired_name) { desired_name = gssd_find_resource(argp->desired_name); if (!desired_name) { result->major_status = GSS_S_BAD_NAME; gssd_verbose_out("gssd_acquire_cred: no desired name" " found\n"); return (TRUE); } } #ifndef WITHOUT_KERBEROS if (hostbased_initiator_cred != 0 && argp->desired_name != 0 && argp->uid == 0 && argp->cred_usage == GSS_C_INITIATE) { /* This is a host based initiator name in the keytab file. */ snprintf(ccname, sizeof(ccname), "FILE:%s", GSSD_CREDENTIAL_CACHE_FILE); setenv("KRB5CCNAME", ccname, TRUE); result->major_status = gss_display_name(&result->minor_status, desired_name, &namebuf, NULL); gssd_verbose_out("gssd_acquire_cred: desired name for host " "based initiator cred major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status != GSS_S_COMPLETE) return (TRUE); if (namebuf.length > PATH_MAX + 5) { result->minor_status = 0; result->major_status = GSS_S_FAILURE; return (TRUE); } memcpy(ccname, namebuf.value, namebuf.length); ccname[namebuf.length] = '\0'; if ((cp = strchr(ccname, '@')) != NULL) *cp = '/'; kret = gssd_get_cc_from_keytab(ccname); gssd_verbose_out("gssd_acquire_cred: using keytab entry for " "%s, kerberos ret=%d\n", ccname, (int)kret); gss_release_buffer(&minstat, &namebuf); if (kret != 0) { result->minor_status = kret; result->major_status = GSS_S_FAILURE; return (TRUE); } } else #endif /* !WITHOUT_KERBEROS */ if (ccfile_dirlist[0] != '\0' && argp->desired_name == 0) { /* * For the "-s" case and no name provided as an * argument, search the directory list for an appropriate * credential cache file. If the search fails, return failure. 
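 * The list is the usual colon-separated form (e.g. "-s /tmp:/var/krb5",
 * an illustrative value): each component is temporarily nul-terminated,
 * handed to find_ccache_file(), and the ':' is put back before moving
 * on to the next directory.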
*/ gotone = 0; cp = ccfile_dirlist; do { cp2 = strchr(cp, ':'); if (cp2 != NULL) *cp2 = '\0'; gotone = find_ccache_file(cp, argp->uid, ccname); if (gotone != 0) break; if (cp2 != NULL) *cp2++ = ':'; cp = cp2; } while (cp != NULL && *cp != '\0'); if (gotone == 0) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_acquire_cred: no cred cache" " file found\n"); return (TRUE); } setenv("KRB5CCNAME", ccname, TRUE); } else { /* * If there wasn't a "-s" option or the name has * been provided as an argument, do it the old way. * When a name is provided, it will normally exist in the * default keytab file and the uid will be root. */ if (argp->desired_name != 0 && argp->uid != 0) { if (debug_level == 0) syslog(LOG_ERR, "gss_acquire_cred:" " principal_name for non-root"); else fprintf(stderr, "gss_acquire_cred:" " principal_name for non-root\n"); } snprintf(ccname, sizeof(ccname), "FILE:/tmp/krb5cc_%d", (int) argp->uid); setenv("KRB5CCNAME", ccname, TRUE); } result->major_status = gss_acquire_cred(&result->minor_status, desired_name, argp->time_req, argp->desired_mechs, argp->cred_usage, &cred, &result->actual_mechs, &result->time_rec); gssd_verbose_out("gssd_acquire_cred: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); if (result->major_status == GSS_S_COMPLETE) result->output_cred = gssd_make_resource(cred); else result->output_cred = 0; return (TRUE); } bool_t gssd_set_cred_option_1_svc(set_cred_option_args *argp, set_cred_option_res *result, struct svc_req *rqstp) { gss_cred_id_t cred = gssd_find_resource(argp->cred); memset(result, 0, sizeof(*result)); if (!cred) { result->major_status = GSS_S_CREDENTIALS_EXPIRED; gssd_verbose_out("gssd_set_cred: no credentials\n"); return (TRUE); } result->major_status = gss_set_cred_option(&result->minor_status, &cred, argp->option_name, &argp->option_value); gssd_verbose_out("gssd_set_cred: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_release_cred_1_svc(release_cred_args *argp, release_cred_res *result, struct svc_req *rqstp) { gss_cred_id_t cred = gssd_find_resource(argp->cred); if (cred) { result->major_status = gss_release_cred(&result->minor_status, &cred); gssd_delete_resource(argp->cred); } else { result->major_status = GSS_S_COMPLETE; result->minor_status = 0; } gssd_verbose_out("gssd_release_cred: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } bool_t gssd_display_status_1_svc(display_status_args *argp, display_status_res *result, struct svc_req *rqstp) { result->message_context = argp->message_context; result->major_status = gss_display_status(&result->minor_status, argp->status_value, argp->status_type, argp->mech_type, &result->message_context, &result->status_string); gssd_verbose_out("gssd_display_status: done major=0x%x minor=%d\n", (unsigned int)result->major_status, (int)result->minor_status); return (TRUE); } int gssd_1_freeresult(SVCXPRT *transp, xdrproc_t xdr_result, caddr_t result) { /* * We don't use XDR to free the results - anything which was * allocated came from GSS-API. We use xdr_result to figure * out what to do. 
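 * For example, an init_sec_context_res owns an output_token that came
 * from gss_init_sec_context(), so it is freed with gss_release_buffer();
 * an acquire_cred_res owns an OID set and uses gss_release_oid_set()
 * instead, and the pname_to_uid gid list is simply freed with free().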
*/ OM_uint32 junk; if (xdr_result == (xdrproc_t) xdr_init_sec_context_res) { init_sec_context_res *p = (init_sec_context_res *) result; gss_release_buffer(&junk, &p->output_token); } else if (xdr_result == (xdrproc_t) xdr_accept_sec_context_res) { accept_sec_context_res *p = (accept_sec_context_res *) result; gss_release_buffer(&junk, &p->output_token); } else if (xdr_result == (xdrproc_t) xdr_delete_sec_context_res) { delete_sec_context_res *p = (delete_sec_context_res *) result; gss_release_buffer(&junk, &p->output_token); } else if (xdr_result == (xdrproc_t) xdr_export_sec_context_res) { export_sec_context_res *p = (export_sec_context_res *) result; if (p->interprocess_token.length) memset(p->interprocess_token.value, 0, p->interprocess_token.length); gss_release_buffer(&junk, &p->interprocess_token); } else if (xdr_result == (xdrproc_t) xdr_export_name_res) { export_name_res *p = (export_name_res *) result; gss_release_buffer(&junk, &p->exported_name); } else if (xdr_result == (xdrproc_t) xdr_acquire_cred_res) { acquire_cred_res *p = (acquire_cred_res *) result; gss_release_oid_set(&junk, &p->actual_mechs); } else if (xdr_result == (xdrproc_t) xdr_pname_to_uid_res) { pname_to_uid_res *p = (pname_to_uid_res *) result; if (p->gidlist.gidlist_val) free(p->gidlist.gidlist_val); } else if (xdr_result == (xdrproc_t) xdr_display_status_res) { display_status_res *p = (display_status_res *) result; gss_release_buffer(&junk, &p->status_string); } return (TRUE); } /* * Search a directory for the most likely candidate to be used as the * credential cache for a uid. If successful, return 1 and fill the * file's path id into "rpath". Otherwise, return 0. */ static int find_ccache_file(const char *dirpath, uid_t uid, char *rpath) { DIR *dirp; struct dirent *dp; struct stat sb; time_t exptime, oexptime; int gotone, len, rating, orating; char namepath[PATH_MAX + 5 + 1]; char retpath[PATH_MAX + 5 + 1]; dirp = opendir(dirpath); if (dirp == NULL) return (0); gotone = 0; orating = 0; oexptime = 0; while ((dp = readdir(dirp)) != NULL) { len = snprintf(namepath, sizeof(namepath), "%s/%s", dirpath, dp->d_name); if (len < sizeof(namepath) && (hostbased_initiator_cred == 0 || strcmp(namepath, GSSD_CREDENTIAL_CACHE_FILE) != 0) && strstr(dp->d_name, ccfile_substring) != NULL && lstat(namepath, &sb) >= 0 && sb.st_uid == uid && S_ISREG(sb.st_mode)) { len = snprintf(namepath, sizeof(namepath), "FILE:%s/%s", dirpath, dp->d_name); if (len < sizeof(namepath) && is_a_valid_tgt_cache(namepath, uid, &rating, &exptime) != 0) { if (gotone == 0 || rating > orating || (rating == orating && exptime > oexptime)) { orating = rating; oexptime = exptime; strcpy(retpath, namepath); gotone = 1; } } } } closedir(dirp); if (gotone != 0) { strcpy(rpath, retpath); return (1); } return (0); } /* * Try to determine if the file is a valid tgt cache file. * Check that the file has a valid tgt for a principal. * If it does, return 1, otherwise return 0. * It also returns a "rating" and the expiry time for the TGT, when found. * This "rating" is higher based on heuristics that make it more * likely to be the correct credential cache file to use. It can * be used by the caller, along with expiry time, to select from * multiple credential cache files. 
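 * In find_ccache_file() above, the selection reduces to this sketch:
 *
 *	if (gotone == 0 || rating > orating ||
 *	    (rating == orating && exptime > oexptime))
 *		remember this cache file as the best candidate so far;
 *
 * where orating and oexptime track the best rating and expiry seen.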
*/ static int is_a_valid_tgt_cache(const char *filepath, uid_t uid, int *retrating, time_t *retexptime) { #ifndef WITHOUT_KERBEROS krb5_context context; krb5_principal princ; krb5_ccache ccache; krb5_error_code retval; krb5_cc_cursor curse; krb5_creds krbcred; int gotone, orating, rating, ret; struct passwd *pw; char *cp, *cp2, *pname; time_t exptime; /* Find a likely name for the uid principal. */ pw = getpwuid(uid); /* * Do a bunch of krb5 library stuff to try and determine if * this file is a credentials cache with an appropriate TGT * in it. */ retval = krb5_init_context(&context); if (retval != 0) return (0); retval = krb5_cc_resolve(context, filepath, &ccache); if (retval != 0) { krb5_free_context(context); return (0); } ret = 0; orating = 0; exptime = 0; retval = krb5_cc_start_seq_get(context, ccache, &curse); if (retval == 0) { while ((retval = krb5_cc_next_cred(context, ccache, &curse, &krbcred)) == 0) { gotone = 0; rating = 0; retval = krb5_unparse_name(context, krbcred.server, &pname); if (retval == 0) { cp = strchr(pname, '/'); if (cp != NULL) { *cp++ = '\0'; if (strcmp(pname, "krbtgt") == 0 && krbcred.times.endtime > time(NULL) ) { gotone = 1; /* * Test to see if this is a * tgt for cross-realm auth. * Rate it higher, if it is not. */ cp2 = strchr(cp, '@'); if (cp2 != NULL) { *cp2++ = '\0'; if (strcmp(cp, cp2) == 0) rating++; } } } free(pname); } if (gotone != 0) { retval = krb5_unparse_name(context, krbcred.client, &pname); if (retval == 0) { cp = strchr(pname, '@'); if (cp != NULL) { *cp++ = '\0'; if (pw != NULL && strcmp(pname, pw->pw_name) == 0) rating++; if (strchr(pname, '/') == NULL) rating++; if (pref_realm[0] != '\0' && strcmp(cp, pref_realm) == 0) rating++; } } free(pname); if (rating > orating) { orating = rating; exptime = krbcred.times.endtime; } else if (rating == orating && krbcred.times.endtime > exptime) exptime = krbcred.times.endtime; ret = 1; } krb5_free_cred_contents(context, &krbcred); } krb5_cc_end_seq_get(context, ccache, &curse); } krb5_cc_close(context, ccache); krb5_free_context(context); if (ret != 0) { *retrating = orating; *retexptime = exptime; } return (ret); #else /* WITHOUT_KERBEROS */ return (0); #endif /* !WITHOUT_KERBEROS */ } #ifndef WITHOUT_KERBEROS /* * This function attempts to do essentially a "kinit -k" for the principal * name provided as the argument, so that there will be a TGT in the * credential cache. 
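 * The call sequence below is roughly krb5_parse_name() ->
 * krb5_get_init_creds_opt_alloc() -> krb5_cc_default() ->
 * krb5_cc_initialize() -> krb5_kt_default() ->
 * krb5_get_init_creds_keytab() -> krb5_cc_store_cred(); the individual
 * *_ret status variables remember which steps succeeded so that only
 * those resources are released on the way out.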
*/ static krb5_error_code gssd_get_cc_from_keytab(const char *name) { krb5_error_code ret, opt_ret, princ_ret, cc_ret, kt_ret, cred_ret; krb5_context context; krb5_principal principal; krb5_keytab kt; krb5_creds cred; krb5_get_init_creds_opt *opt; krb5_deltat start_time = 0; krb5_ccache ccache; ret = krb5_init_context(&context); if (ret != 0) return (ret); opt_ret = cc_ret = kt_ret = cred_ret = 1; /* anything non-zero */ princ_ret = ret = krb5_parse_name(context, name, &principal); if (ret == 0) opt_ret = ret = krb5_get_init_creds_opt_alloc(context, &opt); if (ret == 0) cc_ret = ret = krb5_cc_default(context, &ccache); if (ret == 0) ret = krb5_cc_initialize(context, ccache, principal); if (ret == 0) { krb5_get_init_creds_opt_set_default_flags(context, "gssd", krb5_principal_get_realm(context, principal), opt); kt_ret = ret = krb5_kt_default(context, &kt); } if (ret == 0) cred_ret = ret = krb5_get_init_creds_keytab(context, &cred, principal, kt, start_time, NULL, opt); if (ret == 0) ret = krb5_cc_store_cred(context, ccache, &cred); if (kt_ret == 0) krb5_kt_close(context, kt); if (cc_ret == 0) krb5_cc_close(context, ccache); if (opt_ret == 0) krb5_get_init_creds_opt_free(context, opt); if (princ_ret == 0) krb5_free_principal(context, principal); if (cred_ret == 0) krb5_free_cred_contents(context, &cred); krb5_free_context(context); return (ret); } /* * Acquire a gss credential for a uid. */ static OM_uint32 gssd_get_user_cred(OM_uint32 *min_statp, uid_t uid, gss_cred_id_t *credp) { gss_buffer_desc principal_desc; gss_name_t name; OM_uint32 maj_stat, min_stat; gss_OID_set mechlist; struct passwd *pw; pw = getpwuid(uid); if (pw == NULL) { *min_statp = 0; return (GSS_S_FAILURE); } /* * The mechanism must be set to KerberosV for acquisition * of credentials to work reliably. */ maj_stat = gss_create_empty_oid_set(min_statp, &mechlist); if (maj_stat != GSS_S_COMPLETE) return (maj_stat); maj_stat = gss_add_oid_set_member(min_statp, GSS_KRB5_MECH_OID_X, &mechlist); if (maj_stat != GSS_S_COMPLETE) { gss_release_oid_set(&min_stat, &mechlist); return (maj_stat); } principal_desc.value = (void *)pw->pw_name; principal_desc.length = strlen(pw->pw_name); maj_stat = gss_import_name(min_statp, &principal_desc, GSS_C_NT_USER_NAME, &name); if (maj_stat != GSS_S_COMPLETE) { gss_release_oid_set(&min_stat, &mechlist); return (maj_stat); } /* Acquire the credentials. */ maj_stat = gss_acquire_cred(min_statp, name, 0, mechlist, GSS_C_INITIATE, credp, NULL, NULL); gss_release_name(&min_stat, &name); gss_release_oid_set(&min_stat, &mechlist); return (maj_stat); } #endif /* !WITHOUT_KERBEROS */ void gssd_terminate(int sig __unused) { #ifndef WITHOUT_KERBEROS if (hostbased_initiator_cred != 0) unlink(GSSD_CREDENTIAL_CACHE_FILE); #endif gssd_syscall(""); exit(0); } Index: head/usr.sbin/jail/jailparse.y =================================================================== --- head/usr.sbin/jail/jailparse.y (revision 298885) +++ head/usr.sbin/jail/jailparse.y (revision 298886) @@ -1,216 +1,216 @@ %{ /*- * Copyright (c) 2011 James Gritton * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include "jailp.h" #ifdef DEBUG #define YYDEBUG 1 #endif %} %union { struct cfjail *j; struct cfparams *pp; struct cfparam *p; struct cfstrings *ss; struct cfstring *s; char *cs; } %token PLEQ %token STR STR1 VAR VAR1 %type jail %type param_l %type

param name %type value %type string %% /* * A config file is a series of jails (containing parameters) and jail-less - * parameters which realy belong to a global pseudo-jail. + * parameters which really belong to a global pseudo-jail. */ conf : ; | conf jail ; | conf param ';' { struct cfjail *j; j = TAILQ_LAST(&cfjails, cfjails); if (!j || strcmp(j->name, "*")) { j = add_jail(); j->name = estrdup("*"); } TAILQ_INSERT_TAIL(&j->params, $2, tq); } | conf ';' jail : STR '{' param_l '}' { $$ = add_jail(); $$->name = $1; TAILQ_CONCAT(&$$->params, $3, tq); free($3); } ; param_l : { $$ = emalloc(sizeof(struct cfparams)); TAILQ_INIT($$); } | param_l param ';' { $$ = $1; TAILQ_INSERT_TAIL($$, $2, tq); } | param_l ';' ; /* * Parameters have a name and an optional list of value strings, * which may have "+=" or "=" preceding them. */ param : name { $$ = $1; } | name '=' value { $$ = $1; TAILQ_CONCAT(&$$->val, $3, tq); free($3); } | name PLEQ value { $$ = $1; TAILQ_CONCAT(&$$->val, $3, tq); $$->flags |= PF_APPEND; free($3); } | name value { $$ = $1; TAILQ_CONCAT(&$$->val, $2, tq); free($2); } | error { } ; /* * A parameter has a fixed name. A variable definition looks just like a * parameter except that the name is a variable. */ name : STR { $$ = emalloc(sizeof(struct cfparam)); $$->name = $1; TAILQ_INIT(&$$->val); $$->flags = 0; } | VAR { $$ = emalloc(sizeof(struct cfparam)); $$->name = $1; TAILQ_INIT(&$$->val); $$->flags = PF_VAR; } ; value : string { $$ = emalloc(sizeof(struct cfstrings)); TAILQ_INIT($$); TAILQ_INSERT_TAIL($$, $1, tq); } | value ',' string { $$ = $1; TAILQ_INSERT_TAIL($$, $3, tq); } ; /* * Strings may be passed in pieces, because of quoting and/or variable * interpolation. Reassemble them into a single string. */ string : STR { $$ = emalloc(sizeof(struct cfstring)); $$->s = $1; $$->len = strlen($1); STAILQ_INIT(&$$->vars); } | VAR { struct cfvar *v; $$ = emalloc(sizeof(struct cfstring)); $$->s = estrdup(""); $$->len = 0; STAILQ_INIT(&$$->vars); v = emalloc(sizeof(struct cfvar)); v->name = $1; v->pos = 0; STAILQ_INSERT_TAIL(&$$->vars, v, tq); } | string STR1 { size_t len1; $$ = $1; len1 = strlen($2); $$->s = erealloc($$->s, $$->len + len1 + 1); strcpy($$->s + $$->len, $2); free($2); $$->len += len1; } | string VAR1 { struct cfvar *v; $$ = $1; v = emalloc(sizeof(struct cfvar)); v->name = $2; v->pos = $$->len; STAILQ_INSERT_TAIL(&$$->vars, v, tq); } ; %% Index: head/usr.sbin/lpr/common_source/common.c =================================================================== --- head/usr.sbin/lpr/common_source/common.c (revision 298885) +++ head/usr.sbin/lpr/common_source/common.c (revision 298886) @@ -1,778 +1,778 @@ /* * Copyright (c) 1983, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if 0 #ifndef lint static char sccsid[] = "@(#)common.c 8.5 (Berkeley) 4/28/95"; #endif /* not lint */ #endif #include "lp.cdefs.h" /* A cross-platform version of */ __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "lp.h" #include "lp.local.h" #include "pathnames.h" /* * Routines and data common to all the line printer functions. */ char line[BUFSIZ]; const char *progname; /* program name */ static int compar(const void *_p1, const void *_p2); /* * isdigit() takes a parameter of 'int', but expect values in the range * of unsigned char. Define a wrapper which takes a value of type 'char', * whether signed or unsigned, and ensure it ends up in the right range. */ #define isdigitch(Anychar) isdigit((u_char)(Anychar)) /* * Getline reads a line from the control file cfp, removes tabs, converts * new-line to null and leaves it in line. * Returns 0 at EOF or the number of characters read. */ int getline(FILE *cfp) { register int linel = 0; register char *lp = line; register int c; while ((c = getc(cfp)) != '\n' && (size_t)(linel+1) < sizeof(line)) { if (c == EOF) return(0); if (c == '\t') { do { *lp++ = ' '; linel++; } while ((linel & 07) != 0 && (size_t)(linel+1) < sizeof(line)); continue; } *lp++ = c; linel++; } *lp++ = '\0'; return(linel); } /* * Scan the current directory and make a list of daemon files sorted by * creation time. * Return the number of entries and a pointer to the list. */ int getq(const struct printer *pp, struct jobqueue *(*namelist[])) { register struct dirent *d; register struct jobqueue *q, **queue; size_t arraysz, entrysz, nitems; struct stat stbuf; DIR *dirp; int statres; PRIV_START if ((dirp = opendir(pp->spool_dir)) == NULL) { PRIV_END return (-1); } if (fstat(dirfd(dirp), &stbuf) < 0) goto errdone; PRIV_END /* * Estimate the array size by taking the size of the directory file * and dividing it by a multiple of the minimum size entry. 
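 * (For example, a 4096-byte spool directory gives 4096 / 24 = 170 as
 * the initial estimate; anything under 16 is rounded up to 16, and the
 * array is doubled as needed in the read loop below.)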
*/ arraysz = (stbuf.st_size / 24); if (arraysz < 16) arraysz = 16; queue = (struct jobqueue **)malloc(arraysz * sizeof(struct jobqueue *)); if (queue == NULL) goto errdone; nitems = 0; while ((d = readdir(dirp)) != NULL) { if (d->d_name[0] != 'c' || d->d_name[1] != 'f') continue; /* daemon control files only */ PRIV_START statres = stat(d->d_name, &stbuf); PRIV_END if (statres < 0) continue; /* Doesn't exist */ entrysz = sizeof(struct jobqueue) - sizeof(q->job_cfname) + strlen(d->d_name) + 1; q = (struct jobqueue *)malloc(entrysz); if (q == NULL) goto errdone; q->job_matched = 0; q->job_processed = 0; q->job_time = stbuf.st_mtime; strcpy(q->job_cfname, d->d_name); /* * Check to make sure the array has space left and * realloc the maximum size. */ if (++nitems > arraysz) { arraysz *= 2; queue = (struct jobqueue **)realloc((char *)queue, arraysz * sizeof(struct jobqueue *)); if (queue == NULL) goto errdone; } queue[nitems-1] = q; } closedir(dirp); if (nitems) qsort(queue, nitems, sizeof(struct jobqueue *), compar); *namelist = queue; return(nitems); errdone: closedir(dirp); PRIV_END return (-1); } /* * Compare modification times. */ static int compar(const void *p1, const void *p2) { const struct jobqueue *qe1, *qe2; qe1 = *(const struct jobqueue * const *)p1; qe2 = *(const struct jobqueue * const *)p2; if (qe1->job_time < qe2->job_time) return (-1); if (qe1->job_time > qe2->job_time) return (1); /* * At this point, the two files have the same last-modification time. * return a result based on filenames, so that 'cfA001some.host' will * come before 'cfA002some.host'. Since the jobid ('001') will wrap * around when it gets to '999', we also assume that '9xx' jobs are * older than '0xx' jobs. */ if ((qe1->job_cfname[3] == '9') && (qe2->job_cfname[3] == '0')) return (-1); if ((qe1->job_cfname[3] == '0') && (qe2->job_cfname[3] == '9')) return (1); return (strcmp(qe1->job_cfname, qe2->job_cfname)); } /* * A simple routine to determine the job number for a print job based on * the name of its control file. The algorithm used here may look odd, but * the main issue is that all parts of `lpd', `lpc', `lpq' & `lprm' must be * using the same algorithm, whatever that algorithm may be. If the caller * provides a non-null value for ''hostpp', then this returns a pointer to * the start of the hostname (or IP address?) as found in the filename. * * Algorithm: The standard `cf' file has the job number start in position 4, * but some implementations have that as an extra file-sequence letter, and * start the job number in position 5. The job number is usually three bytes, * but may be as many as five. Confusing matters still more, some Windows * print servers will append an IP address to the job number, instead of * the expected hostname. So, if the job number ends with a '.', then * assume the correct jobnum value is the first three digits. */ int calc_jobnum(const char *cfname, const char **hostpp) { int jnum; const char *cp, *numstr, *hoststr; numstr = cfname + 3; if (!isdigitch(*numstr)) numstr++; jnum = 0; for (cp = numstr; (cp < numstr + 5) && isdigitch(*cp); cp++) jnum = jnum * 10 + (*cp - '0'); hoststr = cp; /* * If the filename was built with an IP number instead of a hostname, * then recalculate using only the first three digits found. 
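As a concrete illustration of the job-number algorithm described above, here is a small hypothetical test driver (it assumes lp.h and common.c from this directory are compiled in, so calc_jobnum() is available; the file names are made up):

#include <stdio.h>
#include "lp.h"		/* assumed to declare calc_jobnum() */

int
main(void)
{
	const char *host;

	/* Ordinary name: the three digits after "cfA" are the job number. */
	printf("%d\n", calc_jobnum("cfA123myhost", &host));	/* 123, host -> "myhost" */

	/*
	 * A Windows-style name with an IP address appended: the digit run
	 * continues past three characters and ends in '.', so only the
	 * first three digits count and the rest is taken as the "host".
	 */
	printf("%d\n", calc_jobnum("cfA12310.0.0.5", &host));	/* 123, host -> "10.0.0.5" */
	return (0);
}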
*/ while(isdigitch(*cp)) cp++; if (*cp == '.') { jnum = 0; for (cp = numstr; (cp < numstr + 3) && isdigitch(*cp); cp++) jnum = jnum * 10 + (*cp - '0'); hoststr = cp; } if (hostpp != NULL) *hostpp = hoststr; return (jnum); } /* sleep n milliseconds */ void delay(int millisec) { struct timeval tdelay; if (millisec <= 0 || millisec > 10000) fatal((struct printer *)0, /* fatal() knows how to deal */ "unreasonable delay period (%d)", millisec); tdelay.tv_sec = millisec / 1000; tdelay.tv_usec = millisec * 1000 % 1000000; (void) select(0, (fd_set *)0, (fd_set *)0, (fd_set *)0, &tdelay); } char * lock_file_name(const struct printer *pp, char *buf, size_t len) { static char staticbuf[MAXPATHLEN]; if (buf == NULL) buf = staticbuf; if (len == 0) len = MAXPATHLEN; if (pp->lock_file[0] == '/') strlcpy(buf, pp->lock_file, len); else snprintf(buf, len, "%s/%s", pp->spool_dir, pp->lock_file); return buf; } char * status_file_name(const struct printer *pp, char *buf, size_t len) { static char staticbuf[MAXPATHLEN]; if (buf == NULL) buf = staticbuf; if (len == 0) len = MAXPATHLEN; if (pp->status_file[0] == '/') strlcpy(buf, pp->status_file, len); else snprintf(buf, len, "%s/%s", pp->spool_dir, pp->status_file); return buf; } /* * Routine to change operational state of a print queue. The operational * state is indicated by the access bits on the lock file for the queue. * At present, this is only called from various routines in lpc/cmds.c. * * XXX - Note that this works by changing access-bits on the * file, and you can only do that if you are the owner of * the file, or root. Thus, this won't really work for * userids in the "LPR_OPER" group, unless lpc is running * setuid to root (or maybe setuid to daemon). * Generally lpc is installed setgid to daemon, but does * not run setuid. */ int set_qstate(int action, const char *lfname) { struct stat stbuf; mode_t chgbits, newbits, oldmask; const char *failmsg, *okmsg; static const char *nomsg = "no state msg"; int chres, errsav, fd, res, statres; /* * Find what the current access-bits are. */ memset(&stbuf, 0, sizeof(stbuf)); PRIV_START statres = stat(lfname, &stbuf); errsav = errno; PRIV_END if ((statres < 0) && (errsav != ENOENT)) { printf("\tcannot stat() lock file\n"); return (SQS_STATFAIL); /* NOTREACHED */ } /* * Determine which bit(s) should change for the requested action. */ chgbits = stbuf.st_mode; newbits = LOCK_FILE_MODE; okmsg = NULL; failmsg = NULL; if (action & SQS_QCHANGED) { chgbits |= LFM_RESET_QUE; newbits |= LFM_RESET_QUE; /* The okmsg is not actually printed for this case. */ okmsg = nomsg; failmsg = "set queue-changed"; } if (action & SQS_DISABLEQ) { chgbits |= LFM_QUEUE_DIS; newbits |= LFM_QUEUE_DIS; okmsg = "queuing disabled"; failmsg = "disable queuing"; } if (action & SQS_STOPP) { chgbits |= LFM_PRINT_DIS; newbits |= LFM_PRINT_DIS; okmsg = "printing disabled"; failmsg = "disable printing"; if (action & SQS_DISABLEQ) { okmsg = "printer and queuing disabled"; failmsg = "disable queuing and printing"; } } if (action & SQS_ENABLEQ) { chgbits &= ~LFM_QUEUE_DIS; newbits &= ~LFM_QUEUE_DIS; okmsg = "queuing enabled"; failmsg = "enable queuing"; } if (action & SQS_STARTP) { chgbits &= ~LFM_PRINT_DIS; newbits &= ~LFM_PRINT_DIS; okmsg = "printing enabled"; failmsg = "enable printing"; } if (okmsg == NULL) { /* This routine was called with an invalid action. */ printf("\t\n"); return (SQS_PARMERR); /* NOTREACHED */ } res = 0; if (statres >= 0) { /* The file already exists, so change the access. 
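set_qstate() below encodes several independent queue states in the access bits of the lock file. A small sketch of that bit manipulation, using illustrative octal values only (the real LFM_* constants live in lp.h):

#include <sys/stat.h>
#include <stdio.h>

#define EX_QUEUE_DIS	0010	/* illustrative: new jobs refused */
#define EX_PRINT_DIS	0100	/* illustrative: daemon will not print */

int
main(void)
{
	mode_t mode = 0660;		/* hypothetical LOCK_FILE_MODE */

	mode |= EX_QUEUE_DIS;		/* "disable queuing" */
	mode |= EX_PRINT_DIS;		/* "stop printing" */
	printf("down: %04o\n", (unsigned)mode);

	mode &= ~(EX_QUEUE_DIS | EX_PRINT_DIS);	/* bring the queue back "up" */
	printf("up:   %04o\n", (unsigned)mode);
	return (0);
}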
*/ PRIV_START chres = chmod(lfname, chgbits); errsav = errno; PRIV_END res = SQS_CHGOK; if (chres < 0) res = SQS_CHGFAIL; } else if (newbits == LOCK_FILE_MODE) { /* * The file does not exist, but the state requested is * the same as the default state when no file exists. * Thus, there is no need to create the file. */ res = SQS_SKIPCREOK; } else { /* * The file did not exist, so create it with the * appropriate access bits for the requested action. * Push a new umask around that create, to make sure * all the read/write bits are set as desired. */ oldmask = umask(S_IWOTH); PRIV_START fd = open(lfname, O_WRONLY|O_CREAT, newbits); errsav = errno; PRIV_END umask(oldmask); res = SQS_CREFAIL; if (fd >= 0) { res = SQS_CREOK; close(fd); } } switch (res) { case SQS_CHGOK: case SQS_CREOK: case SQS_SKIPCREOK: if (okmsg != nomsg) printf("\t%s\n", okmsg); break; case SQS_CREFAIL: printf("\tcannot create lock file: %s\n", strerror(errsav)); break; default: printf("\tcannot %s: %s\n", failmsg, strerror(errsav)); break; } return (res); } /* routine to get a current timestamp, optionally in a standard-fmt string */ void lpd_gettime(struct timespec *tsp, char *strp, size_t strsize) { struct timespec local_ts; struct timeval btime; char tempstr[TIMESTR_SIZE]; #ifdef STRFTIME_WRONG_z char *destp; #endif if (tsp == NULL) tsp = &local_ts; /* some platforms have a routine called clock_gettime, but the * routine does nothing but return "not implemented". */ memset(tsp, 0, sizeof(struct timespec)); if (clock_gettime(CLOCK_REALTIME, tsp)) { /* nanosec-aware rtn failed, fall back to microsec-aware rtn */ memset(tsp, 0, sizeof(struct timespec)); gettimeofday(&btime, NULL); tsp->tv_sec = btime.tv_sec; tsp->tv_nsec = btime.tv_usec * 1000; } /* caller may not need a character-ized version */ if ((strp == NULL) || (strsize < 1)) return; strftime(tempstr, TIMESTR_SIZE, LPD_TIMESTAMP_PATTERN, localtime(&tsp->tv_sec)); /* * This check is for implementations of strftime which treat %z * (timezone as [+-]hhmm ) like %Z (timezone as characters), or * completely ignore %z. This section is not needed on freebsd. * I'm not sure this is completely right, but it should work OK * for EST and EDT... */ #ifdef STRFTIME_WRONG_z destp = strrchr(tempstr, ':'); if (destp != NULL) { destp += 3; if ((*destp != '+') && (*destp != '-')) { char savday[6]; int tzmin = timezone / 60; int tzhr = tzmin / 60; if (daylight) tzhr--; strcpy(savday, destp + strlen(destp) - 4); snprintf(destp, (destp - tempstr), "%+03d%02d", (-1*tzhr), tzmin % 60); strcat(destp, savday); } } #endif if (strsize > TIMESTR_SIZE) { strsize = TIMESTR_SIZE; strp[TIMESTR_SIZE+1] = '\0'; } strlcpy(strp, tempstr, strsize); } /* routines for writing transfer-statistic records */ void trstat_init(struct printer *pp, const char *fname, int filenum) { register const char *srcp; register char *destp, *endp; /* * Figure out the job id of this file. The filename should be * 'cf', 'df', or maybe 'tf', followed by a letter (or sometimes * two), followed by the jobnum, followed by a hostname. * The jobnum is usually 3 digits, but might be as many as 5. * Note that some care has to be taken parsing this, as the * filename could be coming from a remote-host, and thus might * not look anything like what is expected... 
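The parsing that follows in trstat_init() boils down to "skip to the first digit after the basename and copy at most five digits". A standalone sketch of that step, on a hypothetical data-file name:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *fname = "dfA031somehost";	/* hypothetical data file */
	const char *p;
	char jobnum[6];
	size_t n = 0;

	p = strrchr(fname, '/');
	p = (p != NULL) ? p + 1 : fname;
	while (*p != '\0' && !isdigit((unsigned char)*p))
		p++;				/* skip "dfA" (or whatever precedes the digits) */
	while (isdigit((unsigned char)*p) && n < sizeof(jobnum) - 1)
		jobnum[n++] = *p++;		/* copy at most five digits */
	jobnum[n] = '\0';
	printf("job number: %s\n", jobnum);	/* prints "031" */
	return (0);
}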
*/ memset(pp->jobnum, 0, sizeof(pp->jobnum)); pp->jobnum[0] = '0'; srcp = strchr(fname, '/'); if (srcp == NULL) srcp = fname; destp = &(pp->jobnum[0]); endp = destp + 5; while (*srcp != '\0' && (*srcp < '0' || *srcp > '9')) srcp++; while (*srcp >= '0' && *srcp <= '9' && destp < endp) *(destp++) = *(srcp++); /* get the starting time in both numeric and string formats, and * save those away along with the file-number */ pp->jobdfnum = filenum; lpd_gettime(&pp->tr_start, pp->tr_timestr, (size_t)TIMESTR_SIZE); return; } void trstat_write(struct printer *pp, tr_sendrecv sendrecv, size_t bytecnt, const char *userid, const char *otherhost, const char *orighost) { #define STATLINE_SIZE 1024 double trtime; size_t remspace; int statfile; char thishost[MAXHOSTNAMELEN], statline[STATLINE_SIZE]; char *eostat; const char *lprhost, *recvdev, *recvhost, *rectype; const char *sendhost, *statfname; #define UPD_EOSTAT(xStr) do { \ eostat = strchr(xStr, '\0'); \ remspace = eostat - xStr; \ } while(0) lpd_gettime(&pp->tr_done, NULL, (size_t)0); trtime = DIFFTIME_TS(pp->tr_done, pp->tr_start); gethostname(thishost, sizeof(thishost)); lprhost = sendhost = recvhost = recvdev = NULL; switch (sendrecv) { case TR_SENDING: rectype = "send"; statfname = pp->stat_send; sendhost = thishost; recvhost = otherhost; break; case TR_RECVING: rectype = "recv"; statfname = pp->stat_recv; sendhost = otherhost; recvhost = thishost; break; case TR_PRINTING: /* * This case is for copying to a device (presumably local, * though filters using things like 'net/CAP' can confuse * this assumption...). */ rectype = "prnt"; statfname = pp->stat_send; sendhost = thishost; recvdev = _PATH_DEFDEVLP; if (pp->lp) recvdev = pp->lp; break; default: /* internal error... should we syslog/printf an error? */ return; } if (statfname == NULL) return; /* * the original-host and userid are found out by reading thru the * cf (control-file) for the job. Unfortunately, on incoming jobs * the df's (data-files) are sent before the matching cf, so the * orighost & userid are generally not-available for incoming jobs. * * (it would be nice to create a work-around for that..) */ if (orighost && (*orighost != '\0')) lprhost = orighost; else lprhost = ".na."; if (*userid == '\0') userid = NULL; /* * Format of statline. * Some of the keywords listed here are not implemented here, but * they are listed to reserve the meaning for a given keyword. * Fields are separated by a blank. The fields in statline are: * - time the transfer started * - name of the printer queue (the short-name...) * - hostname the file originally came from (the * 'lpr host'), if known, or "_na_" if not known. * - id of job from that host (generally three digits) * - file count (# of file within job) * - 4-byte field indicating the type of transfer * statistics record. "send" means it's from the * host sending a datafile, "recv" means it's from * a host as it receives a datafile. * user= - user who sent the job (if known) * secs= - seconds it took to transfer the file - * bytes= - number of bytes transfered (ie, "bytecount") + * bytes= - number of bytes transferred (ie, "bytecount") * bps=e - Bytes/sec (if the transfer was "big enough" * for this to be useful) * ! top= - type of printer (if the type is defined in * printcap, and if this statline is for sending * a file to that ptr) * ! qls= - queue-length at start of send/print-ing a job * ! qle= - queue-length at end of send/print-ing a job * sip= - IP address of sending host, only included when * receiving a job. 
* shost= - sending host (if that does != the original host) * rhost= - hostname receiving the file (ie, "destination") * rdev= - device receiving the file, when the file is being * send to a device instead of a remote host. * * Note: A single print job may be transferred multiple times. The * original 'lpr' occurs on one host, and that original host might * send to some interim host (or print server). That interim host * might turn around and send the job to yet another host (most likely * the real printer). The 'shost=' parameter is only included if the * sending host for this particular transfer is NOT the same as the * host which did the original 'lpr'. * * Many values have 'something=' tags before them, because they are * in some sense "optional", or their order may vary. "Optional" may * mean in the sense that different SITES might choose to have other * fields in the record, or that some fields are only included under * some circumstances. Programs processing these records should not * assume the order or existence of any of these keyword fields. */ snprintf(statline, STATLINE_SIZE, "%s %s %s %s %03ld %s", pp->tr_timestr, pp->printer, lprhost, pp->jobnum, pp->jobdfnum, rectype); UPD_EOSTAT(statline); if (userid != NULL) { snprintf(eostat, remspace, " user=%s", userid); UPD_EOSTAT(statline); } snprintf(eostat, remspace, " secs=%#.2f bytes=%lu", trtime, (unsigned long)bytecnt); UPD_EOSTAT(statline); /* * The bps field duplicates info from bytes and secs, so do * not bother to include it for very small files. */ if ((bytecnt > 25000) && (trtime > 1.1)) { snprintf(eostat, remspace, " bps=%#.2e", ((double)bytecnt/trtime)); UPD_EOSTAT(statline); } if (sendrecv == TR_RECVING) { if (remspace > 5+strlen(from_ip) ) { snprintf(eostat, remspace, " sip=%s", from_ip); UPD_EOSTAT(statline); } } if (0 != strcmp(lprhost, sendhost)) { if (remspace > 7+strlen(sendhost) ) { snprintf(eostat, remspace, " shost=%s", sendhost); UPD_EOSTAT(statline); } } if (recvhost) { if (remspace > 7+strlen(recvhost) ) { snprintf(eostat, remspace, " rhost=%s", recvhost); UPD_EOSTAT(statline); } } if (recvdev) { if (remspace > 6+strlen(recvdev) ) { snprintf(eostat, remspace, " rdev=%s", recvdev); UPD_EOSTAT(statline); } } if (remspace > 1) { strcpy(eostat, "\n"); } else { /* probably should back up to just before the final " x=".. */ strcpy(statline+STATLINE_SIZE-2, "\n"); } statfile = open(statfname, O_WRONLY|O_APPEND, 0664); if (statfile < 0) { /* statfile was given, but we can't open it. should we * syslog/printf this as an error? */ return; } write(statfile, statline, strlen(statline)); close(statfile); return; #undef UPD_EOSTAT } #include void fatal(const struct printer *pp, const char *msg, ...) { va_list ap; va_start(ap, msg); /* this error message is being sent to the 'from_host' */ if (from_host != local_host) (void)printf("%s: ", local_host); (void)printf("%s: ", progname); if (pp && pp->printer) (void)printf("%s: ", pp->printer); (void)vprintf(msg, ap); va_end(ap); (void)putchar('\n'); exit(1); } /* * Close all file descriptors from START on up. */ void closeallfds(int start) { int stop; if (USE_CLOSEFROM) /* The faster, modern solution */ closefrom(start); else { /* This older logic can be pretty awful on some OS's. The * getdtablesize() might return ``infinity'', and then this * will waste a lot of time closing file descriptors which * had never been open()-ed. 
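Stepping back to the transfer-statistics records assembled by trstat_write() above: since the comment there warns that consumers must not assume the order or presence of the keyword fields, a reader of the stat file should search for the "key=" tags rather than split by position. A small hypothetical sketch of such a consumer (the sample record and its timestamp format are illustrative only):

#include <stdio.h>
#include <string.h>

/* Copy the value following "key=" out of one record, if present. */
static const char *
stat_field(const char *line, const char *key, char *buf, size_t buflen)
{
	const char *p = strstr(line, key);
	size_t n = 0;

	if (p == NULL)
		return (NULL);
	p += strlen(key);
	while (*p != '\0' && *p != ' ' && *p != '\n' && n < buflen - 1)
		buf[n++] = *p++;
	buf[n] = '\0';
	return (buf);
}

int
main(void)
{
	const char *rec =
	    "2016-05-01T12:00:00 lp .na. 123 1 recv user=anne secs=0.42 bytes=5120\n";
	char val[64];

	if (stat_field(rec, " bytes=", val, sizeof(val)) != NULL)
		printf("bytes transferred: %s\n", val);
	if (stat_field(rec, " secs=", val, sizeof(val)) != NULL)
		printf("transfer time: %s\n", val);
	return (0);
}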
*/ stop = getdtablesize(); for (; start < stop; start++) close(start); } } Index: head/usr.sbin/lpr/lpd/recvjob.c =================================================================== --- head/usr.sbin/lpr/lpd/recvjob.c (revision 298885) +++ head/usr.sbin/lpr/lpd/recvjob.c (revision 298886) @@ -1,405 +1,405 @@ /* * Copyright (c) 1983, 1993 * The Regents of the University of California. All rights reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef lint static const char copyright[] = "@(#) Copyright (c) 1983, 1993\n\ The Regents of the University of California. All rights reserved.\n"; #endif /* not lint */ #if 0 #ifndef lint static char sccsid[] = "@(#)recvjob.c 8.2 (Berkeley) 4/27/95"; #endif /* not lint */ #endif #include "lp.cdefs.h" /* A cross-platform version of */ __FBSDID("$FreeBSD$"); /* * Receive printer jobs from the network, queue them and * start the printer daemon. */ #include #include #include #include #include #include #include #include #include #include #include #include #include "lp.h" #include "lp.local.h" #include "ctlinfo.h" #include "extern.h" #include "pathnames.h" #define ack() (void) write(STDOUT_FILENO, sp, (size_t)1); /* * The buffer size to use when reading/writing spool files. */ #define SPL_BUFSIZ BUFSIZ static char dfname[NAME_MAX]; /* data files */ static int minfree; /* keep at least minfree blocks available */ static const char *sp = ""; static char tfname[NAME_MAX]; /* tmp copy of cf before linking */ static int chksize(int _size); static void frecverr(const char *_msg, ...) 
__printf0like(1, 2); static int noresponse(void); static void rcleanup(int _signo); static int read_number(const char *_fn); static int readfile(struct printer *_pp, char *_file, size_t _size); static int readjob(struct printer *_pp); void recvjob(const char *printer) { struct stat stb; int status; struct printer myprinter, *pp = &myprinter; /* * Perform lookup for printer name or abbreviation */ init_printer(pp); status = getprintcap(printer, pp); switch (status) { case PCAPERR_OSERR: frecverr("cannot open printer description file"); break; case PCAPERR_NOTFOUND: frecverr("unknown printer %s", printer); break; case PCAPERR_TCLOOP: fatal(pp, "potential reference loop detected in printcap file"); default: break; } (void) close(STDERR_FILENO); /* set up log file */ if (open(pp->log_file, O_WRONLY|O_APPEND, 0664) < 0) { syslog(LOG_ERR, "%s: %m", pp->log_file); (void) open(_PATH_DEVNULL, O_WRONLY); } if (chdir(pp->spool_dir) < 0) frecverr("%s: chdir(%s): %s", pp->printer, pp->spool_dir, strerror(errno)); if (stat(pp->lock_file, &stb) == 0) { if (stb.st_mode & 010) { /* queue is disabled */ putchar('\1'); /* return error code */ exit(1); } } else if (stat(pp->spool_dir, &stb) < 0) frecverr("%s: stat(%s): %s", pp->printer, pp->spool_dir, strerror(errno)); minfree = 2 * read_number("minfree"); /* scale KB to 512 blocks */ signal(SIGTERM, rcleanup); signal(SIGPIPE, rcleanup); if (readjob(pp)) printjob(pp); } /* * Read printer jobs sent by lpd and copy them to the spooling directory. - * Return the number of jobs successfully transfered. + * Return the number of jobs successfully transferred. */ static int readjob(struct printer *pp) { register int size; int cfcnt, dfcnt; char *cp, *clastp, *errmsg; char givenid[32], givenhost[MAXHOSTNAMELEN]; ack(); cfcnt = 0; dfcnt = 0; for (;;) { /* * Read a command to tell us what to do */ cp = line; clastp = line + sizeof(line) - 1; do { size = read(STDOUT_FILENO, cp, (size_t)1); if (size != (ssize_t)1) { if (size < (ssize_t)0) { frecverr("%s: lost connection", pp->printer); /*NOTREACHED*/ } return (cfcnt); } } while ((*cp++ != '\n') && (cp <= clastp)); if (cp > clastp) { frecverr("%s: readjob overflow", pp->printer); /*NOTREACHED*/ } *--cp = '\0'; cp = line; switch (*cp++) { case '\1': /* cleanup because data sent was bad */ rcleanup(0); continue; case '\2': /* read cf file */ size = 0; dfcnt = 0; while (*cp >= '0' && *cp <= '9') size = size * 10 + (*cp++ - '0'); if (*cp++ != ' ') break; /* * host name has been authenticated, we use our * view of the host name since we may be passed * something different than what gethostbyaddr() * returns */ strlcpy(cp + 6, from_host, sizeof(line) + (size_t)(line - cp - 6)); if (strchr(cp, '/')) { frecverr("readjob: %s: illegal path name", cp); /*NOTREACHED*/ } strlcpy(tfname, cp, sizeof(tfname)); tfname[sizeof (tfname) - 1] = '\0'; tfname[0] = 't'; if (!chksize(size)) { (void) write(STDOUT_FILENO, "\2", (size_t)1); continue; } if (!readfile(pp, tfname, (size_t)size)) { rcleanup(0); continue; } errmsg = ctl_renametf(pp->printer, tfname); tfname[0] = '\0'; if (errmsg != NULL) { frecverr("%s: %s", pp->printer, errmsg); /*NOTREACHED*/ } cfcnt++; continue; case '\3': /* read df file */ *givenid = '\0'; *givenhost = '\0'; size = 0; while (*cp >= '0' && *cp <= '9') size = size * 10 + (*cp++ - '0'); if (*cp++ != ' ') break; if (strchr(cp, '/')) { frecverr("readjob: %s: illegal path name", cp); /*NOTREACHED*/ } if (!chksize(size)) { (void) write(STDOUT_FILENO, "\2", (size_t)1); continue; } strlcpy(dfname, cp, sizeof(dfname)); 
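/*
 * Illustration: a typical data-file subcommand handled here is the byte
 * 003 followed by "3172 dfA031somehost\n", i.e. "receive a data file of
 * 3172 bytes named dfA031somehost" (size and name are hypothetical).
 * The size was accumulated digit by digit above, the name has just been
 * copied into dfname, and each successful step is acknowledged by ack()
 * writing a single '\0' byte back to the sender; a non-zero reply byte
 * reports failure (see noresponse()).
 */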
dfcnt++; trstat_init(pp, dfname, dfcnt); (void) readfile(pp, dfname, (size_t)size); trstat_write(pp, TR_RECVING, (size_t)size, givenid, from_host, givenhost); continue; } frecverr("protocol screwup: %s", line); /*NOTREACHED*/ } } /* * Read files send by lpd and copy them to the spooling directory. */ static int readfile(struct printer *pp, char *file, size_t size) { register char *cp; char buf[SPL_BUFSIZ]; size_t amt, i; int err, fd, j; fd = open(file, O_CREAT|O_EXCL|O_WRONLY, FILMOD); if (fd < 0) { frecverr("%s: readfile: error on open(%s): %s", pp->printer, file, strerror(errno)); /*NOTREACHED*/ } ack(); err = 0; for (i = 0; i < size; i += SPL_BUFSIZ) { amt = SPL_BUFSIZ; cp = buf; if (i + amt > size) amt = size - i; do { j = read(STDOUT_FILENO, cp, amt); if (j <= 0) { frecverr("%s: lost connection", pp->printer); /*NOTREACHED*/ } amt -= j; cp += j; } while (amt > 0); amt = SPL_BUFSIZ; if (i + amt > size) amt = size - i; if (write(fd, buf, amt) != (ssize_t)amt) { err++; break; } } (void) close(fd); if (err) { frecverr("%s: write error on close(%s)", pp->printer, file); /*NOTREACHED*/ } if (noresponse()) { /* file sent had bad data in it */ if (strchr(file, '/') == NULL) (void) unlink(file); return (0); } ack(); return (1); } static int noresponse(void) { char resp; if (read(STDOUT_FILENO, &resp, (size_t)1) != 1) { frecverr("lost connection in noresponse()"); /*NOTREACHED*/ } if (resp == '\0') return(0); return(1); } /* * Check to see if there is enough space on the disk for size bytes. * 1 == OK, 0 == Not OK. */ static int chksize(int size) { int64_t spacefree; struct statfs sfb; if (statfs(".", &sfb) < 0) { syslog(LOG_ERR, "%s: %m", "statfs(\".\")"); return (1); } spacefree = sfb.f_bavail * (sfb.f_bsize / 512); size = (size + 511) / 512; if (minfree + size > spacefree) return(0); return(1); } static int read_number(const char *fn) { char lin[80]; register FILE *fp; if ((fp = fopen(fn, "r")) == NULL) return (0); if (fgets(lin, sizeof(lin), fp) == NULL) { fclose(fp); return (0); } fclose(fp); return (atoi(lin)); } /* - * Remove all the files associated with the current job being transfered. + * Remove all the files associated with the current job being transferred. */ static void rcleanup(int signo __unused) { if (tfname[0] && strchr(tfname, '/') == NULL) (void) unlink(tfname); if (dfname[0] && strchr(dfname, '/') == NULL) { do { do (void) unlink(dfname); while (dfname[2]-- != 'A'); dfname[2] = 'z'; } while (dfname[0]-- != 'd'); } dfname[0] = '\0'; } #include static void frecverr(const char *msg, ...) { va_list ap; va_start(ap, msg); syslog(LOG_ERR, "Error receiving job from %s:", from_host); vsyslog(LOG_ERR, msg, ap); va_end(ap); /* * rcleanup is not called until AFTER logging the error message, * because rcleanup will zap some variables which may have been * supplied as parameters for that msg... */ rcleanup(0); /* * Add a minimal delay before returning the final error code to * the sending host. This just in case that machine responds * this error by INSTANTLY retrying (and instantly re-failing...). * It would be stupid of the sending host to do that, but if there * was a broken implementation which did it, the result might be * obscure performance problems and a flood of syslog messages on * the receiving host. 
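For the free-space test in chksize() above, everything is first normalized to 512-byte blocks before comparing against the "minfree" reserve (which read_number() returns in kilobytes, hence the factor of two in recvjob()). A small worked sketch of that arithmetic:

#include <sys/param.h>
#include <sys/mount.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct statfs sfb;
	int64_t spacefree;
	long minfree = 2 * 100;			/* hypothetical "minfree" of 100 KB */
	long size = (65536 + 511) / 512;	/* incoming 64 KB job, rounded up to blocks */

	if (statfs(".", &sfb) < 0)
		return (1);
	/* f_bavail is counted in f_bsize units; rescale to 512-byte blocks. */
	spacefree = sfb.f_bavail * (sfb.f_bsize / 512);
	printf("%s\n", (minfree + size > spacefree) ? "refuse job" : "accept job");
	return (0);
}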
*/ sleep(2); /* a paranoid throttling measure */ putchar('\1'); /* return error code */ exit(1); } Index: head/usr.sbin/nandsim/nandsim_cfgparse.c =================================================================== --- head/usr.sbin/nandsim/nandsim_cfgparse.c (revision 298885) +++ head/usr.sbin/nandsim/nandsim_cfgparse.c (revision 298886) @@ -1,959 +1,959 @@ /*- * Copyright (C) 2009-2012 Semihalf * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "nandsim_cfgparse.h" #define warn(fmt, args...) do { \ printf("WARNING: " fmt "\n", ##args); } while (0) #define error(fmt, args...) do { \ printf("ERROR: " fmt "\n", ##args); } while (0) #define MSG_MANDATORYKEYMISSING "mandatory key \"%s\" value belonging to " \ "section \"%s\" is missing!\n" #define DEBUG #undef DEBUG #ifdef DEBUG #define debug(fmt, args...) do { \ printf("NANDSIM_CONF:" fmt "\n", ##args); } while (0) #else #define debug(fmt, args...) do {} while(0) #endif #define STRBUFSIZ 2000 /* Macros extracts type and type size */ #define TYPE(x) ((x) & 0xf8) #define SIZE(x) (((x) & 0x07)) /* Erase/Prog/Read time max and min values */ #define DELAYTIME_MIN 10000 #define DELAYTIME_MAX 10000000 /* Structure holding configuration for controller. */ static struct sim_ctrl ctrl_conf; /* Structure holding configuration for chip. 
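Each key descriptor in the tables below packs the value kind and its storage width into one byte: TYPE() masks the high bits and SIZE() the low three. A short sketch of decoding such an entry, using illustrative constant values only (the real VALUE_ and SIZE_ constants come from nandsim_cfgparse.h):

#include <stdint.h>
#include <stdio.h>

#define TYPE(x)	((x) & 0xf8)	/* value kind lives in the high bits */
#define SIZE(x)	((x) & 0x07)	/* storage-width code in the low three */

#define VALUE_UINT	0x08	/* illustrative */
#define SIZE_16		0x02	/* illustrative */

int
main(void)
{
	uint8_t vt = VALUE_UINT | SIZE_16;	/* a 16-bit unsigned key */

	printf("type=%#x size-code=%#x\n", TYPE(vt), SIZE(vt));
	return (0);
}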
*/ static struct sim_chip chip_conf; static struct nandsim_key nandsim_ctrl_keys[] = { {"num_cs", 1, VALUE_UINT | SIZE_8, (void *)&ctrl_conf.num_cs, 0}, {"ctrl_num", 1, VALUE_UINT | SIZE_8, (void *)&ctrl_conf.num, 0}, {"ecc_layout", 1, VALUE_UINTARRAY | SIZE_16, (void *)&ctrl_conf.ecc_layout, MAX_ECC_BYTES}, {"filename", 0, VALUE_STRING, (void *)&ctrl_conf.filename, FILENAME_SIZE}, {"ecc", 0, VALUE_BOOL, (void *)&ctrl_conf.ecc, 0}, {NULL, 0, 0, NULL, 0}, }; static struct nandsim_key nandsim_chip_keys[] = { {"chip_cs", 1, VALUE_UINT | SIZE_8, (void *)&chip_conf.num, 0}, {"chip_ctrl", 1, VALUE_UINT | SIZE_8, (void *)&chip_conf.ctrl_num, 0}, {"device_id", 1, VALUE_UINT | SIZE_8, (void *)&chip_conf.device_id, 0}, {"manufacturer_id", 1, VALUE_UINT | SIZE_8, (void *)&chip_conf.manufact_id, 0}, {"model", 0, VALUE_STRING, (void *)&chip_conf.device_model, DEV_MODEL_STR_SIZE}, {"manufacturer", 0, VALUE_STRING, (void *)&chip_conf.manufacturer, MAN_STR_SIZE}, {"page_size", 1, VALUE_UINT | SIZE_32, (void *)&chip_conf.page_size, 0}, {"oob_size", 1, VALUE_UINT | SIZE_32, (void *)&chip_conf.oob_size, 0}, {"pages_per_block", 1, VALUE_UINT | SIZE_32, (void *)&chip_conf.pgs_per_blk, 0}, {"blocks_per_lun", 1, VALUE_UINT | SIZE_32, (void *)&chip_conf.blks_per_lun, 0}, {"luns", 1, VALUE_UINT | SIZE_32, (void *)&chip_conf.luns, 0}, {"column_addr_cycle", 1,VALUE_UINT | SIZE_8, (void *)&chip_conf.col_addr_cycles, 0}, {"row_addr_cycle", 1, VALUE_UINT | SIZE_8, (void *)&chip_conf.row_addr_cycles, 0}, {"program_time", 0, VALUE_UINT | SIZE_32, (void *)&chip_conf.prog_time, 0}, {"erase_time", 0, VALUE_UINT | SIZE_32, (void *)&chip_conf.erase_time, 0}, {"read_time", 0, VALUE_UINT | SIZE_32, (void *)&chip_conf.read_time, 0}, {"width", 1, VALUE_UINT | SIZE_8, (void *)&chip_conf.width, 0}, {"wear_out", 1, VALUE_UINT | SIZE_32, (void *)&chip_conf.wear_level, 0}, {"bad_block_map", 0, VALUE_UINTARRAY | SIZE_32, (void *)&chip_conf.bad_block_map, MAX_BAD_BLOCKS}, {NULL, 0, 0, NULL, 0}, }; static struct nandsim_section sections[] = { {"ctrl", (struct nandsim_key *)&nandsim_ctrl_keys}, {"chip", (struct nandsim_key *)&nandsim_chip_keys}, {NULL, NULL}, }; static uint8_t logoutputtoint(char *, int *); static uint8_t validate_chips(struct sim_chip *, int, struct sim_ctrl *, int); static uint8_t validate_ctrls(struct sim_ctrl *, int); static int configure_sim(const char *, struct rcfile *); static int create_ctrls(struct rcfile *, struct sim_ctrl **, int *); static int create_chips(struct rcfile *, struct sim_chip **, int *); static void destroy_ctrls(struct sim_ctrl *); static void destroy_chips(struct sim_chip *); static int validate_section_config(struct rcfile *, const char *, int); int convert_argint(char *arg, int *value) { if (arg == NULL || value == NULL) return (EINVAL); errno = 0; *value = (int)strtol(arg, NULL, 0); if (*value == 0 && errno != 0) { error("Cannot convert to number argument \'%s\'", arg); return (EINVAL); } return (0); } int convert_arguint(char *arg, unsigned int *value) { if (arg == NULL || value == NULL) return (EINVAL); errno = 0; *value = (unsigned int)strtol(arg, NULL, 0); if (*value == 0 && errno != 0) { error("Cannot convert to number argument \'%s\'", arg); return (EINVAL); } return (0); } /* Parse given ',' separated list of bytes into buffer. 
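The accepted syntax is a bracketed, comma-separated list in which an entry may also be a "from-to" range. A hypothetical usage example (assuming the declarations in nandsim_cfgparse.h, so parse_intarray() from this file is available):

#include <stdio.h>
#include <stdlib.h>
#include "nandsim_cfgparse.h"	/* assumed to declare parse_intarray() */

int
main(void)
{
	char layout[] = "[0,1,2,8-11]";		/* hypothetical array value */
	int *vals, i, cnt;

	cnt = parse_intarray(layout, &vals);	/* the 8-11 range is expanded */
	for (i = 0; i < cnt; i++)
		printf("%d ", vals[i]);		/* prints: 0 1 2 8 9 10 11 */
	printf("(%d entries)\n", cnt);
	free(vals);
	return (0);
}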
*/ int parse_intarray(char *array, int **buffer) { char *tmp, *tmpstr, *origstr; unsigned int currbufp = 0, i; unsigned int count = 0, from = 0, to = 0; /* Remove square braces */ if (array[0] == '[') array ++; if (array[strlen(array)-1] == ']') array[strlen(array)-1] = ','; from = strlen(array); origstr = (char *)malloc(sizeof(char) * from); strcpy(origstr, array); tmpstr = (char *)strtok(array, ","); /* First loop checks for how big int array we need to allocate */ while (tmpstr != NULL) { errno = 0; if ((tmp = strchr(tmpstr, '-')) != NULL) { *tmp = ' '; if (convert_arguint(tmpstr, &from) || convert_arguint(tmp, &to)) { free(origstr); return (EINVAL); } count += to - from + 1; } else { if (convert_arguint(tmpstr, &from)) { free(origstr); return (EINVAL); } count++; } tmpstr = (char *)strtok(NULL, ","); } if (count == 0) goto out; /* Allocate buffer of ints */ tmpstr = (char *)strtok(origstr, ","); *buffer = malloc(count * sizeof(int)); /* Second loop is just inserting converted values into int array */ while (tmpstr != NULL) { errno = 0; if ((tmp = strchr(tmpstr, '-')) != NULL) { *tmp = ' '; from = strtol(tmpstr, NULL, 0); to = strtol(tmp, NULL, 0); tmpstr = strtok(NULL, ","); for (i = from; i <= to; i ++) (*buffer)[currbufp++] = i; continue; } errno = 0; from = (int)strtol(tmpstr, NULL, 0); (*buffer)[currbufp++] = from; tmpstr = (char *)strtok(NULL, ","); } out: free(origstr); return (count); } /* Convert logoutput strings literals into appropriate ints. */ static uint8_t logoutputtoint(char *logoutput, int *output) { int out; if (strcmp(logoutput, "file") == 0) out = NANDSIM_OUTPUT_FILE; else if (strcmp(logoutput, "console") == 0) out = NANDSIM_OUTPUT_CONSOLE; else if (strcmp(logoutput, "ram") == 0) out = NANDSIM_OUTPUT_RAM; else if (strcmp(logoutput, "none") == 0) out = NANDSIM_OUTPUT_NONE; else out = -1; *output = out; if (out == -1) return (EINVAL); else return (0); } static int configure_sim(const char *devfname, struct rcfile *f) { struct sim_param sim_conf; char buf[255]; int err, tmpv, fd; err = rc_getint(f, "sim", 0, "log_level", &tmpv); if (tmpv < 0 || tmpv > 255 || err) { error("Bad log level specified (%d)\n", tmpv); return (ENOTSUP); } else sim_conf.log_level = tmpv; rc_getstring(f, "sim", 0, "log_output", 255, (char *)&buf); tmpv = -1; err = logoutputtoint((char *)&buf, &tmpv); if (err) { error("Log output specified in config file does not seem to " "be valid (%s)!", (char *)&buf); return (ENOTSUP); } sim_conf.log_output = tmpv; fd = open(devfname, O_RDWR); if (fd == -1) { error("could not open simulator device file (%s)!", devfname); return (EX_OSFILE); } err = ioctl(fd, NANDSIM_SIM_PARAM, &sim_conf); if (err) { error("simulator parameters could not be modified: %s", strerror(errno)); close(fd); return (ENXIO); } close(fd); return (EX_OK); } static int create_ctrls(struct rcfile *f, struct sim_ctrl **ctrls, int *cnt) { int count, i; struct sim_ctrl *ctrlsptr; count = rc_getsectionscount(f, "ctrl"); if (count > MAX_SIM_DEV) { error("Too many CTRL sections specified(%d)", count); return (ENOTSUP); } else if (count == 0) { error("No ctrl sections specified"); return (ENOENT); } ctrlsptr = (struct sim_ctrl *)malloc(sizeof(struct sim_ctrl) * count); if (ctrlsptr == NULL) { error("Could not allocate memory for ctrl configuration"); return (ENOMEM); } for (i = 0; i < count; i++) { bzero((void *)&ctrl_conf, sizeof(ctrl_conf)); /* * ECC layout have to end up with 0xffff, so * we're filling buffer with 0xff. If ecc_layout is * defined in config file, values will be overridden. 
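Pre-filling the ECC layout with 0xff means that any slot not set by the config file reads as 0xffff, which later serves as the end-of-list marker. A short sketch of walking such a layout (mirroring the scan done in validate_chips() further down; MAX_ECC_BYTES here is illustrative, the real limit is in the header):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_ECC_BYTES 32	/* illustrative */

int
main(void)
{
	uint16_t ecc_layout[MAX_ECC_BYTES];
	int i;

	memset(ecc_layout, 0xff, sizeof(ecc_layout));	/* all slots "unused" */
	ecc_layout[0] = 0;	/* pretend the config file set three entries */
	ecc_layout[1] = 1;
	ecc_layout[2] = 2;

	for (i = 0; i < MAX_ECC_BYTES && ecc_layout[i] != 0xffff; i++)
		printf("ecc byte #%d at offset %u\n", i, ecc_layout[i]);
	return (0);
}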
*/ memset((void *)&ctrl_conf.ecc_layout, 0xff, sizeof(ctrl_conf.ecc_layout)); if (validate_section_config(f, "ctrl", i) != 0) { free(ctrlsptr); return (EINVAL); } if (parse_section(f, "ctrl", i) != 0) { free(ctrlsptr); return (EINVAL); } memcpy(&ctrlsptr[i], &ctrl_conf, sizeof(ctrl_conf)); /* Try to create ctrl with config parsed */ debug("NUM=%d\nNUM_CS=%d\nECC=%d\nFILENAME=%s\nECC_LAYOUT[0]" "=%d\nECC_LAYOUT[1]=%d\n\n", ctrlsptr[i].num, ctrlsptr[i].num_cs, ctrlsptr[i].ecc, ctrlsptr[i].filename, ctrlsptr[i].ecc_layout[0], ctrlsptr[i].ecc_layout[1]); } *cnt = count; *ctrls = ctrlsptr; return (0); } static void destroy_ctrls(struct sim_ctrl *ctrls) { free(ctrls); } static int create_chips(struct rcfile *f, struct sim_chip **chips, int *cnt) { struct sim_chip *chipsptr; int count, i; count = rc_getsectionscount(f, "chip"); if (count > (MAX_CTRL_CS * MAX_SIM_DEV)) { error("Too many chip sections specified(%d)", count); return (ENOTSUP); } else if (count == 0) { error("No chip sections specified"); return (ENOENT); } chipsptr = (struct sim_chip *)malloc(sizeof(struct sim_chip) * count); if (chipsptr == NULL) { error("Could not allocate memory for chip configuration"); return (ENOMEM); } for (i = 0; i < count; i++) { bzero((void *)&chip_conf, sizeof(chip_conf)); /* * Bad block map have to end up with 0xffff, so * we're filling array with 0xff. If bad block map is * defined in config file, values will be overridden. */ memset((void *)&chip_conf.bad_block_map, 0xff, sizeof(chip_conf.bad_block_map)); if (validate_section_config(f, "chip", i) != 0) { free(chipsptr); return (EINVAL); } if (parse_section(f, "chip", i) != 0) { free(chipsptr); return (EINVAL); } memcpy(&chipsptr[i], &chip_conf, sizeof(chip_conf)); /* Try to create chip with config parsed */ debug("CHIP:\nNUM=%d\nCTRL_NUM=%d\nDEVID=%d\nMANID=%d\n" "PAGE_SZ=%d\nOOBSZ=%d\nREAD_T=%d\nDEVMODEL=%s\n" "MAN=%s\nCOLADDRCYCLES=%d\nROWADDRCYCLES=%d\nCHWIDTH=%d\n" "PGS/BLK=%d\nBLK/LUN=%d\nLUNS=%d\nERR_RATIO=%d\n" "WEARLEVEL=%d\nISWP=%d\n\n\n\n", chipsptr[i].num, chipsptr[i].ctrl_num, chipsptr[i].device_id, chipsptr[i].manufact_id, chipsptr[i].page_size, chipsptr[i].oob_size, chipsptr[i].read_time, chipsptr[i].device_model, chipsptr[i].manufacturer, chipsptr[i].col_addr_cycles, chipsptr[i].row_addr_cycles, chipsptr[i].width, chipsptr[i].pgs_per_blk, chipsptr[i].blks_per_lun, chipsptr[i].luns, chipsptr[i].error_ratio, chipsptr[i].wear_level, chipsptr[i].is_wp); } *cnt = count; *chips = chipsptr; return (0); } static void destroy_chips(struct sim_chip *chips) { free(chips); } int parse_config(char *cfgfname, const char *devfname) { int err = 0, fd; unsigned int chipsectionscnt, ctrlsectionscnt, i; struct rcfile *f; struct sim_chip *chips; struct sim_ctrl *ctrls; err = rc_open(cfgfname, "r", &f); if (err) { error("could not open configuration file (%s)", cfgfname); return (EX_NOINPUT); } /* First, try to configure simulator itself. 
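configure_sim() reads the simulator-wide settings from the "sim" section, via the log_level and log_output keys seen earlier. Purely as a hypothetical illustration (the exact file syntax is defined by the rc-file parser, not by this code, so treat the bracketed-section form as an assumption), such a section might look like:

[sim]
log_level=5
log_output=file

where log_level must fit in 0-255 and log_output must be one of "file", "console", "ram" or "none", as accepted by logoutputtoint().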
*/ if (configure_sim(devfname, f) != EX_OK) { rc_close(f); return (EINVAL); } debug("SIM CONFIGURED!\n"); /* Then create controllers' configs */ if (create_ctrls(f, &ctrls, &ctrlsectionscnt) != 0) { rc_close(f); return (ENXIO); } debug("CTRLS CONFIG READ!\n"); /* Then create chips' configs */ if (create_chips(f, &chips, &chipsectionscnt) != 0) { destroy_ctrls(ctrls); rc_close(f); return (ENXIO); } debug("CHIPS CONFIG READ!\n"); if (validate_ctrls(ctrls, ctrlsectionscnt) != 0) { destroy_ctrls(ctrls); destroy_chips(chips); rc_close(f); return (EX_SOFTWARE); } if (validate_chips(chips, chipsectionscnt, ctrls, ctrlsectionscnt) != 0) { destroy_ctrls(ctrls); destroy_chips(chips); rc_close(f); return (EX_SOFTWARE); } /* Open device */ fd = open(devfname, O_RDWR); if (fd == -1) { error("could not open simulator device file (%s)!", devfname); rc_close(f); destroy_chips(chips); destroy_ctrls(ctrls); return (EX_OSFILE); } debug("SIM CONFIG STARTED!\n"); /* At this stage, both ctrls' and chips' configs should be valid */ for (i = 0; i < ctrlsectionscnt; i++) { err = ioctl(fd, NANDSIM_CREATE_CTRL, &ctrls[i]); if (err) { if (err == EEXIST) error("Controller#%d already created\n", ctrls[i].num); else if (err == EINVAL) - error("Incorrect controler number (%d)\n", + error("Incorrect controller number (%d)\n", ctrls[i].num); else error("Could not created controller#%d\n", ctrls[i].num); /* Errors during controller creation stops parsing */ close(fd); rc_close(f); destroy_ctrls(ctrls); destroy_chips(chips); return (ENXIO); } debug("CTRL#%d CONFIG STARTED!\n", i); } for (i = 0; i < chipsectionscnt; i++) { err = ioctl(fd, NANDSIM_CREATE_CHIP, &chips[i]); if (err) { if (err == EEXIST) error("Chip#%d for controller#%d already " "created\n", chips[i].num, chips[i].ctrl_num); else if (err == EINVAL) error("Incorrect chip number (%d:%d)\n", chips[i].num, chips[i].ctrl_num); else error("Could not create chip (%d:%d)\n", chips[i].num, chips[i].ctrl_num); error("Could not start chip#%d\n", i); destroy_chips(chips); destroy_ctrls(ctrls); close(fd); rc_close(f); return (ENXIO); } } debug("CHIPS CONFIG STARTED!\n"); close(fd); rc_close(f); destroy_chips(chips); destroy_ctrls(ctrls); return (0); } /* * Function tries to get appropriate value for given key, convert it to * array of ints (of given size), and perform all the necessary checks and * conversions. */ static int get_argument_intarray(const char *sect_name, int sectno, struct nandsim_key *key, struct rcfile *f) { char strbuf[STRBUFSIZ]; int *intbuf; int getres; uint32_t cnt, i = 0; getres = rc_getstring(f, sect_name, sectno, key->keyname, STRBUFSIZ, (char *)&strbuf); if (getres != 0) { if (key->mandatory != 0) { error(MSG_MANDATORYKEYMISSING, key->keyname, sect_name); return (EINVAL); } else /* Non-mandatory key, not present -- skip */ return (0); } cnt = parse_intarray((char *)&strbuf, &intbuf); cnt = (cnt <= key->maxlength) ? cnt : key->maxlength; for (i = 0; i < cnt; i++) { if (SIZE(key->valuetype) == SIZE_8) *((uint8_t *)(key->field) + i) = (uint8_t)intbuf[i]; else if (SIZE(key->valuetype) == SIZE_16) *((uint16_t *)(key->field) + i) = (uint16_t)intbuf[i]; else *((uint32_t *)(key->field) + i) = (uint32_t)intbuf[i]; } free(intbuf); return (0); } /* * Function tries to get appropriate value for given key, convert it to * int of certain length. 
*/ static int get_argument_int(const char *sect_name, int sectno, struct nandsim_key *key, struct rcfile *f) { int getres; uint32_t val; getres = rc_getint(f, sect_name, sectno, key->keyname, &val); if (getres != 0) { if (key->mandatory != 0) { error(MSG_MANDATORYKEYMISSING, key->keyname, sect_name); return (EINVAL); } else /* Non-mandatory key, not present -- skip */ return (0); } if (SIZE(key->valuetype) == SIZE_8) *(uint8_t *)(key->field) = (uint8_t)val; else if (SIZE(key->valuetype) == SIZE_16) *(uint16_t *)(key->field) = (uint16_t)val; else *(uint32_t *)(key->field) = (uint32_t)val; return (0); } /* Function tries to get string value for given key */ static int get_argument_string(const char *sect_name, int sectno, struct nandsim_key *key, struct rcfile *f) { char strbuf[STRBUFSIZ]; int getres; getres = rc_getstring(f, sect_name, sectno, key->keyname, STRBUFSIZ, strbuf); if (getres != 0) { if (key->mandatory != 0) { error(MSG_MANDATORYKEYMISSING, key->keyname, sect_name); return (1); } else /* Non-mandatory key, not present -- skip */ return (0); } strncpy(key->field, (char *)&strbuf, (size_t)(key->maxlength - 1)); return (0); } /* Function tries to get on/off value for given key */ static int get_argument_bool(const char *sect_name, int sectno, struct nandsim_key *key, struct rcfile *f) { int getres, val; getres = rc_getbool(f, sect_name, sectno, key->keyname, &val); if (getres != 0) { if (key->mandatory != 0) { error(MSG_MANDATORYKEYMISSING, key->keyname, sect_name); return (1); } else /* Non-mandatory key, not present -- skip */ return (0); } *(uint8_t *)key->field = (uint8_t)val; return (0); } int parse_section(struct rcfile *f, const char *sect_name, int sectno) { struct nandsim_key *key; struct nandsim_section *sect = (struct nandsim_section *)§ions; int getres = 0; while (1) { if (sect == NULL) return (EINVAL); if (strcmp(sect->name, sect_name) == 0) break; else sect++; } key = sect->keys; do { debug("->Section: %s, Key: %s, type: %d, size: %d", sect_name, key->keyname, TYPE(key->valuetype), SIZE(key->valuetype)/2); switch (TYPE(key->valuetype)) { case VALUE_UINT: /* Single int value */ getres = get_argument_int(sect_name, sectno, key, f); if (getres != 0) return (getres); break; case VALUE_UINTARRAY: /* Array of ints */ getres = get_argument_intarray(sect_name, sectno, key, f); if (getres != 0) return (getres); break; case VALUE_STRING: /* Array of chars */ getres = get_argument_string(sect_name, sectno, key, f); if (getres != 0) return (getres); break; case VALUE_BOOL: /* Boolean value (true/false/on/off/yes/no) */ getres = get_argument_bool(sect_name, sectno, key, f); if (getres != 0) return (getres); break; } } while ((++key)->keyname != NULL); return (0); } static uint8_t validate_chips(struct sim_chip *chips, int chipcnt, struct sim_ctrl *ctrls, int ctrlcnt) { int cchipcnt, i, width, j, id, max; cchipcnt = chipcnt; for (chipcnt -= 1; chipcnt >= 0; chipcnt--) { if (chips[chipcnt].num >= MAX_CTRL_CS) { error("chip no. too high (%d)!!\n", chips[chipcnt].num); return (EINVAL); } if (chips[chipcnt].ctrl_num >= MAX_SIM_DEV) { error("controller no. 
too high (%d)!!\n", chips[chipcnt].ctrl_num); return (EINVAL); } if (chips[chipcnt].width != 8 && chips[chipcnt].width != 16) { error("invalid width:%d for chip#%d", chips[chipcnt].width, chips[chipcnt].num); return (EINVAL); } /* Check if page size is > 512 and if its power of 2 */ if (chips[chipcnt].page_size < 512 || (chips[chipcnt].page_size & (chips[chipcnt].page_size - 1)) != 0) { error("invalid page size:%d for chip#%d at ctrl#%d!!" "\n", chips[chipcnt].page_size, chips[chipcnt].num, chips[chipcnt].ctrl_num); return (EINVAL); } /* Check if controller no. ctrl_num is configured */ for (i = 0, id = -1; i < ctrlcnt && id == -1; i++) if (ctrls[i].num == chips[chipcnt].ctrl_num) id = i; if (i == ctrlcnt && id == -1) { error("Missing configuration for controller %d" " (at least one chip is connected to it)", chips[chipcnt].ctrl_num); return (EINVAL); } else { /* * Controller is configured -> check oob_size * validity */ i = 0; max = ctrls[id].ecc_layout[0]; while (i < MAX_ECC_BYTES && ctrls[id].ecc_layout[i] != 0xffff) { if (ctrls[id].ecc_layout[i] > max) max = ctrls[id].ecc_layout[i]; i++; } if (chips[chipcnt].oob_size < (unsigned)i) { error("OOB size for chip#%d at ctrl#%d is " "smaller than ecc layout length!", chips[chipcnt].num, chips[chipcnt].ctrl_num); exit(EINVAL); } if (chips[chipcnt].oob_size < (unsigned)max) { error("OOB size for chip#%d at ctrl#%d is " "smaller than maximal ecc position in " "defined layout!", chips[chipcnt].num, chips[chipcnt].ctrl_num); exit(EINVAL); } } if ((chips[chipcnt].erase_time < DELAYTIME_MIN || chips[chipcnt].erase_time > DELAYTIME_MAX) && chips[chipcnt].erase_time != 0) { error("Invalid erase time value for chip#%d at " "ctrl#%d", chips[chipcnt].num, chips[chipcnt].ctrl_num); return (EINVAL); } if ((chips[chipcnt].prog_time < DELAYTIME_MIN || chips[chipcnt].prog_time > DELAYTIME_MAX) && chips[chipcnt].prog_time != 0) { error("Invalid prog time value for chip#%d at " "ctr#%d!", chips[chipcnt].num, chips[chipcnt].ctrl_num); return (EINVAL); } if ((chips[chipcnt].read_time < DELAYTIME_MIN || chips[chipcnt].read_time > DELAYTIME_MAX) && chips[chipcnt].read_time != 0) { error("Invalid read time value for chip#%d at " "ctrl#%d!", chips[chipcnt].num, chips[chipcnt].ctrl_num); return (EINVAL); } } /* Check if chips attached to the same controller, have same width */ for (i = 0; i < ctrlcnt; i++) { width = -1; for (j = 0; j < cchipcnt; j++) { if (chips[j].ctrl_num == i) { if (width == -1) { width = chips[j].width; } else { if (width != chips[j].width) { error("Chips attached to " "ctrl#%d have different " "widths!\n", i); return (EINVAL); } } } } } return (0); } static uint8_t validate_ctrls(struct sim_ctrl *ctrl, int ctrlcnt) { for (ctrlcnt -= 1; ctrlcnt >= 0; ctrlcnt--) { if (ctrl[ctrlcnt].num > MAX_SIM_DEV) { error("Controller no. 
too high (%d)!!\n", ctrl[ctrlcnt].num); return (EINVAL); } if (ctrl[ctrlcnt].num_cs > MAX_CTRL_CS) { error("Too many CS (%d)!!\n", ctrl[ctrlcnt].num_cs); return (EINVAL); } if (ctrl[ctrlcnt].ecc != 0 && ctrl[ctrlcnt].ecc != 1) { error("ECC is set to neither 0 nor 1 !\n"); return (EINVAL); } } return (0); } static int validate_section_config(struct rcfile *f, const char *sect_name, int sectno) { struct nandsim_key *key; struct nandsim_section *sect; char **keys_tbl; int i, match; for (match = 0, sect = (struct nandsim_section *)§ions; sect != NULL; sect++) { if (strcmp(sect->name, sect_name) == 0) { match = 1; break; } } if (match == 0) return (EINVAL); keys_tbl = rc_getkeys(f, sect_name, sectno); if (keys_tbl == NULL) return (ENOMEM); for (i = 0; keys_tbl[i] != NULL; i++) { key = sect->keys; match = 0; do { if (strcmp(keys_tbl[i], key->keyname) == 0) { match = 1; break; } } while ((++key)->keyname != NULL); if (match == 0) { error("Invalid key in config file: %s\n", keys_tbl[i]); free(keys_tbl); return (EINVAL); } } free(keys_tbl); return (0); } Index: head/usr.sbin/rtadvd/rtadvd.c =================================================================== --- head/usr.sbin/rtadvd/rtadvd.c (revision 298885) +++ head/usr.sbin/rtadvd/rtadvd.c (revision 298886) @@ -1,1914 +1,1914 @@ /* $FreeBSD$ */ /* $KAME: rtadvd.c,v 1.82 2003/08/05 12:34:23 itojun Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * Copyright (C) 2011 Hiroki Sato * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pathnames.h" #include "rtadvd.h" #include "if.h" #include "rrenum.h" #include "advcap.h" #include "timer_subr.h" #include "timer.h" #include "config.h" #include "control.h" #include "control_server.h" #define RTADV_TYPE2BITMASK(type) (0x1 << type) struct msghdr rcvmhdr; static char *rcvcmsgbuf; static size_t rcvcmsgbuflen; static char *sndcmsgbuf = NULL; static size_t sndcmsgbuflen; struct msghdr sndmhdr; struct iovec rcviov[2]; struct iovec sndiov[2]; struct sockaddr_in6 rcvfrom; static const char *pidfilename = _PATH_RTADVDPID; const char *conffile = _PATH_RTADVDCONF; static struct pidfh *pfh; static int dflag, sflag; static int wait_shutdown; #define PFD_RAWSOCK 0 #define PFD_RTSOCK 1 #define PFD_CSOCK 2 #define PFD_MAX 3 struct railist_head_t railist = TAILQ_HEAD_INITIALIZER(railist); struct ifilist_head_t ifilist = TAILQ_HEAD_INITIALIZER(ifilist); struct nd_optlist { TAILQ_ENTRY(nd_optlist) nol_next; struct nd_opt_hdr *nol_opt; }; union nd_opt { struct nd_opt_hdr *opt_array[9]; struct { struct nd_opt_hdr *zero; struct nd_opt_hdr *src_lladdr; struct nd_opt_hdr *tgt_lladdr; struct nd_opt_prefix_info *pi; struct nd_opt_rd_hdr *rh; struct nd_opt_mtu *mtu; TAILQ_HEAD(, nd_optlist) opt_list; } nd_opt_each; }; #define opt_src_lladdr nd_opt_each.src_lladdr #define opt_tgt_lladdr nd_opt_each.tgt_lladdr #define opt_pi nd_opt_each.pi #define opt_rh nd_opt_each.rh #define opt_mtu nd_opt_each.mtu #define opt_list nd_opt_each.opt_list #define NDOPT_FLAG_SRCLINKADDR (1 << 0) #define NDOPT_FLAG_TGTLINKADDR (1 << 1) #define NDOPT_FLAG_PREFIXINFO (1 << 2) #define NDOPT_FLAG_RDHDR (1 << 3) #define NDOPT_FLAG_MTU (1 << 4) #define NDOPT_FLAG_RDNSS (1 << 5) #define NDOPT_FLAG_DNSSL (1 << 6) static uint32_t ndopt_flags[] = { [ND_OPT_SOURCE_LINKADDR] = NDOPT_FLAG_SRCLINKADDR, [ND_OPT_TARGET_LINKADDR] = NDOPT_FLAG_TGTLINKADDR, [ND_OPT_PREFIX_INFORMATION] = NDOPT_FLAG_PREFIXINFO, [ND_OPT_REDIRECTED_HEADER] = NDOPT_FLAG_RDHDR, [ND_OPT_MTU] = NDOPT_FLAG_MTU, [ND_OPT_RDNSS] = NDOPT_FLAG_RDNSS, [ND_OPT_DNSSL] = NDOPT_FLAG_DNSSL, }; static void rtadvd_shutdown(void); static void sock_open(struct sockinfo *); static void rtsock_open(struct sockinfo *); static void rtadvd_input(struct sockinfo *); static void rs_input(int, struct nd_router_solicit *, struct in6_pktinfo *, struct sockaddr_in6 *); static void ra_input(int, struct nd_router_advert *, struct in6_pktinfo *, struct sockaddr_in6 *); static int prefix_check(struct nd_opt_prefix_info *, struct rainfo *, struct sockaddr_in6 *); static int nd6_options(struct nd_opt_hdr *, int, union nd_opt *, uint32_t); static void free_ndopts(union nd_opt *); static void rtmsg_input(struct sockinfo *); static void set_short_delay(struct ifinfo *); static int check_accept_rtadv(int); static void usage(void) { fprintf(stderr, "usage: rtadvd [-dDfRs] " "[-c configfile] [-C ctlsock] [-M ifname] [-p pidfile]\n"); exit(1); } int main(int argc, char *argv[]) { struct pollfd set[PFD_MAX]; struct timespec *timeout; int i, ch; int fflag = 0, logopt; int error; pid_t pid, otherpid; /* get command line options and arguments */ while ((ch = getopt(argc, argv, "c:C:dDfhM:p:Rs")) != -1) { switch (ch) { case 'c': conffile = optarg; break; case 'C': ctrlsock.si_name = optarg; break; case 'd': 
dflag++; break; case 'D': dflag += 3; break; case 'f': fflag = 1; break; case 'M': mcastif = optarg; break; case 'R': fprintf(stderr, "rtadvd: " "the -R option is currently ignored.\n"); /* accept_rr = 1; */ /* run anyway... */ break; case 's': sflag = 1; break; case 'p': pidfilename = optarg; break; default: usage(); } } argc -= optind; argv += optind; logopt = LOG_NDELAY | LOG_PID; if (fflag) logopt |= LOG_PERROR; openlog("rtadvd", logopt, LOG_DAEMON); /* set log level */ if (dflag > 2) (void)setlogmask(LOG_UPTO(LOG_DEBUG)); else if (dflag > 1) (void)setlogmask(LOG_UPTO(LOG_INFO)); else if (dflag > 0) (void)setlogmask(LOG_UPTO(LOG_NOTICE)); else (void)setlogmask(LOG_UPTO(LOG_ERR)); /* timer initialization */ rtadvd_timer_init(); pfh = pidfile_open(pidfilename, 0600, &otherpid); if (pfh == NULL) { if (errno == EEXIST) errx(1, "%s already running, pid: %d", getprogname(), otherpid); syslog(LOG_ERR, "failed to open the pid file %s, run anyway.", pidfilename); } if (!fflag) daemon(1, 0); sock_open(&sock); update_ifinfo(&ifilist, UPDATE_IFINFO_ALL); for (i = 0; i < argc; i++) update_persist_ifinfo(&ifilist, argv[i]); csock_open(&ctrlsock, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH); if (ctrlsock.si_fd == -1) { syslog(LOG_ERR, "cannot open control socket: %s", strerror(errno)); exit(1); } /* record the current PID */ pid = getpid(); pidfile_write(pfh); set[PFD_RAWSOCK].fd = sock.si_fd; set[PFD_RAWSOCK].events = POLLIN; if (sflag == 0) { rtsock_open(&rtsock); set[PFD_RTSOCK].fd = rtsock.si_fd; set[PFD_RTSOCK].events = POLLIN; } else set[PFD_RTSOCK].fd = -1; set[PFD_CSOCK].fd = ctrlsock.si_fd; set[PFD_CSOCK].events = POLLIN; signal(SIGTERM, set_do_shutdown); signal(SIGINT, set_do_shutdown); signal(SIGHUP, set_do_reload); error = csock_listen(&ctrlsock); if (error) { syslog(LOG_ERR, "cannot listen control socket: %s", strerror(errno)); exit(1); } /* load configuration file */ set_do_reload(0); while (1) { if (is_do_shutdown()) rtadvd_shutdown(); if (is_do_reload()) { loadconfig_ifname(reload_ifname()); if (reload_ifname() == NULL) syslog(LOG_INFO, "configuration file reloaded."); else syslog(LOG_INFO, "configuration file for %s reloaded.", reload_ifname()); reset_do_reload(); } /* timeout handler update for active interfaces */ rtadvd_update_timeout_handler(); /* timer expiration check and reset the timer */ timeout = rtadvd_check_timer(); if (timeout != NULL) { syslog(LOG_DEBUG, "<%s> set timer to %ld:%ld. waiting for " "inputs or timeout", __func__, (long int)timeout->tv_sec, (long int)timeout->tv_nsec / 1000); } else { syslog(LOG_DEBUG, "<%s> there's no timer. waiting for inputs", __func__); } if ((i = poll(set, sizeof(set)/sizeof(set[0]), timeout ? 
(timeout->tv_sec * 1000 + timeout->tv_nsec / 1000 / 1000) : INFTIM)) < 0) { /* EINTR would occur if a signal was delivered */ if (errno != EINTR) syslog(LOG_ERR, "poll() failed: %s", strerror(errno)); continue; } if (i == 0) /* timeout */ continue; if (rtsock.si_fd != -1 && set[PFD_RTSOCK].revents & POLLIN) rtmsg_input(&rtsock); if (set[PFD_RAWSOCK].revents & POLLIN) rtadvd_input(&sock); if (set[PFD_CSOCK].revents & POLLIN) { int fd; fd = csock_accept(&ctrlsock); if (fd == -1) syslog(LOG_ERR, "cannot accept() control socket: %s", strerror(errno)); else { cm_handler_server(fd); close(fd); } } } exit(0); /* NOTREACHED */ } static void rtadvd_shutdown(void) { struct ifinfo *ifi; struct rainfo *rai; struct rdnss *rdn; struct dnssl *dns; if (wait_shutdown) { syslog(LOG_INFO, "waiting expiration of the all RA timers."); TAILQ_FOREACH(ifi, &ifilist, ifi_next) { /* * Ignore !IFF_UP interfaces in waiting for shutdown. */ if (!(ifi->ifi_flags & IFF_UP) && ifi->ifi_ra_timer != NULL) { ifi->ifi_state = IFI_STATE_UNCONFIGURED; rtadvd_remove_timer(ifi->ifi_ra_timer); ifi->ifi_ra_timer = NULL; syslog(LOG_DEBUG, "<%s> %s(idx=%d) is down. " "Timer removed and marked as UNCONFIGURED.", __func__, ifi->ifi_ifname, ifi->ifi_ifindex); } } TAILQ_FOREACH(ifi, &ifilist, ifi_next) { if (ifi->ifi_ra_timer != NULL) break; } if (ifi == NULL) { syslog(LOG_NOTICE, "gracefully terminated."); exit(0); } sleep(1); return; } syslog(LOG_DEBUG, "<%s> cease to be an advertising router", __func__); wait_shutdown = 1; TAILQ_FOREACH(rai, &railist, rai_next) { rai->rai_lifetime = 0; TAILQ_FOREACH(rdn, &rai->rai_rdnss, rd_next) rdn->rd_ltime = 0; TAILQ_FOREACH(dns, &rai->rai_dnssl, dn_next) dns->dn_ltime = 0; } TAILQ_FOREACH(ifi, &ifilist, ifi_next) { if (!ifi->ifi_persist) continue; if (ifi->ifi_state == IFI_STATE_UNCONFIGURED) continue; if (ifi->ifi_ra_timer == NULL) continue; if (ifi->ifi_ra_lastsent.tv_sec == 0 && ifi->ifi_ra_lastsent.tv_nsec == 0 && ifi->ifi_ra_timer != NULL) { /* * When RA configured but never sent, * ignore the IF immediately. */ rtadvd_remove_timer(ifi->ifi_ra_timer); ifi->ifi_ra_timer = NULL; ifi->ifi_state = IFI_STATE_UNCONFIGURED; continue; } ifi->ifi_state = IFI_STATE_TRANSITIVE; /* Mark as the shut-down state. */ ifi->ifi_rainfo_trans = ifi->ifi_rainfo; ifi->ifi_rainfo = NULL; ifi->ifi_burstcount = MAX_FINAL_RTR_ADVERTISEMENTS; ifi->ifi_burstinterval = MIN_DELAY_BETWEEN_RAS; ra_timer_update(ifi, &ifi->ifi_ra_timer->rat_tm); rtadvd_set_timer(&ifi->ifi_ra_timer->rat_tm, ifi->ifi_ra_timer); } syslog(LOG_NOTICE, "final RA transmission started."); pidfile_remove(pfh); csock_close(&ctrlsock); } static void rtmsg_input(struct sockinfo *s) { int n, type, ifindex = 0, plen; size_t len; char msg[2048], *next, *lim; char ifname[IFNAMSIZ]; struct if_announcemsghdr *ifan; struct rt_msghdr *rtm; struct prefix *pfx; struct rainfo *rai; struct in6_addr *addr; struct ifinfo *ifi; char addrbuf[INET6_ADDRSTRLEN]; int prefixchange = 0; if (s == NULL) { syslog(LOG_ERR, "<%s> internal error", __func__); exit(1); } n = read(s->si_fd, msg, sizeof(msg)); rtm = (struct rt_msghdr *)msg; syslog(LOG_DEBUG, "<%s> received a routing message " "(type = %d, len = %d)", __func__, rtm->rtm_type, n); if (n > rtm->rtm_msglen) { /* * This usually won't happen for messages received on * a routing socket. */ syslog(LOG_DEBUG, "<%s> received data length is larger than " "1st routing message len. multiple messages? 
" "read %d bytes, but 1st msg len = %d", __func__, n, rtm->rtm_msglen); #if 0 /* adjust length */ n = rtm->rtm_msglen; #endif } lim = msg + n; for (next = msg; next < lim; next += len) { int oldifflags; next = get_next_msg(next, lim, 0, &len, RTADV_TYPE2BITMASK(RTM_ADD) | RTADV_TYPE2BITMASK(RTM_DELETE) | RTADV_TYPE2BITMASK(RTM_NEWADDR) | RTADV_TYPE2BITMASK(RTM_DELADDR) | RTADV_TYPE2BITMASK(RTM_IFINFO) | RTADV_TYPE2BITMASK(RTM_IFANNOUNCE)); if (len == 0) break; type = ((struct rt_msghdr *)next)->rtm_type; switch (type) { case RTM_ADD: case RTM_DELETE: ifindex = get_rtm_ifindex(next); break; case RTM_NEWADDR: case RTM_DELADDR: ifindex = (int)((struct ifa_msghdr *)next)->ifam_index; break; case RTM_IFINFO: ifindex = (int)((struct if_msghdr *)next)->ifm_index; break; case RTM_IFANNOUNCE: ifan = (struct if_announcemsghdr *)next; switch (ifan->ifan_what) { case IFAN_ARRIVAL: case IFAN_DEPARTURE: break; default: syslog(LOG_DEBUG, "<%s:%d> unknown ifan msg (ifan_what=%d)", __func__, __LINE__, ifan->ifan_what); continue; } syslog(LOG_DEBUG, "<%s>: if_announcemsg (idx=%d:%d)", __func__, ifan->ifan_index, ifan->ifan_what); switch (ifan->ifan_what) { case IFAN_ARRIVAL: syslog(LOG_NOTICE, "interface added (idx=%d)", ifan->ifan_index); update_ifinfo(&ifilist, ifan->ifan_index); loadconfig_index(ifan->ifan_index); break; case IFAN_DEPARTURE: syslog(LOG_NOTICE, "interface removed (idx=%d)", ifan->ifan_index); rm_ifinfo_index(ifan->ifan_index); /* Clear ifi_ifindex */ TAILQ_FOREACH(ifi, &ifilist, ifi_next) { if (ifi->ifi_ifindex == ifan->ifan_index) { ifi->ifi_ifindex = 0; break; } } update_ifinfo(&ifilist, ifan->ifan_index); break; } continue; default: /* should not reach here */ syslog(LOG_DEBUG, "<%s:%d> unknown rtmsg %d on %s", __func__, __LINE__, type, if_indextoname(ifindex, ifname)); continue; } ifi = if_indextoifinfo(ifindex); if (ifi == NULL) { syslog(LOG_DEBUG, "<%s> ifinfo not found for idx=%d. Why?", __func__, ifindex); continue; } rai = ifi->ifi_rainfo; if (rai == NULL) { syslog(LOG_DEBUG, "<%s> route changed on " "non advertising interface(%s)", __func__, ifi->ifi_ifname); continue; } oldifflags = ifi->ifi_flags; /* init ifflags because it may have changed */ update_ifinfo(&ifilist, ifindex); switch (type) { case RTM_ADD: if (sflag) break; /* we aren't interested in prefixes */ addr = get_addr(msg); plen = get_prefixlen(msg); /* sanity check for plen */ /* as RFC2373, prefixlen is at least 4 */ if (plen < 4 || plen > 127) { syslog(LOG_INFO, "<%s> new interface route's" "plen %d is invalid for a prefix", __func__, plen); break; } pfx = find_prefix(rai, addr, plen); if (pfx) { if (pfx->pfx_timer) { /* * If the prefix has been invalidated, * make it available again. 
*/ update_prefix(pfx); prefixchange = 1; } else syslog(LOG_DEBUG, "<%s> new prefix(%s/%d) " "added on %s, " "but it was already in list", __func__, inet_ntop(AF_INET6, addr, (char *)addrbuf, sizeof(addrbuf)), plen, ifi->ifi_ifname); break; } make_prefix(rai, ifindex, addr, plen); prefixchange = 1; break; case RTM_DELETE: if (sflag) break; addr = get_addr(msg); plen = get_prefixlen(msg); /* sanity check for plen */ /* as RFC2373, prefixlen is at least 4 */ if (plen < 4 || plen > 127) { syslog(LOG_INFO, "<%s> deleted interface route's " "plen %d is invalid for a prefix", __func__, plen); break; } pfx = find_prefix(rai, addr, plen); if (pfx == NULL) { syslog(LOG_DEBUG, "<%s> prefix(%s/%d) was deleted on %s, " "but it was not in list", __func__, inet_ntop(AF_INET6, addr, (char *)addrbuf, sizeof(addrbuf)), plen, ifi->ifi_ifname); break; } invalidate_prefix(pfx); prefixchange = 1; break; case RTM_NEWADDR: case RTM_DELADDR: case RTM_IFINFO: break; default: /* should not reach here */ syslog(LOG_DEBUG, "<%s:%d> unknown rtmsg %d on %s", __func__, __LINE__, type, if_indextoname(ifindex, ifname)); return; } /* check if an interface flag is changed */ if ((oldifflags & IFF_UP) && /* UP to DOWN */ !(ifi->ifi_flags & IFF_UP)) { syslog(LOG_NOTICE, "interface %s becomes down. stop timer.", ifi->ifi_ifname); rtadvd_remove_timer(ifi->ifi_ra_timer); ifi->ifi_ra_timer = NULL; } else if (!(oldifflags & IFF_UP) && /* DOWN to UP */ (ifi->ifi_flags & IFF_UP)) { syslog(LOG_NOTICE, "interface %s becomes up. restart timer.", ifi->ifi_ifname); ifi->ifi_state = IFI_STATE_TRANSITIVE; ifi->ifi_burstcount = MAX_INITIAL_RTR_ADVERTISEMENTS; ifi->ifi_burstinterval = MAX_INITIAL_RTR_ADVERT_INTERVAL; ifi->ifi_ra_timer = rtadvd_add_timer(ra_timeout, ra_timer_update, ifi, ifi); ra_timer_update(ifi, &ifi->ifi_ra_timer->rat_tm); rtadvd_set_timer(&ifi->ifi_ra_timer->rat_tm, ifi->ifi_ra_timer); } else if (prefixchange && (ifi->ifi_flags & IFF_UP)) { /* * An advertised prefix has been added or invalidated. * Will notice the change in a short delay. */ set_short_delay(ifi); } } return; } void rtadvd_input(struct sockinfo *s) { ssize_t i; int *hlimp = NULL; #ifdef OLDRAWSOCKET struct ip6_hdr *ip; #endif struct icmp6_hdr *icp; int ifindex = 0; struct cmsghdr *cm; struct in6_pktinfo *pi = NULL; char ntopbuf[INET6_ADDRSTRLEN], ifnamebuf[IFNAMSIZ]; struct in6_addr dst = in6addr_any; struct ifinfo *ifi; syslog(LOG_DEBUG, "<%s> enter", __func__); if (s == NULL) { syslog(LOG_ERR, "<%s> internal error", __func__); exit(1); } /* * Get message. We reset msg_controllen since the field could * be modified if we had received a message before setting * receive options. */ rcvmhdr.msg_controllen = rcvcmsgbuflen; if ((i = recvmsg(s->si_fd, &rcvmhdr, 0)) < 0) return; /* extract optional information via Advanced API */ for (cm = (struct cmsghdr *)CMSG_FIRSTHDR(&rcvmhdr); cm; cm = (struct cmsghdr *)CMSG_NXTHDR(&rcvmhdr, cm)) { if (cm->cmsg_level == IPPROTO_IPV6 && cm->cmsg_type == IPV6_PKTINFO && cm->cmsg_len == CMSG_LEN(sizeof(struct in6_pktinfo))) { pi = (struct in6_pktinfo *)(CMSG_DATA(cm)); ifindex = pi->ipi6_ifindex; dst = pi->ipi6_addr; } if (cm->cmsg_level == IPPROTO_IPV6 && cm->cmsg_type == IPV6_HOPLIMIT && cm->cmsg_len == CMSG_LEN(sizeof(int))) hlimp = (int *)CMSG_DATA(cm); } if (ifindex == 0) { syslog(LOG_ERR, "failed to get receiving interface"); return; } if (hlimp == NULL) { syslog(LOG_ERR, "failed to get receiving hop limit"); return; } /* * If we happen to receive data on an interface which is now gone * or down, just discard the data. 
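 * (pi is guaranteed to be non-NULL here: without an IPV6_PKTINFO
 * control message, ifindex would still be 0 and we would have
 * returned above.)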
*/ ifi = if_indextoifinfo(pi->ipi6_ifindex); if (ifi == NULL || !(ifi->ifi_flags & IFF_UP)) { syslog(LOG_INFO, "<%s> received data on a disabled interface (%s)", __func__, (ifi == NULL) ? "[gone]" : ifi->ifi_ifname); return; } #ifdef OLDRAWSOCKET if ((size_t)i < sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr)) { syslog(LOG_ERR, "packet size(%d) is too short", i); return; } ip = (struct ip6_hdr *)rcvmhdr.msg_iov[0].iov_base; icp = (struct icmp6_hdr *)(ip + 1); /* XXX: ext. hdr? */ #else if ((size_t)i < sizeof(struct icmp6_hdr)) { syslog(LOG_ERR, "packet size(%zd) is too short", i); return; } icp = (struct icmp6_hdr *)rcvmhdr.msg_iov[0].iov_base; #endif switch (icp->icmp6_type) { case ND_ROUTER_SOLICIT: /* * Message verification - RFC 4861 6.1.1 * XXX: these checks must be done in the kernel as well, * but we can't completely rely on them. */ if (*hlimp != 255) { syslog(LOG_NOTICE, "RS with invalid hop limit(%d) " "received from %s on %s", *hlimp, inet_ntop(AF_INET6, &rcvfrom.sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); return; } if (icp->icmp6_code) { syslog(LOG_NOTICE, "RS with invalid ICMP6 code(%d) " "received from %s on %s", icp->icmp6_code, inet_ntop(AF_INET6, &rcvfrom.sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); return; } if ((size_t)i < sizeof(struct nd_router_solicit)) { syslog(LOG_NOTICE, "RS from %s on %s does not have enough " "length (len = %zd)", inet_ntop(AF_INET6, &rcvfrom.sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf), i); return; } rs_input(i, (struct nd_router_solicit *)icp, pi, &rcvfrom); break; case ND_ROUTER_ADVERT: /* * Message verification - RFC 4861 6.1.2 * XXX: there's the same dilemma as above... */ if (!IN6_IS_ADDR_LINKLOCAL(&rcvfrom.sin6_addr)) { syslog(LOG_NOTICE, "RA with non-linklocal source address " "received from %s on %s", inet_ntop(AF_INET6, &rcvfrom.sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); return; } if (*hlimp != 255) { syslog(LOG_NOTICE, "RA with invalid hop limit(%d) " "received from %s on %s", *hlimp, inet_ntop(AF_INET6, &rcvfrom.sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); return; } if (icp->icmp6_code) { syslog(LOG_NOTICE, "RA with invalid ICMP6 code(%d) " "received from %s on %s", icp->icmp6_code, inet_ntop(AF_INET6, &rcvfrom.sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); return; } if ((size_t)i < sizeof(struct nd_router_advert)) { syslog(LOG_NOTICE, "RA from %s on %s does not have enough " "length (len = %zd)", inet_ntop(AF_INET6, &rcvfrom.sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf), i); return; } ra_input(i, (struct nd_router_advert *)icp, pi, &rcvfrom); break; case ICMP6_ROUTER_RENUMBERING: if (mcastif == NULL) { syslog(LOG_ERR, "received a router renumbering " "message, but not allowed to be accepted"); break; } rr_input(i, (struct icmp6_router_renum *)icp, pi, &rcvfrom, &dst); break; default: /* * Note that this case is POSSIBLE, especially just * after invocation of the daemon. This is because we * could receive message after opening the socket and * before setting ICMP6 type filter(see sock_open()). 
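 * Once sock_open() installs the filter, only RS and RA messages
 * (plus router renumbering messages when -M is given) are passed
 * up to us.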
*/ syslog(LOG_ERR, "invalid icmp type(%d)", icp->icmp6_type); return; } return; } static void rs_input(int len, struct nd_router_solicit *rs, struct in6_pktinfo *pi, struct sockaddr_in6 *from) { char ntopbuf[INET6_ADDRSTRLEN]; char ifnamebuf[IFNAMSIZ]; union nd_opt ndopts; struct rainfo *rai; struct ifinfo *ifi; struct soliciter *sol; syslog(LOG_DEBUG, "<%s> RS received from %s on %s", __func__, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); /* ND option check */ memset(&ndopts, 0, sizeof(ndopts)); TAILQ_INIT(&ndopts.opt_list); if (nd6_options((struct nd_opt_hdr *)(rs + 1), len - sizeof(struct nd_router_solicit), &ndopts, NDOPT_FLAG_SRCLINKADDR)) { syslog(LOG_INFO, "<%s> ND option check failed for an RS from %s on %s", __func__, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); return; } /* * If the IP source address is the unspecified address, there * must be no source link-layer address option in the message. * (RFC 4861 6.1.1) */ if (IN6_IS_ADDR_UNSPECIFIED(&from->sin6_addr) && ndopts.opt_src_lladdr) { syslog(LOG_INFO, "<%s> RS from unspecified src on %s has a link-layer" " address option", __func__, if_indextoname(pi->ipi6_ifindex, ifnamebuf)); goto done; } ifi = if_indextoifinfo(pi->ipi6_ifindex); if (ifi == NULL) { syslog(LOG_INFO, "<%s> if (idx=%d) not found. Why?", __func__, pi->ipi6_ifindex); goto done; } rai = ifi->ifi_rainfo; if (rai == NULL) { syslog(LOG_INFO, "<%s> RS received on non advertising interface(%s)", __func__, if_indextoname(pi->ipi6_ifindex, ifnamebuf)); goto done; } rai->rai_ifinfo->ifi_rsinput++; /* * Decide whether to send RA according to the rate-limit * consideration. */ /* record sockaddr waiting for RA, if possible */ sol = (struct soliciter *)malloc(sizeof(*sol)); if (sol) { sol->sol_addr = *from; /* XXX RFC 2553 need clarification on flowinfo */ sol->sol_addr.sin6_flowinfo = 0; TAILQ_INSERT_TAIL(&rai->rai_soliciter, sol, sol_next); } /* * If there is already a waiting RS packet, don't * update the timer. */ if (ifi->ifi_rs_waitcount++) goto done; set_short_delay(ifi); done: free_ndopts(&ndopts); return; } static void set_short_delay(struct ifinfo *ifi) { long delay; /* must not be greater than 1000000 */ struct timespec interval, now, min_delay, tm_tmp, *rest; if (ifi->ifi_ra_timer == NULL) return; /* * Compute a random delay. If the computed value * corresponds to a time later than the time the next * multicast RA is scheduled to be sent, ignore the random * delay and send the advertisement at the * already-scheduled time. RFC 4861 6.2.6 */ delay = arc4random_uniform(MAX_RA_DELAY_TIME); interval.tv_sec = 0; interval.tv_nsec = delay * 1000; rest = rtadvd_timer_rest(ifi->ifi_ra_timer); if (TS_CMP(rest, &interval, <)) { syslog(LOG_DEBUG, "<%s> random delay is larger than " "the rest of the current timer", __func__); interval = *rest; } /* * If we sent a multicast Router Advertisement within * the last MIN_DELAY_BETWEEN_RAS seconds, schedule * the advertisement to be sent at a time corresponding to * MIN_DELAY_BETWEEN_RAS plus the random value after the * previous advertisement was sent. 
*/ clock_gettime(CLOCK_MONOTONIC_FAST, &now); TS_SUB(&now, &ifi->ifi_ra_lastsent, &tm_tmp); min_delay.tv_sec = MIN_DELAY_BETWEEN_RAS; min_delay.tv_nsec = 0; if (TS_CMP(&tm_tmp, &min_delay, <)) { TS_SUB(&min_delay, &tm_tmp, &min_delay); TS_ADD(&min_delay, &interval, &interval); } rtadvd_set_timer(&interval, ifi->ifi_ra_timer); } static int check_accept_rtadv(int idx) { struct ifinfo *ifi; TAILQ_FOREACH(ifi, &ifilist, ifi_next) { if (ifi->ifi_ifindex == idx) break; } if (ifi == NULL) { syslog(LOG_DEBUG, "<%s> if (idx=%d) not found. Why?", __func__, idx); return (0); } #if (__FreeBSD_version < 900000) /* * RA_RECV: !ip6.forwarding && ip6.accept_rtadv * RA_SEND: ip6.forwarding */ return ((getinet6sysctl(IPV6CTL_FORWARDING) == 0) && (getinet6sysctl(IPV6CTL_ACCEPT_RTADV) == 1)); #else /* * RA_RECV: ND6_IFF_ACCEPT_RTADV * RA_SEND: ip6.forwarding */ if (update_ifinfo_nd_flags(ifi) != 0) { syslog(LOG_ERR, "cannot get nd6 flags (idx=%d)", idx); return (0); } return (ifi->ifi_nd_flags & ND6_IFF_ACCEPT_RTADV); #endif } static void ra_input(int len, struct nd_router_advert *nra, struct in6_pktinfo *pi, struct sockaddr_in6 *from) { struct rainfo *rai; struct ifinfo *ifi; char ntopbuf[INET6_ADDRSTRLEN]; char ifnamebuf[IFNAMSIZ]; union nd_opt ndopts; const char *on_off[] = {"OFF", "ON"}; uint32_t reachabletime, retranstimer, mtu; int inconsistent = 0; int error; syslog(LOG_DEBUG, "<%s> RA received from %s on %s", __func__, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); /* ND option check */ memset(&ndopts, 0, sizeof(ndopts)); TAILQ_INIT(&ndopts.opt_list); error = nd6_options((struct nd_opt_hdr *)(nra + 1), len - sizeof(struct nd_router_advert), &ndopts, NDOPT_FLAG_SRCLINKADDR | NDOPT_FLAG_PREFIXINFO | NDOPT_FLAG_MTU | NDOPT_FLAG_RDNSS | NDOPT_FLAG_DNSSL); if (error) { syslog(LOG_INFO, "<%s> ND option check failed for an RA from %s on %s", __func__, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); return; } /* * RA consistency check according to RFC 4861 6.2.7 */ ifi = if_indextoifinfo(pi->ipi6_ifindex); if (ifi->ifi_rainfo == NULL) { syslog(LOG_INFO, "<%s> received RA from %s on non-advertising" " interface(%s)", __func__, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), if_indextoname(pi->ipi6_ifindex, ifnamebuf)); goto done; } rai = ifi->ifi_rainfo; ifi->ifi_rainput++; syslog(LOG_DEBUG, "<%s> ifi->ifi_rainput = %" PRIu64, __func__, ifi->ifi_rainput); /* Cur Hop Limit value */ if (nra->nd_ra_curhoplimit && rai->rai_hoplimit && nra->nd_ra_curhoplimit != rai->rai_hoplimit) { syslog(LOG_NOTICE, "CurHopLimit inconsistent on %s:" " %d from %s, %d from us", ifi->ifi_ifname, nra->nd_ra_curhoplimit, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), rai->rai_hoplimit); inconsistent++; } /* M flag */ if ((nra->nd_ra_flags_reserved & ND_RA_FLAG_MANAGED) != rai->rai_managedflg) { syslog(LOG_NOTICE, "M flag inconsistent on %s:" " %s from %s, %s from us", ifi->ifi_ifname, on_off[!rai->rai_managedflg], inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), on_off[rai->rai_managedflg]); inconsistent++; } /* O flag */ if ((nra->nd_ra_flags_reserved & ND_RA_FLAG_OTHER) != rai->rai_otherflg) { syslog(LOG_NOTICE, "O flag inconsistent on %s:" " %s from %s, %s from us", ifi->ifi_ifname, on_off[!rai->rai_otherflg], inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), on_off[rai->rai_otherflg]); inconsistent++; } /* Reachable Time */ reachabletime = 
ntohl(nra->nd_ra_reachable); if (reachabletime && rai->rai_reachabletime && reachabletime != rai->rai_reachabletime) { syslog(LOG_NOTICE, "ReachableTime inconsistent on %s:" " %d from %s, %d from us", ifi->ifi_ifname, reachabletime, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), rai->rai_reachabletime); inconsistent++; } /* Retrans Timer */ retranstimer = ntohl(nra->nd_ra_retransmit); if (retranstimer && rai->rai_retranstimer && retranstimer != rai->rai_retranstimer) { syslog(LOG_NOTICE, "RetranceTimer inconsistent on %s:" " %d from %s, %d from us", ifi->ifi_ifname, retranstimer, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), rai->rai_retranstimer); inconsistent++; } /* Values in the MTU options */ if (ndopts.opt_mtu) { mtu = ntohl(ndopts.opt_mtu->nd_opt_mtu_mtu); if (mtu && rai->rai_linkmtu && mtu != rai->rai_linkmtu) { syslog(LOG_NOTICE, "MTU option value inconsistent on %s:" " %d from %s, %d from us", ifi->ifi_ifname, mtu, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), rai->rai_linkmtu); inconsistent++; } } /* Preferred and Valid Lifetimes for prefixes */ { struct nd_optlist *nol; if (ndopts.opt_pi) if (prefix_check(ndopts.opt_pi, rai, from)) inconsistent++; TAILQ_FOREACH(nol, &ndopts.opt_list, nol_next) if (prefix_check((struct nd_opt_prefix_info *)nol->nol_opt, rai, from)) inconsistent++; } if (inconsistent) ifi->ifi_rainconsistent++; done: free_ndopts(&ndopts); return; } static uint32_t udiff(uint32_t u, uint32_t v) { return (u >= v ? u - v : v - u); } -/* return a non-zero value if the received prefix is inconsitent with ours */ +/* return a non-zero value if the received prefix is inconsistent with ours */ static int prefix_check(struct nd_opt_prefix_info *pinfo, struct rainfo *rai, struct sockaddr_in6 *from) { struct ifinfo *ifi; uint32_t preferred_time, valid_time; struct prefix *pfx; int inconsistent = 0; char ntopbuf[INET6_ADDRSTRLEN]; char prefixbuf[INET6_ADDRSTRLEN]; struct timespec now; #if 0 /* impossible */ if (pinfo->nd_opt_pi_type != ND_OPT_PREFIX_INFORMATION) return (0); #endif ifi = rai->rai_ifinfo; /* * log if the adveritsed prefix has link-local scope(sanity check?) */ if (IN6_IS_ADDR_LINKLOCAL(&pinfo->nd_opt_pi_prefix)) syslog(LOG_INFO, "<%s> link-local prefix %s/%d is advertised " "from %s on %s", __func__, inet_ntop(AF_INET6, &pinfo->nd_opt_pi_prefix, prefixbuf, sizeof(prefixbuf)), pinfo->nd_opt_pi_prefix_len, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), ifi->ifi_ifname); if ((pfx = find_prefix(rai, &pinfo->nd_opt_pi_prefix, pinfo->nd_opt_pi_prefix_len)) == NULL) { syslog(LOG_INFO, "<%s> prefix %s/%d from %s on %s is not in our list", __func__, inet_ntop(AF_INET6, &pinfo->nd_opt_pi_prefix, prefixbuf, sizeof(prefixbuf)), pinfo->nd_opt_pi_prefix_len, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), ifi->ifi_ifname); return (0); } preferred_time = ntohl(pinfo->nd_opt_pi_preferred_time); if (pfx->pfx_pltimeexpire) { /* * The lifetime is decremented in real time, so we should * compare the expiration time. * (RFC 2461 Section 6.2.7.) * XXX: can we really expect that all routers on the link * have synchronized clocks? */ clock_gettime(CLOCK_MONOTONIC_FAST, &now); preferred_time += now.tv_sec; if (!pfx->pfx_timer && rai->rai_clockskew && udiff(preferred_time, pfx->pfx_pltimeexpire) > rai->rai_clockskew) { syslog(LOG_INFO, "<%s> preferred lifetime for %s/%d" " (decr. 
in real time) inconsistent on %s:" " %" PRIu32 " from %s, %" PRIu32 " from us", __func__, inet_ntop(AF_INET6, &pinfo->nd_opt_pi_prefix, prefixbuf, sizeof(prefixbuf)), pinfo->nd_opt_pi_prefix_len, ifi->ifi_ifname, preferred_time, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), pfx->pfx_pltimeexpire); inconsistent++; } } else if (!pfx->pfx_timer && preferred_time != pfx->pfx_preflifetime) syslog(LOG_INFO, "<%s> preferred lifetime for %s/%d" " inconsistent on %s:" " %d from %s, %d from us", __func__, inet_ntop(AF_INET6, &pinfo->nd_opt_pi_prefix, prefixbuf, sizeof(prefixbuf)), pinfo->nd_opt_pi_prefix_len, ifi->ifi_ifname, preferred_time, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), pfx->pfx_preflifetime); valid_time = ntohl(pinfo->nd_opt_pi_valid_time); if (pfx->pfx_vltimeexpire) { clock_gettime(CLOCK_MONOTONIC_FAST, &now); valid_time += now.tv_sec; if (!pfx->pfx_timer && rai->rai_clockskew && udiff(valid_time, pfx->pfx_vltimeexpire) > rai->rai_clockskew) { syslog(LOG_INFO, "<%s> valid lifetime for %s/%d" " (decr. in real time) inconsistent on %s:" " %d from %s, %" PRIu32 " from us", __func__, inet_ntop(AF_INET6, &pinfo->nd_opt_pi_prefix, prefixbuf, sizeof(prefixbuf)), pinfo->nd_opt_pi_prefix_len, ifi->ifi_ifname, preferred_time, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), pfx->pfx_vltimeexpire); inconsistent++; } } else if (!pfx->pfx_timer && valid_time != pfx->pfx_validlifetime) { syslog(LOG_INFO, "<%s> valid lifetime for %s/%d" " inconsistent on %s:" " %d from %s, %d from us", __func__, inet_ntop(AF_INET6, &pinfo->nd_opt_pi_prefix, prefixbuf, sizeof(prefixbuf)), pinfo->nd_opt_pi_prefix_len, ifi->ifi_ifname, valid_time, inet_ntop(AF_INET6, &from->sin6_addr, ntopbuf, sizeof(ntopbuf)), pfx->pfx_validlifetime); inconsistent++; } return (inconsistent); } struct prefix * find_prefix(struct rainfo *rai, struct in6_addr *prefix, int plen) { struct prefix *pfx; int bytelen, bitlen; char bitmask; TAILQ_FOREACH(pfx, &rai->rai_prefix, pfx_next) { if (plen != pfx->pfx_prefixlen) continue; bytelen = plen / 8; bitlen = plen % 8; bitmask = 0xff << (8 - bitlen); if (memcmp((void *)prefix, (void *)&pfx->pfx_prefix, bytelen)) continue; if (bitlen == 0 || ((prefix->s6_addr[bytelen] & bitmask) == (pfx->pfx_prefix.s6_addr[bytelen] & bitmask))) { return (pfx); } } return (NULL); } /* check if p0/plen0 matches p1/plen1; return 1 if matches, otherwise 0. 
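 * The comparison covers bytelen = plen1 / 8 whole bytes plus the
 * top bitlen = plen1 % 8 bits of the following byte. For example,
 * with plen1 = 10, bytelen = 1 and bitmask = 0xc0, so the first
 * byte is compared in full and only the two high-order bits of
 * the second byte are compared.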
*/ int prefix_match(struct in6_addr *p0, int plen0, struct in6_addr *p1, int plen1) { int bytelen, bitlen; char bitmask; if (plen0 < plen1) return (0); bytelen = plen1 / 8; bitlen = plen1 % 8; bitmask = 0xff << (8 - bitlen); if (memcmp((void *)p0, (void *)p1, bytelen)) return (0); if (bitlen == 0 || ((p0->s6_addr[bytelen] & bitmask) == (p1->s6_addr[bytelen] & bitmask))) { return (1); } return (0); } static int nd6_options(struct nd_opt_hdr *hdr, int limit, union nd_opt *ndopts, uint32_t optflags) { int optlen = 0; for (; limit > 0; limit -= optlen) { if ((size_t)limit < sizeof(struct nd_opt_hdr)) { syslog(LOG_INFO, "<%s> short option header", __func__); goto bad; } hdr = (struct nd_opt_hdr *)((caddr_t)hdr + optlen); if (hdr->nd_opt_len == 0) { syslog(LOG_INFO, "<%s> bad ND option length(0) (type = %d)", __func__, hdr->nd_opt_type); goto bad; } optlen = hdr->nd_opt_len << 3; if (optlen > limit) { syslog(LOG_INFO, "<%s> short option", __func__); goto bad; } if (hdr->nd_opt_type > ND_OPT_MTU && hdr->nd_opt_type != ND_OPT_RDNSS && hdr->nd_opt_type != ND_OPT_DNSSL) { syslog(LOG_INFO, "<%s> unknown ND option(type %d)", __func__, hdr->nd_opt_type); continue; } if ((ndopt_flags[hdr->nd_opt_type] & optflags) == 0) { syslog(LOG_INFO, "<%s> unexpected ND option(type %d)", __func__, hdr->nd_opt_type); continue; } /* * Option length check. Do it here for all fixed-length * options. */ switch (hdr->nd_opt_type) { case ND_OPT_MTU: if (optlen == sizeof(struct nd_opt_mtu)) break; goto skip; case ND_OPT_RDNSS: if (optlen >= 24 && (optlen - sizeof(struct nd_opt_rdnss)) % 16 == 0) break; goto skip; case ND_OPT_DNSSL: if (optlen >= 16 && (optlen - sizeof(struct nd_opt_dnssl)) % 8 == 0) break; goto skip; case ND_OPT_PREFIX_INFORMATION: if (optlen == sizeof(struct nd_opt_prefix_info)) break; skip: syslog(LOG_INFO, "<%s> invalid option length", __func__); continue; } switch (hdr->nd_opt_type) { case ND_OPT_TARGET_LINKADDR: case ND_OPT_REDIRECTED_HEADER: case ND_OPT_RDNSS: case ND_OPT_DNSSL: break; /* we don't care about these options */ case ND_OPT_SOURCE_LINKADDR: case ND_OPT_MTU: if (ndopts->opt_array[hdr->nd_opt_type]) { syslog(LOG_INFO, "<%s> duplicated ND option (type = %d)", __func__, hdr->nd_opt_type); } ndopts->opt_array[hdr->nd_opt_type] = hdr; break; case ND_OPT_PREFIX_INFORMATION: { struct nd_optlist *nol; if (ndopts->opt_pi == 0) { ndopts->opt_pi = (struct nd_opt_prefix_info *)hdr; continue; } nol = malloc(sizeof(*nol)); if (nol == NULL) { syslog(LOG_ERR, "<%s> can't allocate memory", __func__); goto bad; } nol->nol_opt = hdr; TAILQ_INSERT_TAIL(&(ndopts->opt_list), nol, nol_next); break; } default: /* impossible */ break; } } return (0); bad: free_ndopts(ndopts); return (-1); } static void free_ndopts(union nd_opt *ndopts) { struct nd_optlist *nol; while ((nol = TAILQ_FIRST(&ndopts->opt_list)) != NULL) { TAILQ_REMOVE(&ndopts->opt_list, nol, nol_next); free(nol); } } void sock_open(struct sockinfo *s) { struct icmp6_filter filt; int on; /* XXX: should be max MTU attached to the node */ static char answer[1500]; syslog(LOG_DEBUG, "<%s> enter", __func__); if (s == NULL) { syslog(LOG_ERR, "<%s> internal error", __func__); exit(1); } rcvcmsgbuflen = CMSG_SPACE(sizeof(struct in6_pktinfo)) + CMSG_SPACE(sizeof(int)); rcvcmsgbuf = (char *)malloc(rcvcmsgbuflen); if (rcvcmsgbuf == NULL) { syslog(LOG_ERR, "<%s> not enough core", __func__); exit(1); } sndcmsgbuflen = CMSG_SPACE(sizeof(struct in6_pktinfo)) + CMSG_SPACE(sizeof(int)); sndcmsgbuf = (char *)malloc(sndcmsgbuflen); if (sndcmsgbuf == NULL) { 
syslog(LOG_ERR, "<%s> not enough core", __func__); exit(1); } if ((s->si_fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6)) < 0) { syslog(LOG_ERR, "<%s> socket: %s", __func__, strerror(errno)); exit(1); } /* specify to tell receiving interface */ on = 1; if (setsockopt(s->si_fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on)) < 0) { syslog(LOG_ERR, "<%s> IPV6_RECVPKTINFO: %s", __func__, strerror(errno)); exit(1); } on = 1; /* specify to tell value of hoplimit field of received IP6 hdr */ if (setsockopt(s->si_fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on)) < 0) { syslog(LOG_ERR, "<%s> IPV6_RECVHOPLIMIT: %s", __func__, strerror(errno)); exit(1); } ICMP6_FILTER_SETBLOCKALL(&filt); ICMP6_FILTER_SETPASS(ND_ROUTER_SOLICIT, &filt); ICMP6_FILTER_SETPASS(ND_ROUTER_ADVERT, &filt); if (mcastif != NULL) ICMP6_FILTER_SETPASS(ICMP6_ROUTER_RENUMBERING, &filt); if (setsockopt(s->si_fd, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt)) < 0) { syslog(LOG_ERR, "<%s> IICMP6_FILTER: %s", __func__, strerror(errno)); exit(1); } /* initialize msghdr for receiving packets */ rcviov[0].iov_base = (caddr_t)answer; rcviov[0].iov_len = sizeof(answer); rcvmhdr.msg_name = (caddr_t)&rcvfrom; rcvmhdr.msg_namelen = sizeof(rcvfrom); rcvmhdr.msg_iov = rcviov; rcvmhdr.msg_iovlen = 1; rcvmhdr.msg_control = (caddr_t) rcvcmsgbuf; rcvmhdr.msg_controllen = rcvcmsgbuflen; /* initialize msghdr for sending packets */ sndmhdr.msg_namelen = sizeof(struct sockaddr_in6); sndmhdr.msg_iov = sndiov; sndmhdr.msg_iovlen = 1; sndmhdr.msg_control = (caddr_t)sndcmsgbuf; sndmhdr.msg_controllen = sndcmsgbuflen; return; } /* open a routing socket to watch the routing table */ static void rtsock_open(struct sockinfo *s) { if (s == NULL) { syslog(LOG_ERR, "<%s> internal error", __func__); exit(1); } if ((s->si_fd = socket(PF_ROUTE, SOCK_RAW, 0)) < 0) { syslog(LOG_ERR, "<%s> socket: %s", __func__, strerror(errno)); exit(1); } } struct ifinfo * if_indextoifinfo(int idx) { struct ifinfo *ifi; char *name, name0[IFNAMSIZ]; /* Check if the interface has a valid name or not. */ if (if_indextoname(idx, name0) == NULL) return (NULL); TAILQ_FOREACH(ifi, &ifilist, ifi_next) { if (ifi->ifi_ifindex == idx) return (ifi); } if (ifi != NULL) syslog(LOG_DEBUG, "<%s> ifi found (idx=%d)", __func__, idx); else syslog(LOG_DEBUG, "<%s> ifi not found (idx=%d)", __func__, idx); return (NULL); /* search failed */ } void ra_output(struct ifinfo *ifi) { int i; struct cmsghdr *cm; struct in6_pktinfo *pi; struct soliciter *sol; struct rainfo *rai; switch (ifi->ifi_state) { case IFI_STATE_CONFIGURED: rai = ifi->ifi_rainfo; break; case IFI_STATE_TRANSITIVE: rai = ifi->ifi_rainfo_trans; break; case IFI_STATE_UNCONFIGURED: syslog(LOG_DEBUG, "<%s> %s is unconfigured. " "Skip sending RAs.", __func__, ifi->ifi_ifname); return; default: rai = NULL; } if (rai == NULL) { syslog(LOG_DEBUG, "<%s> rainfo is NULL on %s." "Skip sending RAs.", __func__, ifi->ifi_ifname); return; } if (!(ifi->ifi_flags & IFF_UP)) { syslog(LOG_DEBUG, "<%s> %s is not up. " "Skip sending RAs.", __func__, ifi->ifi_ifname); return; } /* * Check lifetime, ACCEPT_RTADV flag, and ip6.forwarding. * * (lifetime == 0) = output * (lifetime != 0 && (check_accept_rtadv()) = no output * * Basically, hosts MUST NOT send Router Advertisement * messages at any time (RFC 4861, Section 6.2.3). However, it * would sometimes be useful to allow hosts to advertise some * parameters such as prefix information and link MTU. 
Thus, * we allow hosts to invoke rtadvd only when router lifetime * (on every advertising interface) is explicitly set * zero. (see also the above section) */ syslog(LOG_DEBUG, "<%s> check lifetime=%d, ACCEPT_RTADV=%d, ip6.forwarding=%d " "on %s", __func__, rai->rai_lifetime, check_accept_rtadv(ifi->ifi_ifindex), getinet6sysctl(IPV6CTL_FORWARDING), ifi->ifi_ifname); if (rai->rai_lifetime != 0) { if (getinet6sysctl(IPV6CTL_FORWARDING) == 0) { syslog(LOG_ERR, "non-zero lifetime RA " "but net.inet6.ip6.forwarding=0. " "Ignored."); return; } if (check_accept_rtadv(ifi->ifi_ifindex)) { syslog(LOG_ERR, "non-zero lifetime RA " "on RA receiving interface %s." " Ignored.", ifi->ifi_ifname); return; } } make_packet(rai); /* XXX: inefficient */ sndmhdr.msg_name = (caddr_t)&sin6_linklocal_allnodes; sndmhdr.msg_iov[0].iov_base = (caddr_t)rai->rai_ra_data; sndmhdr.msg_iov[0].iov_len = rai->rai_ra_datalen; cm = CMSG_FIRSTHDR(&sndmhdr); /* specify the outgoing interface */ cm->cmsg_level = IPPROTO_IPV6; cm->cmsg_type = IPV6_PKTINFO; cm->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo)); pi = (struct in6_pktinfo *)CMSG_DATA(cm); memset(&pi->ipi6_addr, 0, sizeof(pi->ipi6_addr)); /*XXX*/ pi->ipi6_ifindex = ifi->ifi_ifindex; /* specify the hop limit of the packet */ { int hoplimit = 255; cm = CMSG_NXTHDR(&sndmhdr, cm); cm->cmsg_level = IPPROTO_IPV6; cm->cmsg_type = IPV6_HOPLIMIT; cm->cmsg_len = CMSG_LEN(sizeof(int)); memcpy(CMSG_DATA(cm), &hoplimit, sizeof(int)); } syslog(LOG_DEBUG, "<%s> send RA on %s, # of RS waitings = %d", __func__, ifi->ifi_ifname, ifi->ifi_rs_waitcount); i = sendmsg(sock.si_fd, &sndmhdr, 0); if (i < 0 || (size_t)i != rai->rai_ra_datalen) { if (i < 0) { syslog(LOG_ERR, "<%s> sendmsg on %s: %s", __func__, ifi->ifi_ifname, strerror(errno)); } } /* * unicast advertisements * XXX commented out. reason: though spec does not forbit it, unicast * advert does not really help */ while ((sol = TAILQ_FIRST(&rai->rai_soliciter)) != NULL) { TAILQ_REMOVE(&rai->rai_soliciter, sol, sol_next); free(sol); } /* update timestamp */ clock_gettime(CLOCK_MONOTONIC_FAST, &ifi->ifi_ra_lastsent); /* update counter */ ifi->ifi_rs_waitcount = 0; ifi->ifi_raoutput++; switch (ifi->ifi_state) { case IFI_STATE_CONFIGURED: if (ifi->ifi_burstcount > 0) ifi->ifi_burstcount--; break; case IFI_STATE_TRANSITIVE: ifi->ifi_burstcount--; if (ifi->ifi_burstcount == 0) { if (ifi->ifi_rainfo == ifi->ifi_rainfo_trans) { /* Initial burst finished. */ if (ifi->ifi_rainfo_trans != NULL) ifi->ifi_rainfo_trans = NULL; } /* Remove burst RA information */ if (ifi->ifi_rainfo_trans != NULL) { rm_rainfo(ifi->ifi_rainfo_trans); ifi->ifi_rainfo_trans = NULL; } if (ifi->ifi_rainfo != NULL) { /* * TRANSITIVE -> CONFIGURED * * After initial burst or transition from * one configuration to another, * ifi_rainfo always points to the next RA * information. */ ifi->ifi_state = IFI_STATE_CONFIGURED; syslog(LOG_DEBUG, "<%s> ifname=%s marked as " "CONFIGURED.", __func__, ifi->ifi_ifname); } else { /* * TRANSITIVE -> UNCONFIGURED * * If ifi_rainfo points to NULL, this * interface is shutting down. 
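 * (The interface is marked UNCONFIGURED and the multicast
 * membership acquired for it is dropped via sock_mc_leave();
 * a failure there is treated as fatal.)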
* */ int error; ifi->ifi_state = IFI_STATE_UNCONFIGURED; syslog(LOG_DEBUG, "<%s> ifname=%s marked as " "UNCONFIGURED.", __func__, ifi->ifi_ifname); error = sock_mc_leave(&sock, ifi->ifi_ifindex); if (error) exit(1); } } break; } } /* process RA timer */ struct rtadvd_timer * ra_timeout(void *arg) { struct ifinfo *ifi; ifi = (struct ifinfo *)arg; syslog(LOG_DEBUG, "<%s> RA timer on %s is expired", __func__, ifi->ifi_ifname); ra_output(ifi); return (ifi->ifi_ra_timer); } /* update RA timer */ void ra_timer_update(void *arg, struct timespec *tm) { uint16_t interval; struct rainfo *rai; struct ifinfo *ifi; ifi = (struct ifinfo *)arg; rai = ifi->ifi_rainfo; interval = 0; switch (ifi->ifi_state) { case IFI_STATE_UNCONFIGURED: return; break; case IFI_STATE_CONFIGURED: /* * Whenever a multicast advertisement is sent from * an interface, the timer is reset to a * uniformly-distributed random value between the * interface's configured MinRtrAdvInterval and * MaxRtrAdvInterval (RFC4861 6.2.4). */ interval = rai->rai_mininterval; interval += arc4random_uniform(rai->rai_maxinterval - rai->rai_mininterval); break; case IFI_STATE_TRANSITIVE: /* * For the first few advertisements (up to * MAX_INITIAL_RTR_ADVERTISEMENTS), if the randomly chosen * interval is greater than * MAX_INITIAL_RTR_ADVERT_INTERVAL, the timer SHOULD be * set to MAX_INITIAL_RTR_ADVERT_INTERVAL instead. (RFC * 4861 6.2.4) * * In such cases, the router SHOULD transmit one or more * (but not more than MAX_FINAL_RTR_ADVERTISEMENTS) final * multicast Router Advertisements on the interface with a * Router Lifetime field of zero. (RFC 4861 6.2.5) */ interval = ifi->ifi_burstinterval; break; } tm->tv_sec = interval; tm->tv_nsec = 0; syslog(LOG_DEBUG, "<%s> RA timer on %s is set to %ld:%ld", __func__, ifi->ifi_ifname, (long int)tm->tv_sec, (long int)tm->tv_nsec / 1000); return; }