diff --git a/sys/cam/cam.c b/sys/cam/cam.c index ae1286c8bc6a..ce7dc81b3495 100644 --- a/sys/cam/cam.c +++ b/sys/cam/cam.c @@ -1,573 +1,645 @@ /*- * Generic utility routines for the Common Access Method layer. * * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include +#include #include #else /* _KERNEL */ #include #include #include #include #endif /* _KERNEL */ #include #include #include #include #include #ifdef _KERNEL #include #include #include FEATURE(scbus, "SCSI devices support"); #endif static int camstatusentrycomp(const void *key, const void *member); const struct cam_status_entry cam_status_table[] = { { CAM_REQ_INPROG, "CCB request is in progress" }, { CAM_REQ_CMP, "CCB request completed without error" }, { CAM_REQ_ABORTED, "CCB request aborted by the host" }, { CAM_UA_ABORT, "Unable to abort CCB request" }, { CAM_REQ_CMP_ERR, "CCB request completed with an error" }, { CAM_BUSY, "CAM subsystem is busy" }, { CAM_REQ_INVALID, "CCB request was invalid" }, { CAM_PATH_INVALID, "Supplied Path ID is invalid" }, { CAM_DEV_NOT_THERE, "Device Not Present" }, { CAM_UA_TERMIO, "Unable to terminate I/O CCB request" }, { CAM_SEL_TIMEOUT, "Selection Timeout" }, { CAM_CMD_TIMEOUT, "Command timeout" }, { CAM_SCSI_STATUS_ERROR, "SCSI Status Error" }, { CAM_MSG_REJECT_REC, "Message Reject Reveived" }, { CAM_SCSI_BUS_RESET, "SCSI Bus Reset Sent/Received" }, { CAM_UNCOR_PARITY, "Uncorrectable parity/CRC error" }, { CAM_AUTOSENSE_FAIL, "Auto-Sense Retrieval Failed" }, { CAM_NO_HBA, "No HBA Detected" }, { CAM_DATA_RUN_ERR, "Data Overrun error" }, { CAM_UNEXP_BUSFREE, "Unexpected Bus Free" }, { CAM_SEQUENCE_FAIL, "Target Bus Phase Sequence Failure" }, { CAM_CCB_LEN_ERR, "CCB length supplied is inadequate" }, { CAM_PROVIDE_FAIL, "Unable to provide requested capability" }, { CAM_BDR_SENT, "SCSI BDR Message Sent" }, { CAM_REQ_TERMIO, "CCB request terminated by the host" }, { CAM_UNREC_HBA_ERROR, "Unrecoverable Host Bus Adapter Error" }, { CAM_REQ_TOO_BIG, "The request was too large for this host" }, { CAM_REQUEUE_REQ, "Unconditionally Re-queue Request", }, { CAM_ATA_STATUS_ERROR, "ATA Status Error" }, { CAM_SCSI_IT_NEXUS_LOST,"Initiator/Target Nexus 
Lost" }, { CAM_SMP_STATUS_ERROR, "SMP Status Error" }, { CAM_IDE, "Initiator Detected Error Message Received" }, { CAM_RESRC_UNAVAIL, "Resource Unavailable" }, { CAM_UNACKED_EVENT, "Unacknowledged Event by Host" }, { CAM_MESSAGE_RECV, "Message Received in Host Target Mode" }, { CAM_INVALID_CDB, "Invalid CDB received in Host Target Mode" }, { CAM_LUN_INVALID, "Invalid Lun" }, { CAM_TID_INVALID, "Invalid Target ID" }, { CAM_FUNC_NOTAVAIL, "Function Not Available" }, { CAM_NO_NEXUS, "Nexus Not Established" }, { CAM_IID_INVALID, "Invalid Initiator ID" }, { CAM_CDB_RECVD, "CDB Received" }, { CAM_LUN_ALRDY_ENA, "LUN Already Enabled for Target Mode" }, { CAM_SCSI_BUSY, "SCSI Bus Busy" }, }; #ifdef _KERNEL SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Subsystem"); #ifndef CAM_DEFAULT_SORT_IO_QUEUES #define CAM_DEFAULT_SORT_IO_QUEUES 1 #endif int cam_sort_io_queues = CAM_DEFAULT_SORT_IO_QUEUES; SYSCTL_INT(_kern_cam, OID_AUTO, sort_io_queues, CTLFLAG_RWTUN, &cam_sort_io_queues, 0, "Sort IO queues to try and optimise disk access patterns"); #endif void cam_strvis(u_int8_t *dst, const u_int8_t *src, int srclen, int dstlen) { cam_strvis_flag(dst, src, srclen, dstlen, CAM_STRVIS_FLAG_NONASCII_ESC); } void cam_strvis_flag(u_int8_t *dst, const u_int8_t *src, int srclen, int dstlen, uint32_t flags) { struct sbuf sb; sbuf_new(&sb, dst, dstlen, SBUF_FIXEDLEN); cam_strvis_sbuf(&sb, src, srclen, flags); sbuf_finish(&sb); } void cam_strvis_sbuf(struct sbuf *sb, const u_int8_t *src, int srclen, uint32_t flags) { /* Trim leading/trailing spaces, nulls. */ while (srclen > 0 && src[0] == ' ') src++, srclen--; while (srclen > 0 && (src[srclen-1] == ' ' || src[srclen-1] == '\0')) srclen--; while (srclen > 0) { if (*src < 0x20 || *src >= 0x80) { /* SCSI-II Specifies that these should never occur. */ /* non-printable character */ switch (flags & CAM_STRVIS_FLAG_NONASCII_MASK) { case CAM_STRVIS_FLAG_NONASCII_ESC: sbuf_printf(sb, "\\%c%c%c", ((*src & 0300) >> 6) + '0', ((*src & 0070) >> 3) + '0', ((*src & 0007) >> 0) + '0'); break; case CAM_STRVIS_FLAG_NONASCII_RAW: /* * If we run into a NUL, just transform it * into a space. */ if (*src != 0x00) sbuf_putc(sb, *src); else sbuf_putc(sb, ' '); break; case CAM_STRVIS_FLAG_NONASCII_SPC: sbuf_putc(sb, ' '); break; case CAM_STRVIS_FLAG_NONASCII_TRIM: default: break; } } else { /* normal character */ sbuf_putc(sb, *src); } src++; srclen--; } } /* * Compare string with pattern, returning 0 on match. * Short pattern matches trailing blanks in name, * Shell globbing rules apply: * matches 0 or more characters, * ? matchces one character, [...] denotes a set to match one char, * [^...] denotes a complimented set to match one character. * Spaces in str used to match anything in the pattern string * but was removed because it's a bug. No current patterns require * it, as far as I know, but it's impossible to know what drives * returned. * * Each '*' generates recursion, so keep the number of * in check. 
*/ int cam_strmatch(const u_int8_t *str, const u_int8_t *pattern, int str_len) { while (*pattern != '\0' && str_len > 0) { if (*pattern == '*') { pattern++; if (*pattern == '\0') return (0); do { if (cam_strmatch(str, pattern, str_len) == 0) return (0); str++; str_len--; } while (str_len > 0); return (1); } else if (*pattern == '[') { int negate_range, ok; uint8_t pc = UCHAR_MAX; uint8_t sc; ok = 0; sc = *str++; str_len--; pattern++; if ((negate_range = (*pattern == '^')) != 0) pattern++; while ((*pattern != ']') && *pattern != '\0') { if (*pattern == '-') { if (pattern[1] == '\0') /* Bad pattern */ return (1); if (sc >= pc && sc <= pattern[1]) ok = 1; pattern++; } else if (*pattern == sc) ok = 1; pc = *pattern; pattern++; } if (ok == negate_range) return (1); pattern++; } else if (*pattern == '?') { /* * NB: || *str == ' ' of the old code is a bug and was * removed. If you add it back, keep this the last if * before the naked else */ pattern++; str++; str_len--; } else { if (*str != *pattern) return (1); pattern++; str++; str_len--; } } /* '*' is allowed to match nothing, so gobble it */ while (*pattern == '*') pattern++; if ( *pattern != '\0') { /* Pattern not fully consumed. Not a match */ return (1); } /* Eat trailing spaces, which get added by SAT */ while (str_len > 0 && *str == ' ') { str++; str_len--; } return (str_len); } caddr_t cam_quirkmatch(caddr_t target, caddr_t quirk_table, int num_entries, int entry_size, cam_quirkmatch_t *comp_func) { for (; num_entries > 0; num_entries--, quirk_table += entry_size) { if ((*comp_func)(target, quirk_table) == 0) return (quirk_table); } return (NULL); } const struct cam_status_entry* cam_fetch_status_entry(cam_status status) { status &= CAM_STATUS_MASK; return (bsearch(&status, &cam_status_table, nitems(cam_status_table), sizeof(*cam_status_table), camstatusentrycomp)); } static int camstatusentrycomp(const void *key, const void *member) { cam_status status; const struct cam_status_entry *table_entry; status = *(const cam_status *)key; table_entry = (const struct cam_status_entry *)member; return (status - table_entry->status_code); } #ifdef _KERNEL char * cam_error_string(union ccb *ccb, char *str, int str_len, cam_error_string_flags flags, cam_error_proto_flags proto_flags) #else /* !_KERNEL */ char * cam_error_string(struct cam_device *device, union ccb *ccb, char *str, int str_len, cam_error_string_flags flags, cam_error_proto_flags proto_flags) #endif /* _KERNEL/!_KERNEL */ { char path_str[64]; struct sbuf sb; if ((ccb == NULL) || (str == NULL) || (str_len <= 0)) return(NULL); if (flags == CAM_ESF_NONE) return(NULL); switch (ccb->ccb_h.func_code) { case XPT_ATA_IO: switch (proto_flags & CAM_EPF_LEVEL_MASK) { case CAM_EPF_NONE: break; case CAM_EPF_ALL: case CAM_EPF_NORMAL: proto_flags |= CAM_EAF_PRINT_RESULT; /* FALLTHROUGH */ case CAM_EPF_MINIMAL: proto_flags |= CAM_EAF_PRINT_STATUS; /* FALLTHROUGH */ default: break; } break; case XPT_SCSI_IO: switch (proto_flags & CAM_EPF_LEVEL_MASK) { case CAM_EPF_NONE: break; case CAM_EPF_ALL: case CAM_EPF_NORMAL: proto_flags |= CAM_ESF_PRINT_SENSE; /* FALLTHROUGH */ case CAM_EPF_MINIMAL: proto_flags |= CAM_ESF_PRINT_STATUS; /* FALLTHROUGH */ default: break; } break; case XPT_SMP_IO: switch (proto_flags & CAM_EPF_LEVEL_MASK) { case CAM_EPF_NONE: break; case CAM_EPF_ALL: proto_flags |= CAM_ESMF_PRINT_FULL_CMD; /* FALLTHROUGH */ case CAM_EPF_NORMAL: case CAM_EPF_MINIMAL: proto_flags |= CAM_ESMF_PRINT_STATUS; /* FALLTHROUGH */ default: break; } break; default: break; } #ifdef _KERNEL 
xpt_path_string(ccb->csio.ccb_h.path, path_str, sizeof(path_str)); #else /* !_KERNEL */ cam_path_string(device, path_str, sizeof(path_str)); #endif /* _KERNEL/!_KERNEL */ sbuf_new(&sb, str, str_len, 0); if (flags & CAM_ESF_COMMAND) { sbuf_cat(&sb, path_str); switch (ccb->ccb_h.func_code) { case XPT_ATA_IO: ata_command_sbuf(&ccb->ataio, &sb); break; case XPT_SCSI_IO: #ifdef _KERNEL scsi_command_string(&ccb->csio, &sb); #else /* !_KERNEL */ scsi_command_string(device, &ccb->csio, &sb); #endif /* _KERNEL/!_KERNEL */ break; case XPT_SMP_IO: smp_command_sbuf(&ccb->smpio, &sb, path_str, 79 - strlen(path_str), (proto_flags & CAM_ESMF_PRINT_FULL_CMD) ? 79 : 0); break; case XPT_NVME_IO: case XPT_NVME_ADMIN: nvme_command_sbuf(&ccb->nvmeio, &sb); break; default: sbuf_printf(&sb, "CAM func %#x", ccb->ccb_h.func_code); break; } sbuf_printf(&sb, "\n"); } if (flags & CAM_ESF_CAM_STATUS) { cam_status status; const struct cam_status_entry *entry; sbuf_cat(&sb, path_str); status = ccb->ccb_h.status & CAM_STATUS_MASK; entry = cam_fetch_status_entry(status); if (entry == NULL) sbuf_printf(&sb, "CAM status: Unknown (%#x)\n", ccb->ccb_h.status); else sbuf_printf(&sb, "CAM status: %s\n", entry->status_text); } if (flags & CAM_ESF_PROTO_STATUS) { switch (ccb->ccb_h.func_code) { case XPT_ATA_IO: if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_ATA_STATUS_ERROR) break; if (proto_flags & CAM_EAF_PRINT_STATUS) { sbuf_cat(&sb, path_str); ata_status_sbuf(&ccb->ataio, &sb); sbuf_printf(&sb, "\n"); } if (proto_flags & CAM_EAF_PRINT_RESULT) { sbuf_cat(&sb, path_str); sbuf_printf(&sb, "RES: "); ata_res_sbuf(&ccb->ataio.res, &sb); sbuf_printf(&sb, "\n"); } break; case XPT_SCSI_IO: if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_SCSI_STATUS_ERROR) break; if (proto_flags & CAM_ESF_PRINT_STATUS) { sbuf_cat(&sb, path_str); sbuf_printf(&sb, "SCSI status: %s\n", scsi_status_string(&ccb->csio)); } if ((proto_flags & CAM_ESF_PRINT_SENSE) && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND) && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)) { #ifdef _KERNEL scsi_sense_sbuf(&ccb->csio, &sb, SSS_FLAG_NONE); #else /* !_KERNEL */ scsi_sense_sbuf(device, &ccb->csio, &sb, SSS_FLAG_NONE); #endif /* _KERNEL/!_KERNEL */ } break; case XPT_SMP_IO: if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_SMP_STATUS_ERROR) break; if (proto_flags & CAM_ESF_PRINT_STATUS) { sbuf_cat(&sb, path_str); sbuf_printf(&sb, "SMP status: %s (%#x)\n", smp_error_desc(ccb->smpio.smp_response[2]), ccb->smpio.smp_response[2]); } /* There is no SMP equivalent to SCSI sense. */ break; default: break; } } sbuf_finish(&sb); return(sbuf_data(&sb)); } #ifdef _KERNEL void cam_error_print(union ccb *ccb, cam_error_string_flags flags, cam_error_proto_flags proto_flags) { char str[512]; printf("%s", cam_error_string(ccb, str, sizeof(str), flags, proto_flags)); } #else /* !_KERNEL */ void cam_error_print(struct cam_device *device, union ccb *ccb, cam_error_string_flags flags, cam_error_proto_flags proto_flags, FILE *ofile) { char str[512]; if ((device == NULL) || (ccb == NULL) || (ofile == NULL)) return; fprintf(ofile, "%s", cam_error_string(device, ccb, str, sizeof(str), flags, proto_flags)); } #endif /* _KERNEL/!_KERNEL */ /* * Common calculate geometry fuction * * Caller should set ccg->volume_size and block_size. * The extended parameter should be zero if extended translation * should not be used. 
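 *
 * Worked example (illustrative numbers only): with a 512-byte block_size
 * and a volume_size of 4194304 blocks (2 GB), size_mb becomes
 * 4194304 / ((1024 * 1024) / 512) = 2048.  Since that exceeds 1024 and
 * extended translation is enabled, heads = 255 and secs_per_track = 63,
 * so cylinders = 4194304 / (255 * 63) = 261.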
*/ void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended) { uint32_t size_mb, secs_per_cylinder; if (ccg->block_size == 0) { ccg->ccb_h.status = CAM_REQ_CMP_ERR; return; } size_mb = (1024L * 1024L) / ccg->block_size; if (size_mb == 0) { ccg->ccb_h.status = CAM_REQ_CMP_ERR; return; } size_mb = ccg->volume_size / size_mb; if (size_mb > 1024 && extended) { ccg->heads = 255; ccg->secs_per_track = 63; } else { ccg->heads = 64; ccg->secs_per_track = 32; } secs_per_cylinder = ccg->heads * ccg->secs_per_track; if (secs_per_cylinder == 0) { ccg->ccb_h.status = CAM_REQ_CMP_ERR; return; } ccg->cylinders = ccg->volume_size / secs_per_cylinder; ccg->ccb_h.status = CAM_REQ_CMP; } + +#ifdef _KERNEL +struct memdesc +memdesc_ccb(union ccb *ccb) +{ + struct ccb_hdr *ccb_h; + void *data_ptr; + uint32_t dxfer_len; + uint16_t sglist_cnt; + + ccb_h = &ccb->ccb_h; + switch (ccb_h->func_code) { + case XPT_SCSI_IO: { + struct ccb_scsiio *csio; + + csio = &ccb->csio; + data_ptr = csio->data_ptr; + dxfer_len = csio->dxfer_len; + sglist_cnt = csio->sglist_cnt; + break; + } + case XPT_CONT_TARGET_IO: { + struct ccb_scsiio *ctio; + + ctio = &ccb->ctio; + data_ptr = ctio->data_ptr; + dxfer_len = ctio->dxfer_len; + sglist_cnt = ctio->sglist_cnt; + break; + } + case XPT_ATA_IO: { + struct ccb_ataio *ataio; + + ataio = &ccb->ataio; + data_ptr = ataio->data_ptr; + dxfer_len = ataio->dxfer_len; + sglist_cnt = 0; + break; + } + case XPT_NVME_IO: + case XPT_NVME_ADMIN: { + struct ccb_nvmeio *nvmeio; + + nvmeio = &ccb->nvmeio; + data_ptr = nvmeio->data_ptr; + dxfer_len = nvmeio->dxfer_len; + sglist_cnt = nvmeio->sglist_cnt; + break; + } + default: + panic("%s: Unsupported func code %d", __func__, + ccb_h->func_code); + } + + switch ((ccb_h->flags & CAM_DATA_MASK)) { + case CAM_DATA_VADDR: + return (memdesc_vaddr(data_ptr, dxfer_len)); + case CAM_DATA_PADDR: + return (memdesc_paddr((vm_paddr_t)(uintptr_t)data_ptr, + dxfer_len)); + case CAM_DATA_SG: + return (memdesc_vlist(data_ptr, sglist_cnt)); + case CAM_DATA_SG_PADDR: + return (memdesc_plist(data_ptr, sglist_cnt)); + case CAM_DATA_BIO: + return (memdesc_bio(data_ptr)); + default: + panic("%s: flags 0x%X unimplemented", __func__, ccb_h->flags); + } +} +#endif diff --git a/sys/kern/subr_bus_dma.c b/sys/kern/subr_bus_dma.c index 65a08aeba17c..bfaad30b37d3 100644 --- a/sys/kern/subr_bus_dma.c +++ b/sys/kern/subr_bus_dma.c @@ -1,904 +1,781 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012 EMC Corp. * All rights reserved. * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include "opt_iommu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Convenience function for manipulating driver locks from busdma (during * busdma_swi, for example). */ void busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) { struct mtx *dmtx; dmtx = (struct mtx *)arg; switch (op) { case BUS_DMA_LOCK: mtx_lock(dmtx); break; case BUS_DMA_UNLOCK: mtx_unlock(dmtx); break; default: panic("Unknown operation 0x%x for busdma_lock_mutex!", op); } } /* * dflt_lock should never get called. It gets put into the dma tag when * lockfunc == NULL, which is only valid if the maps that are associated * with the tag are meant to never be deferred. * * XXX Should have a way to identify which driver is responsible here. */ void _busdma_dflt_lock(void *arg, bus_dma_lock_op_t op) { panic("driver error: _bus_dma_dflt_lock called"); } /* * Load up data starting at offset within a region specified by a * list of virtual address ranges until either length or the region * are exhausted. */ static int _bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs, int flags, size_t offset, size_t length) { int error; error = 0; for (; sglist_cnt > 0 && length != 0; sglist_cnt--, list++) { char *addr; size_t ds_len; KASSERT((offset < list->ds_len), ("Invalid mid-segment offset")); addr = (char *)(uintptr_t)list->ds_addr + offset; ds_len = list->ds_len - offset; offset = 0; if (ds_len > length) ds_len = length; length -= ds_len; KASSERT((ds_len != 0), ("Segment length is zero")); error = _bus_dmamap_load_buffer(dmat, map, addr, ds_len, pmap, flags, NULL, nsegs); if (error) break; } return (error); } /* * Load a list of physical addresses. */ static int _bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags) { int error; error = 0; for (; sglist_cnt > 0; sglist_cnt--, list++) { error = _bus_dmamap_load_phys(dmat, map, (vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL, nsegs); if (error) break; } return (error); } /* * Load an unmapped mbuf */ static int _bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags) { int error, i, off, len, pglen, pgoff, seglen, segoff; M_ASSERTEXTPG(m); len = m->m_len; error = 0; /* Skip over any data removed from the front. 
*/ off = mtod(m, vm_offset_t); if (m->m_epg_hdrlen != 0) { if (off >= m->m_epg_hdrlen) { off -= m->m_epg_hdrlen; } else { seglen = m->m_epg_hdrlen - off; segoff = off; seglen = min(seglen, len); off = 0; len -= seglen; error = _bus_dmamap_load_buffer(dmat, map, &m->m_epg_hdr[segoff], seglen, kernel_pmap, flags, segs, nsegs); } } pgoff = m->m_epg_1st_off; for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) { pglen = m_epg_pagelen(m, i, pgoff); if (off >= pglen) { off -= pglen; pgoff = 0; continue; } seglen = pglen - off; segoff = pgoff + off; off = 0; seglen = min(seglen, len); len -= seglen; error = _bus_dmamap_load_phys(dmat, map, m->m_epg_pa[i] + segoff, seglen, flags, segs, nsegs); pgoff = 0; }; if (len != 0 && error == 0) { KASSERT((off + len) <= m->m_epg_trllen, ("off + len > trail (%d + %d > %d)", off, len, m->m_epg_trllen)); error = _bus_dmamap_load_buffer(dmat, map, &m->m_epg_trail[off], len, kernel_pmap, flags, segs, nsegs); } return (error); } /* * Load a single mbuf. */ static int _bus_dmamap_load_single_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *segs, int *nsegs, int flags) { int error; error = 0; if ((m->m_flags & M_EXTPG) != 0) error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs, flags); else error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs); CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, *nsegs); return (error); } /* * Load an mbuf chain. */ static int _bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags) { struct mbuf *m; int error; error = 0; for (m = m0; m != NULL && error == 0; m = m->m_next) { if (m->m_len > 0) { if ((m->m_flags & M_EXTPG) != 0) error = _bus_dmamap_load_mbuf_epg(dmat, map, m, segs, nsegs, flags); else error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF, segs, nsegs); } } CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, *nsegs); return (error); } /* * Load from block io. */ static int _bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio, int *nsegs, int flags) { if ((bio->bio_flags & BIO_VLIST) != 0) { bus_dma_segment_t *segs = (bus_dma_segment_t *)bio->bio_data; return (_bus_dmamap_load_vlist(dmat, map, segs, bio->bio_ma_n, kernel_pmap, nsegs, flags, bio->bio_ma_offset, bio->bio_bcount)); } if ((bio->bio_flags & BIO_UNMAPPED) != 0) return (_bus_dmamap_load_ma(dmat, map, bio->bio_ma, bio->bio_bcount, bio->bio_ma_offset, flags, NULL, nsegs)); return (_bus_dmamap_load_buffer(dmat, map, bio->bio_data, bio->bio_bcount, kernel_pmap, flags, NULL, nsegs)); } int bus_dmamap_load_ma_triv(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { vm_paddr_t paddr; bus_size_t len; int error, i; error = 0; for (i = 0; tlen > 0; i++, tlen -= len) { len = min(PAGE_SIZE - ma_offs, tlen); paddr = VM_PAGE_TO_PHYS(ma[i]) + ma_offs; error = _bus_dmamap_load_phys(dmat, map, paddr, len, flags, segs, segp); if (error != 0) break; ma_offs = 0; } return (error); } -/* - * Load a cam control block. 
- */ -static int -_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, - int *nsegs, int flags) -{ - struct ccb_hdr *ccb_h; - void *data_ptr; - int error; - uint32_t dxfer_len; - uint16_t sglist_cnt; - - error = 0; - ccb_h = &ccb->ccb_h; - switch (ccb_h->func_code) { - case XPT_SCSI_IO: { - struct ccb_scsiio *csio; - - csio = &ccb->csio; - data_ptr = csio->data_ptr; - dxfer_len = csio->dxfer_len; - sglist_cnt = csio->sglist_cnt; - break; - } - case XPT_CONT_TARGET_IO: { - struct ccb_scsiio *ctio; - - ctio = &ccb->ctio; - data_ptr = ctio->data_ptr; - dxfer_len = ctio->dxfer_len; - sglist_cnt = ctio->sglist_cnt; - break; - } - case XPT_ATA_IO: { - struct ccb_ataio *ataio; - - ataio = &ccb->ataio; - data_ptr = ataio->data_ptr; - dxfer_len = ataio->dxfer_len; - sglist_cnt = 0; - break; - } - case XPT_NVME_IO: - case XPT_NVME_ADMIN: { - struct ccb_nvmeio *nvmeio; - - nvmeio = &ccb->nvmeio; - data_ptr = nvmeio->data_ptr; - dxfer_len = nvmeio->dxfer_len; - sglist_cnt = nvmeio->sglist_cnt; - break; - } - default: - panic("_bus_dmamap_load_ccb: Unsupported func code %d", - ccb_h->func_code); - } - - switch ((ccb_h->flags & CAM_DATA_MASK)) { - case CAM_DATA_VADDR: - error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len, - kernel_pmap, flags, NULL, nsegs); - break; - case CAM_DATA_PADDR: - error = _bus_dmamap_load_phys(dmat, map, - (vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL, - nsegs); - break; - case CAM_DATA_SG: - error = _bus_dmamap_load_vlist(dmat, map, - (bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap, - nsegs, flags, 0, dxfer_len); - break; - case CAM_DATA_SG_PADDR: - error = _bus_dmamap_load_plist(dmat, map, - (bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags); - break; - case CAM_DATA_BIO: - error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr, - nsegs, flags); - break; - default: - panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented", - ccb_h->flags); - } - return (error); -} - /* * Load a uio. */ static int _bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, int *nsegs, int flags) { bus_size_t resid; bus_size_t minlen; struct iovec *iov; pmap_t pmap; caddr_t addr; int error, i; if (uio->uio_segflg == UIO_USERSPACE) { KASSERT(uio->uio_td != NULL, ("bus_dmamap_load_uio: USERSPACE but no proc")); pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); } else pmap = kernel_pmap; resid = uio->uio_resid; iov = uio->uio_iov; error = 0; for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) { /* * Now at the first iovec to load. Load each iovec * until we have exhausted the residual count. */ addr = (caddr_t) iov[i].iov_base; minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len; if (minlen > 0) { error = _bus_dmamap_load_buffer(dmat, map, addr, minlen, pmap, flags, NULL, nsegs); resid -= minlen; } } return (error); } /* * Map the buffer buf into bus space using the dmamap map. 
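 *
 * Typical caller sketch (sc, sc->dmat, sc->map and driver_load_cb are
 * hypothetical driver state; driver_load_cb is a bus_dmamap_callback_t).
 * Without BUS_DMA_NOWAIT a resource shortage defers the load and
 * EINPROGRESS is returned, with the callback invoked later:
 *
 *	error = bus_dmamap_load(sc->dmat, sc->map, buf, buflen,
 *	    driver_load_cb, sc, BUS_DMA_NOWAIT);
 *	if (error != 0) {
 *		... ENOMEM; the callback has already run with the error ...
 *	}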
*/ int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; struct memdesc mem; int error; int nsegs; #ifdef KMSAN mem = memdesc_vaddr(buf, buflen); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif if ((flags & BUS_DMA_NOWAIT) == 0) { mem = memdesc_vaddr(buf, buflen); _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg); } nsegs = -1; error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap, flags, NULL, &nsegs); nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, 0); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int nsegs, error; M_ASSERTPKTHDR(m0); #ifdef KMSAN struct memdesc mem = memdesc_mbuf(m0); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif flags |= BUS_DMA_NOWAIT; nsegs = -1; error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags); ++nsegs; segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, 0, error); else (*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error); CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); return (error); } int bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags) { int error; #ifdef KMSAN struct memdesc mem = memdesc_mbuf(m0); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif flags |= BUS_DMA_NOWAIT; *nsegs = -1; error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags); ++*nsegs; _bus_dmamap_complete(dmat, map, segs, *nsegs, error); return (error); } int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int nsegs, error; #ifdef KMSAN struct memdesc mem = memdesc_uio(uio); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif flags |= BUS_DMA_NOWAIT; nsegs = -1; error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags); nsegs++; segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, 0, error); else (*callback)(callback_arg, segs, nsegs, uio->uio_resid, error); CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); return (error); } int bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { - bus_dma_segment_t *segs; struct ccb_hdr *ccb_h; struct memdesc mem; - int error; - int nsegs; - -#ifdef KMSAN - mem = memdesc_ccb(ccb); - _bus_dmamap_load_kmsan(dmat, map, &mem); -#endif ccb_h = &ccb->ccb_h; if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) { callback(callback_arg, NULL, 0, 0); return (0); } - if ((flags & BUS_DMA_NOWAIT) == 0) { - mem = memdesc_ccb(ccb); - _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg); - } - nsegs = -1; - error = 
_bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags); - nsegs++; - - CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", - __func__, dmat, flags, error, nsegs); - - if (error == EINPROGRESS) - return (error); - segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); - if (error) - (*callback)(callback_arg, segs, 0, error); - else - (*callback)(callback_arg, segs, nsegs, error); - /* - * Return ENOMEM to the caller so that it can pass it up the stack. - * This error only happens when NOWAIT is set, so deferral is disabled. - */ - if (error == ENOMEM) - return (error); - - return (0); + mem = memdesc_ccb(ccb); + return (bus_dmamap_load_mem(dmat, map, &mem, callback, callback_arg, + flags)); } int bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; struct memdesc mem; int error; int nsegs; #ifdef KMSAN mem = memdesc_bio(bio); _bus_dmamap_load_kmsan(dmat, map, &mem); #endif if ((flags & BUS_DMA_NOWAIT) == 0) { mem = memdesc_bio(bio); _bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg); } nsegs = -1; error = _bus_dmamap_load_bio(dmat, map, bio, &nsegs, flags); nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, error); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int error; int nsegs; #ifdef KMSAN _bus_dmamap_load_kmsan(dmat, map, mem); #endif if ((flags & BUS_DMA_NOWAIT) == 0) _bus_dmamap_waitok(dmat, map, mem, callback, callback_arg); nsegs = -1; error = 0; switch (mem->md_type) { case MEMDESC_VADDR: error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr, mem->md_opaque, kernel_pmap, flags, NULL, &nsegs); break; case MEMDESC_PADDR: error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr, mem->md_opaque, flags, NULL, &nsegs); break; case MEMDESC_VLIST: error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list, mem->md_opaque, kernel_pmap, &nsegs, flags, 0, SIZE_T_MAX); break; case MEMDESC_PLIST: error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list, mem->md_opaque, &nsegs, flags); break; case MEMDESC_BIO: error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio, &nsegs, flags); break; case MEMDESC_UIO: error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio, &nsegs, flags); break; case MEMDESC_MBUF: error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf, NULL, &nsegs, flags); break; - case MEMDESC_CCB: - error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs, - flags); - break; } nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, 0); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. 
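 *
 * A caller sketch (error handling is illustrative): the descriptor is
 * built up front, e.g. with memdesc_ccb() as the reworked
 * bus_dmamap_load_ccb() above now does, and then handed to this routine:
 *
 *	mem = memdesc_ccb(ccb);
 *	error = bus_dmamap_load_mem(dmat, map, &mem, callback, callback_arg,
 *	    BUS_DMA_NOWAIT);
 *	if (error == ENOMEM)
 *		... requeue and retry later; the callback already saw the error ...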
*/ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_crp_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, struct crypto_buffer *cb, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { bus_dma_segment_t *segs; int error; int nsegs; flags |= BUS_DMA_NOWAIT; nsegs = -1; error = 0; switch (cb->cb_type) { case CRYPTO_BUF_CONTIG: error = _bus_dmamap_load_buffer(dmat, map, cb->cb_buf, cb->cb_buf_len, kernel_pmap, flags, NULL, &nsegs); break; case CRYPTO_BUF_MBUF: error = _bus_dmamap_load_mbuf_sg(dmat, map, cb->cb_mbuf, NULL, &nsegs, flags); break; case CRYPTO_BUF_SINGLE_MBUF: error = _bus_dmamap_load_single_mbuf(dmat, map, cb->cb_mbuf, NULL, &nsegs, flags); break; case CRYPTO_BUF_UIO: error = _bus_dmamap_load_uio(dmat, map, cb->cb_uio, &nsegs, flags); break; case CRYPTO_BUF_VMPAGE: error = _bus_dmamap_load_ma(dmat, map, cb->cb_vm_page, cb->cb_vm_page_len, cb->cb_vm_page_offset, flags, NULL, &nsegs); break; default: error = EINVAL; } nsegs++; CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d", __func__, dmat, flags, error, nsegs); if (error == EINPROGRESS) return (error); segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error); if (error) (*callback)(callback_arg, segs, 0, error); else (*callback)(callback_arg, segs, nsegs, 0); /* * Return ENOMEM to the caller so that it can pass it up the stack. * This error only happens when NOWAIT is set, so deferral is disabled. */ if (error == ENOMEM) return (error); return (0); } int bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp, bus_dmamap_callback_t *callback, void *callback_arg, int flags) { return (bus_dmamap_load_crp_buffer(dmat, map, &crp->crp_buf, callback, callback_arg, flags)); } void bus_dma_template_init(bus_dma_template_t *t, bus_dma_tag_t parent) { if (t == NULL) return; t->parent = parent; t->alignment = 1; t->boundary = 0; t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR; t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE; t->nsegments = BUS_SPACE_UNRESTRICTED; t->lockfunc = NULL; t->lockfuncarg = NULL; t->flags = 0; } int bus_dma_template_tag(bus_dma_template_t *t, bus_dma_tag_t *dmat) { if (t == NULL || dmat == NULL) return (EINVAL); return (bus_dma_tag_create(t->parent, t->alignment, t->boundary, t->lowaddr, t->highaddr, NULL, NULL, t->maxsize, t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg, dmat)); } void bus_dma_template_fill(bus_dma_template_t *t, bus_dma_param_t *kv, u_int count) { bus_dma_param_t *pkv; while (count) { pkv = &kv[--count]; switch (pkv->key) { case BD_PARAM_PARENT: t->parent = pkv->ptr; break; case BD_PARAM_ALIGNMENT: t->alignment = pkv->num; break; case BD_PARAM_BOUNDARY: t->boundary = pkv->num; break; case BD_PARAM_LOWADDR: t->lowaddr = pkv->pa; break; case BD_PARAM_HIGHADDR: t->highaddr = pkv->pa; break; case BD_PARAM_MAXSIZE: t->maxsize = pkv->num; break; case BD_PARAM_NSEGMENTS: t->nsegments = pkv->num; break; case BD_PARAM_MAXSEGSIZE: t->maxsegsize = pkv->num; break; case BD_PARAM_FLAGS: t->flags = pkv->num; break; case BD_PARAM_LOCKFUNC: t->lockfunc = pkv->ptr; break; case BD_PARAM_LOCKFUNCARG: t->lockfuncarg = pkv->ptr; break; case BD_PARAM_NAME: t->name = pkv->ptr; break; case BD_PARAM_INVALID: default: KASSERT(0, ("Invalid key %d\n", pkv->key)); break; } } return; } #ifndef IOMMU bool bus_dma_iommu_set_buswide(device_t dev); int bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t start, vm_size_t length, int flags); bool bus_dma_iommu_set_buswide(device_t dev) { return (false); } int 
bus_dma_iommu_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t start, vm_size_t length, int flags) { return (0); } #endif diff --git a/sys/kern/subr_msan.c b/sys/kern/subr_msan.c index 236693cfd841..756d1fca4910 100644 --- a/sys/kern/subr_msan.c +++ b/sys/kern/subr_msan.c @@ -1,1621 +1,1543 @@ /* $NetBSD: subr_msan.c,v 1.14 2020/09/09 16:29:59 maxv Exp $ */ /* * Copyright (c) 2019-2020 Maxime Villard, m00nbsd.net * All rights reserved. * Copyright (c) 2021 The FreeBSD Foundation * * Portions of this software were developed by Mark Johnston under sponsorship * from the FreeBSD Foundation. * * This code is part of the KMSAN subsystem of the NetBSD kernel. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #define SAN_RUNTIME #include __FBSDID("$FreeBSD$"); #if 0 __KERNEL_RCSID(0, "$NetBSD: subr_msan.c,v 1.14 2020/09/09 16:29:59 maxv Exp $"); #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include -#include - #include #include #include #include void kmsan_init_arg(size_t); void kmsan_init_ret(size_t); /* -------------------------------------------------------------------------- */ /* * Part of the compiler ABI. */ typedef struct { uint8_t *shad; msan_orig_t *orig; } msan_meta_t; #define MSAN_PARAM_SIZE 800 #define MSAN_RETVAL_SIZE 800 typedef struct { uint8_t param_shadow[MSAN_PARAM_SIZE]; uint8_t retval_shadow[MSAN_RETVAL_SIZE]; uint8_t va_arg_shadow[MSAN_PARAM_SIZE]; uint8_t va_arg_origin[MSAN_PARAM_SIZE]; uint64_t va_arg_overflow_size; msan_orig_t param_origin[MSAN_PARAM_SIZE / sizeof(msan_orig_t)]; msan_orig_t retval_origin; } msan_tls_t; /* -------------------------------------------------------------------------- */ #define MSAN_NCONTEXT 4 #define MSAN_ORIG_MASK (~0x3) typedef struct kmsan_td { size_t ctx; msan_tls_t tls[MSAN_NCONTEXT]; } msan_td_t; static msan_tls_t dummy_tls; /* * Use separate dummy regions for loads and stores: stores may mark the region * as uninitialized, and that can trigger false positives. 
*/ static uint8_t msan_dummy_shad[PAGE_SIZE] __aligned(PAGE_SIZE); static uint8_t msan_dummy_write_shad[PAGE_SIZE] __aligned(PAGE_SIZE); static uint8_t msan_dummy_orig[PAGE_SIZE] __aligned(PAGE_SIZE); static msan_td_t msan_thread0; static bool kmsan_enabled __read_mostly; static bool kmsan_reporting = false; /* * Avoid clobbering any thread-local state before we panic. */ #define kmsan_panic(f, ...) do { \ kmsan_enabled = false; \ panic(f, __VA_ARGS__); \ } while (0) #define REPORT(f, ...) do { \ if (panic_on_violation) { \ kmsan_panic(f, __VA_ARGS__); \ } else { \ struct stack st; \ \ stack_save(&st); \ printf(f "\n", __VA_ARGS__); \ stack_print_ddb(&st); \ } \ } while (0) FEATURE(kmsan, "Kernel memory sanitizer"); static SYSCTL_NODE(_debug, OID_AUTO, kmsan, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "KMSAN options"); static bool panic_on_violation = 1; SYSCTL_BOOL(_debug_kmsan, OID_AUTO, panic_on_violation, CTLFLAG_RWTUN, &panic_on_violation, 0, "Panic if an invalid access is detected"); static MALLOC_DEFINE(M_KMSAN, "kmsan", "Kernel memory sanitizer"); /* -------------------------------------------------------------------------- */ static inline const char * kmsan_orig_name(int type) { switch (type) { case KMSAN_TYPE_STACK: return ("stack"); case KMSAN_TYPE_KMEM: return ("kmem"); case KMSAN_TYPE_MALLOC: return ("malloc"); case KMSAN_TYPE_UMA: return ("UMA"); default: return ("unknown"); } } static void kmsan_report_hook(const void *addr, size_t size, size_t off, const char *hook) { msan_orig_t *orig; const char *typename; char *var, *fn; uintptr_t ptr; long foff; char buf[128]; int type; if (__predict_false(KERNEL_PANICKED() || kdb_active || kmsan_reporting)) return; kmsan_reporting = true; __compiler_membar(); orig = (msan_orig_t *)kmsan_md_addr_to_orig((vm_offset_t)addr); orig = (msan_orig_t *)((uintptr_t)orig & MSAN_ORIG_MASK); if (*orig == 0) { REPORT("MSan: Uninitialized memory in %s, offset %zu", hook, off); goto out; } kmsan_md_orig_decode(*orig, &type, &ptr); typename = kmsan_orig_name(type); if (linker_ddb_search_symbol_name((caddr_t)ptr, buf, sizeof(buf), &foff) == 0) { REPORT("MSan: Uninitialized %s memory in %s, " "offset %zu/%zu, addr %p, from %s+%#lx", typename, hook, off, size, addr, buf, foff); } else if (__builtin_memcmp((void *)ptr, "----", 4) == 0) { /* * The format of the string is: "----var@function". Parse it to * display a nice warning. */ var = (char *)ptr + 4; strlcpy(buf, var, sizeof(buf)); var = buf; fn = strchr(buf, '@'); *fn++ = '\0'; REPORT("MSan: Uninitialized %s memory in %s, offset %zu, " "variable '%s' from %s", typename, hook, off, var, fn); } else { REPORT("MSan: Uninitialized %s memory in %s, " "offset %zu/%zu, addr %p, PC %p", typename, hook, off, size, addr, (void *)ptr); } out: __compiler_membar(); kmsan_reporting = false; } static void kmsan_report_inline(msan_orig_t orig, unsigned long pc) { const char *typename; char *var, *fn; uintptr_t ptr; char buf[128]; long foff; int type; if (__predict_false(KERNEL_PANICKED() || kdb_active || kmsan_reporting)) return; kmsan_reporting = true; __compiler_membar(); if (orig == 0) { REPORT("MSan: uninitialized variable in %p", (void *)pc); goto out; } kmsan_md_orig_decode(orig, &type, &ptr); typename = kmsan_orig_name(type); if (linker_ddb_search_symbol_name((caddr_t)ptr, buf, sizeof(buf), &foff) == 0) { REPORT("MSan: Uninitialized %s memory from %s+%#lx", typename, buf, foff); } else if (__builtin_memcmp((void *)ptr, "----", 4) == 0) { /* * The format of the string is: "----var@function". 
Parse it to * display a nice warning. */ var = (char *)ptr + 4; strlcpy(buf, var, sizeof(buf)); var = buf; fn = strchr(buf, '@'); *fn++ = '\0'; REPORT("MSan: Uninitialized variable '%s' from %s", var, fn); } else { REPORT("MSan: Uninitialized %s memory, origin %x", typename, orig); } out: __compiler_membar(); kmsan_reporting = false; } /* -------------------------------------------------------------------------- */ static inline msan_meta_t kmsan_meta_get(const void *addr, size_t size, const bool write) { msan_meta_t ret; if (__predict_false(!kmsan_enabled)) { ret.shad = write ? msan_dummy_write_shad : msan_dummy_shad; ret.orig = (msan_orig_t *)msan_dummy_orig; } else if (__predict_false(kmsan_md_unsupported((vm_offset_t)addr))) { ret.shad = write ? msan_dummy_write_shad : msan_dummy_shad; ret.orig = (msan_orig_t *)msan_dummy_orig; } else { ret.shad = (void *)kmsan_md_addr_to_shad((vm_offset_t)addr); ret.orig = (msan_orig_t *)kmsan_md_addr_to_orig((vm_offset_t)addr); ret.orig = (msan_orig_t *)((uintptr_t)ret.orig & MSAN_ORIG_MASK); } return (ret); } static inline void kmsan_origin_fill(const void *addr, msan_orig_t o, size_t size) { msan_orig_t *orig; size_t i; if (__predict_false(!kmsan_enabled)) return; if (__predict_false(kmsan_md_unsupported((vm_offset_t)addr))) return; orig = (msan_orig_t *)kmsan_md_addr_to_orig((vm_offset_t)addr); size += ((uintptr_t)orig & (sizeof(*orig) - 1)); orig = (msan_orig_t *)((uintptr_t)orig & MSAN_ORIG_MASK); for (i = 0; i < size; i += 4) { orig[i / 4] = o; } } static inline void kmsan_shadow_fill(uintptr_t addr, uint8_t c, size_t size) { uint8_t *shad; if (__predict_false(!kmsan_enabled)) return; if (__predict_false(kmsan_md_unsupported(addr))) return; shad = (uint8_t *)kmsan_md_addr_to_shad(addr); __builtin_memset(shad, c, size); } static inline void kmsan_meta_copy(void *dst, const void *src, size_t size) { uint8_t *orig_src, *orig_dst; uint8_t *shad_src, *shad_dst; msan_orig_t *_src, *_dst; size_t i; if (__predict_false(!kmsan_enabled)) return; if (__predict_false(kmsan_md_unsupported((vm_offset_t)dst))) return; if (__predict_false(kmsan_md_unsupported((vm_offset_t)src))) { kmsan_shadow_fill((uintptr_t)dst, KMSAN_STATE_INITED, size); return; } shad_src = (uint8_t *)kmsan_md_addr_to_shad((vm_offset_t)src); shad_dst = (uint8_t *)kmsan_md_addr_to_shad((vm_offset_t)dst); __builtin_memmove(shad_dst, shad_src, size); orig_src = (uint8_t *)kmsan_md_addr_to_orig((vm_offset_t)src); orig_dst = (uint8_t *)kmsan_md_addr_to_orig((vm_offset_t)dst); for (i = 0; i < size; i++) { _src = (msan_orig_t *)((uintptr_t)orig_src & MSAN_ORIG_MASK); _dst = (msan_orig_t *)((uintptr_t)orig_dst & MSAN_ORIG_MASK); *_dst = *_src; orig_src++; orig_dst++; } } static inline void kmsan_shadow_check(uintptr_t addr, size_t size, const char *hook) { uint8_t *shad; size_t i; if (__predict_false(!kmsan_enabled)) return; if (__predict_false(kmsan_md_unsupported(addr))) return; shad = (uint8_t *)kmsan_md_addr_to_shad(addr); for (i = 0; i < size; i++) { if (__predict_true(shad[i] == 0)) continue; kmsan_report_hook((const char *)addr + i, size, i, hook); break; } } void kmsan_init_arg(size_t n) { msan_td_t *mtd; uint8_t *arg; if (__predict_false(!kmsan_enabled)) return; if (__predict_false(curthread == NULL)) return; mtd = curthread->td_kmsan; arg = mtd->tls[mtd->ctx].param_shadow; __builtin_memset(arg, 0, n); } void kmsan_init_ret(size_t n) { msan_td_t *mtd; uint8_t *arg; if (__predict_false(!kmsan_enabled)) return; if (__predict_false(curthread == NULL)) return; mtd = curthread->td_kmsan; arg = 
mtd->tls[mtd->ctx].retval_shadow; __builtin_memset(arg, 0, n); } static void kmsan_check_arg(size_t size, const char *hook) { msan_td_t *mtd; uint8_t *arg; size_t i; if (__predict_false(!kmsan_enabled)) return; if (__predict_false(curthread == NULL)) return; mtd = curthread->td_kmsan; arg = mtd->tls[mtd->ctx].param_shadow; for (i = 0; i < size; i++) { if (__predict_true(arg[i] == 0)) continue; kmsan_report_hook((const char *)arg + i, size, i, hook); break; } } void kmsan_thread_alloc(struct thread *td) { msan_td_t *mtd; if (!kmsan_enabled) return; mtd = td->td_kmsan; if (mtd == NULL) { /* We might be recycling a thread. */ kmsan_init_arg(sizeof(size_t) + sizeof(struct malloc_type *) + sizeof(int)); mtd = malloc(sizeof(*mtd), M_KMSAN, M_WAITOK); } kmsan_memset(mtd, 0, sizeof(*mtd)); mtd->ctx = 0; if (td->td_kstack != 0) kmsan_mark((void *)td->td_kstack, ptoa(td->td_kstack_pages), KMSAN_STATE_UNINIT); td->td_kmsan = mtd; } void kmsan_thread_free(struct thread *td) { msan_td_t *mtd; if (!kmsan_enabled) return; if (__predict_false(td == curthread)) kmsan_panic("%s: freeing KMSAN TLS for curthread", __func__); mtd = td->td_kmsan; kmsan_init_arg(sizeof(void *) + sizeof(struct malloc_type *)); free(mtd, M_KMSAN); td->td_kmsan = NULL; } void kmsan_intr_enter(void); void kmsan_intr_leave(void); void kmsan_intr_enter(void) { msan_td_t *mtd; if (__predict_false(!kmsan_enabled)) return; mtd = curthread->td_kmsan; mtd->ctx++; if (__predict_false(mtd->ctx >= MSAN_NCONTEXT)) kmsan_panic("%s: mtd->ctx = %zu", __func__, mtd->ctx); } void kmsan_intr_leave(void) { msan_td_t *mtd; if (__predict_false(!kmsan_enabled)) return; mtd = curthread->td_kmsan; if (__predict_false(mtd->ctx == 0)) kmsan_panic("%s: mtd->ctx = %zu", __func__, mtd->ctx); mtd->ctx--; } /* -------------------------------------------------------------------------- */ void kmsan_shadow_map(vm_offset_t addr, size_t size) { size_t npages, i; vm_offset_t va; MPASS(addr % PAGE_SIZE == 0); MPASS(size % PAGE_SIZE == 0); if (!kmsan_enabled) return; npages = atop(size); va = kmsan_md_addr_to_shad(addr); for (i = 0; i < npages; i++) { pmap_san_enter(va + ptoa(i)); } va = kmsan_md_addr_to_orig(addr); for (i = 0; i < npages; i++) { pmap_san_enter(va + ptoa(i)); } } void kmsan_orig(const void *addr, size_t size, int type, uintptr_t pc) { msan_orig_t orig; orig = kmsan_md_orig_encode(type, pc); kmsan_origin_fill(addr, orig, size); } void kmsan_mark(const void *addr, size_t size, uint8_t c) { kmsan_shadow_fill((uintptr_t)addr, c, size); } void kmsan_mark_bio(const struct bio *bp, uint8_t c) { kmsan_mark(bp->bio_data, bp->bio_length, c); } -static void -kmsan_mark_ccb(const union ccb *ccb, uint8_t c) -{ - if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN) - return; - if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) - return; - - switch (ccb->ccb_h.func_code) { - case XPT_SCSI_IO: { - const struct ccb_scsiio *scsiio; - - scsiio = &ccb->ctio; - kmsan_mark(scsiio->data_ptr, scsiio->dxfer_len, c); - break; - } - case XPT_ATA_IO: { - const struct ccb_ataio *ataio; - - ataio = &ccb->ataio; - kmsan_mark(ataio->data_ptr, ataio->dxfer_len, c); - break; - } - case XPT_NVME_IO: { - const struct ccb_nvmeio *nvmeio; - - nvmeio = &ccb->nvmeio; - kmsan_mark(nvmeio->data_ptr, nvmeio->dxfer_len, c); - break; - } - default: - kmsan_panic("%s: unhandled CCB type %d", __func__, - ccb->ccb_h.func_code); - } -} - void kmsan_mark_mbuf(const struct mbuf *m, uint8_t c) { do { if ((m->m_flags & M_EXTPG) == 0) kmsan_mark(m->m_data, m->m_len, c); m = m->m_next; } while (m != 
NULL); } void kmsan_check(const void *p, size_t sz, const char *descr) { kmsan_shadow_check((uintptr_t)p, sz, descr); } void kmsan_check_bio(const struct bio *bp, const char *descr) { kmsan_shadow_check((uintptr_t)bp->bio_data, bp->bio_length, descr); } -void -kmsan_check_ccb(const union ccb *ccb, const char *descr) -{ - if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_OUT) - return; - switch (ccb->ccb_h.func_code) { - case XPT_SCSI_IO: { - const struct ccb_scsiio *scsiio; - - scsiio = &ccb->ctio; - kmsan_check(scsiio->data_ptr, scsiio->dxfer_len, descr); - break; - } - case XPT_ATA_IO: { - const struct ccb_ataio *ataio; - - ataio = &ccb->ataio; - kmsan_check(ataio->data_ptr, ataio->dxfer_len, descr); - break; - } - case XPT_NVME_IO: { - const struct ccb_nvmeio *nvmeio; - - nvmeio = &ccb->nvmeio; - kmsan_check(nvmeio->data_ptr, nvmeio->dxfer_len, descr); - break; - } - default: - kmsan_panic("%s: unhandled CCB type %d", __func__, - ccb->ccb_h.func_code); - } -} - void kmsan_check_mbuf(const struct mbuf *m, const char *descr) { do { kmsan_shadow_check((uintptr_t)mtod(m, void *), m->m_len, descr); } while ((m = m->m_next) != NULL); } void kmsan_init(void) { int disabled; disabled = 0; TUNABLE_INT_FETCH("debug.kmsan.disabled", &disabled); if (disabled) return; /* Initialize the TLS for curthread. */ msan_thread0.ctx = 0; thread0.td_kmsan = &msan_thread0; /* Now officially enabled. */ kmsan_enabled = true; } /* -------------------------------------------------------------------------- */ msan_meta_t __msan_metadata_ptr_for_load_n(void *, size_t); msan_meta_t __msan_metadata_ptr_for_store_n(void *, size_t); msan_meta_t __msan_metadata_ptr_for_load_n(void *addr, size_t size) { return (kmsan_meta_get(addr, size, false)); } msan_meta_t __msan_metadata_ptr_for_store_n(void *addr, size_t size) { return (kmsan_meta_get(addr, size, true)); } #define MSAN_META_FUNC(size) \ msan_meta_t __msan_metadata_ptr_for_load_##size(void *); \ msan_meta_t __msan_metadata_ptr_for_load_##size(void *addr) \ { \ return (kmsan_meta_get(addr, size, false)); \ } \ msan_meta_t __msan_metadata_ptr_for_store_##size(void *); \ msan_meta_t __msan_metadata_ptr_for_store_##size(void *addr) \ { \ return (kmsan_meta_get(addr, size, true)); \ } MSAN_META_FUNC(1) MSAN_META_FUNC(2) MSAN_META_FUNC(4) MSAN_META_FUNC(8) void __msan_instrument_asm_store(const void *, size_t); msan_orig_t __msan_chain_origin(msan_orig_t); void __msan_poison(const void *, size_t); void __msan_unpoison(const void *, size_t); void __msan_poison_alloca(const void *, uint64_t, const char *); void __msan_unpoison_alloca(const void *, uint64_t); void __msan_warning(msan_orig_t); msan_tls_t *__msan_get_context_state(void); void __msan_instrument_asm_store(const void *addr, size_t size) { kmsan_shadow_fill((uintptr_t)addr, KMSAN_STATE_INITED, size); } msan_orig_t __msan_chain_origin(msan_orig_t origin) { return (origin); } void __msan_poison(const void *addr, size_t size) { kmsan_shadow_fill((uintptr_t)addr, KMSAN_STATE_UNINIT, size); } void __msan_unpoison(const void *addr, size_t size) { kmsan_shadow_fill((uintptr_t)addr, KMSAN_STATE_INITED, size); } void __msan_poison_alloca(const void *addr, uint64_t size, const char *descr) { msan_orig_t orig; orig = kmsan_md_orig_encode(KMSAN_TYPE_STACK, (uintptr_t)descr); kmsan_origin_fill(addr, orig, size); kmsan_shadow_fill((uintptr_t)addr, KMSAN_STATE_UNINIT, size); } void __msan_unpoison_alloca(const void *addr, uint64_t size) { kmsan_shadow_fill((uintptr_t)addr, KMSAN_STATE_INITED, size); } void 
__msan_warning(msan_orig_t origin) { if (__predict_false(!kmsan_enabled)) return; kmsan_report_inline(origin, KMSAN_RET_ADDR); } msan_tls_t * __msan_get_context_state(void) { msan_td_t *mtd; /* * When APs are started, they execute some C code before curthread is * set. We have to handle that here. */ if (__predict_false(!kmsan_enabled || curthread == NULL)) return (&dummy_tls); mtd = curthread->td_kmsan; return (&mtd->tls[mtd->ctx]); } /* -------------------------------------------------------------------------- */ /* * Function hooks. Mostly ASM functions which need KMSAN wrappers to handle * initialized areas properly. */ void * kmsan_memcpy(void *dst, const void *src, size_t len) { /* No kmsan_check_arg, because inlined. */ kmsan_init_ret(sizeof(void *)); if (__predict_true(len != 0)) { kmsan_meta_copy(dst, src, len); } return (__builtin_memcpy(dst, src, len)); } int kmsan_memcmp(const void *b1, const void *b2, size_t len) { const uint8_t *_b1 = b1, *_b2 = b2; size_t i; kmsan_check_arg(sizeof(b1) + sizeof(b2) + sizeof(len), "memcmp():args"); kmsan_init_ret(sizeof(int)); for (i = 0; i < len; i++) { if (*_b1 != *_b2) { kmsan_shadow_check((uintptr_t)b1, i + 1, "memcmp():arg1"); kmsan_shadow_check((uintptr_t)b2, i + 1, "memcmp():arg2"); return (*_b1 - *_b2); } _b1++, _b2++; } return (0); } void * kmsan_memset(void *dst, int c, size_t len) { /* No kmsan_check_arg, because inlined. */ kmsan_shadow_fill((uintptr_t)dst, KMSAN_STATE_INITED, len); kmsan_init_ret(sizeof(void *)); return (__builtin_memset(dst, c, len)); } void * kmsan_memmove(void *dst, const void *src, size_t len) { /* No kmsan_check_arg, because inlined. */ if (__predict_true(len != 0)) { kmsan_meta_copy(dst, src, len); } kmsan_init_ret(sizeof(void *)); return (__builtin_memmove(dst, src, len)); } __strong_reference(kmsan_memcpy, __msan_memcpy); __strong_reference(kmsan_memset, __msan_memset); __strong_reference(kmsan_memmove, __msan_memmove); char * kmsan_strcpy(char *dst, const char *src) { const char *_src = src; char *_dst = dst; size_t len = 0; kmsan_check_arg(sizeof(dst) + sizeof(src), "strcpy():args"); while (1) { len++; *dst = *src; if (*src == '\0') break; src++, dst++; } kmsan_shadow_check((uintptr_t)_src, len, "strcpy():arg2"); kmsan_shadow_fill((uintptr_t)_dst, KMSAN_STATE_INITED, len); kmsan_init_ret(sizeof(char *)); return (_dst); } int kmsan_strcmp(const char *s1, const char *s2) { const char *_s1 = s1, *_s2 = s2; size_t len = 0; kmsan_check_arg(sizeof(s1) + sizeof(s2), "strcmp():args"); kmsan_init_ret(sizeof(int)); while (1) { len++; if (*s1 != *s2) break; if (*s1 == '\0') { kmsan_shadow_check((uintptr_t)_s1, len, "strcmp():arg1"); kmsan_shadow_check((uintptr_t)_s2, len, "strcmp():arg2"); return (0); } s1++, s2++; } kmsan_shadow_check((uintptr_t)_s1, len, "strcmp():arg1"); kmsan_shadow_check((uintptr_t)_s2, len, "strcmp():arg2"); return (*(const unsigned char *)s1 - *(const unsigned char *)s2); } size_t kmsan_strlen(const char *str) { const char *s; kmsan_check_arg(sizeof(str), "strlen():args"); s = str; while (1) { if (*s == '\0') break; s++; } kmsan_shadow_check((uintptr_t)str, (size_t)(s - str) + 1, "strlen():arg1"); kmsan_init_ret(sizeof(size_t)); return (s - str); } int kmsan_copyin(const void *, void *, size_t); int kmsan_copyout(const void *, void *, size_t); int kmsan_copyinstr(const void *, void *, size_t, size_t *); int kmsan_copyin(const void *uaddr, void *kaddr, size_t len) { int ret; kmsan_check_arg(sizeof(uaddr) + sizeof(kaddr) + sizeof(len), "copyin():args"); ret = copyin(uaddr, kaddr, len); if (ret 
== 0) kmsan_shadow_fill((uintptr_t)kaddr, KMSAN_STATE_INITED, len); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_copyout(const void *kaddr, void *uaddr, size_t len) { kmsan_check_arg(sizeof(kaddr) + sizeof(uaddr) + sizeof(len), "copyout():args"); kmsan_shadow_check((uintptr_t)kaddr, len, "copyout():arg1"); kmsan_init_ret(sizeof(int)); return (copyout(kaddr, uaddr, len)); } int kmsan_copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) { size_t _done; int ret; kmsan_check_arg(sizeof(uaddr) + sizeof(kaddr) + sizeof(len) + sizeof(done), "copyinstr():args"); ret = copyinstr(uaddr, kaddr, len, &_done); if (ret == 0) kmsan_shadow_fill((uintptr_t)kaddr, KMSAN_STATE_INITED, _done); if (done != NULL) { *done = _done; kmsan_shadow_fill((uintptr_t)done, KMSAN_STATE_INITED, sizeof(size_t)); } kmsan_init_ret(sizeof(int)); return (ret); } /* -------------------------------------------------------------------------- */ int kmsan_fubyte(volatile const void *base) { int ret; kmsan_check_arg(sizeof(base), "fubyte(): args"); ret = fubyte(base); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_fuword16(volatile const void *base) { int ret; kmsan_check_arg(sizeof(base), "fuword16(): args"); ret = fuword16(base); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_fueword(volatile const void *base, long *val) { int ret; kmsan_check_arg(sizeof(base) + sizeof(val), "fueword(): args"); ret = fueword(base, val); if (ret == 0) kmsan_shadow_fill((uintptr_t)val, KMSAN_STATE_INITED, sizeof(*val)); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_fueword32(volatile const void *base, int32_t *val) { int ret; kmsan_check_arg(sizeof(base) + sizeof(val), "fueword32(): args"); ret = fueword32(base, val); if (ret == 0) kmsan_shadow_fill((uintptr_t)val, KMSAN_STATE_INITED, sizeof(*val)); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_fueword64(volatile const void *base, int64_t *val) { int ret; kmsan_check_arg(sizeof(base) + sizeof(val), "fueword64(): args"); ret = fueword64(base, val); if (ret == 0) kmsan_shadow_fill((uintptr_t)val, KMSAN_STATE_INITED, sizeof(*val)); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_subyte(volatile void *base, int byte) { int ret; kmsan_check_arg(sizeof(base) + sizeof(byte), "subyte():args"); ret = subyte(base, byte); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_suword(volatile void *base, long word) { int ret; kmsan_check_arg(sizeof(base) + sizeof(word), "suword():args"); ret = suword(base, word); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_suword16(volatile void *base, int word) { int ret; kmsan_check_arg(sizeof(base) + sizeof(word), "suword16():args"); ret = suword16(base, word); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_suword32(volatile void *base, int32_t word) { int ret; kmsan_check_arg(sizeof(base) + sizeof(word), "suword32():args"); ret = suword32(base, word); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_suword64(volatile void *base, int64_t word) { int ret; kmsan_check_arg(sizeof(base) + sizeof(word), "suword64():args"); ret = suword64(base, word); kmsan_init_ret(sizeof(int)); return (ret); } int kmsan_casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp, uint32_t newval) { int ret; kmsan_check_arg(sizeof(base) + sizeof(oldval) + sizeof(oldvalp) + sizeof(newval), "casueword32(): args"); ret = casueword32(base, oldval, oldvalp, newval); kmsan_shadow_fill((uintptr_t)oldvalp, KMSAN_STATE_INITED, sizeof(*oldvalp)); kmsan_init_ret(sizeof(int)); return (ret); } int 
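The copyin()/copyinstr() and fu*/su*/casueword* wrappers above all follow the same four-step shape: check that the wrapper's own arguments are initialized, call the real primitive, mark the kernel-side destination initialized on success, and mark the return value initialized for the caller. A minimal sketch of that shape, written against a hypothetical user-access primitive fuexample() (not a real FreeBSD function) purely to make the pattern explicit:

int
kmsan_fuexample(volatile const void *base, int32_t *val)
{
	int ret;

	/* 1. The wrapper's own arguments must be fully initialized. */
	kmsan_check_arg(sizeof(base) + sizeof(val), "fuexample(): args");

	/* 2. Perform the real access (hypothetical primitive). */
	ret = fuexample(base, val);

	/* 3. On success, the kernel destination now holds defined data. */
	if (ret == 0)
		kmsan_shadow_fill((uintptr_t)val, KMSAN_STATE_INITED,
		    sizeof(*val));

	/* 4. The int handed back to the instrumented caller is defined. */
	kmsan_init_ret(sizeof(int));
	return (ret);
}

The copyout() wrapper goes the other way: it kmsan_shadow_check()s the kernel source buffer so that uninitialized kernel bytes are reported before they can leak to user space.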
kmsan_casueword(volatile u_long *base, u_long oldval, u_long *oldvalp, u_long newval) { int ret; kmsan_check_arg(sizeof(base) + sizeof(oldval) + sizeof(oldvalp) + sizeof(newval), "casueword32(): args"); ret = casueword(base, oldval, oldvalp, newval); kmsan_shadow_fill((uintptr_t)oldvalp, KMSAN_STATE_INITED, sizeof(*oldvalp)); kmsan_init_ret(sizeof(int)); return (ret); } /* -------------------------------------------------------------------------- */ #include #include #define _MSAN_ATOMIC_FUNC_ADD(name, type) \ void kmsan_atomic_add_##name(volatile type *ptr, type val) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(val), \ "atomic_add_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_add_" #name "():ptr"); \ atomic_add_##name(ptr, val); \ } #define MSAN_ATOMIC_FUNC_ADD(name, type) \ _MSAN_ATOMIC_FUNC_ADD(name, type) \ _MSAN_ATOMIC_FUNC_ADD(acq_##name, type) \ _MSAN_ATOMIC_FUNC_ADD(rel_##name, type) #define _MSAN_ATOMIC_FUNC_SUBTRACT(name, type) \ void kmsan_atomic_subtract_##name(volatile type *ptr, type val) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(val), \ "atomic_subtract_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_subtract_" #name "():ptr"); \ atomic_subtract_##name(ptr, val); \ } #define MSAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _MSAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _MSAN_ATOMIC_FUNC_SUBTRACT(acq_##name, type) \ _MSAN_ATOMIC_FUNC_SUBTRACT(rel_##name, type) #define _MSAN_ATOMIC_FUNC_SET(name, type) \ void kmsan_atomic_set_##name(volatile type *ptr, type val) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(val), \ "atomic_set_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_set_" #name "():ptr"); \ atomic_set_##name(ptr, val); \ } #define MSAN_ATOMIC_FUNC_SET(name, type) \ _MSAN_ATOMIC_FUNC_SET(name, type) \ _MSAN_ATOMIC_FUNC_SET(acq_##name, type) \ _MSAN_ATOMIC_FUNC_SET(rel_##name, type) #define _MSAN_ATOMIC_FUNC_CLEAR(name, type) \ void kmsan_atomic_clear_##name(volatile type *ptr, type val) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(val), \ "atomic_clear_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_clear_" #name "():ptr"); \ atomic_clear_##name(ptr, val); \ } #define MSAN_ATOMIC_FUNC_CLEAR(name, type) \ _MSAN_ATOMIC_FUNC_CLEAR(name, type) \ _MSAN_ATOMIC_FUNC_CLEAR(acq_##name, type) \ _MSAN_ATOMIC_FUNC_CLEAR(rel_##name, type) #define MSAN_ATOMIC_FUNC_FETCHADD(name, type) \ type kmsan_atomic_fetchadd_##name(volatile type *ptr, type val) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(val), \ "atomic_fetchadd_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_fetchadd_" #name "():ptr"); \ kmsan_init_ret(sizeof(type)); \ return (atomic_fetchadd_##name(ptr, val)); \ } #define MSAN_ATOMIC_FUNC_READANDCLEAR(name, type) \ type kmsan_atomic_readandclear_##name(volatile type *ptr) \ { \ kmsan_check_arg(sizeof(ptr), \ "atomic_readandclear_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_readandclear_" #name "():ptr"); \ kmsan_init_ret(sizeof(type)); \ return (atomic_readandclear_##name(ptr)); \ } #define MSAN_ATOMIC_FUNC_TESTANDCLEAR(name, type) \ int kmsan_atomic_testandclear_##name(volatile type *ptr, u_int v) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(v), \ "atomic_testandclear_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_testandclear_" #name "():ptr"); \ kmsan_init_ret(sizeof(int)); \ return (atomic_testandclear_##name(ptr, v)); \ } #define MSAN_ATOMIC_FUNC_TESTANDSET(name, 
type) \ int kmsan_atomic_testandset_##name(volatile type *ptr, u_int v) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(v), \ "atomic_testandset_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_testandset_" #name "():ptr"); \ kmsan_init_ret(sizeof(int)); \ return (atomic_testandset_##name(ptr, v)); \ } #define MSAN_ATOMIC_FUNC_SWAP(name, type) \ type kmsan_atomic_swap_##name(volatile type *ptr, type val) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(val), \ "atomic_swap_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_swap_" #name "():ptr"); \ kmsan_init_ret(sizeof(type)); \ return (atomic_swap_##name(ptr, val)); \ } #define _MSAN_ATOMIC_FUNC_CMPSET(name, type) \ int kmsan_atomic_cmpset_##name(volatile type *ptr, type oval, \ type nval) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(oval) + \ sizeof(nval), "atomic_cmpset_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_cmpset_" #name "():ptr"); \ kmsan_init_ret(sizeof(int)); \ return (atomic_cmpset_##name(ptr, oval, nval)); \ } #define MSAN_ATOMIC_FUNC_CMPSET(name, type) \ _MSAN_ATOMIC_FUNC_CMPSET(name, type) \ _MSAN_ATOMIC_FUNC_CMPSET(acq_##name, type) \ _MSAN_ATOMIC_FUNC_CMPSET(rel_##name, type) #define _MSAN_ATOMIC_FUNC_FCMPSET(name, type) \ int kmsan_atomic_fcmpset_##name(volatile type *ptr, type *oval, \ type nval) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(oval) + \ sizeof(nval), "atomic_fcmpset_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_fcmpset_" #name "():ptr"); \ kmsan_init_ret(sizeof(int)); \ return (atomic_fcmpset_##name(ptr, oval, nval)); \ } #define MSAN_ATOMIC_FUNC_FCMPSET(name, type) \ _MSAN_ATOMIC_FUNC_FCMPSET(name, type) \ _MSAN_ATOMIC_FUNC_FCMPSET(acq_##name, type) \ _MSAN_ATOMIC_FUNC_FCMPSET(rel_##name, type) #define MSAN_ATOMIC_FUNC_THREAD_FENCE(name) \ void kmsan_atomic_thread_fence_##name(void) \ { \ atomic_thread_fence_##name(); \ } #define _MSAN_ATOMIC_FUNC_LOAD(name, type) \ type kmsan_atomic_load_##name(volatile type *ptr) \ { \ kmsan_check_arg(sizeof(ptr), \ "atomic_load_" #name "():args"); \ kmsan_shadow_check((uintptr_t)ptr, sizeof(type), \ "atomic_load_" #name "():ptr"); \ kmsan_init_ret(sizeof(type)); \ return (atomic_load_##name(ptr)); \ } #define MSAN_ATOMIC_FUNC_LOAD(name, type) \ _MSAN_ATOMIC_FUNC_LOAD(name, type) \ _MSAN_ATOMIC_FUNC_LOAD(acq_##name, type) #define _MSAN_ATOMIC_FUNC_STORE(name, type) \ void kmsan_atomic_store_##name(volatile type *ptr, type val) \ { \ kmsan_check_arg(sizeof(ptr) + sizeof(val), \ "atomic_store_" #name "():args"); \ kmsan_shadow_fill((uintptr_t)ptr, KMSAN_STATE_INITED, \ sizeof(type)); \ atomic_store_##name(ptr, val); \ } #define MSAN_ATOMIC_FUNC_STORE(name, type) \ _MSAN_ATOMIC_FUNC_STORE(name, type) \ _MSAN_ATOMIC_FUNC_STORE(rel_##name, type) MSAN_ATOMIC_FUNC_ADD(8, uint8_t); MSAN_ATOMIC_FUNC_ADD(16, uint16_t); MSAN_ATOMIC_FUNC_ADD(32, uint32_t); MSAN_ATOMIC_FUNC_ADD(64, uint64_t); MSAN_ATOMIC_FUNC_ADD(int, u_int); MSAN_ATOMIC_FUNC_ADD(long, u_long); MSAN_ATOMIC_FUNC_ADD(ptr, uintptr_t); MSAN_ATOMIC_FUNC_SUBTRACT(8, uint8_t); MSAN_ATOMIC_FUNC_SUBTRACT(16, uint16_t); MSAN_ATOMIC_FUNC_SUBTRACT(32, uint32_t); MSAN_ATOMIC_FUNC_SUBTRACT(64, uint64_t); MSAN_ATOMIC_FUNC_SUBTRACT(int, u_int); MSAN_ATOMIC_FUNC_SUBTRACT(long, u_long); MSAN_ATOMIC_FUNC_SUBTRACT(ptr, uintptr_t); MSAN_ATOMIC_FUNC_SET(8, uint8_t); MSAN_ATOMIC_FUNC_SET(16, uint16_t); MSAN_ATOMIC_FUNC_SET(32, uint32_t); MSAN_ATOMIC_FUNC_SET(64, uint64_t); MSAN_ATOMIC_FUNC_SET(int, u_int); 
MSAN_ATOMIC_FUNC_SET(long, u_long); MSAN_ATOMIC_FUNC_SET(ptr, uintptr_t); MSAN_ATOMIC_FUNC_CLEAR(8, uint8_t); MSAN_ATOMIC_FUNC_CLEAR(16, uint16_t); MSAN_ATOMIC_FUNC_CLEAR(32, uint32_t); MSAN_ATOMIC_FUNC_CLEAR(64, uint64_t); MSAN_ATOMIC_FUNC_CLEAR(int, u_int); MSAN_ATOMIC_FUNC_CLEAR(long, u_long); MSAN_ATOMIC_FUNC_CLEAR(ptr, uintptr_t); MSAN_ATOMIC_FUNC_FETCHADD(32, uint32_t); MSAN_ATOMIC_FUNC_FETCHADD(64, uint64_t); MSAN_ATOMIC_FUNC_FETCHADD(int, u_int); MSAN_ATOMIC_FUNC_FETCHADD(long, u_long); MSAN_ATOMIC_FUNC_READANDCLEAR(32, uint32_t); MSAN_ATOMIC_FUNC_READANDCLEAR(64, uint64_t); MSAN_ATOMIC_FUNC_READANDCLEAR(int, u_int); MSAN_ATOMIC_FUNC_READANDCLEAR(long, u_long); MSAN_ATOMIC_FUNC_READANDCLEAR(ptr, uintptr_t); MSAN_ATOMIC_FUNC_TESTANDCLEAR(32, uint32_t); MSAN_ATOMIC_FUNC_TESTANDCLEAR(64, uint64_t); MSAN_ATOMIC_FUNC_TESTANDCLEAR(int, u_int); MSAN_ATOMIC_FUNC_TESTANDCLEAR(long, u_long); MSAN_ATOMIC_FUNC_TESTANDSET(32, uint32_t); MSAN_ATOMIC_FUNC_TESTANDSET(64, uint64_t); MSAN_ATOMIC_FUNC_TESTANDSET(int, u_int); MSAN_ATOMIC_FUNC_TESTANDSET(long, u_long); MSAN_ATOMIC_FUNC_SWAP(32, uint32_t); MSAN_ATOMIC_FUNC_SWAP(64, uint64_t); MSAN_ATOMIC_FUNC_SWAP(int, u_int); MSAN_ATOMIC_FUNC_SWAP(long, u_long); MSAN_ATOMIC_FUNC_SWAP(ptr, uintptr_t); MSAN_ATOMIC_FUNC_CMPSET(8, uint8_t); MSAN_ATOMIC_FUNC_CMPSET(16, uint16_t); MSAN_ATOMIC_FUNC_CMPSET(32, uint32_t); MSAN_ATOMIC_FUNC_CMPSET(64, uint64_t); MSAN_ATOMIC_FUNC_CMPSET(int, u_int); MSAN_ATOMIC_FUNC_CMPSET(long, u_long); MSAN_ATOMIC_FUNC_CMPSET(ptr, uintptr_t); MSAN_ATOMIC_FUNC_FCMPSET(8, uint8_t); MSAN_ATOMIC_FUNC_FCMPSET(16, uint16_t); MSAN_ATOMIC_FUNC_FCMPSET(32, uint32_t); MSAN_ATOMIC_FUNC_FCMPSET(64, uint64_t); MSAN_ATOMIC_FUNC_FCMPSET(int, u_int); MSAN_ATOMIC_FUNC_FCMPSET(long, u_long); MSAN_ATOMIC_FUNC_FCMPSET(ptr, uintptr_t); _MSAN_ATOMIC_FUNC_LOAD(bool, bool); MSAN_ATOMIC_FUNC_LOAD(8, uint8_t); MSAN_ATOMIC_FUNC_LOAD(16, uint16_t); MSAN_ATOMIC_FUNC_LOAD(32, uint32_t); MSAN_ATOMIC_FUNC_LOAD(64, uint64_t); MSAN_ATOMIC_FUNC_LOAD(char, u_char); MSAN_ATOMIC_FUNC_LOAD(short, u_short); MSAN_ATOMIC_FUNC_LOAD(int, u_int); MSAN_ATOMIC_FUNC_LOAD(long, u_long); MSAN_ATOMIC_FUNC_LOAD(ptr, uintptr_t); _MSAN_ATOMIC_FUNC_STORE(bool, bool); MSAN_ATOMIC_FUNC_STORE(8, uint8_t); MSAN_ATOMIC_FUNC_STORE(16, uint16_t); MSAN_ATOMIC_FUNC_STORE(32, uint32_t); MSAN_ATOMIC_FUNC_STORE(64, uint64_t); MSAN_ATOMIC_FUNC_STORE(char, u_char); MSAN_ATOMIC_FUNC_STORE(short, u_short); MSAN_ATOMIC_FUNC_STORE(int, u_int); MSAN_ATOMIC_FUNC_STORE(long, u_long); MSAN_ATOMIC_FUNC_STORE(ptr, uintptr_t); MSAN_ATOMIC_FUNC_THREAD_FENCE(acq); MSAN_ATOMIC_FUNC_THREAD_FENCE(rel); MSAN_ATOMIC_FUNC_THREAD_FENCE(acq_rel); MSAN_ATOMIC_FUNC_THREAD_FENCE(seq_cst); void kmsan_atomic_interrupt_fence(void) { atomic_interrupt_fence(); } /* -------------------------------------------------------------------------- */ #include #include #include int kmsan_bus_space_map(bus_space_tag_t tag, bus_addr_t hnd, bus_size_t size, int flags, bus_space_handle_t *handlep) { return (bus_space_map(tag, hnd, size, flags, handlep)); } void kmsan_bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_unmap(tag, hnd, size); } int kmsan_bus_space_subregion(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, bus_space_handle_t *handlep) { return (bus_space_subregion(tag, hnd, offset, size, handlep)); } void kmsan_bus_space_free(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_free(tag, hnd, size); } void 
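The kmsan_atomic_* interceptors defined above are all generated by the MSAN_ATOMIC_FUNC_* macros: operations that read memory kmsan_shadow_check() the target word and kmsan_init_ret() the value they hand back, while stores instead kmsan_shadow_fill() the target so later loads of it are treated as defined. As a concrete illustration, MSAN_ATOMIC_FUNC_LOAD(int, u_int) expands (for the plain variant; an acq_ variant is generated the same way) to roughly:

u_int
kmsan_atomic_load_int(volatile u_int *ptr)
{
	/* The pointer argument itself must be initialized. */
	kmsan_check_arg(sizeof(ptr), "atomic_load_int():args");
	/* So must the word being loaded. */
	kmsan_shadow_check((uintptr_t)ptr, sizeof(u_int),
	    "atomic_load_int():ptr");
	/* The loaded value returned to the caller is defined. */
	kmsan_init_ret(sizeof(u_int));
	return (atomic_load_int(ptr));
}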
kmsan_bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, int flags) { bus_space_barrier(tag, hnd, offset, size, flags); } /* XXXMJ x86-specific */ #define MSAN_BUS_READ_FUNC(func, width, type) \ type kmsan_bus_space_read##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset) \ { \ type ret; \ if ((tag) != X86_BUS_SPACE_IO) \ kmsan_shadow_fill((uintptr_t)(hnd + offset), \ KMSAN_STATE_INITED, (width)); \ ret = bus_space_read##func##_##width(tag, hnd, offset); \ kmsan_init_ret(sizeof(type)); \ return (ret); \ } \ #define MSAN_BUS_READ_PTR_FUNC(func, width, type) \ void kmsan_bus_space_read_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t size, type *buf, \ bus_size_t count) \ { \ kmsan_shadow_fill((uintptr_t)buf, KMSAN_STATE_INITED, \ (width) * count); \ bus_space_read_##func##_##width(tag, hnd, size, buf, \ count); \ } MSAN_BUS_READ_FUNC(, 1, uint8_t) MSAN_BUS_READ_FUNC(_stream, 1, uint8_t) MSAN_BUS_READ_PTR_FUNC(multi, 1, uint8_t) MSAN_BUS_READ_PTR_FUNC(multi_stream, 1, uint8_t) MSAN_BUS_READ_PTR_FUNC(region, 1, uint8_t) MSAN_BUS_READ_PTR_FUNC(region_stream, 1, uint8_t) MSAN_BUS_READ_FUNC(, 2, uint16_t) MSAN_BUS_READ_FUNC(_stream, 2, uint16_t) MSAN_BUS_READ_PTR_FUNC(multi, 2, uint16_t) MSAN_BUS_READ_PTR_FUNC(multi_stream, 2, uint16_t) MSAN_BUS_READ_PTR_FUNC(region, 2, uint16_t) MSAN_BUS_READ_PTR_FUNC(region_stream, 2, uint16_t) MSAN_BUS_READ_FUNC(, 4, uint32_t) MSAN_BUS_READ_FUNC(_stream, 4, uint32_t) MSAN_BUS_READ_PTR_FUNC(multi, 4, uint32_t) MSAN_BUS_READ_PTR_FUNC(multi_stream, 4, uint32_t) MSAN_BUS_READ_PTR_FUNC(region, 4, uint32_t) MSAN_BUS_READ_PTR_FUNC(region_stream, 4, uint32_t) MSAN_BUS_READ_FUNC(, 8, uint64_t) #define MSAN_BUS_WRITE_FUNC(func, width, type) \ void kmsan_bus_space_write##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value) \ { \ bus_space_write##func##_##width(tag, hnd, offset, value);\ } \ #define MSAN_BUS_WRITE_PTR_FUNC(func, width, type) \ void kmsan_bus_space_write_##func##_##width(bus_space_tag_t tag,\ bus_space_handle_t hnd, bus_size_t size, const type *buf, \ bus_size_t count) \ { \ kmsan_shadow_check((uintptr_t)buf, sizeof(type) * count,\ "bus_space_write()"); \ bus_space_write_##func##_##width(tag, hnd, size, buf, \ count); \ } MSAN_BUS_WRITE_FUNC(, 1, uint8_t) MSAN_BUS_WRITE_FUNC(_stream, 1, uint8_t) MSAN_BUS_WRITE_PTR_FUNC(multi, 1, uint8_t) MSAN_BUS_WRITE_PTR_FUNC(multi_stream, 1, uint8_t) MSAN_BUS_WRITE_PTR_FUNC(region, 1, uint8_t) MSAN_BUS_WRITE_PTR_FUNC(region_stream, 1, uint8_t) MSAN_BUS_WRITE_FUNC(, 2, uint16_t) MSAN_BUS_WRITE_FUNC(_stream, 2, uint16_t) MSAN_BUS_WRITE_PTR_FUNC(multi, 2, uint16_t) MSAN_BUS_WRITE_PTR_FUNC(multi_stream, 2, uint16_t) MSAN_BUS_WRITE_PTR_FUNC(region, 2, uint16_t) MSAN_BUS_WRITE_PTR_FUNC(region_stream, 2, uint16_t) MSAN_BUS_WRITE_FUNC(, 4, uint32_t) MSAN_BUS_WRITE_FUNC(_stream, 4, uint32_t) MSAN_BUS_WRITE_PTR_FUNC(multi, 4, uint32_t) MSAN_BUS_WRITE_PTR_FUNC(multi_stream, 4, uint32_t) MSAN_BUS_WRITE_PTR_FUNC(region, 4, uint32_t) MSAN_BUS_WRITE_PTR_FUNC(region_stream, 4, uint32_t) MSAN_BUS_WRITE_FUNC(, 8, uint64_t) #define MSAN_BUS_SET_FUNC(func, width, type) \ void kmsan_bus_space_set_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value, \ bus_size_t count) \ { \ bus_space_set_##func##_##width(tag, hnd, offset, value, \ count); \ } MSAN_BUS_SET_FUNC(multi, 1, uint8_t) MSAN_BUS_SET_FUNC(region, 1, uint8_t) MSAN_BUS_SET_FUNC(multi_stream, 1, 
uint8_t) MSAN_BUS_SET_FUNC(region_stream, 1, uint8_t) MSAN_BUS_SET_FUNC(multi, 2, uint16_t) MSAN_BUS_SET_FUNC(region, 2, uint16_t) MSAN_BUS_SET_FUNC(multi_stream, 2, uint16_t) MSAN_BUS_SET_FUNC(region_stream, 2, uint16_t) MSAN_BUS_SET_FUNC(multi, 4, uint32_t) MSAN_BUS_SET_FUNC(region, 4, uint32_t) MSAN_BUS_SET_FUNC(multi_stream, 4, uint32_t) MSAN_BUS_SET_FUNC(region_stream, 4, uint32_t) /* -------------------------------------------------------------------------- */ void kmsan_bus_dmamap_sync(struct memdesc *desc, bus_dmasync_op_t op) { /* * Some drivers, e.g., nvme, use the same code path for loading device * read and write requests, and will thus specify both flags. In this * case we should not do any checking since it will generally lead to * false positives. */ if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) == BUS_DMASYNC_PREWRITE) { switch (desc->md_type) { case MEMDESC_VADDR: kmsan_check(desc->u.md_vaddr, desc->md_opaque, "dmasync"); break; case MEMDESC_BIO: kmsan_check_bio(desc->u.md_bio, "dmasync"); break; case MEMDESC_MBUF: kmsan_check_mbuf(desc->u.md_mbuf, "dmasync"); break; - case MEMDESC_CCB: - kmsan_check_ccb(desc->u.md_ccb, "dmasync"); - break; case 0: break; default: kmsan_panic("%s: unhandled memdesc type %d", __func__, desc->md_type); } } if ((op & BUS_DMASYNC_POSTREAD) != 0) { switch (desc->md_type) { case MEMDESC_VADDR: kmsan_mark(desc->u.md_vaddr, desc->md_opaque, KMSAN_STATE_INITED); break; case MEMDESC_BIO: kmsan_mark_bio(desc->u.md_bio, KMSAN_STATE_INITED); break; case MEMDESC_MBUF: kmsan_mark_mbuf(desc->u.md_mbuf, KMSAN_STATE_INITED); break; - case MEMDESC_CCB: - kmsan_mark_ccb(desc->u.md_ccb, KMSAN_STATE_INITED); - break; case 0: break; default: kmsan_panic("%s: unhandled memdesc type %d", __func__, desc->md_type); } } } diff --git a/sys/sys/memdesc.h b/sys/sys/memdesc.h index 68dc83d03ef0..1c92ae5b1eb5 100644 --- a/sys/sys/memdesc.h +++ b/sys/sys/memdesc.h @@ -1,158 +1,148 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2012 EMC Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_MEMDESC_H_ #define _SYS_MEMDESC_H_ struct bio; struct bus_dma_segment; struct uio; struct mbuf; union ccb; /* * struct memdesc encapsulates various memory descriptors and provides * abstract access to them. 
*/ struct memdesc { union { void *md_vaddr; vm_paddr_t md_paddr; struct bus_dma_segment *md_list; struct bio *md_bio; struct uio *md_uio; struct mbuf *md_mbuf; - union ccb *md_ccb; } u; size_t md_opaque; /* type specific data. */ uint32_t md_type; /* Type of memory. */ }; #define MEMDESC_VADDR 1 /* Contiguous virtual address. */ #define MEMDESC_PADDR 2 /* Contiguous physical address. */ #define MEMDESC_VLIST 3 /* scatter/gather list of kva addresses. */ #define MEMDESC_PLIST 4 /* scatter/gather list of physical addresses. */ #define MEMDESC_BIO 5 /* Pointer to a bio (block io). */ #define MEMDESC_UIO 6 /* Pointer to a uio (any io). */ #define MEMDESC_MBUF 7 /* Pointer to a mbuf (network io). */ -#define MEMDESC_CCB 8 /* Cam control block. (scsi/ata io). */ static inline struct memdesc memdesc_vaddr(void *vaddr, size_t len) { struct memdesc mem; mem.u.md_vaddr = vaddr; mem.md_opaque = len; mem.md_type = MEMDESC_VADDR; return (mem); } static inline struct memdesc memdesc_paddr(vm_paddr_t paddr, size_t len) { struct memdesc mem; mem.u.md_paddr = paddr; mem.md_opaque = len; mem.md_type = MEMDESC_PADDR; return (mem); } static inline struct memdesc memdesc_vlist(struct bus_dma_segment *vlist, int sglist_cnt) { struct memdesc mem; mem.u.md_list = vlist; mem.md_opaque = sglist_cnt; mem.md_type = MEMDESC_VLIST; return (mem); } static inline struct memdesc memdesc_plist(struct bus_dma_segment *plist, int sglist_cnt) { struct memdesc mem; mem.u.md_list = plist; mem.md_opaque = sglist_cnt; mem.md_type = MEMDESC_PLIST; return (mem); } static inline struct memdesc memdesc_bio(struct bio *bio) { struct memdesc mem; mem.u.md_bio = bio; mem.md_type = MEMDESC_BIO; return (mem); } static inline struct memdesc memdesc_uio(struct uio *uio) { struct memdesc mem; mem.u.md_uio = uio; mem.md_type = MEMDESC_UIO; return (mem); } static inline struct memdesc memdesc_mbuf(struct mbuf *mbuf) { struct memdesc mem; mem.u.md_mbuf = mbuf; mem.md_type = MEMDESC_MBUF; return (mem); } -static inline struct memdesc -memdesc_ccb(union ccb *ccb) -{ - struct memdesc mem; +struct memdesc memdesc_ccb(union ccb *ccb); - mem.u.md_ccb = ccb; - mem.md_type = MEMDESC_CCB; - - return (mem); -} #endif /* _SYS_MEMDESC_H_ */
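With the MEMDESC_CCB type and the md_ccb union member removed, a memdesc can no longer refer to a CCB at all, which is why the two MEMDESC_CCB cases in kmsan_bus_dmamap_sync() above become unreachable and are dropped. The memdesc_ccb() that this header now only declares presumably lives with the CAM code, where it can translate a CCB's data buffer into one of the remaining descriptor types. A minimal sketch of such a translation, handling only a SCSI I/O CCB whose data is a single kernel-virtual buffer and leaning on the memdesc_vaddr() constructor kept above; the real implementation would have to cover the other CCB function codes and CAM_DATA_* transfer modes as well:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/memdesc.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

struct memdesc
memdesc_ccb(union ccb *ccb)
{

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		KASSERT((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR,
		    ("%s: sketch only handles CAM_DATA_VADDR", __func__));
		/* Describe the CCB's data buffer with a generic descriptor. */
		return (memdesc_vaddr(ccb->csio.data_ptr,
		    ccb->csio.dxfer_len));
	default:
		panic("%s: unhandled CCB function code %#x", __func__,
		    ccb->ccb_h.func_code);
	}
}

A busdma consumer would then pass the resulting struct memdesc to bus_dmamap_load_mem() just as it would for any other descriptor type; only the shape of the descriptor changes.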