Index: sys/cam/scsi/scsi_low.h
===================================================================
--- sys/cam/scsi/scsi_low.h
+++ sys/cam/scsi/scsi_low.h
@@ -634,26 +634,22 @@
 /*************************************************
  * Message macro defs
  *************************************************/
-#define SCSI_LOW_SETUP_PHASE(ti, phase)			\
-{							\
+#define SCSI_LOW_SETUP_PHASE(ti, phase) do {		\
 	(ti)->ti_ophase = ti->ti_phase;			\
 	(ti)->ti_phase = (phase);			\
-}
+} while (0)
 
-#define SCSI_LOW_SETUP_MSGPHASE(slp, PHASE)		\
-{							\
+#define SCSI_LOW_SETUP_MSGPHASE(slp, PHASE) do {	\
 	(slp)->sl_msgphase = (PHASE);			\
-}
+} while (0)
 
-#define SCSI_LOW_ASSERT_ATN(slp)			\
-{							\
+#define SCSI_LOW_ASSERT_ATN(slp) do {			\
 	(slp)->sl_atten = 1;				\
-}
+} while (0)
 
-#define SCSI_LOW_DEASSERT_ATN(slp)			\
-{							\
+#define SCSI_LOW_DEASSERT_ATN(slp) do {			\
 	(slp)->sl_atten = 0;				\
-}
+} while (0)
 
 /*************************************************
  * Inline functions
Index: sys/kern/kern_clocksource.c
===================================================================
--- sys/kern/kern_clocksource.c
+++ sys/kern/kern_clocksource.c
@@ -69,21 +69,19 @@
 
 static struct mtx	et_hw_mtx;
 
-#define	ET_HW_LOCK(state)						\
-	{								\
-		if (timer->et_flags & ET_FLAGS_PERCPU)			\
-			mtx_lock_spin(&(state)->et_hw_mtx);		\
-		else							\
-			mtx_lock_spin(&et_hw_mtx);			\
-	}
-
-#define	ET_HW_UNLOCK(state)						\
-	{								\
-		if (timer->et_flags & ET_FLAGS_PERCPU)			\
-			mtx_unlock_spin(&(state)->et_hw_mtx);		\
-		else							\
-			mtx_unlock_spin(&et_hw_mtx);			\
-	}
+#define	ET_HW_LOCK(state) do {						\
+	if (timer->et_flags & ET_FLAGS_PERCPU)				\
+		mtx_lock_spin(&(state)->et_hw_mtx);			\
+	else								\
+		mtx_lock_spin(&et_hw_mtx);				\
+} while (0)
+
+#define	ET_HW_UNLOCK(state) do {					\
+	if (timer->et_flags & ET_FLAGS_PERCPU)				\
+		mtx_unlock_spin(&(state)->et_hw_mtx);			\
+	else								\
+		mtx_unlock_spin(&et_hw_mtx);				\
+} while (0)
 
 static struct eventtimer	*timer = NULL;
 static sbintime_t	timerperiod;	/* Timer period for periodic mode. */
Index: sys/kern/subr_kobj.c
===================================================================
--- sys/kern/subr_kobj.c
+++ sys/kern/subr_kobj.c
@@ -62,7 +62,7 @@
 
 #define	KOBJ_LOCK()		mtx_lock(&kobj_mtx)
 #define	KOBJ_UNLOCK()		mtx_unlock(&kobj_mtx)
-#define	KOBJ_ASSERT(what)	mtx_assert(&kobj_mtx, what);
+#define	KOBJ_ASSERT(what)	mtx_assert(&kobj_mtx, what)
 
 SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
     &kobj_next_id, 0, "");
Index: sys/kern/subr_vmem.c
===================================================================
--- sys/kern/subr_vmem.c
+++ sys/kern/subr_vmem.c
@@ -196,7 +196,7 @@
 #define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
 #define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
 #define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
-#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);
+#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED)
 
 #define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))
Index: sys/kern/sysv_sem.c
===================================================================
--- sys/kern/sysv_sem.c
+++ sys/kern/sysv_sem.c
@@ -124,9 +124,9 @@
 static unsigned sem_prison_slot;	/* prison OSD slot */
 
 #define	SEMUNDO_MTX		sem_undo_mtx
-#define	SEMUNDO_LOCK()		mtx_lock(&SEMUNDO_MTX);
-#define	SEMUNDO_UNLOCK()	mtx_unlock(&SEMUNDO_MTX);
-#define	SEMUNDO_LOCKASSERT(how)	mtx_assert(&SEMUNDO_MTX, (how));
+#define	SEMUNDO_LOCK()		mtx_lock(&SEMUNDO_MTX)
+#define	SEMUNDO_UNLOCK()	mtx_unlock(&SEMUNDO_MTX)
+#define	SEMUNDO_LOCKASSERT(how)	mtx_assert(&SEMUNDO_MTX, (how))
 
 struct sem {
 	u_short	semval;		/* semaphore value */
Index: sys/libkern/zlib.c
===================================================================
--- sys/libkern/zlib.c
+++ sys/libkern/zlib.c
@@ -2196,20 +2196,19 @@
  * Remove the smallest element from the heap and recreate the heap with
  * one less element. Updates heap and heap_len.
  */
-#define pqremove(s, tree, top) \
-{\
-    top = s->heap[SMALLEST]; \
-    s->heap[SMALLEST] = s->heap[s->heap_len--]; \
-    pqdownheap(s, tree, SMALLEST); \
-}
+#define pqremove(s, tree, top) do { \
+    (top) = (s)->heap[SMALLEST]; \
+    (s)->heap[SMALLEST] = (s)->heap[(s)->heap_len--]; \
+    pqdownheap(s, tree, SMALLEST); \
+} while (0)
 
 /* ===========================================================================
  * Compares to subtrees, using the tree depth as tie breaker when
  * the subtrees have equal frequency. This minimizes the worst case length.
  */
-#define smaller(tree, n, m, depth) \
-   (tree[n].Freq < tree[m].Freq || \
-   (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
+#define smaller(tree, n, m, depth) \
+   ((tree)[n].Freq < (tree)[m].Freq || \
+   ((tree)[n].Freq == (tree)[m].Freq && (depth)[n] <= (depth)[m]))
 
 /* ===========================================================================
  * Restore the heap property by moving down the tree starting at node k,
Index: sys/net/radix.h
===================================================================
--- sys/net/radix.h
+++ sys/net/radix.h
@@ -151,11 +151,11 @@
 #ifndef _KERNEL
 #define R_Malloc(p, t, n) (p = (t) malloc((unsigned int)(n)))
 #define R_Zalloc(p, t, n) (p = (t) calloc(1,(unsigned int)(n)))
-#define R_Free(p) free((char *)p);
+#define R_Free(p) free((char *)(p))
 #else
 #define R_Malloc(p, t, n) (p = (t) malloc((unsigned long)(n), M_RTABLE, M_NOWAIT))
 #define R_Zalloc(p, t, n) (p = (t) malloc((unsigned long)(n), M_RTABLE, M_NOWAIT | M_ZERO))
-#define R_Free(p) free((caddr_t)p, M_RTABLE);
+#define R_Free(p) free((caddr_t)(p), M_RTABLE)
 
 #define	RADIX_NODE_HEAD_LOCK_INIT(rnh)	\
     rw_init_flags(&(rnh)->rnh_lock, "radix node head", 0)
Index: sys/net/vnet.c
===================================================================
--- sys/net/vnet.c
+++ sys/net/vnet.c
@@ -190,10 +190,10 @@
 
 struct sx		vnet_sysinit_sxlock;
 
-#define	VNET_SYSINIT_WLOCK()	sx_xlock(&vnet_sysinit_sxlock);
-#define	VNET_SYSINIT_WUNLOCK()	sx_xunlock(&vnet_sysinit_sxlock);
-#define	VNET_SYSINIT_RLOCK()	sx_slock(&vnet_sysinit_sxlock);
-#define	VNET_SYSINIT_RUNLOCK()	sx_sunlock(&vnet_sysinit_sxlock);
+#define	VNET_SYSINIT_WLOCK()	sx_xlock(&vnet_sysinit_sxlock)
+#define	VNET_SYSINIT_WUNLOCK()	sx_xunlock(&vnet_sysinit_sxlock)
+#define	VNET_SYSINIT_RLOCK()	sx_slock(&vnet_sysinit_sxlock)
+#define	VNET_SYSINIT_RUNLOCK()	sx_sunlock(&vnet_sysinit_sxlock)
 
 struct vnet_data_free {
 	uintptr_t	vnd_start;
Index: sys/netgraph/ng_pipe.c
===================================================================
--- sys/netgraph/ng_pipe.c
+++ sys/netgraph/ng_pipe.c
@@ -106,7 +106,7 @@
 typedef struct node_priv *priv_p;
 
 /* Macro for calculating the virtual time for packet dequeueing in WFQ */
-#define FIFO_VTIME_SORT(plen)						\
+#define FIFO_VTIME_SORT(plen) do {					\
 	if (hinfo->cfg.wfq && hinfo->cfg.bandwidth) {			\
 		ngp_f->vtime.tv_usec = now->tv_usec + ((uint64_t) (plen) \
 		    + priv->overhead ) * hinfo->run.fifo_queues *	\
@@ -125,7 +125,7 @@
 			TAILQ_INSERT_BEFORE(ngp_f1, ngp_f, fifo_le);	\
 	} else								\
 		TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le);	\
-
+} while (0)
 
 static void	parse_cfg(struct ng_pipe_hookcfg *, struct ng_pipe_hookcfg *,
 			struct hookinfo *, priv_p);
@@ -806,7 +806,7 @@
 			if (hinfo->cfg.wfq) {
 				TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
 				FIFO_VTIME_SORT(TAILQ_FIRST(
-				    &ngp_f->packet_head)->m->m_pkthdr.len)
+				    &ngp_f->packet_head)->m->m_pkthdr.len);
 			}
 		} else {
 			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
Index: sys/netgraph/ng_tcpmss.c
===================================================================
--- sys/netgraph/ng_tcpmss.c
+++ sys/netgraph/ng_tcpmss.c
@@ -141,7 +141,10 @@
 
 NETGRAPH_INIT(tcpmss, &ng_tcpmss_typestruct);
 
-#define ERROUT(x)	{ error = (x); goto done; }
+#define ERROUT(x) do {						\
+	error = (x);						\
+	goto done;						\
+} while (0)
 
 /*
  * Node constructor. No special actions required.
@@ -404,7 +407,7 @@
 	acc += acc >> 16;					\
 	cksum = (u_short) acc;					\
 	}							\
-} while (0);
+} while (0)
 
 static int
 correct_mss(struct tcphdr *tc, int hlen, uint16_t maxmss, int flags)
Index: sys/netinet/in_var.h
===================================================================
--- sys/netinet/in_var.h
+++ sys/netinet/in_var.h
@@ -152,12 +152,12 @@
 #define INADDR_TO_IFP(addr, ifp) \
 	/* struct in_addr addr; */ \
 	/* struct ifnet *ifp; */ \
-{ \
+do { \
 	struct in_ifaddr *ia; \
 \
 	INADDR_TO_IFADDR(addr, ia); \
 	(ifp) = (ia == NULL) ? NULL : ia->ia_ifp; \
-}
+} while (0)
 
 /*
  * Macro for finding the internet address structure (in_ifaddr) corresponding
Index: sys/security/audit/audit_bsm_db.c
===================================================================
--- sys/security/audit/audit_bsm_db.c
+++ sys/security/audit/audit_bsm_db.c
@@ -100,7 +100,7 @@
 static struct sx		evnamemap_lock;
 static struct evname_list	evnamemap_hash[EVNAMEMAP_HASH_TABLE_SIZE];
 
-#define	EVNAMEMAP_LOCK_INIT()	sx_init(&evnamemap_lock, "evnamemap_lock");
+#define	EVNAMEMAP_LOCK_INIT()	sx_init(&evnamemap_lock, "evnamemap_lock")
 #define	EVNAMEMAP_RLOCK()	sx_slock(&evnamemap_lock)
 #define	EVNAMEMAP_RUNLOCK()	sx_sunlock(&evnamemap_lock)
 #define	EVNAMEMAP_WLOCK()	sx_xlock(&evnamemap_lock)
Index: sys/sys/ata.h
===================================================================
--- sys/sys/ata.h
+++ sys/sys/ata.h
@@ -572,9 +572,9 @@
 #define ATA_SENSE_VOLUME_OVERFLOW	0x0d	/* volume overflow */
 #define ATA_SENSE_MISCOMPARE		0x0e	/* data dont match the medium */
 #define ATA_SENSE_RESERVED		0x0f
-#define ATA_SENSE_ILI			0x20;
-#define ATA_SENSE_EOM			0x40;
-#define ATA_SENSE_FILEMARK		0x80;
+#define ATA_SENSE_ILI			0x20
+#define ATA_SENSE_EOM			0x40
+#define ATA_SENSE_FILEMARK		0x80
 
     u_int32_t	cmd_info;	/* cmd information */
     u_int8_t	sense_length;	/* additional sense len (n-7) */
Index: sys/sys/sched.h
===================================================================
--- sys/sys/sched.h
+++ sys/sys/sched.h
@@ -191,7 +191,7 @@
  * Sched stats are always incremented in critical sections so no atomic
  * is necesssary to increment them.
  */
-#define	SCHED_STAT_INC(var)	DPCPU_GET(var)++;
+#define	SCHED_STAT_INC(var)	(DPCPU_GET(var)++)
 #else
 #define	SCHED_STAT_DEFINE_VAR(name, descr, ptr)
 #define	SCHED_STAT_DEFINE(name, descr)
Index: sys/sys/time.h
===================================================================
--- sys/sys/time.h
+++ sys/sys/time.h
@@ -505,11 +505,10 @@
 
 #define	SBT2FREQ(sbt)	((SBT_1S + ((sbt) >> 1)) / (sbt))
 
-#define	FREQ2BT(freq, bt)					\
-{								\
-	(bt)->sec = 0;						\
-	(bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1; \
-}
+#define	FREQ2BT(freq, bt) do {					\
+	(bt)->sec = 0;						\
+	(bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1; \
+} while (0)
 
 #define	TIMESEL(sbt, sbt2)					\
 	(((sbt2) >= sbt_timethreshold) ?			\
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c
+++ sys/vm/vm_map.c
@@ -164,15 +164,14 @@
  *	Asserts that the starting and ending region
  *	addresses fall within the valid range of the map.
  */
-#define	VM_MAP_RANGE_CHECK(map, start, end)		\
-	{						\
-	if (start < vm_map_min(map))			\
-		start = vm_map_min(map);		\
-	if (end > vm_map_max(map))			\
-		end = vm_map_max(map);			\
-	if (start > end)				\
-		start = end;				\
-	}
+#define	VM_MAP_RANGE_CHECK(map, start, end) do {	\
+	if ((start) < vm_map_min(map))			\
+		(start) = vm_map_min(map);		\
+	if ((end) > vm_map_max(map))			\
+		(end) = vm_map_max(map);		\
+	if ((start) > (end))				\
+		(start) = (end);			\
+} while (0)
 
 /*
  *	vm_map_startup:
@@ -1673,11 +1672,10 @@
  *	the specified address; if necessary,
  *	it splits the entry into two.
  */
-#define vm_map_clip_start(map, entry, startaddr) \
-{ \
-	if (startaddr > entry->start) \
-		_vm_map_clip_start(map, entry, startaddr); \
-}
+#define vm_map_clip_start(map, entry, startaddr) do {	\
+	if ((startaddr) > (entry)->start)		\
+		_vm_map_clip_start((map), (entry), (startaddr)); \
+} while (0)
 
 /*
  *	This routine is called only when it is known that
@@ -1760,11 +1758,10 @@
  *	the specified address; if necessary,
  *	it splits the entry into two.
 */
-#define vm_map_clip_end(map, entry, endaddr) \
-{ \
-	if ((endaddr) < (entry->end)) \
-		_vm_map_clip_end((map), (entry), (endaddr)); \
-}
+#define vm_map_clip_end(map, entry, endaddr) do {	\
+	if ((endaddr) < (entry)->end)			\
+		_vm_map_clip_end((map), (entry), (endaddr)); \
+} while (0)
 
 /*
  *	This routine is called only when it is known that
Index: sys/x86/iommu/intel_dmar.h
===================================================================
--- sys/x86/iommu/intel_dmar.h
+++ sys/x86/iommu/intel_dmar.h
@@ -511,8 +511,7 @@
 
 extern struct timespec dmar_hw_timeout;
 
-#define	DMAR_WAIT_UNTIL(cond)					\
-{								\
+#define	DMAR_WAIT_UNTIL(cond) do {				\
 	struct timespec last, curr;				\
 	bool forever;						\
 								\
@@ -537,7 +536,7 @@
 	}							\
 	cpu_spinwait();						\
 	}							\
-}
+} while (0)
 
 #ifdef INVARIANTS
 #define	TD_PREP_PINNED_ASSERT						\
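Editor's note, not part of the patch: every hunk above applies one of two patterns -- statement-like macros gain a do { ... } while (0) wrapper, and macro definitions lose a stray trailing semicolon -- so that each macro expands to exactly one statement (or one expression) and composes safely with if/else and with the semicolon the caller writes. The sketch below is a minimal stand-alone illustration of why that matters; the names (do_lock, do_unlock, BAD_LOCK, GOOD_LOCK) are invented for this example and do not appear in the patch.

/*
 * Illustrative sketch only.  BAD_LOCK() mimics the old bare-brace style,
 * GOOD_LOCK() the do { } while (0) style adopted by the patch.
 */
#include <stdio.h>

static void do_lock(void)   { printf("lock\n"); }
static void do_unlock(void) { printf("unlock\n"); }

#define BAD_LOCK()	{ do_lock(); }			/* bare-brace body */
#define GOOD_LOCK()	do { do_lock(); } while (0)	/* consumes exactly one ';' */

int
main(void)
{
	int contested = 1;

#if 0
	/*
	 * Fails to compile: "BAD_LOCK();" expands to "{ do_lock(); };",
	 * and the extra ';' terminates the if statement, orphaning the else.
	 */
	if (contested)
		BAD_LOCK();
	else
		do_unlock();
#endif

	if (contested)
		GOOD_LOCK();	/* expands to one statement, so if/else pairs up */
	else
		do_unlock();

	return (0);
}

The removed trailing semicolons follow the same reasoning: with the old "#define SEMUNDO_LOCK() mtx_lock(&SEMUNDO_MTX);", a call such as "if (cond) SEMUNDO_LOCK(); else ..." expanded to two statements and broke the if/else pairing in exactly the way shown above, while value-like definitions such as "ATA_SENSE_ILI 0x20;" could not be used inside expressions at all.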