Index: sys/amd64/include/xen/synch_bitops.h
===================================================================
--- sys/amd64/include/xen/synch_bitops.h
+++ sys/amd64/include/xen/synch_bitops.h
@@ -10,28 +10,28 @@
 
 #define ADDR (*(volatile long *) addr)
 
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
+static __inline__ void synch_set_bit(u_int nr, volatile void * addr)
 {
     __asm__ __volatile__ (
         "lock btsl %1,%0"
         : "=m" (ADDR) : "Ir" (nr) : "memory" );
 }
 
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+static __inline__ void synch_clear_bit(u_int nr, volatile void * addr)
 {
     __asm__ __volatile__ (
         "lock btrl %1,%0"
         : "=m" (ADDR) : "Ir" (nr) : "memory" );
 }
 
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
+static __inline__ void synch_change_bit(u_int nr, volatile void * addr)
 {
     __asm__ __volatile__ (
         "lock btcl %1,%0"
         : "=m" (ADDR) : "Ir" (nr) : "memory" );
 }
 
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int synch_test_and_set_bit(u_int nr, volatile void * addr)
 {
     int oldbit;
     __asm__ __volatile__ (
@@ -40,7 +40,7 @@
     return oldbit;
 }
 
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int synch_test_and_clear_bit(u_int nr, volatile void * addr)
 {
     int oldbit;
     __asm__ __volatile__ (
@@ -49,7 +49,7 @@
     return oldbit;
 }
 
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int synch_test_and_change_bit(u_int nr, volatile void * addr)
 {
     int oldbit;
 
@@ -106,13 +106,13 @@
     return old;
 }
 
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+static __inline__ int synch_const_test_bit(u_int nr, const volatile void * addr)
 {
     return ((1UL << (nr & 31)) &
             (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
 }
 
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+static __inline__ int synch_var_test_bit(u_int nr, volatile void * addr)
 {
     int oldbit;
     __asm__ __volatile__ (
Index: sys/i386/include/xen/synch_bitops.h
===================================================================
--- sys/i386/include/xen/synch_bitops.h
+++ sys/i386/include/xen/synch_bitops.h
@@ -9,28 +9,28 @@
 
 #define ADDR (*(volatile long *) addr)
 
-static __inline__ void synch_set_bit(int nr, volatile void * addr)
+static __inline__ void synch_set_bit(u_int nr, volatile void * addr)
 {
     __asm__ __volatile__ (
         "lock btsl %1,%0"
         : "=m" (ADDR) : "Ir" (nr) : "memory" );
 }
 
-static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+static __inline__ void synch_clear_bit(u_int nr, volatile void * addr)
 {
     __asm__ __volatile__ (
         "lock btrl %1,%0"
         : "=m" (ADDR) : "Ir" (nr) : "memory" );
 }
 
-static __inline__ void synch_change_bit(int nr, volatile void * addr)
+static __inline__ void synch_change_bit(u_int nr, volatile void * addr)
 {
     __asm__ __volatile__ (
         "lock btcl %1,%0"
         : "=m" (ADDR) : "Ir" (nr) : "memory" );
 }
 
-static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int synch_test_and_set_bit(u_int nr, volatile void * addr)
 {
     int oldbit;
     __asm__ __volatile__ (
@@ -39,7 +39,7 @@
     return oldbit;
 }
 
-static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int synch_test_and_clear_bit(u_int nr, volatile void * addr)
 {
     int oldbit;
     __asm__ __volatile__ (
@@ -48,7 +48,7 @@
     return oldbit;
 }
 
-static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int synch_test_and_change_bit(u_int nr, volatile void * addr)
 {
     int oldbit;
 
@@ -115,13 +115,13 @@
     return old;
 }
 
-static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+static __inline__ int synch_const_test_bit(u_int nr, const volatile void * addr)
 {
     return ((1UL << (nr & 31)) &
             (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
 }
 
-static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+static __inline__ int synch_var_test_bit(u_int nr, volatile void * addr)
 {
     int oldbit;
     __asm__ __volatile__ (
Index: sys/x86/xen/xen_intr.c
===================================================================
--- sys/x86/xen/xen_intr.c
+++ sys/x86/xen/xen_intr.c
@@ -120,10 +120,10 @@
 struct xenisrc {
     struct intsrc    xi_intsrc;
    enum evtchn_type xi_type;
-    int              xi_cpu;        /* VCPU for delivery. */
+    u_int            xi_cpu;        /* VCPU for delivery. */
     int              xi_vector;     /* Global isrc vector number. */
     evtchn_port_t    xi_port;
-    int              xi_virq;
+    u_int            xi_virq;
     void            *xi_cookie;
     u_int            xi_close:1;    /* close on unbind? */
     u_int            xi_masked:1;
@@ -272,7 +272,7 @@
 static struct xenisrc *
 xen_intr_find_unused_isrc(enum evtchn_type type)
 {
-    int isrc_idx;
+    u_int isrc_idx;
 
     KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));
 
@@ -540,7 +540,7 @@
             l2i = LONG_BIT - 1;
             continue;
         }
-        l1i = ffsl(masked_l1) - 1;
+        l1i = (u_int)ffsl((long)masked_l1) - 1;
 
         do {
             l2 = xen_intr_active_ports(pc, s, l1i);
@@ -553,7 +553,7 @@
                 l2i = LONG_BIT - 1;
                 break;
             }
-            l2i = ffsl(masked_l2) - 1;
+            l2i = (u_int)ffsl((long)masked_l2) - 1;
 
             /* process port */
             port = (l1i * LONG_BIT) + l2i;
@@ -565,7 +565,7 @@
 
             /* Make sure we are firing on the right vCPU */
             KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
-                ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
+                ("Received unexpected event on vCPU#%d, event bound to vCPU#%u",
                 PCPU_GET(cpuid), isrc->xi_cpu));
 
             intr_execute_handlers(&isrc->xi_intsrc, trap_frame);
@@ -600,7 +600,7 @@
 {
     shared_info_t *s = HYPERVISOR_shared_info;
     struct xen_intr_pcpu_data *pcpu;
-    int i;
+    u_int i;
 
     if (!xen_domain())
         return (0);
@@ -618,7 +618,7 @@
     }
 
     for (i = 0; i < nitems(s->evtchn_mask); i++)
-        atomic_store_rel_long(&s->evtchn_mask[i], ~0);
+        atomic_store_rel_long(&s->evtchn_mask[i], ~0UL);
 
     intr_register_pic(&xen_intr_pic);
 
@@ -669,8 +669,8 @@
 xen_rebind_ipi(struct xenisrc *isrc)
 {
 #ifdef SMP
-    int cpu = isrc->xi_cpu;
-    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
+    u_int cpu = isrc->xi_cpu;
+    u_int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
     int error;
     struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
 
@@ -698,8 +698,8 @@
 static void
 xen_rebind_virq(struct xenisrc *isrc)
 {
-    int cpu = isrc->xi_cpu;
-    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
+    u_int cpu = isrc->xi_cpu;
+    u_int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
     int error;
     struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
                                           .vcpu = vcpu_id };
@@ -733,7 +733,7 @@
     shared_info_t *s = HYPERVISOR_shared_info;
     struct xenisrc *isrc;
     u_int isrc_idx;
-    int i;
+    u_int i;
 
     if (suspend_cancelled)
         return;
@@ -749,7 +749,7 @@
 
     /* Mask all event channels. */
     for (i = 0; i < nitems(s->evtchn_mask); i++)
-        atomic_store_rel_long(&s->evtchn_mask[i], ~0);
+        atomic_store_rel_long(&s->evtchn_mask[i], ~0UL);
 
     /* Remove port -> isrc mappings */
     memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));
@@ -1079,7 +1079,7 @@
     driver_filter_t filter, driver_intr_t handler, void *arg,
     enum intr_type flags, xen_intr_handle_t *port_handlep)
 {
-    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
+    u_int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
     struct xenisrc *isrc;
     struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
     int error;
@@ -1139,7 +1139,7 @@
     enum intr_type flags, xen_intr_handle_t *port_handlep)
 {
 #ifdef SMP
-    int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
+    u_int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
     struct xenisrc *isrc;
     struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
     /* Same size as the one used by intr_handler->ih_name. */
@@ -1327,7 +1327,7 @@
 {
     struct xen_intr_pcpu_data *pcpu;
     shared_info_t *s = HYPERVISOR_shared_info;
-    int i;
+    u_int i;
 
     db_printf("Port %d Type: %s\n",
         isrc->xi_port, xen_intr_print_type(isrc->xi_type));
@@ -1341,7 +1341,7 @@
     db_printf("\tPer-CPU Masks: ");
     CPU_FOREACH(i) {
         pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
-        db_printf("cpu#%d: %d ", i,
+        db_printf("cpu#%u: %d ", i,
             !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
     }
     db_printf("\n");
@@ -1349,7 +1349,7 @@
 
 DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
 {
-    int i;
+    u_int i;
 
     if (!xen_domain()) {
         db_printf("Only available on Xen guests\n");
Index: sys/xen/xen-os.h
===================================================================
--- sys/xen/xen-os.h
+++ sys/xen/xen-os.h
@@ -125,7 +125,7 @@
 #define NBPL (NBBY * sizeof(long))
 
 static inline bool
-xen_test_bit(int bit, volatile long *addr)
+xen_test_bit(u_int bit, volatile long *addr)
 {
     unsigned long mask = 1UL << (bit % NBPL);
 
@@ -133,13 +133,13 @@
 }
 
 static inline void
-xen_set_bit(int bit, volatile long *addr)
+xen_set_bit(u_int bit, volatile long *addr)
 {
     atomic_set_long(&addr[bit / NBPL], 1UL << (bit % NBPL));
 }
 
 static inline void
-xen_clear_bit(int bit, volatile long *addr)
+xen_clear_bit(u_int bit, volatile long *addr)
 {
     atomic_clear_long(&addr[bit / NBPL], 1UL << (bit % NBPL));
 }