Index: head/sys/powerpc/powernv/platform_powernv.c
===================================================================
--- head/sys/powerpc/powernv/platform_powernv.c	(revision 328533)
+++ head/sys/powerpc/powernv/platform_powernv.c	(revision 328534)
@@ -1,448 +1,467 @@
 /*-
  * Copyright (c) 2015 Nathan Whitehorn
+ * Copyright (c) 2017-2018 Semihalf
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 
 #include "platform_if.h"
 #include "opal.h"
 
 #ifdef SMP
 extern void *ap_pcpu;
 #endif
 
 static int powernv_probe(platform_t);
 static int powernv_attach(platform_t);
 void powernv_mem_regions(platform_t, struct mem_region *phys, int *physsz,
     struct mem_region *avail, int *availsz);
 static u_long powernv_timebase_freq(platform_t, struct cpuref *cpuref);
 static int powernv_smp_first_cpu(platform_t, struct cpuref *cpuref);
 static int powernv_smp_next_cpu(platform_t, struct cpuref *cpuref);
 static int powernv_smp_get_bsp(platform_t, struct cpuref *cpuref);
 static void powernv_smp_ap_init(platform_t);
 #ifdef SMP
 static int powernv_smp_start_cpu(platform_t, struct pcpu *cpu);
 static struct cpu_group *powernv_smp_topo(platform_t plat);
 #endif
 static void powernv_reset(platform_t);
 static void powernv_cpu_idle(sbintime_t sbt);
+static int powernv_cpuref_init(void);
 
 static platform_method_t powernv_methods[] = {
 	PLATFORMMETHOD(platform_probe,		powernv_probe),
 	PLATFORMMETHOD(platform_attach,		powernv_attach),
 	PLATFORMMETHOD(platform_mem_regions,	powernv_mem_regions),
 	PLATFORMMETHOD(platform_timebase_freq,	powernv_timebase_freq),
 
 	PLATFORMMETHOD(platform_smp_ap_init,	powernv_smp_ap_init),
 	PLATFORMMETHOD(platform_smp_first_cpu,	powernv_smp_first_cpu),
 	PLATFORMMETHOD(platform_smp_next_cpu,	powernv_smp_next_cpu),
 	PLATFORMMETHOD(platform_smp_get_bsp,	powernv_smp_get_bsp),
 #ifdef SMP
 	PLATFORMMETHOD(platform_smp_start_cpu,	powernv_smp_start_cpu),
 	PLATFORMMETHOD(platform_smp_topo,	powernv_smp_topo),
 #endif
 
 	PLATFORMMETHOD(platform_reset,		powernv_reset),
 
 	{ 0, 0 }
 };
 
 static platform_def_t powernv_platform = {
 	"powernv",
 	powernv_methods,
 	0
 };
 
+static struct cpuref platform_cpuref[MAXCPU];
+static int platform_cpuref_cnt;
+static int platform_cpuref_valid;
+
 PLATFORM_DEF(powernv_platform);
 
-static int powernv_boot_pir;
+static uint64_t powernv_boot_pir;
 
 static int
 powernv_probe(platform_t plat)
 {
 	if (opal_check() == 0)
 		return (BUS_PROBE_SPECIFIC);
 
 	return (ENXIO);
 }
 
 static int
 powernv_attach(platform_t plat)
 {
 	uint32_t nptlp, shift = 0, slb_encoding = 0;
 	int32_t lp_size, lp_encoding;
 	char buf[255];
 	pcell_t prop;
 	phandle_t cpu;
 	int res, len, node, idx;
 
 	/* Ping OPAL again just to make sure */
 	opal_check();
 
+#if BYTE_ORDER == LITTLE_ENDIAN
+	opal_call(OPAL_REINIT_CPUS, 2 /* Little endian */);
+#else
+	opal_call(OPAL_REINIT_CPUS, 1 /* Big endian */);
+#endif
+
 	cpu_idle_hook = powernv_cpu_idle;
 	powernv_boot_pir = mfspr(SPR_PIR);
 
 	/* Init CPU bits */
 	powernv_smp_ap_init(plat);
 
+	powernv_cpuref_init();
+
 	/* Set SLB count from device tree */
 	cpu = OF_peer(0);
 	cpu = OF_child(cpu);
 	while (cpu != 0) {
 		res = OF_getprop(cpu, "name", buf, sizeof(buf));
 		if (res > 0 && strcmp(buf, "cpus") == 0)
 			break;
 		cpu = OF_peer(cpu);
 	}
 	if (cpu == 0)
 		goto out;
 
 	cpu = OF_child(cpu);
 	while (cpu != 0) {
 		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
 		if (res > 0 && strcmp(buf, "cpu") == 0)
 			break;
 		cpu = OF_peer(cpu);
 	}
 	if (cpu == 0)
 		goto out;
 
 	res = OF_getencprop(cpu, "ibm,slb-size", &prop, sizeof(prop));
 	if (res > 0)
 		n_slbs = prop;
 
 	/*
 	 * Scan the large page size property for PAPR compatible machines.
 	 * See PAPR D.5 Changes to Section 5.1.4, 'CPU Node Properties'
 	 * for the encoding of the property.
 	 */
 
 	len = OF_getproplen(node, "ibm,segment-page-sizes");
 	if (len > 0) {
 		/*
 		 * We have to use a variable length array on the stack
 		 * since we have very limited stack space.
 		 */
 		pcell_t arr[len/sizeof(cell_t)];
 		res = OF_getencprop(cpu, "ibm,segment-page-sizes", arr,
 		    sizeof(arr));
 		len /= 4;
 		idx = 0;
 		while (len > 0) {
 			shift = arr[idx];
 			slb_encoding = arr[idx + 1];
 			nptlp = arr[idx + 2];
 			idx += 3;
 			len -= 3;
 			while (len > 0 && nptlp) {
 				lp_size = arr[idx];
 				lp_encoding = arr[idx+1];
 				if (slb_encoding == SLBV_L && lp_encoding == 0)
 					break;
 
 				idx += 2;
 				len -= 2;
 				nptlp--;
 			}
 			if (nptlp && slb_encoding == SLBV_L && lp_encoding == 0)
 				break;
 		}
 
 		if (len == 0)
 			panic("Standard large pages (SLB[L] = 1, PTE[LP] = 0) "
 			    "not supported by this system.");
 
 		moea64_large_page_shift = shift;
 		moea64_large_page_size = 1ULL << lp_size;
 	}
 
 out:
 	return (0);
 }
 
 void
 powernv_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
     struct mem_region *avail, int *availsz)
 {
 
 	ofw_mem_regions(phys, physsz, avail, availsz);
 }
 
 static u_long
 powernv_timebase_freq(platform_t plat, struct cpuref *cpuref)
 {
-	phandle_t phandle;
+	char buf[8];
+	phandle_t cpu, dev, root;
+	int res;
 	int32_t ticks = -1;
 
-	phandle = cpuref->cr_hwref;
+	root = OF_peer(0);
+
+	dev = OF_child(root);
+	while (dev != 0) {
+		res = OF_getprop(dev, "name", buf, sizeof(buf));
+		if (res > 0 && strcmp(buf, "cpus") == 0)
+			break;
+		dev = OF_peer(dev);
+	}
 
-	OF_getencprop(phandle, "timebase-frequency", &ticks, sizeof(ticks));
+	for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
+		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
+		if (res > 0 && strcmp(buf, "cpu") == 0)
+			break;
+	}
+	if (cpu == 0)
+		return (512000000);
+
+	OF_getencprop(cpu, "timebase-frequency", &ticks, sizeof(ticks));
+
 	if (ticks <= 0)
 		panic("Unable to determine timebase frequency!");
 
 	return (ticks);
+
 }
 
 static int
-powernv_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
+powernv_cpuref_init(void)
 {
-	char buf[8];
-	phandle_t cpu, dev, root;
-	int res, cpuid;
+	phandle_t cpu, dev;
+	char buf[32];
+	int a, res, tmp_cpuref_cnt;
+	static struct cpuref tmp_cpuref[MAXCPU];
+	cell_t interrupt_servers[32];
+	uint64_t bsp;
 
-	root = OF_peer(0);
+	if (platform_cpuref_valid)
+		return (0);
 
-	dev = OF_child(root);
+	dev = OF_peer(0);
+	dev = OF_child(dev);
 	while (dev != 0) {
 		res = OF_getprop(dev, "name", buf, sizeof(buf));
 		if (res > 0 && strcmp(buf, "cpus") == 0)
 			break;
 		dev = OF_peer(dev);
 	}
-	if (dev == 0) {
-		/*
-		 * psim doesn't have a name property on the /cpus node,
-		 * but it can be found directly
-		 */
-		dev = OF_finddevice("/cpus");
-		if (dev == 0)
-			return (ENOENT);
-	}
 
-	cpu = OF_child(dev);
-
-	while (cpu != 0) {
+	bsp = 0;
+	tmp_cpuref_cnt = 0;
+	for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
 		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
-		if (res > 0 && strcmp(buf, "cpu") == 0)
-			break;
-		cpu = OF_peer(cpu);
-	}
-	if (cpu == 0)
-		return (ENOENT);
+		if (res > 0 && strcmp(buf, "cpu") == 0) {
+			res = OF_getproplen(cpu, "ibm,ppc-interrupt-server#s");
+			if (res > 0) {
 
-	cpuref->cr_hwref = cpu;
-	res = OF_getencprop(cpu, "ibm,ppc-interrupt-server#s", &cpuid,
-	    sizeof(cpuid));
-	if (res <= 0)
-		res = OF_getencprop(cpu, "reg", &cpuid, sizeof(cpuid));
-	if (res <= 0)
-		cpuid = 0;
-	cpuref->cr_cpuid = cpuid;
-	return (0);
-}
+				OF_getencprop(cpu, "ibm,ppc-interrupt-server#s",
+				    interrupt_servers, res);
 
-static int
-powernv_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
-{
-	char buf[8];
-	phandle_t cpu;
-	int i, res, cpuid;
+				for (a = 0; a < res/sizeof(cell_t); a++) {
+					tmp_cpuref[tmp_cpuref_cnt].cr_hwref = interrupt_servers[a];
+					tmp_cpuref[tmp_cpuref_cnt].cr_cpuid = tmp_cpuref_cnt;
 
-	/* Check for whether it should be the next thread */
-	res = OF_getproplen(cpuref->cr_hwref, "ibm,ppc-interrupt-server#s");
-	if (res > 0) {
-		cell_t interrupt_servers[res/sizeof(cell_t)];
-		OF_getencprop(cpuref->cr_hwref, "ibm,ppc-interrupt-server#s",
-		    interrupt_servers, res);
-		for (i = 0; i < res/sizeof(cell_t) - 1; i++) {
-			if (interrupt_servers[i] == cpuref->cr_cpuid) {
-				cpuref->cr_cpuid = interrupt_servers[i+1];
-				return (0);
+					if (interrupt_servers[a] == (uint32_t)powernv_boot_pir)
+						bsp = tmp_cpuref_cnt;
+
+					tmp_cpuref_cnt++;
+				}
 			}
 		}
 	}
 
-	/* Next CPU core/package */
-	cpu = OF_peer(cpuref->cr_hwref);
-	while (cpu != 0) {
-		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
-		if (res > 0 && strcmp(buf, "cpu") == 0)
-			break;
-		cpu = OF_peer(cpu);
+	/* Map IDs, so BSP has CPUID 0 regardless of hwref */
+	for (a = bsp; a < tmp_cpuref_cnt; a++) {
+		platform_cpuref[platform_cpuref_cnt].cr_hwref = tmp_cpuref[a].cr_hwref;
+		platform_cpuref[platform_cpuref_cnt].cr_cpuid = platform_cpuref_cnt;
+		platform_cpuref_cnt++;
 	}
-	if (cpu == 0)
-		return (ENOENT);
+	for (a = 0; a < bsp; a++) {
+		platform_cpuref[platform_cpuref_cnt].cr_hwref = tmp_cpuref[a].cr_hwref;
+		platform_cpuref[platform_cpuref_cnt].cr_cpuid = platform_cpuref_cnt;
+		platform_cpuref_cnt++;
+	}
 
-	cpuref->cr_hwref = cpu;
-	res = OF_getencprop(cpu, "ibm,ppc-interrupt-server#s", &cpuid,
-	    sizeof(cpuid));
-	if (res <= 0)
-		res = OF_getencprop(cpu, "reg", &cpuid, sizeof(cpuid));
-	if (res <= 0)
-		cpuid = 0;
-	cpuref->cr_cpuid = cpuid;
+	platform_cpuref_valid = 1;
 
 	return (0);
 }
 
 static int
-powernv_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
+powernv_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
 {
-	phandle_t chosen;
-	int cpuid, res;
-	struct cpuref i;
+	if (platform_cpuref_valid == 0)
+		return (EINVAL);
 
-	chosen = OF_finddevice("/chosen");
-	if (chosen == 0)
-		return (ENOENT);
+	cpuref->cr_cpuid = 0;
+	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;
 
-	res = OF_getencprop(chosen, "fdtbootcpu", &cpuid, sizeof(cpuid));
-	if (res < 0)
-		return (ENOENT);
+	return (0);
+}
 
-	/* XXX: FDT from kexec lies sometimes. PIR seems not to. */
-	if (cpuid == 0)
-		cpuid = powernv_boot_pir;
+static int
+powernv_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
+{
+	int id;
 
-	cpuref->cr_cpuid = cpuid;
+	if (platform_cpuref_valid == 0)
+		return (EINVAL);
 
-	if (powernv_smp_first_cpu(plat, &i) != 0)
+	id = cpuref->cr_cpuid + 1;
+	if (id >= platform_cpuref_cnt)
 		return (ENOENT);
-	cpuref->cr_hwref = i.cr_hwref;
 
-	do {
-		if (i.cr_cpuid == cpuid) {
-			cpuref->cr_hwref = i.cr_hwref;
-			break;
-		}
-	} while (powernv_smp_next_cpu(plat, &i) == 0);
+	cpuref->cr_cpuid = platform_cpuref[id].cr_cpuid;
+	cpuref->cr_hwref = platform_cpuref[id].cr_hwref;
 
 	return (0);
 }
 
+static int
+powernv_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
+{
+
+	cpuref->cr_cpuid = platform_cpuref[0].cr_cpuid;
+	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;
+	return (0);
+}
+
 #ifdef SMP
 static int
 powernv_smp_start_cpu(platform_t plat, struct pcpu *pc)
 {
 	int result;
 
 	ap_pcpu = pc;
 	powerpc_sync();
 
-	result = opal_call(OPAL_START_CPU, pc->pc_cpuid, EXC_RST);
+	result = opal_call(OPAL_START_CPU, pc->pc_hwref, EXC_RST);
 	if (result != OPAL_SUCCESS) {
 		printf("OPAL error (%d): unable to start AP %d\n",
-		    result, pc->pc_cpuid);
+		    result, (int)pc->pc_hwref);
 		return (ENXIO);
 	}
 
 	return (0);
 }
 
 static struct cpu_group *
 powernv_smp_topo(platform_t plat)
 {
-	struct pcpu *pc, *last_pc;
-	int i, ncores, ncpus;
+	char buf[8];
+	phandle_t cpu, dev, root;
+	int res, nthreads;
 
-	ncores = ncpus = 0;
-	last_pc = NULL;
-	CPU_FOREACH(i) {
-		pc = pcpu_find(i);
-		if (pc == NULL)
+	root = OF_peer(0);
+
+	dev = OF_child(root);
+	while (dev != 0) {
+		res = OF_getprop(dev, "name", buf, sizeof(buf));
+		if (res > 0 && strcmp(buf, "cpus") == 0)
+			break;
+		dev = OF_peer(dev);
+	}
+
+	nthreads = 1;
+	for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
+		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
+		if (res <= 0 || strcmp(buf, "cpu") != 0)
 			continue;
-		if (last_pc == NULL || pc->pc_hwref != last_pc->pc_hwref)
-			ncores++;
-		last_pc = pc;
-		ncpus++;
+
+		res = OF_getproplen(cpu, "ibm,ppc-interrupt-server#s");
+
+		if (res >= 0)
+			nthreads = res / sizeof(cell_t);
+		else
+			nthreads = 1;
+		break;
 	}
 
-	if (ncpus % ncores != 0) {
+	if (mp_ncpus % nthreads != 0) {
 		printf("WARNING: Irregular SMP topology. Performance may be "
-		    "suboptimal (%d CPUS, %d cores)\n", ncpus, ncores);
+		    "suboptimal (%d threads, %d on first core)\n",
+		    mp_ncpus, nthreads);
		return (smp_topo_none());
	}

	/* Don't do anything fancier for non-threaded SMP */
-	if (ncpus == ncores)
+	if (nthreads == 1)
		return (smp_topo_none());

-#ifdef NOTYET /* smp_topo_1level() fails with non-consecutive CPU IDs */
-	return (smp_topo_1level(CG_SHARE_L1, ncpus / ncores, CG_FLAG_SMT));
-#else
-	return (smp_topo_none());
-#endif
+	return (smp_topo_1level(CG_SHARE_L1, nthreads, CG_FLAG_SMT));
 }
+
 #endif
 
 static void
 powernv_reset(platform_t platform)
 {
 
 	opal_call(OPAL_CEC_REBOOT);
 }
 
 static void
 powernv_smp_ap_init(platform_t platform)
 {
 	register_t msr;
 
 	/* LPID must not be altered when PSL_DR or PSL_IR is set */
 	msr = mfmsr();
 	mtmsr(msr & ~(PSL_DR | PSL_IR));
 	isync();
 
 	/* Direct interrupts to SRR instead of HSRR and reset LPCR otherwise */
 	mtspr(SPR_LPID, 0);
 	isync();
 
 	mtmsr(msr);
 
 	mtspr(SPR_LPCR, LPCR_LPES);
 	isync();
 }
 
 static void
 powernv_cpu_idle(sbintime_t sbt)
 {
 }
Index: head/sys/powerpc/pseries/platform_chrp.c
===================================================================
--- head/sys/powerpc/pseries/platform_chrp.c	(revision 328533)
+++ head/sys/powerpc/pseries/platform_chrp.c	(revision 328534)
@@ -1,530 +1,501 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright (c) 2008 Marcel Moolenaar
  * Copyright (c) 2009 Nathan Whitehorn
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 
 #include "platform_if.h"
 
 #ifdef SMP
 extern void *ap_pcpu;
 #endif
 
 #ifdef __powerpc64__
 static uint8_t splpar_vpa[MAXCPU][640] __aligned(128); /* XXX: dpcpu */
 #endif
 
 static vm_offset_t realmaxaddr = VM_MAX_ADDRESS;
 
 static int chrp_probe(platform_t);
 static int chrp_attach(platform_t);
 void chrp_mem_regions(platform_t, struct mem_region *phys, int *physsz,
     struct mem_region *avail, int *availsz);
 static vm_offset_t chrp_real_maxaddr(platform_t);
 static u_long chrp_timebase_freq(platform_t, struct cpuref *cpuref);
 static int chrp_smp_first_cpu(platform_t, struct cpuref *cpuref);
 static int chrp_smp_next_cpu(platform_t, struct cpuref *cpuref);
 static int chrp_smp_get_bsp(platform_t, struct cpuref *cpuref);
 static void chrp_smp_ap_init(platform_t);
+static int chrp_cpuref_init(void);
 #ifdef SMP
 static int chrp_smp_start_cpu(platform_t, struct pcpu *cpu);
 static struct cpu_group *chrp_smp_topo(platform_t plat);
 #endif
 static void chrp_reset(platform_t);
 #ifdef __powerpc64__
 #include "phyp-hvcall.h"
 static void phyp_cpu_idle(sbintime_t sbt);
 #endif
 
+static struct cpuref platform_cpuref[MAXCPU];
+static int platform_cpuref_cnt;
+static int platform_cpuref_valid;
+
 static platform_method_t chrp_methods[] = {
 	PLATFORMMETHOD(platform_probe,		chrp_probe),
 	PLATFORMMETHOD(platform_attach,		chrp_attach),
 	PLATFORMMETHOD(platform_mem_regions,	chrp_mem_regions),
 	PLATFORMMETHOD(platform_real_maxaddr,	chrp_real_maxaddr),
 	PLATFORMMETHOD(platform_timebase_freq,	chrp_timebase_freq),
 
 	PLATFORMMETHOD(platform_smp_ap_init,	chrp_smp_ap_init),
 	PLATFORMMETHOD(platform_smp_first_cpu,	chrp_smp_first_cpu),
 	PLATFORMMETHOD(platform_smp_next_cpu,	chrp_smp_next_cpu),
 	PLATFORMMETHOD(platform_smp_get_bsp,	chrp_smp_get_bsp),
 #ifdef SMP
 	PLATFORMMETHOD(platform_smp_start_cpu,	chrp_smp_start_cpu),
 	PLATFORMMETHOD(platform_smp_topo,	chrp_smp_topo),
 #endif
 
 	PLATFORMMETHOD(platform_reset,		chrp_reset),
 
 	{ 0, 0 }
 };
 
 static platform_def_t chrp_platform = {
 	"chrp",
 	chrp_methods,
 	0
 };
 
 PLATFORM_DEF(chrp_platform);
 
 static int
 chrp_probe(platform_t plat)
 {
 	if (OF_finddevice("/memory") != -1 || OF_finddevice("/memory@0") != -1)
 		return (BUS_PROBE_GENERIC);
 
 	return (ENXIO);
 }
 
 static int
 chrp_attach(platform_t plat)
 {
 #ifdef __powerpc64__
 	int i;
 
 	/* XXX: check for /rtas/ibm,hypertas-functions? */
 	if (!(mfmsr() & PSL_HV)) {
 		struct mem_region *phys, *avail;
 		int nphys, navail;
 		mem_regions(&phys, &nphys, &avail, &navail);
 		realmaxaddr = phys[0].mr_size;
 
 		pmap_mmu_install("mmu_phyp", BUS_PROBE_SPECIFIC);
 		cpu_idle_hook = phyp_cpu_idle;
 
 		/* Set up important VPA fields */
 		for (i = 0; i < MAXCPU; i++) {
 			bzero(splpar_vpa[i], sizeof(splpar_vpa));
 			/* First two: VPA size */
 			splpar_vpa[i][4] =
 			    (uint8_t)((sizeof(splpar_vpa[i]) >> 8) & 0xff);
 			splpar_vpa[i][5] =
 			    (uint8_t)(sizeof(splpar_vpa[i]) & 0xff);
 			splpar_vpa[i][0xba] = 1;	/* Maintain FPRs */
 			splpar_vpa[i][0xbb] = 1;	/* Maintain PMCs */
 			splpar_vpa[i][0xfc] = 0xff;	/* Maintain full SLB */
 			splpar_vpa[i][0xfd] = 0xff;
 			splpar_vpa[i][0xff] = 1;	/* Maintain Altivec */
 		}
 		mb();
 
 		/* Set up hypervisor CPU stuff */
 		chrp_smp_ap_init(plat);
 	}
 #endif
+	chrp_cpuref_init();
 
 	/* Some systems (e.g. QEMU) need Open Firmware to stand down */
 	ofw_quiesce();
 
 	return (0);
 }
 
 static int
 parse_drconf_memory(struct mem_region *ofmem, int *msz,
     struct mem_region *ofavail, int *asz)
 {
 	phandle_t phandle;
 	vm_offset_t base;
 	int i, idx, len, lasz, lmsz, res;
 	uint32_t flags, lmb_size[2];
 	uint32_t *dmem;
 
 	lmsz = *msz;
 	lasz = *asz;
 
 	phandle = OF_finddevice("/ibm,dynamic-reconfiguration-memory");
 	if (phandle == -1)
 		/* No drconf node, return. */
 		return (0);
 
 	res = OF_getencprop(phandle, "ibm,lmb-size", lmb_size,
 	    sizeof(lmb_size));
 	if (res == -1)
 		return (0);
 	printf("Logical Memory Block size: %d MB\n", lmb_size[1] >> 20);
 
 	/* Parse the /ibm,dynamic-memory.
 	   The first position gives the # of entries. The next two words
 	   reflect the address of the memory block. The next four words are
 	   the DRC index, reserved, list index and flags.
 	   (see PAPR C.6.6.2 ibm,dynamic-reconfiguration-memory)
 
 	    #el  Addr   DRC-idx  res   list-idx  flags
 	   -------------------------------------------------
 	   | 4 |   8   |   4   |   4   |   4   |   4   |....
 	   -------------------------------------------------
 	*/
 
 	len = OF_getproplen(phandle, "ibm,dynamic-memory");
 	if (len > 0) {
 
 		/* We have to use a variable length array on the stack
 		   since we have very limited stack space.
 		*/
 		cell_t arr[len/sizeof(cell_t)];
 
 		res = OF_getencprop(phandle, "ibm,dynamic-memory", arr,
 		    sizeof(arr));
 		if (res == -1)
 			return (0);
 
 		/* Number of elements */
 		idx = arr[0];
 
 		/* First address, in arr[1], arr[2]*/
 		dmem = &arr[1];
 
 		for (i = 0; i < idx; i++) {
 			base = ((uint64_t)dmem[0] << 32) + dmem[1];
 			dmem += 4;
 			flags = dmem[1];
 			/* Use region only if available and not reserved. */
 			if ((flags & 0x8) && !(flags & 0x80)) {
 				ofmem[lmsz].mr_start = base;
 				ofmem[lmsz].mr_size = (vm_size_t)lmb_size[1];
 				ofavail[lasz].mr_start = base;
 				ofavail[lasz].mr_size = (vm_size_t)lmb_size[1];
 				lmsz++;
 				lasz++;
 			}
 			dmem += 2;
 		}
 	}
 
 	*msz = lmsz;
 	*asz = lasz;
 
 	return (1);
 }
 
 void
 chrp_mem_regions(platform_t plat, struct mem_region *phys, int *physsz,
     struct mem_region *avail, int *availsz)
 {
 	vm_offset_t maxphysaddr;
 	int i;
 
 	ofw_mem_regions(phys, physsz, avail, availsz);
 	parse_drconf_memory(phys, physsz, avail, availsz);
 
 	/*
 	 * On some firmwares (SLOF), some memory may be marked available that
 	 * doesn't actually exist. This manifests as an extension of the last
 	 * available segment past the end of physical memory, so truncate that
 	 * one.
 	 */
 	maxphysaddr = 0;
 	for (i = 0; i < *physsz; i++)
 		if (phys[i].mr_start + phys[i].mr_size > maxphysaddr)
 			maxphysaddr = phys[i].mr_start + phys[i].mr_size;
 
 	for (i = 0; i < *availsz; i++)
 		if (avail[i].mr_start + avail[i].mr_size > maxphysaddr)
 			avail[i].mr_size = maxphysaddr - avail[i].mr_start;
 }
 
 static vm_offset_t
 chrp_real_maxaddr(platform_t plat)
 {
 	return (realmaxaddr);
 }
 
 static u_long
 chrp_timebase_freq(platform_t plat, struct cpuref *cpuref)
 {
 	phandle_t phandle;
 	int32_t ticks = -1;
 
 	phandle = cpuref->cr_hwref;
 
 	OF_getencprop(phandle, "timebase-frequency", &ticks, sizeof(ticks));
 
 	if (ticks <= 0)
 		panic("Unable to determine timebase frequency!");
 
 	return (ticks);
 }
 
 static int
 chrp_smp_first_cpu(platform_t plat, struct cpuref *cpuref)
 {
-	char buf[8];
-	phandle_t cpu, dev, root;
-	int res, cpuid;
 
-	root = OF_peer(0);
+	if (platform_cpuref_valid == 0)
+		return (EINVAL);
 
-	dev = OF_child(root);
-	while (dev != 0) {
-		res = OF_getprop(dev, "name", buf, sizeof(buf));
-		if (res > 0 && strcmp(buf, "cpus") == 0)
-			break;
-		dev = OF_peer(dev);
-	}
-	if (dev == 0) {
-		/*
-		 * psim doesn't have a name property on the /cpus node,
-		 * but it can be found directly
-		 */
-		dev = OF_finddevice("/cpus");
-		if (dev == 0)
-			return (ENOENT);
-	}
+	cpuref->cr_cpuid = 0;
+	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;
 
-	cpu = OF_child(dev);
-
-	while (cpu != 0) {
-		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
-		if (res > 0 && strcmp(buf, "cpu") == 0)
-			break;
-		cpu = OF_peer(cpu);
-	}
-	if (cpu == 0)
-		return (ENOENT);
-
-	cpuref->cr_hwref = cpu;
-	res = OF_getencprop(cpu, "ibm,ppc-interrupt-server#s", &cpuid,
-	    sizeof(cpuid));
-	if (res <= 0)
-		res = OF_getencprop(cpu, "reg", &cpuid, sizeof(cpuid));
-	if (res <= 0)
-		cpuid = 0;
-	cpuref->cr_cpuid = cpuid;
-
 	return (0);
 }
 
 static int
 chrp_smp_next_cpu(platform_t plat, struct cpuref *cpuref)
 {
-	char buf[8];
-	phandle_t cpu;
-	int i, res, cpuid;
+	int id;
 
-	/* Check for whether it should be the next thread */
-	res = OF_getproplen(cpuref->cr_hwref, "ibm,ppc-interrupt-server#s");
-	if (res > 0) {
-		cell_t interrupt_servers[res/sizeof(cell_t)];
-		OF_getencprop(cpuref->cr_hwref, "ibm,ppc-interrupt-server#s",
-		    interrupt_servers, res);
-		for (i = 0; i < res/sizeof(cell_t) - 1; i++) {
-			if (interrupt_servers[i] == cpuref->cr_cpuid) {
-				cpuref->cr_cpuid = interrupt_servers[i+1];
-				return (0);
-			}
-		}
-	}
+	if (platform_cpuref_valid == 0)
+		return (EINVAL);
 
-	/* Next CPU core/package */
-	cpu = OF_peer(cpuref->cr_hwref);
-	while (cpu != 0) {
-		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
-		if (res > 0 && strcmp(buf, "cpu") == 0)
-			break;
-		cpu = OF_peer(cpu);
-	}
-	if (cpu == 0)
+	id = cpuref->cr_cpuid + 1;
+	if (id >= platform_cpuref_cnt)
 		return (ENOENT);
 
-	cpuref->cr_hwref = cpu;
-	res = OF_getencprop(cpu, "ibm,ppc-interrupt-server#s", &cpuid,
-	    sizeof(cpuid));
-	if (res <= 0)
-		res = OF_getencprop(cpu, "reg", &cpuid, sizeof(cpuid));
-	if (res <= 0)
-		cpuid = 0;
-	cpuref->cr_cpuid = cpuid;
+	cpuref->cr_cpuid = platform_cpuref[id].cr_cpuid;
+	cpuref->cr_hwref = platform_cpuref[id].cr_hwref;
 
 	return (0);
 }
 
 static int
 chrp_smp_get_bsp(platform_t plat, struct cpuref *cpuref)
 {
-	ihandle_t inst;
-	phandle_t bsp, chosen;
-	int res, cpuid;
 
-	chosen = OF_finddevice("/chosen");
-	if (chosen == 0)
-		return (ENXIO);
+	cpuref->cr_cpuid = platform_cpuref[0].cr_cpuid;
+	cpuref->cr_hwref = platform_cpuref[0].cr_hwref;
+	return (0);
+}
 
-	res = OF_getencprop(chosen, "cpu", &inst, sizeof(inst));
-	if (res < 0)
-		return (ENXIO);
+static int
+chrp_cpuref_init(void)
+{
+	phandle_t cpu, dev;
+	char buf[32];
+	int a, res;
+	cell_t interrupt_servers[32];
+	uint64_t bsp;
 
-	bsp = OF_instance_to_package(inst);
+	if (platform_cpuref_valid)
+		return (0);
 
-	/* Pick the primary thread. Can it be any other? */
-	cpuref->cr_hwref = bsp;
-	res = OF_getencprop(bsp, "ibm,ppc-interrupt-server#s", &cpuid,
-	    sizeof(cpuid));
-	if (res <= 0)
-		res = OF_getencprop(bsp, "reg", &cpuid, sizeof(cpuid));
-	if (res <= 0)
-		cpuid = 0;
-	cpuref->cr_cpuid = cpuid;
+	dev = OF_peer(0);
+	dev = OF_child(dev);
+	while (dev != 0) {
+		res = OF_getprop(dev, "name", buf, sizeof(buf));
+		if (res > 0 && strcmp(buf, "cpus") == 0)
+			break;
+		dev = OF_peer(dev);
+	}
 
+	bsp = 0;
+	for (cpu = OF_child(dev); cpu != 0; cpu = OF_peer(cpu)) {
+		res = OF_getprop(cpu, "device_type", buf, sizeof(buf));
+		if (res > 0 && strcmp(buf, "cpu") == 0) {
+			res = OF_getproplen(cpu, "ibm,ppc-interrupt-server#s");
+			if (res > 0) {
+
+
+				OF_getencprop(cpu, "ibm,ppc-interrupt-server#s",
+				    interrupt_servers, res);
+
+				for (a = 0; a < res/sizeof(cell_t); a++) {
+					platform_cpuref[platform_cpuref_cnt].cr_hwref = interrupt_servers[a];
+					platform_cpuref[platform_cpuref_cnt].cr_cpuid = platform_cpuref_cnt;
+
+					platform_cpuref_cnt++;
+				}
+			}
+		}
+	}
+
+	platform_cpuref_valid = 1;
+
 	return (0);
 }
+
 #ifdef SMP
 static int
 chrp_smp_start_cpu(platform_t plat, struct pcpu *pc)
 {
 	cell_t start_cpu;
 	int result, err, timeout;
 
 	if (!rtas_exists()) {
 		printf("RTAS uninitialized: unable to start AP %d\n",
 		    pc->pc_cpuid);
 		return (ENXIO);
 	}
 
 	start_cpu = rtas_token_lookup("start-cpu");
 	if (start_cpu == -1) {
 		printf("RTAS unknown method: unable to start AP %d\n",
 		    pc->pc_cpuid);
 		return (ENXIO);
 	}
 
 	ap_pcpu = pc;
 	powerpc_sync();
 
-	result = rtas_call_method(start_cpu, 3, 1, pc->pc_cpuid, EXC_RST, pc,
+	result = rtas_call_method(start_cpu, 3, 1, pc->pc_hwref, EXC_RST, pc,
 	    &err);
 	if (result < 0 || err != 0) {
 		printf("RTAS error (%d/%d): unable to start AP %d\n",
 		    result, err, pc->pc_cpuid);
 		return (ENXIO);
 	}
 
 	timeout = 10000;
 	while (!pc->pc_awake && timeout--)
 		DELAY(100);
 
 	return ((pc->pc_awake) ? 0 : EBUSY);
 }
 
 static struct cpu_group *
 chrp_smp_topo(platform_t plat)
 {
 	struct pcpu *pc, *last_pc;
 	int i, ncores, ncpus;
 
 	ncores = ncpus = 0;
 	last_pc = NULL;
 	for (i = 0; i <= mp_maxid; i++) {
 		pc = pcpu_find(i);
 		if (pc == NULL)
 			continue;
 		if (last_pc == NULL || pc->pc_hwref != last_pc->pc_hwref)
 			ncores++;
 		last_pc = pc;
 		ncpus++;
 	}
 
 	if (ncpus % ncores != 0) {
 		printf("WARNING: Irregular SMP topology. Performance may be "
 		    "suboptimal (%d CPUS, %d cores)\n", ncpus, ncores);
 		return (smp_topo_none());
 	}
 
 	/* Don't do anything fancier for non-threaded SMP */
 	if (ncpus == ncores)
 		return (smp_topo_none());
 
 	return (smp_topo_1level(CG_SHARE_L1, ncpus / ncores, CG_FLAG_SMT));
 }
 #endif
 
 static void
 chrp_reset(platform_t platform)
 {
 	OF_reboot();
 }
 
 #ifdef __powerpc64__
 static void
 phyp_cpu_idle(sbintime_t sbt)
 {
 	register_t msr;
 
 	msr = mfmsr();
 
 	mtmsr(msr & ~PSL_EE);
 	if (sched_runnable()) {
 		mtmsr(msr);
 		return;
 	}
 
 	phyp_hcall(H_CEDE); /* Re-enables interrupts internally */
 	mtmsr(msr);
 }
 
 static void
 chrp_smp_ap_init(platform_t platform)
 {
 	if (!(mfmsr() & PSL_HV)) {
 		/* Register VPA */
-		phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(cpuid),
-		    splpar_vpa[PCPU_GET(cpuid)]);
+		phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(hwref),
+		    splpar_vpa[PCPU_GET(hwref)]);
 
 		/* Set interrupt priority */
 		phyp_hcall(H_CPPR, 0xff);
 	}
 }
 #else
 static void
 chrp_smp_ap_init(platform_t platform)
 {
 }
 #endif
Index: head/sys/powerpc/pseries/xics.c
===================================================================
--- head/sys/powerpc/pseries/xics.c	(revision 328533)
+++ head/sys/powerpc/pseries/xics.c	(revision 328534)
@@ -1,484 +1,492 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  *
  * Copyright 2011 Nathan Whitehorn
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include "opt_platform.h"
 
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 
 #ifdef POWERNV
 #include
 #endif
 
 #include "phyp-hvcall.h"
 #include "pic_if.h"
 
 #define XICP_PRIORITY	5	/* Random non-zero number */
 #define XICP_IPI	2
 #define MAX_XICP_IRQS	(1<<24)	/* 24-bit XIRR field */
 
 static int	xicp_probe(device_t);
 static int	xicp_attach(device_t);
 static int	xics_probe(device_t);
 static int	xics_attach(device_t);
 
 static void	xicp_bind(device_t dev, u_int irq, cpuset_t cpumask);
 static void	xicp_dispatch(device_t, struct trapframe *);
 static void	xicp_enable(device_t, u_int, u_int);
 static void	xicp_eoi(device_t, u_int);
 static void	xicp_ipi(device_t, u_int);
 static void	xicp_mask(device_t, u_int);
 static void	xicp_unmask(device_t, u_int);
 
 static device_method_t  xicp_methods[] = {
 	/* Device interface */
 	DEVMETHOD(device_probe,		xicp_probe),
 	DEVMETHOD(device_attach,	xicp_attach),
 
 	/* PIC interface */
 	DEVMETHOD(pic_bind,		xicp_bind),
 	DEVMETHOD(pic_dispatch,		xicp_dispatch),
 	DEVMETHOD(pic_enable,		xicp_enable),
 	DEVMETHOD(pic_eoi,		xicp_eoi),
 	DEVMETHOD(pic_ipi,		xicp_ipi),
 	DEVMETHOD(pic_mask,		xicp_mask),
 	DEVMETHOD(pic_unmask,		xicp_unmask),
 
 	DEVMETHOD_END
 };
 
 static device_method_t  xics_methods[] = {
 	/* Device interface */
 	DEVMETHOD(device_probe,		xics_probe),
 	DEVMETHOD(device_attach,	xics_attach),
 
 	DEVMETHOD_END
 };
 
 struct xicp_softc {
 	struct mtx sc_mtx;
 	struct resource *mem[MAXCPU];
 
 	int cpu_range[2];
 
 	int ibm_int_on;
 	int ibm_int_off;
 	int ibm_get_xive;
 	int ibm_set_xive;
 
 	/* XXX: inefficient -- hash table? tree? */
 	struct {
 		int irq;
 		int vector;
 		int cpu;
 	} intvecs[256];
 	int nintvecs;
 };
 
 static driver_t xicp_driver = {
 	"xicp",
 	xicp_methods,
 	sizeof(struct xicp_softc)
 };
 
 static driver_t xics_driver = {
 	"xics",
 	xics_methods,
 	0
 };
 
 static devclass_t xicp_devclass;
 static devclass_t xics_devclass;
 
 EARLY_DRIVER_MODULE(xicp, ofwbus, xicp_driver, xicp_devclass, 0, 0,
     BUS_PASS_INTERRUPT-1);
 EARLY_DRIVER_MODULE(xics, ofwbus, xics_driver, xics_devclass, 0, 0,
     BUS_PASS_INTERRUPT);
 
 #ifdef POWERNV
 static struct resource *
 xicp_mem_for_cpu(int cpu)
 {
 	device_t dev;
 	struct xicp_softc *sc;
 	int i;
 
 	for (i = 0; (dev = devclass_get_device(xicp_devclass, i)) != NULL; i++){
 		sc = device_get_softc(dev);
 		if (cpu >= sc->cpu_range[0] && cpu < sc->cpu_range[1])
 			return (sc->mem[cpu - sc->cpu_range[0]]);
 	}
 
 	return (NULL);
 }
 #endif
 
 static int
 xicp_probe(device_t dev)
 {
 
 	if (!ofw_bus_is_compatible(dev, "ibm,ppc-xicp"))
 		return (ENXIO);
 
 	device_set_desc(dev, "External Interrupt Presentation Controller");
 	return (BUS_PROBE_GENERIC);
 }
 
 static int
 xics_probe(device_t dev)
 {
 
 	if (!ofw_bus_is_compatible(dev, "ibm,ppc-xics"))
 		return (ENXIO);
 
 	device_set_desc(dev, "External Interrupt Source Controller");
 	return (BUS_PROBE_GENERIC);
 }
 
 static int
 xicp_attach(device_t dev)
 {
 	struct xicp_softc *sc = device_get_softc(dev);
 	phandle_t phandle = ofw_bus_get_node(dev);
 
 	if (rtas_exists()) {
 		sc->ibm_int_on = rtas_token_lookup("ibm,int-on");
 		sc->ibm_int_off = rtas_token_lookup("ibm,int-off");
 		sc->ibm_set_xive = rtas_token_lookup("ibm,set-xive");
 		sc->ibm_get_xive = rtas_token_lookup("ibm,get-xive");
 #ifdef POWERNV
 	} else if (opal_check() == 0) {
 		/* No init needed */
 #endif
 	} else {
 		device_printf(dev, "Cannot attach without RTAS or OPAL\n");
 		return (ENXIO);
 	}
 
 	if (OF_hasprop(phandle, "ibm,interrupt-server-ranges")) {
 		OF_getencprop(phandle, "ibm,interrupt-server-ranges",
 		    sc->cpu_range, sizeof(sc->cpu_range));
 		sc->cpu_range[1] += sc->cpu_range[0];
 		device_printf(dev, "Handling CPUs %d-%d\n", sc->cpu_range[0],
 		    sc->cpu_range[1]-1);
 	} else {
 		sc->cpu_range[0] = 0;
 		sc->cpu_range[1] = mp_ncpus;
 	}
 
 #ifdef POWERNV
 	if (mfmsr() & PSL_HV) {
 		int i;
 
 		for (i = 0; i < sc->cpu_range[1] - sc->cpu_range[0]; i++) {
 			sc->mem[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
 			    &i, RF_ACTIVE);
 			if (sc->mem[i] == NULL) {
 				device_printf(dev, "Could not alloc mem "
 				    "resource %d\n", i);
 				return (ENXIO);
 			}
 
 			/* Unmask interrupts on all cores */
 			bus_write_1(sc->mem[i], 4, 0xff);
 			bus_write_1(sc->mem[i], 12, 0xff);
 		}
 	}
 #endif
 
 	mtx_init(&sc->sc_mtx, "XICP", NULL, MTX_DEF);
 	sc->nintvecs = 0;
 
 	powerpc_register_pic(dev, OF_xref_from_node(phandle), MAX_XICP_IRQS,
 	    1 /* Number of IPIs */, FALSE);
 	root_pic = dev;
 
 	return (0);
 }
 
 static int
 xics_attach(device_t dev)
 {
 	phandle_t phandle = ofw_bus_get_node(dev);
 
 	/* The XICP (root PIC) will handle all our interrupts */
 	powerpc_register_pic(root_pic, OF_xref_from_node(phandle),
 	    MAX_XICP_IRQS, 1 /* Number of IPIs */, FALSE);
 
 	return (0);
 }
 
 /*
  * PIC I/F methods.
  */
 
 static void
 xicp_bind(device_t dev, u_int irq, cpuset_t cpumask)
 {
 	struct xicp_softc *sc = device_get_softc(dev);
 	cell_t status, cpu;
 	int ncpus, i, error;
 
+	/* Ignore IPIs */
+	if (irq == MAX_XICP_IRQS)
+		return;
+
 	/*
 	 * This doesn't appear to actually support affinity groups, so pick a
 	 * random CPU.
 	 */
 	ncpus = 0;
 	CPU_FOREACH(cpu)
 		if (CPU_ISSET(cpu, &cpumask))
 			ncpus++;
 
 	i = mftb() % ncpus;
 	ncpus = 0;
 	CPU_FOREACH(cpu) {
 		if (!CPU_ISSET(cpu, &cpumask))
 			continue;
 		if (ncpus == i)
 			break;
 		ncpus++;
 	}
 
+	cpu = pcpu_find(cpu)->pc_hwref;
+
 	/* XXX: super inefficient */
 	for (i = 0; i < sc->nintvecs; i++) {
 		if (sc->intvecs[i].irq == irq) {
 			sc->intvecs[i].cpu = cpu;
 			break;
 		}
 	}
 	KASSERT(i < sc->nintvecs, ("Binding non-configured interrupt"));
 
 	if (rtas_exists())
 		error = rtas_call_method(sc->ibm_set_xive, 3, 1, irq, cpu,
 		    XICP_PRIORITY, &status);
 #ifdef POWERNV
 	else
 		error = opal_call(OPAL_SET_XIVE, irq, cpu << 2, XICP_PRIORITY);
 #endif
 
 	if (error < 0)
 		panic("Cannot bind interrupt %d to CPU %d", irq, cpu);
 }
 
 static void
 xicp_dispatch(device_t dev, struct trapframe *tf)
 {
 	struct xicp_softc *sc;
 	struct resource *regs = NULL;
 	uint64_t xirr, junk;
 	int i;
 
 #ifdef POWERNV
 	if (mfmsr() & PSL_HV) {
-		regs = xicp_mem_for_cpu(PCPU_GET(cpuid));
+		regs = xicp_mem_for_cpu(PCPU_GET(hwref));
 		KASSERT(regs != NULL,
-		    ("Can't find regs for CPU %d", PCPU_GET(cpuid)));
+		    ("Can't find regs for CPU %ld", (uintptr_t)PCPU_GET(hwref)));
 	}
 #endif
 
 	sc = device_get_softc(dev);
 	for (;;) {
 		/* Return value in R4, use the PFT call */
 		if (regs) {
 			xirr = bus_read_4(regs, 4);
 		} else {
 			/* Return value in R4, use the PFT call */
 			phyp_pft_hcall(H_XIRR, 0, 0, 0, 0, &xirr, &junk, &junk);
 		}
 		xirr &= 0x00ffffff;
 
 		if (xirr == 0) { /* No more pending interrupts? */
 			if (regs)
 				bus_write_1(regs, 4, 0xff);
 			else
 				phyp_hcall(H_CPPR, (uint64_t)0xff);
 			break;
 		}
 		if (xirr == XICP_IPI) {		/* Magic number for IPIs */
 			xirr = MAX_XICP_IRQS;	/* Map to FreeBSD magic */
 
 			/* Clear IPI */
 			if (regs)
 				bus_write_1(regs, 12, 0xff);
 			else
-				phyp_hcall(H_IPI, (uint64_t)(PCPU_GET(cpuid)),
+				phyp_hcall(H_IPI, (uint64_t)(PCPU_GET(hwref)),
 				    0xff);
 		}
 
 		/* XXX: super inefficient */
 		for (i = 0; i < sc->nintvecs; i++) {
 			if (sc->intvecs[i].irq == xirr)
 				break;
 		}
 
 		KASSERT(i < sc->nintvecs, ("Unmapped XIRR"));
 		powerpc_dispatch_intr(sc->intvecs[i].vector, tf);
 	}
 }
 
 static void
 xicp_enable(device_t dev, u_int irq, u_int vector)
 {
 	struct xicp_softc *sc;
 	cell_t status, cpu;
 
 	sc = device_get_softc(dev);
 
 	KASSERT(sc->nintvecs + 1 < nitems(sc->intvecs),
 	    ("Too many XICP interrupts"));
 
 	/* Bind to this CPU to start: distrib. ID is last entry in gserver# */
-	cpu = PCPU_GET(cpuid);
+	cpu = PCPU_GET(hwref);
 
 	mtx_lock(&sc->sc_mtx);
 	sc->intvecs[sc->nintvecs].irq = irq;
 	sc->intvecs[sc->nintvecs].vector = vector;
 	sc->intvecs[sc->nintvecs].cpu = cpu;
 	mb();
 	sc->nintvecs++;
 	mtx_unlock(&sc->sc_mtx);
 
 	/* IPIs are also enabled */
 	if (irq == MAX_XICP_IRQS)
 		return;
 
 	if (rtas_exists()) {
 		rtas_call_method(sc->ibm_set_xive, 3, 1, irq, cpu,
 		    XICP_PRIORITY, &status);
 		xicp_unmask(dev, irq);
 #ifdef POWERNV
 	} else {
 		status = opal_call(OPAL_SET_XIVE, irq, cpu << 2,
 		    XICP_PRIORITY);
 		/* Unmask implicit for OPAL */
 
 		if (status != 0)
 			panic("OPAL_SET_XIVE IRQ %d -> cpu %d failed: %d", irq,
 			    cpu, status);
 #endif
 	}
 }
 
 static void
 xicp_eoi(device_t dev, u_int irq)
 {
 	uint64_t xirr;
 
 	if (irq == MAX_XICP_IRQS) /* Remap IPI interrupt to internal value */
 		irq = XICP_IPI;
 	xirr = irq | (XICP_PRIORITY << 24);
 
 #ifdef POWERNV
 	if (mfmsr() & PSL_HV)
-		bus_write_4(xicp_mem_for_cpu(PCPU_GET(cpuid)), 4, xirr);
+		bus_write_4(xicp_mem_for_cpu(PCPU_GET(hwref)), 4, xirr);
 	else
 #endif
 		phyp_hcall(H_EOI, xirr);
 }
 
 static void
 xicp_ipi(device_t dev, u_int cpu)
 {
 
 #ifdef POWERNV
+	cpu = pcpu_find(cpu)->pc_hwref;
+
 	if (mfmsr() & PSL_HV)
 		bus_write_1(xicp_mem_for_cpu(cpu), 12, XICP_PRIORITY);
 	else
 #endif
 		phyp_hcall(H_IPI, (uint64_t)cpu, XICP_PRIORITY);
 }
 
 static void
 xicp_mask(device_t dev, u_int irq)
 {
 	struct xicp_softc *sc = device_get_softc(dev);
 	cell_t status;
 
 	if (irq == MAX_XICP_IRQS)
 		return;
 
 	if (rtas_exists()) {
 		rtas_call_method(sc->ibm_int_off, 1, 1, irq, &status);
 #ifdef POWERNV
 	} else {
 		int i;
 
 		for (i = 0; i < sc->nintvecs; i++) {
 			if (sc->intvecs[i].irq == irq) {
 				break;
 			}
 		}
 		KASSERT(i < sc->nintvecs, ("Masking unconfigured interrupt"));
 		opal_call(OPAL_SET_XIVE, irq, sc->intvecs[i].cpu << 2, 0xff);
 #endif
 	}
 }
 
 static void
 xicp_unmask(device_t dev, u_int irq)
 {
 	struct xicp_softc *sc = device_get_softc(dev);
 	cell_t status;
 
 	if (irq == MAX_XICP_IRQS)
 		return;
 
 	if (rtas_exists()) {
 		rtas_call_method(sc->ibm_int_on, 1, 1, irq, &status);
 #ifdef POWERNV
 	} else {
 		int i;
 
 		for (i = 0; i < sc->nintvecs; i++) {
 			if (sc->intvecs[i].irq == irq) {
 				break;
 			}
 		}
 		KASSERT(i < sc->nintvecs, ("Unmasking unconfigured interrupt"));
 		opal_call(OPAL_SET_XIVE, irq, sc->intvecs[i].cpu << 2,
 		    XICP_PRIORITY);
 #endif
 	}
 }